summaryrefslogtreecommitdiffstats
path: root/dom/media/webrtc/libwebrtcglue
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /dom/media/webrtc/libwebrtcglue
parentInitial commit. (diff)
downloadfirefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media/webrtc/libwebrtcglue')
-rw-r--r--dom/media/webrtc/libwebrtcglue/AudioConduit.cpp1192
-rw-r--r--dom/media/webrtc/libwebrtcglue/AudioConduit.h382
-rw-r--r--dom/media/webrtc/libwebrtcglue/CodecConfig.h214
-rw-r--r--dom/media/webrtc/libwebrtcglue/CodecStatistics.cpp167
-rw-r--r--dom/media/webrtc/libwebrtcglue/CodecStatistics.h102
-rw-r--r--dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp18
-rw-r--r--dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h19
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.cpp41
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.h31
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaConduitErrors.h46
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaConduitInterface.h618
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp67
-rw-r--r--dom/media/webrtc/libwebrtcglue/MediaDataCodec.h31
-rw-r--r--dom/media/webrtc/libwebrtcglue/RtcpEventObserver.h20
-rw-r--r--dom/media/webrtc/libwebrtcglue/RtpPacketQueue.h60
-rw-r--r--dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h20
-rw-r--r--dom/media/webrtc/libwebrtcglue/RtpSourceObserver.cpp197
-rw-r--r--dom/media/webrtc/libwebrtcglue/RtpSourceObserver.h180
-rw-r--r--dom/media/webrtc/libwebrtcglue/RunningStat.h48
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoConduit.cpp2524
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoConduit.h687
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp261
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoStreamFactory.h75
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoTypes.h60
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp993
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h492
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h54
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.cpp1291
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.h153
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp192
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.h74
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp338
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h71
-rw-r--r--dom/media/webrtc/libwebrtcglue/moz.build37
34 files changed, 10755 insertions, 0 deletions
diff --git a/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
new file mode 100644
index 0000000000..5ee8bba599
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
@@ -0,0 +1,1192 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "common/browser_logging/CSFLog.h"
+#include "nspr.h"
+
+#ifdef HAVE_NETINET_IN_H
+# include <netinet/in.h>
+#elif defined XP_WIN
+# include <winsock2.h>
+#endif
+
+#include "AudioConduit.h"
+#include "nsCOMPtr.h"
+#include "mozilla/media/MediaUtils.h"
+#include "nsServiceManagerUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Telemetry.h"
+#include "transport/runnable_utils.h"
+
+#include "pk11pub.h"
+
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_encoder_factory.h"
+
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "webrtc/voice_engine/include/voe_errors.h"
+#include "webrtc/voice_engine/voice_engine_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+# include "AndroidBridge.h"
+#endif
+
+namespace mozilla {
+
+static const char* acLogTag = "WebrtcAudioSessionConduit";
+#ifdef LOGTAG
+# undef LOGTAG
+#endif
+#define LOGTAG acLogTag
+
+// 32 bytes is what WebRTC CodecInst expects
+const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;
+
+using LocalDirection = MediaSessionConduitLocalDirection;
+/**
+ * Factory Method for AudioConduit
+ */
+RefPtr<AudioSessionConduit> AudioSessionConduit::Create(
+    RefPtr<WebRtcCallWrapper> aCall,
+    nsCOMPtr<nsISerialEventTarget> aStsThread) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // A conduit whose Init() fails is never handed out: destroy it here and
+  // signal the failure to the caller with nullptr.
+  WebrtcAudioConduit* obj = new WebrtcAudioConduit(aCall, aStsThread);
+  if (obj->Init() != kMediaConduitNoError) {
+    CSFLogError(LOGTAG, "%s AudioConduit Init Failed ", __FUNCTION__);
+    delete obj;
+    return nullptr;
+  }
+  CSFLogDebug(LOGTAG, "%s Successfully created AudioConduit ", __FUNCTION__);
+  return obj;
+}
+
+/**
+ * Destruction defines for our super-classes
+ */
+WebrtcAudioConduit::~WebrtcAudioConduit() {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // Tear down the send/recv streams and the voice-engine channels while
+  // holding the conduit mutex so no stats getter races the destruction.
+  MutexAutoLock lock(mMutex);
+  DeleteSendStream();
+  DeleteRecvStream();
+
+  DeleteChannels();
+
+  // We don't Terminate() the VoEBase here, because the Call (owned by
+  // PeerConnectionMedia) actually owns the (shared) VoEBase/VoiceEngine
+  // here
+  mPtrVoEBase = nullptr;
+}
+
+// Sets the (single) local send SSRC. The same value is mirrored into the
+// receive-stream config as the local ssrc, and the send stream is recreated
+// if one already exists so the new SSRC takes effect.
+bool WebrtcAudioConduit::SetLocalSSRCs(const std::vector<uint32_t>& aSSRCs,
+                                       const std::vector<uint32_t>& aRtxSSRCs) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(aSSRCs.size() == 1,
+             "WebrtcAudioConduit::SetLocalSSRCs accepts exactly 1 ssrc.");
+
+  // We ignore aRtxSSRCs, it is only used in the VideoConduit.
+  if (aSSRCs.empty()) {
+    return false;
+  }
+
+  // Special case: the local SSRCs are the same - do nothing.
+  if (mSendStreamConfig.rtp.ssrc == aSSRCs[0]) {
+    return true;
+  }
+  // Update the value of the ssrcs in the config structure.
+  mRecvStreamConfig.rtp.local_ssrc = aSSRCs[0];
+  mSendStreamConfig.rtp.ssrc = aSSRCs[0];
+
+  mRecvChannelProxy->SetLocalSSRC(aSSRCs[0]);
+
+  return RecreateSendStreamIfExists();
+}
+
+// Returns the single local SSRC (as a one-element vector) under the mutex.
+std::vector<uint32_t> WebrtcAudioConduit::GetLocalSSRCs() {
+  MutexAutoLock lock(mMutex);
+  return std::vector<uint32_t>(1, mRecvStreamConfig.rtp.local_ssrc);
+}
+
+// Sets the expected remote SSRC; recreates the recv stream if one exists so
+// the change takes effect. rtxSsrc is ignored for audio.
+bool WebrtcAudioConduit::SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // We ignore aRtxSsrc, it is only used in the VideoConduit.
+  if (mRecvStreamConfig.rtp.remote_ssrc == ssrc) {
+    return true;
+  }
+  mRecvStreamConfig.rtp.remote_ssrc = ssrc;
+
+  return RecreateRecvStreamIfExists();
+}
+
+// Reads the remote SSRC from the live receive-stream stats. Returns false
+// when no receive stream exists yet.
+bool WebrtcAudioConduit::GetRemoteSSRC(uint32_t* ssrc) {
+  {
+    MutexAutoLock lock(mMutex);
+    if (!mRecvStream) {
+      return false;
+    }
+
+    const webrtc::AudioReceiveStream::Stats& stats = mRecvStream->GetStats();
+    *ssrc = stats.remote_ssrc;
+  }
+
+  return true;
+}
+
+// Forwards the RTCP CNAME to the send channel proxy. Always reports success.
+bool WebrtcAudioConduit::SetLocalCNAME(const char* cname) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mSendChannelProxy->SetRTCP_CNAME(cname);
+  return true;
+}
+
+// Forwards the local MID (BUNDLE identification tag) to the send channel.
+bool WebrtcAudioConduit::SetLocalMID(const std::string& mid) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mSendChannelProxy->SetLocalMID(mid.c_str());
+  return true;
+}
+
+// Only records the sync group in the stored recv-stream config; no live
+// stream is touched here.
+void WebrtcAudioConduit::SetSyncGroup(const std::string& group) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mRecvStreamConfig.sync_group = group;
+}
+
+// RTCP packet-type counters for the send side; false if no send stream.
+bool WebrtcAudioConduit::GetSendPacketTypeStats(
+    webrtc::RtcpPacketTypeCounter* aPacketCounts) {
+  ASSERT_ON_THREAD(mStsThread);
+  MutexAutoLock lock(mMutex);
+  if (!mSendStream) {
+    return false;
+  }
+  return mSendChannelProxy->GetRTCPPacketTypeCounters(*aPacketCounts);
+}
+
+// RTCP packet-type counters for the receive side; false unless receiving.
+// NOTE(review): this gates on mEngineReceiving while the send-side getter
+// gates on mSendStream — presumably intentional, but worth confirming.
+bool WebrtcAudioConduit::GetRecvPacketTypeStats(
+    webrtc::RtcpPacketTypeCounter* aPacketCounts) {
+  ASSERT_ON_THREAD(mStsThread);
+  MutexAutoLock lock(mMutex);
+  if (!mEngineReceiving) {
+    return false;
+  }
+  return mRecvChannelProxy->GetRTCPPacketTypeCounters(*aPacketCounts);
+}
+
+// Jitter and cumulative loss from the receive-stream stats. Out-params are
+// zeroed first so callers see 0/0 on failure (no recv stream).
+bool WebrtcAudioConduit::GetRTPReceiverStats(unsigned int* jitterMs,
+                                             unsigned int* cumulativeLost) {
+  ASSERT_ON_THREAD(mStsThread);
+  *jitterMs = 0;
+  *cumulativeLost = 0;
+  MutexAutoLock lock(mMutex);
+  if (!mRecvStream) {
+    return false;
+  }
+  auto stats = mRecvStream->GetStats();
+  *jitterMs = stats.jitter_ms;
+  *cumulativeLost = stats.packets_lost;
+  return true;
+}
+
+// Fills in receiver-report stats from the send channel, and maintains a
+// cached RTT (mRttSec): a bogus/over-large RTT keeps the previous value, a
+// negative RTT after a previous success clears the cache.
+bool WebrtcAudioConduit::GetRTCPReceiverReport(uint32_t* jitterMs,
+                                               uint32_t* packetsReceived,
+                                               uint64_t* bytesReceived,
+                                               uint32_t* cumulativeLost,
+                                               Maybe<double>* aOutRttSec) {
+  ASSERT_ON_THREAD(mStsThread);
+  double fractionLost = 0.0;
+  int64_t timestampTmp = 0;
+  int64_t rttMsTmp = 0;
+  bool res = false;
+  MutexAutoLock lock(mMutex);
+  if (mSendChannelProxy) {
+    res = mSendChannelProxy->GetRTCPReceiverStatistics(
+        &timestampTmp, jitterMs, cumulativeLost, packetsReceived, bytesReceived,
+        &fractionLost, &rttMsTmp);
+  }
+
+  const auto stats = mCall->Call()->GetStats();
+  const auto rtt = stats.rtt_ms;
+  if (rtt > static_cast<decltype(stats.rtt_ms)>(INT32_MAX)) {
+    // If we get a bogus RTT we will keep using the previous RTT
+#ifdef DEBUG
+    CSFLogError(LOGTAG,
+                "%s for AudioConduit:%p RTT is larger than the"
+                " maximum size of an RTCP RTT.",
+                __FUNCTION__, this);
+#endif
+  } else {
+    if (mRttSec && rtt < 0) {
+      CSFLogError(LOGTAG,
+                  "%s for AudioConduit:%p RTT returned an error after "
+                  " previously succeeding.",
+                  __FUNCTION__, this);
+      mRttSec = Nothing();
+    }
+    if (rtt >= 0) {
+      // Cache RTT in seconds for subsequent calls.
+      mRttSec = Some(static_cast<DOMHighResTimeStamp>(rtt) / 1000.0);
+    }
+  }
+  *aOutRttSec = mRttSec;
+  return res;
+}
+
+// Sender-report counters from the recv channel's RTCP statistics. Succeeds
+// only once at least one packet/byte has been reported.
+bool WebrtcAudioConduit::GetRTCPSenderReport(
+    unsigned int* packetsSent, uint64_t* bytesSent,
+    DOMHighResTimeStamp* aRemoteTimestamp) {
+  ASSERT_ON_THREAD(mStsThread);
+  MutexAutoLock lock(mMutex);
+  if (!mRecvChannelProxy) {
+    return false;
+  }
+
+  webrtc::CallStatistics stats = mRecvChannelProxy->GetRTCPStatistics();
+  *packetsSent = stats.rtcp_sender_packets_sent;
+  *bytesSent = stats.rtcp_sender_octets_sent;
+  *aRemoteTimestamp = stats.rtcp_sender_ntp_timestamp.ToMs();
+  return *packetsSent > 0 && *bytesSent > 0;
+}
+
+// Snapshot of the Call-level bandwidth estimator, converted bits -> bytes
+// for the *Bps fields. RTT is omitted when the estimator reports < 0.
+Maybe<mozilla::dom::RTCBandwidthEstimationInternal>
+WebrtcAudioConduit::GetBandwidthEstimation() {
+  ASSERT_ON_THREAD(mStsThread);
+
+  const auto& stats = mCall->Call()->GetStats();
+  dom::RTCBandwidthEstimationInternal bw;
+  bw.mSendBandwidthBps.Construct(stats.send_bandwidth_bps / 8);
+  bw.mMaxPaddingBps.Construct(stats.max_padding_bitrate_bps / 8);
+  bw.mReceiveBandwidthBps.Construct(stats.recv_bandwidth_bps / 8);
+  bw.mPacerDelayMs.Construct(stats.pacer_delay_ms);
+  if (stats.rtt_ms >= 0) {
+    bw.mRttMs.Construct(stats.rtt_ms);
+  }
+  return Some(std::move(bw));
+}
+// Configures the telephone-event (DTMF) payload type/frequency on the send
+// channel; logs and returns false on failure.
+bool WebrtcAudioConduit::SetDtmfPayloadType(unsigned char type, int freq) {
+  CSFLogInfo(LOGTAG, "%s : setting dtmf payload %d", __FUNCTION__, (int)type);
+  MOZ_ASSERT(NS_IsMainThread());
+
+  bool result = mSendChannelProxy->SetSendTelephoneEventPayloadType(type, freq);
+  if (!result) {
+    CSFLogError(LOGTAG,
+                "%s Failed call to SetSendTelephoneEventPayloadType(%u, %d)",
+                __FUNCTION__, type, freq);
+  }
+  return result;
+}
+
+// Sends an out-of-band (RFC 4733) DTMF tone. In-band requests, a missing
+// send channel, or DTMF not negotiated all fail. attenuationDb and channel
+// are accepted for interface compatibility but not used here.
+bool WebrtcAudioConduit::InsertDTMFTone(int channel, int eventCode,
+                                        bool outOfBand, int lengthMs,
+                                        int attenuationDb) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (!mSendChannelProxy || !mDtmfEnabled || !outOfBand) {
+    return false;
+  }
+
+  return mSendChannelProxy->SendTelephoneEventOutband(eventCode, lengthMs);
+}
+
+// Per-packet hook: records SSRC/CSRC source info for getContributingSources.
+// aTimestamp is unused here; the observer takes its own time.
+void WebrtcAudioConduit::OnRtpPacket(const webrtc::RTPHeader& aHeader,
+                                     const int64_t aTimestamp,
+                                     const uint32_t aJitter) {
+  ASSERT_ON_THREAD(mStsThread);
+  mRtpSourceObserver->OnRtpPacket(aHeader, aJitter);
+}
+
+// RTCP BYE arrives off-main-thread; bounce the observer notification to main
+// because mRtcpEventObserver is only touched on main thread.
+void WebrtcAudioConduit::OnRtcpBye() {
+  RefPtr<WebrtcAudioConduit> self = this;
+  NS_DispatchToMainThread(media::NewRunnableFrom([self]() mutable {
+    MOZ_ASSERT(NS_IsMainThread());
+    if (self->mRtcpEventObserver) {
+      self->mRtcpEventObserver->OnRtcpBye();
+    }
+    return NS_OK;
+  }));
+}
+
+// Same main-thread bounce as OnRtcpBye, for the RTCP timeout event.
+void WebrtcAudioConduit::OnRtcpTimeout() {
+  RefPtr<WebrtcAudioConduit> self = this;
+  NS_DispatchToMainThread(media::NewRunnableFrom([self]() mutable {
+    MOZ_ASSERT(NS_IsMainThread());
+    if (self->mRtcpEventObserver) {
+      self->mRtcpEventObserver->OnRtcpTimeout();
+    }
+    return NS_OK;
+  }));
+}
+
+// Raw (non-owning) observer pointer; main-thread only.
+void WebrtcAudioConduit::SetRtcpEventObserver(
+    mozilla::RtcpEventObserver* observer) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mRtcpEventObserver = observer;
+}
+
+// Copies the observed SSRC/CSRC source entries into outSources.
+void WebrtcAudioConduit::GetRtpSources(
+    nsTArray<dom::RTCRtpSourceEntry>& outSources) {
+  MOZ_ASSERT(NS_IsMainThread());
+  return mRtpSourceObserver->GetRtpSources(outSources);
+}
+
+// test-only: inserts a CSRC entry in a RtpSourceObserver's history for
+// getContributingSources mochitests
+void InsertAudioLevelForContributingSource(RtpSourceObserver& observer,
+                                           const uint32_t aCsrcSource,
+                                           const int64_t aTimestamp,
+                                           const uint32_t aRtpTimestamp,
+                                           const bool aHasAudioLevel,
+                                           const uint8_t aAudioLevel) {
+  using EntryType = dom::RTCRtpSourceEntryType;
+  auto key = RtpSourceObserver::GetKey(aCsrcSource, EntryType::Contributing);
+  auto& hist = observer.mRtpSources[key];
+  // aTimestamp is used for both the wall-clock and "jitter-adjusted" slots.
+  hist.Insert(aTimestamp, aTimestamp, aRtpTimestamp, aHasAudioLevel,
+              aAudioLevel);
+}
+
+// test-only: member wrapper around the free helper above.
+void WebrtcAudioConduit::InsertAudioLevelForContributingSource(
+    const uint32_t aCsrcSource, const int64_t aTimestamp,
+    const uint32_t aRtpTimestamp, const bool aHasAudioLevel,
+    const uint8_t aAudioLevel) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mozilla::InsertAudioLevelForContributingSource(
+      *mRtpSourceObserver, aCsrcSource, aTimestamp, aRtpTimestamp,
+      aHasAudioLevel, aAudioLevel);
+}
+
+/*
+ * WebRTCAudioConduit Implementation
+ */
+// One-time init: acquires the VoEBase interface from the shared voice engine
+// and creates the send/recv channels. Called only from Create().
+MediaConduitErrorCode WebrtcAudioConduit::Init() {
+  CSFLogDebug(LOGTAG, "%s this=%p", __FUNCTION__, this);
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (!(mPtrVoEBase = webrtc::VoEBase::GetInterface(GetVoiceEngine()))) {
+    CSFLogError(LOGTAG, "%s Unable to initialize VoEBase", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  CreateChannels();
+
+  CSFLogDebug(LOGTAG, "%s AudioSessionConduit Initialization Done (%p)",
+              __FUNCTION__, this);
+  return kMediaConduitNoError;
+}
+
+// AudioSessionConduit Implementation
+// Stores the outbound (RTP) transport under the transport monitor.
+MediaConduitErrorCode WebrtcAudioConduit::SetTransmitterTransport(
+    RefPtr<TransportInterface> aTransport) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  // set the transport
+  mTransmitterTransport = aTransport;
+  return kMediaConduitNoError;
+}
+
+// Stores the inbound-side (RTCP feedback) transport under the monitor.
+MediaConduitErrorCode WebrtcAudioConduit::SetReceiverTransport(
+    RefPtr<TransportInterface> aTransport) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  // set the transport
+  mReceiverTransport = aTransport;
+  return kMediaConduitNoError;
+}
+
+// Validates and applies a send codec: stops transmission, translates the
+// config into the webrtc send-stream config, and records whether DTMF was
+// negotiated. The caller restarts transmission separately.
+MediaConduitErrorCode WebrtcAudioConduit::ConfigureSendMediaCodec(
+    const AudioCodecConfig* codecConfig) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MediaConduitErrorCode condError = kMediaConduitNoError;
+
+  {
+    // validate codec param
+    if ((condError = ValidateCodecConfig(codecConfig, true)) !=
+        kMediaConduitNoError) {
+      return condError;
+    }
+  }
+
+  // New codec cannot be applied while transmitting.
+  condError = StopTransmitting();
+  if (condError != kMediaConduitNoError) {
+    return condError;
+  }
+
+  if (!CodecConfigToWebRTCCodec(codecConfig, mSendStreamConfig)) {
+    CSFLogError(LOGTAG, "%s CodecConfig to WebRTC Codec Failed ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Remember whether DTMF may be sent (checked in InsertDTMFTone).
+  mDtmfEnabled = codecConfig->mDtmfEnabled;
+
+  return kMediaConduitNoError;
+}
+
+// Validates and installs the full receive codec list: builds a payload-type
+// -> SdpAudioFormat decoder map, then recreates and restarts the receive
+// stream. Fails without side effects if any codec in the list is invalid.
+MediaConduitErrorCode WebrtcAudioConduit::ConfigureRecvMediaCodecs(
+    const std::vector<UniquePtr<AudioCodecConfig>>& codecConfigList) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  MediaConduitErrorCode condError = kMediaConduitNoError;
+  bool success = false;
+
+  // Are we receiving already? If so, stop receiving and playout
+  // since we can't apply new recv codec when the engine is playing.
+  condError = StopReceiving();
+  if (condError != kMediaConduitNoError) {
+    return condError;
+  }
+
+  if (codecConfigList.empty()) {
+    CSFLogError(LOGTAG, "%s Zero number of codecs to configure", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Try Applying the codecs in the list.
+  // We succeed if at least one codec was applied and reception was
+  // started successfully.
+  mRecvStreamConfig.decoder_factory = mCall->mDecoderFactory;
+  mRecvStreamConfig.decoder_map.clear();
+  for (const auto& codec : codecConfigList) {
+    // if the codec param is invalid or duplicate, return error
+    if ((condError = ValidateCodecConfig(codec.get(), false)) !=
+        kMediaConduitNoError) {
+      return condError;
+    }
+
+    // Translate Opus-specific settings into SDP fmtp-style parameters for
+    // the decoder factory. Mirrors CodecConfigToWebRTCCodec (send side).
+    webrtc::SdpAudioFormat::Parameters parameters;
+    if (codec->mName == "opus") {
+      if (codec->mChannels == 2) {
+        parameters["stereo"] = "1";
+      }
+      if (codec->mFECEnabled) {
+        parameters["useinbandfec"] = "1";
+      }
+      if (codec->mDTXEnabled) {
+        parameters["usedtx"] = "1";
+      }
+      if (codec->mMaxPlaybackRate) {
+        parameters["maxplaybackrate"] = std::to_string(codec->mMaxPlaybackRate);
+      }
+      if (codec->mMaxAverageBitrate) {
+        parameters["maxaveragebitrate"] =
+            std::to_string(codec->mMaxAverageBitrate);
+      }
+      if (codec->mFrameSizeMs) {
+        parameters["ptime"] = std::to_string(codec->mFrameSizeMs);
+      }
+      if (codec->mMinFrameSizeMs) {
+        parameters["minptime"] = std::to_string(codec->mMinFrameSizeMs);
+      }
+      if (codec->mMaxFrameSizeMs) {
+        parameters["maxptime"] = std::to_string(codec->mMaxFrameSizeMs);
+      }
+      if (codec->mCbrEnabled) {
+        parameters["cbr"] = "1";
+      }
+    }
+
+    webrtc::SdpAudioFormat format(codec->mName, codec->mFreq, codec->mChannels,
+                                  parameters);
+    mRecvStreamConfig.decoder_map.emplace(codec->mType, format);
+
+    mRecvStreamConfig.voe_channel_id = mRecvChannel;
+    success = true;
+  }  // end for
+
+  mRecvSSRC = mRecvStreamConfig.rtp.remote_ssrc;
+
+  if (!success) {
+    CSFLogError(LOGTAG, "%s Setting Receive Codec Failed ", __FUNCTION__);
+    return kMediaConduitInvalidReceiveCodec;
+  }
+
+  // If we are here, at least one codec should have been set
+  {
+    MutexAutoLock lock(mMutex);
+    // Force the recv stream to be rebuilt with the new decoder map.
+    DeleteRecvStream();
+    condError = StartReceivingLocked();
+    if (condError != kMediaConduitNoError) {
+      return condError;
+    }
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Filters the negotiated RTP header extensions to the ones audio supports
+// (ssrc-audio-level, csrc-audio-level recv-only, MID send-only), pushes the
+// extension ids to the channel proxies, and recreates the affected stream
+// when the filtered set actually changed.
+MediaConduitErrorCode WebrtcAudioConduit::SetLocalRTPExtensions(
+    LocalDirection aDirection, const RtpExtList& extensions) {
+  MOZ_ASSERT(NS_IsMainThread());
+  CSFLogDebug(LOGTAG, "%s direction: %s", __FUNCTION__,
+              MediaSessionConduit::LocalDirectionToString(aDirection).c_str());
+
+  bool isSend = aDirection == LocalDirection::kSend;
+  RtpExtList filteredExtensions;
+
+  // -1 means "extension not negotiated" below.
+  int ssrcAudioLevelId = -1;
+  int csrcAudioLevelId = -1;
+  int midId = -1;
+
+  for (const auto& extension : extensions) {
+    // ssrc-audio-level RTP header extension
+    if (extension.uri == webrtc::RtpExtension::kAudioLevelUri) {
+      ssrcAudioLevelId = extension.id;
+      filteredExtensions.push_back(
+          webrtc::RtpExtension(extension.uri, extension.id));
+    }
+
+    // csrc-audio-level RTP header extension
+    if (extension.uri == webrtc::RtpExtension::kCsrcAudioLevelUri) {
+      if (isSend) {
+        CSFLogError(LOGTAG,
+                    "%s SetSendAudioLevelIndicationStatus Failed"
+                    " can not send CSRC audio levels.",
+                    __FUNCTION__);
+        return kMediaConduitMalformedArgument;
+      }
+      csrcAudioLevelId = extension.id;
+      filteredExtensions.push_back(
+          webrtc::RtpExtension(extension.uri, extension.id));
+    }
+
+    // MID RTP header extension
+    if (extension.uri == webrtc::RtpExtension::kMIdUri) {
+      if (!isSend) {
+        // TODO(bug 1405495): Why do we error out for csrc-audio-level, but not
+        // mid?
+        continue;
+      }
+      midId = extension.id;
+      filteredExtensions.push_back(
+          webrtc::RtpExtension(extension.uri, extension.id));
+    }
+  }
+
+  auto& currentExtensions = isSend ? mSendStreamConfig.rtp.extensions
+                                   : mRecvStreamConfig.rtp.extensions;
+  // Nothing changed -> avoid a needless stream recreation.
+  if (filteredExtensions == currentExtensions) {
+    return kMediaConduitNoError;
+  }
+
+  currentExtensions = filteredExtensions;
+
+  if (isSend) {
+    mSendChannelProxy->SetSendAudioLevelIndicationStatus(ssrcAudioLevelId != -1,
+                                                         ssrcAudioLevelId);
+    mSendChannelProxy->SetSendMIDStatus(midId != -1, midId);
+  } else {
+    mRecvChannelProxy->SetReceiveAudioLevelIndicationStatus(
+        ssrcAudioLevelId != -1, ssrcAudioLevelId);
+    mRecvChannelProxy->SetReceiveCsrcAudioLevelIndicationStatus(
+        csrcAudioLevelId != -1, csrcAudioLevelId);
+    // TODO(bug 1405495): recv mid support
+  }
+
+  if (isSend) {
+    RecreateSendStreamIfExists();
+  } else {
+    RecreateRecvStreamIfExists();
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Pushes one 10ms-multiple block of captured samples into the voice engine
+// for encoding. Rejects null data, unsupported rates, lengths that are not a
+// whole number of 10ms blocks, and negative capture delay.
+MediaConduitErrorCode WebrtcAudioConduit::SendAudioFrame(
+    const int16_t audio_data[],
+    int32_t lengthSamples, // per channel
+    int32_t samplingFreqHz, uint32_t channels, int32_t capture_delay) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  // Following checks need to be performed
+  // 1. Non null audio buffer pointer,
+  // 2. invalid sampling frequency - less than 0 or unsupported ones
+  // 3. Appropriate Sample Length for 10 ms audio-frame. This represents
+  // block size the VoiceEngine feeds into encoder for passed in audio-frame
+  // Ex: for 16000 sampling rate , valid block-length is 160
+  // Similarly for 32000 sampling rate, valid block length is 320
+  // We do the check by the verify modular operator below to be zero
+
+  if (!audio_data || (lengthSamples <= 0) ||
+      (IsSamplingFreqSupported(samplingFreqHz) == false) ||
+      ((lengthSamples % (samplingFreqHz / 100) != 0))) {
+    CSFLogError(LOGTAG, "%s Invalid Parameters ", __FUNCTION__);
+    MOZ_ASSERT(PR_FALSE);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // validate capture time
+  if (capture_delay < 0) {
+    CSFLogError(LOGTAG, "%s Invalid Capture Delay ", __FUNCTION__);
+    MOZ_ASSERT(PR_FALSE);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // if transmission is not started .. conduit cannot insert frames
+  if (!mEngineTransmitting) {
+    CSFLogError(LOGTAG, "%s Engine not transmitting ", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  // Insert the samples
+  mPtrVoEBase->audio_transport()->PushCaptureData(
+      mSendChannel, audio_data,
+      sizeof(audio_data[0]) * 8,  // bits
+      samplingFreqHz, channels, lengthSamples);
+  // we should be good here
+  return kMediaConduitNoError;
+}
+
+// Pulls one decoded 10ms audio frame from the receive channel into
+// speechData. On entry lengthSamples is the caller's buffer capacity
+// (total samples across channels); on success it is overwritten with the
+// actual number of samples copied and numChannels with the channel count.
+MediaConduitErrorCode WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
+                                                        int32_t samplingFreqHz,
+                                                        int32_t capture_delay,
+                                                        size_t& numChannels,
+                                                        size_t& lengthSamples) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  // validate params
+  if (!speechData) {
+    CSFLogError(LOGTAG, "%s Null Audio Buffer Pointer", __FUNCTION__);
+    MOZ_ASSERT(PR_FALSE);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Validate sample length
+  if (GetNum10msSamplesForFrequency(samplingFreqHz) == 0) {
+    CSFLogError(LOGTAG, "%s Invalid Sampling Frequency ", __FUNCTION__);
+    MOZ_ASSERT(PR_FALSE);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // validate capture time
+  if (capture_delay < 0) {
+    CSFLogError(LOGTAG, "%s Invalid Capture Delay ", __FUNCTION__);
+    MOZ_ASSERT(PR_FALSE);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Conduit should have reception enabled before we ask for decoded
+  // samples
+  if (!mEngineReceiving) {
+    CSFLogError(LOGTAG, "%s Engine not Receiving ", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  size_t lengthSamplesAllowed = lengthSamples;
+  lengthSamples = 0;  // output parameter
+
+  // Decode into the member frame buffer (reused across calls).
+  mRecvChannelProxy->GetAudioFrameWithInfo(samplingFreqHz, &mAudioFrame);
+  numChannels = mAudioFrame.num_channels_;
+
+  if (numChannels == 0) {
+    CSFLogError(LOGTAG, "%s Audio frame has zero channels", __FUNCTION__);
+    return kMediaConduitPlayoutError;
+  }
+
+  // XXX Annoying, have to copy to our buffers -- refactor?
+  lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
+  MOZ_RELEASE_ASSERT(lengthSamples <= lengthSamplesAllowed);
+  PodCopy(speechData, mAudioFrame.data(), lengthSamples);
+
+  CSFLogDebug(LOGTAG, "%s GetAudioFrame:Got samples: length %zu ", __FUNCTION__,
+              lengthSamples);
+  return kMediaConduitNoError;
+}
+
+// Transport Layer Callbacks
+// Delivers an incoming RTP packet. If the packet's SSRC differs from the
+// current one, packets are queued while the SSRC switch round-trips through
+// main thread (SetRemoteSSRC must run there); the queue is flushed back on
+// this thread once the switch completes.
+MediaConduitErrorCode WebrtcAudioConduit::ReceivedRTPPacket(
+    const void* data, int len, webrtc::RTPHeader& header) {
+  ASSERT_ON_THREAD(mStsThread);
+
+  // Handle the unknown ssrc (and ssrc-not-signaled case).
+  // We can't just do this here; it has to happen on MainThread :-(
+  // We also don't want to drop the packet, nor stall this thread, so we hold
+  // the packet (and any following) for inserting once the SSRC is set.
+
+  // capture packet for insertion after ssrc is set -- do this before
+  // sending the runnable, since it may pull from this. Since it
+  // dispatches back to us, it's less critical to do this here, but doesn't
+  // hurt.
+  if (mRtpPacketQueue.IsQueueActive()) {
+    mRtpPacketQueue.Enqueue(data, len);
+    return kMediaConduitNoError;
+  }
+
+  if (mRecvSSRC != header.ssrc) {
+    // a new switch needs to be done
+    // any queued packets are from a previous switch that hasn't completed
+    // yet; drop them and only process the latest SSRC
+    mRtpPacketQueue.Clear();
+    mRtpPacketQueue.Enqueue(data, len);
+
+    CSFLogDebug(LOGTAG, "%s: switching from SSRC %u to %u", __FUNCTION__,
+                static_cast<uint32_t>(mRecvSSRC), header.ssrc);
+
+    // we "switch" here immediately, but buffer until the queue is released
+    mRecvSSRC = header.ssrc;
+
+    // Ensure lambda captures refs
+    RefPtr<WebrtcAudioConduit> self = this;
+    nsCOMPtr<nsIThread> thread;
+    if (NS_WARN_IF(NS_FAILED(NS_GetCurrentThread(getter_AddRefs(thread))))) {
+      return kMediaConduitRTPProcessingFailed;
+    }
+    NS_DispatchToMainThread(
+        media::NewRunnableFrom([self, thread, ssrc = header.ssrc]() mutable {
+          self->SetRemoteSSRC(ssrc, 0);
+          // We want to unblock the queued packets on the original thread
+          thread->Dispatch(media::NewRunnableFrom([self, ssrc]() mutable {
+            if (ssrc == self->mRecvSSRC) {
+              // SSRC is set; insert queued packets
+              self->mRtpPacketQueue.DequeueAll(self);
+            }
+            // else this is an intermediate switch; another is
+            // in-flight
+            return NS_OK;
+          }),
+                           NS_DISPATCH_NORMAL);
+          return NS_OK;
+        }));
+    return kMediaConduitNoError;
+  }
+
+  // Log seq# and SSRC read straight out of the raw RTP header bytes.
+  CSFLogVerbose(LOGTAG, "%s: seq# %u, Len %d, SSRC %u (0x%x) ", __FUNCTION__,
+                (uint16_t)ntohs(((uint16_t*)data)[1]), len,
+                (uint32_t)ntohl(((uint32_t*)data)[2]),
+                (uint32_t)ntohl(((uint32_t*)data)[2]));
+
+  if (DeliverPacket(data, len) != kMediaConduitNoError) {
+    CSFLogError(LOGTAG, "%s RTP Processing Failed", __FUNCTION__);
+    return kMediaConduitRTPProcessingFailed;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Delivers an incoming RTCP packet and records its arrival time for
+// LastRtcpReceived().
+MediaConduitErrorCode WebrtcAudioConduit::ReceivedRTCPPacket(const void* data,
+                                                             int len) {
+  CSFLogDebug(LOGTAG, "%s : channel %d", __FUNCTION__, mRecvChannel);
+  ASSERT_ON_THREAD(mStsThread);
+
+  if (DeliverPacket(data, len) != kMediaConduitNoError) {
+    CSFLogError(LOGTAG, "%s RTCP Processing Failed", __FUNCTION__);
+    return kMediaConduitRTPProcessingFailed;
+  }
+
+  // TODO(bug 1496533): We will need to keep separate timestamps for each SSRC,
+  // and for each SSRC we will need to keep a timestamp for SR and RR.
+  mLastRtcpReceived = Some(GetNow());
+  return kMediaConduitNoError;
+}
+
+// TODO(bug 1496533): We will need to add a type (ie; SR or RR) param here, or
+// perhaps break this function into two functions, one for each type.
+Maybe<DOMHighResTimeStamp> WebrtcAudioConduit::LastRtcpReceived() const {
+  ASSERT_ON_THREAD(mStsThread);
+  return mLastRtcpReceived;
+}
+
+// The four public start/stop entry points are thin main-thread wrappers
+// that take the conduit mutex and delegate to the *Locked implementations.
+MediaConduitErrorCode WebrtcAudioConduit::StopTransmitting() {
+  MOZ_ASSERT(NS_IsMainThread());
+  MutexAutoLock lock(mMutex);
+
+  return StopTransmittingLocked();
+}
+
+MediaConduitErrorCode WebrtcAudioConduit::StartTransmitting() {
+  MOZ_ASSERT(NS_IsMainThread());
+  MutexAutoLock lock(mMutex);
+
+  return StartTransmittingLocked();
+}
+
+MediaConduitErrorCode WebrtcAudioConduit::StopReceiving() {
+  MOZ_ASSERT(NS_IsMainThread());
+  MutexAutoLock lock(mMutex);
+
+  return StopReceivingLocked();
+}
+
+MediaConduitErrorCode WebrtcAudioConduit::StartReceiving() {
+  MOZ_ASSERT(NS_IsMainThread());
+  MutexAutoLock lock(mMutex);
+
+  return StartReceivingLocked();
+}
+
+// Stops the send stream if currently transmitting; idempotent otherwise.
+MediaConduitErrorCode WebrtcAudioConduit::StopTransmittingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineTransmitting) {
+    MOZ_ASSERT(mSendStream);
+    CSFLogDebug(LOGTAG, "%s Engine Already Sending. Attempting to Stop ",
+                __FUNCTION__);
+    mSendStream->Stop();
+    mEngineTransmitting = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Lazily creates the send stream, marks the audio channel network-up, and
+// starts sending. No-op if already transmitting.
+MediaConduitErrorCode WebrtcAudioConduit::StartTransmittingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineTransmitting) {
+    return kMediaConduitNoError;
+  }
+
+  if (!mSendStream) {
+    CreateSendStream();
+  }
+
+  mCall->Call()->SignalChannelNetworkState(webrtc::MediaType::AUDIO,
+                                           webrtc::kNetworkUp);
+  mSendStream->Start();
+  mEngineTransmitting = true;
+
+  return kMediaConduitNoError;
+}
+
+// Stops the receive stream if currently receiving; idempotent otherwise.
+MediaConduitErrorCode WebrtcAudioConduit::StopReceivingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineReceiving) {
+    MOZ_ASSERT(mRecvStream);
+    mRecvStream->Stop();
+    mEngineReceiving = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Lazily creates the receive stream, marks the audio channel network-up,
+// and starts receiving. No-op if already receiving.
+MediaConduitErrorCode WebrtcAudioConduit::StartReceivingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineReceiving) {
+    return kMediaConduitNoError;
+  }
+
+  if (!mRecvStream) {
+    CreateRecvStream();
+  }
+
+  mCall->Call()->SignalChannelNetworkState(webrtc::MediaType::AUDIO,
+                                           webrtc::kNetworkUp);
+  mRecvStream->Start();
+  mEngineReceiving = true;
+
+  return kMediaConduitNoError;
+}
+
+// WebRTC::RTP Callback Implementation
+// Called on AudioGUM or MTG thread
+// webrtc.org Transport callback: ship an outgoing RTP packet through the
+// transmitter transport, reporting send time back to the Call for pacing
+// when the packet carries a transport sequence id.
+bool WebrtcAudioConduit::SendRtp(const uint8_t* data, size_t len,
+                                 const webrtc::PacketOptions& options) {
+  CSFLogDebug(LOGTAG, "%s: len %lu", __FUNCTION__, (unsigned long)len);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if (mTransmitterTransport &&
+      (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK)) {
+    CSFLogDebug(LOGTAG, "%s Sent RTP Packet ", __FUNCTION__);
+    if (options.packet_id >= 0) {
+      int64_t now_ms = PR_Now() / 1000;
+      mCall->Call()->OnSentPacket({options.packet_id, now_ms});
+    }
+    return true;
+  }
+  CSFLogError(LOGTAG, "%s RTP Packet Send Failed ", __FUNCTION__);
+  return false;
+}
+
+// Called on WebRTC Process thread and perhaps others
+// Ship an outgoing RTCP packet; tries the receiver transport first, then
+// falls back to the transmitter transport (sender reports).
+bool WebrtcAudioConduit::SendRtcp(const uint8_t* data, size_t len) {
+  CSFLogDebug(LOGTAG, "%s : len %lu, first rtcp = %u ", __FUNCTION__,
+              (unsigned long)len, static_cast<unsigned>(data[1]));
+
+  // We come here if we have only one pipeline/conduit setup,
+  // such as for unidirectional streams.
+  // We also end up here if we are receiving
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if (mReceiverTransport &&
+      mReceiverTransport->SendRtcpPacket(data, len) == NS_OK) {
+    // Might be a sender report, might be a receiver report, we don't know.
+    CSFLogDebug(LOGTAG, "%s Sent RTCP Packet ", __FUNCTION__);
+    return true;
+  }
+  if (mTransmitterTransport &&
+      (mTransmitterTransport->SendRtcpPacket(data, len) == NS_OK)) {
+    CSFLogDebug(LOGTAG, "%s Sent RTCP Packet (sender report) ", __FUNCTION__);
+    return true;
+  }
+  CSFLogError(LOGTAG, "%s RTCP Packet Send Failed ", __FUNCTION__);
+  return false;
+}
+
+/**
+ * Converts between CodecConfig to WebRTC Codec Structure.
+ * Installs the builtin encoder factory and translates the codec settings
+ * (Opus options become SDP fmtp-style parameters, mirroring the receive
+ * side in ConfigureRecvMediaCodecs) into config.send_codec_spec.
+ * Always returns true.
+ */
+
+bool WebrtcAudioConduit::CodecConfigToWebRTCCodec(
+    const AudioCodecConfig* codecInfo,
+    webrtc::AudioSendStream::Config& config) {
+  config.encoder_factory = webrtc::CreateBuiltinAudioEncoderFactory();
+
+  webrtc::SdpAudioFormat::Parameters parameters;
+  if (codecInfo->mName == "opus") {
+    if (codecInfo->mChannels == 2) {
+      parameters["stereo"] = "1";
+    }
+    if (codecInfo->mFECEnabled) {
+      parameters["useinbandfec"] = "1";
+    }
+    if (codecInfo->mDTXEnabled) {
+      parameters["usedtx"] = "1";
+    }
+    if (codecInfo->mMaxPlaybackRate) {
+      parameters["maxplaybackrate"] =
+          std::to_string(codecInfo->mMaxPlaybackRate);
+    }
+    if (codecInfo->mMaxAverageBitrate) {
+      parameters["maxaveragebitrate"] =
+          std::to_string(codecInfo->mMaxAverageBitrate);
+    }
+    if (codecInfo->mFrameSizeMs) {
+      parameters["ptime"] = std::to_string(codecInfo->mFrameSizeMs);
+    }
+    if (codecInfo->mMinFrameSizeMs) {
+      parameters["minptime"] = std::to_string(codecInfo->mMinFrameSizeMs);
+    }
+    if (codecInfo->mMaxFrameSizeMs) {
+      parameters["maxptime"] = std::to_string(codecInfo->mMaxFrameSizeMs);
+    }
+    if (codecInfo->mCbrEnabled) {
+      parameters["cbr"] = "1";
+    }
+  }
+
+  webrtc::SdpAudioFormat format(codecInfo->mName, codecInfo->mFreq,
+                                codecInfo->mChannels, parameters);
+  webrtc::AudioSendStream::Config::SendCodecSpec spec(codecInfo->mType, format);
+  config.send_codec_spec = spec;
+
+  return true;
+}
+
/**
 * Supported Sampling Frequencies.
 * True iff GetNum10msSamplesForFrequency() knows the rate (16/32/44.1/48 kHz).
 */
bool WebrtcAudioConduit::IsSamplingFreqSupported(int freq) const {
  return GetNum10msSamplesForFrequency(freq) != 0;
}
+
+/* Return block-length of 10 ms audio frame in number of samples */
+unsigned int WebrtcAudioConduit::GetNum10msSamplesForFrequency(
+ int samplingFreqHz) const {
+ switch (samplingFreqHz) {
+ case 16000:
+ return 160; // 160 samples
+ case 32000:
+ return 320; // 320 samples
+ case 44100:
+ return 441; // 441 samples
+ case 48000:
+ return 480; // 480 samples
+ default:
+ return 0; // invalid or unsupported
+ }
+}
+
+/**
+ * Perform validation on the codecConfig to be applied.
+ * Verifies if the codec is already applied.
+ */
+MediaConduitErrorCode WebrtcAudioConduit::ValidateCodecConfig(
+ const AudioCodecConfig* codecInfo, bool send) {
+ if (!codecInfo) {
+ CSFLogError(LOGTAG, "%s Null CodecConfig ", __FUNCTION__);
+ return kMediaConduitMalformedArgument;
+ }
+
+ if ((codecInfo->mName.empty()) ||
+ (codecInfo->mName.length() >= CODEC_PLNAME_SIZE)) {
+ CSFLogError(LOGTAG, "%s Invalid Payload Name Length ", __FUNCTION__);
+ return kMediaConduitMalformedArgument;
+ }
+
+ // Only mono or stereo channels supported
+ if ((codecInfo->mChannels != 1) && (codecInfo->mChannels != 2)) {
+ CSFLogError(LOGTAG, "%s Channel Unsupported ", __FUNCTION__);
+ return kMediaConduitMalformedArgument;
+ }
+
+ return kMediaConduitNoError;
+}
+
// Stops and destroys the send stream, if any. Main thread only; caller must
// hold mMutex. Safe to call when no stream exists.
void WebrtcAudioConduit::DeleteSendStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();
  if (mSendStream) {
    mSendStream->Stop();
    mEngineTransmitting = false;
    mCall->Call()->DestroyAudioSendStream(mSendStream);
    mSendStream = nullptr;
  }
  // Destroying the stream unregisters the transport
  mSendChannelProxy->RegisterTransport(nullptr);
}
+
// Creates the send stream from mSendStreamConfig. Main thread only; caller
// must hold mMutex. Does not start the stream.
MediaConduitErrorCode WebrtcAudioConduit::CreateSendStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  mSendStream = mCall->Call()->CreateAudioSendStream(mSendStreamConfig);
  if (!mSendStream) {
    return kMediaConduitUnknownError;
  }

  return kMediaConduitNoError;
}
+
// Stops and destroys the receive stream, if any. Main thread only; caller
// must hold mMutex. Safe to call when no stream exists.
void WebrtcAudioConduit::DeleteRecvStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();
  if (mRecvStream) {
    mRecvStream->Stop();
    mEngineReceiving = false;
    mCall->Call()->DestroyAudioReceiveStream(mRecvStream);
    mRecvStream = nullptr;
  }
  // Destroying the stream unregisters the transport
  mRecvChannelProxy->RegisterTransport(nullptr);
}
+
// Creates the receive stream from mRecvStreamConfig. Main thread only;
// caller must hold mMutex. The conduit itself serves as the stream's RTCP
// send transport. Does not start the stream.
MediaConduitErrorCode WebrtcAudioConduit::CreateRecvStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  mRecvStreamConfig.rtcp_send_transport = this;
  mRecvStream = mCall->Call()->CreateAudioReceiveStream(mRecvStreamConfig);
  if (!mRecvStream) {
    return kMediaConduitUnknownError;
  }

  return kMediaConduitNoError;
}
+
// Tears down the send stream and, when one previously existed, rebuilds it
// so that mSendStreamConfig changes take effect, restoring the prior
// transmitting state. Returns false if the rebuild failed.
bool WebrtcAudioConduit::RecreateSendStreamIfExists() {
  MutexAutoLock lock(mMutex);
  // Snapshot state before DeleteSendStream() clears it.
  bool wasTransmitting = mEngineTransmitting;
  bool hadSendStream = mSendStream;
  DeleteSendStream();

  if (wasTransmitting) {
    if (StartTransmittingLocked() != kMediaConduitNoError) {
      return false;
    }
  } else if (hadSendStream) {
    // Stream existed but was not started; recreate it in the same state.
    if (CreateSendStream() != kMediaConduitNoError) {
      return false;
    }
  }
  return true;
}
+
// Tears down the receive stream and, when one previously existed, rebuilds
// it so that mRecvStreamConfig changes take effect, restoring the prior
// receiving state. Returns false if the rebuild failed.
bool WebrtcAudioConduit::RecreateRecvStreamIfExists() {
  MutexAutoLock lock(mMutex);
  // Snapshot state before DeleteRecvStream() clears it.
  bool wasReceiving = mEngineReceiving;
  bool hadRecvStream = mRecvStream;
  DeleteRecvStream();

  if (wasReceiving) {
    if (StartReceivingLocked() != kMediaConduitNoError) {
      return false;
    }
  } else if (hadRecvStream) {
    // Stream existed but was not started; recreate it in the same state.
    if (CreateRecvStream() != kMediaConduitNoError) {
      return false;
    }
  }
  return true;
}
+
// Forwards a received packet to webrtc::Call's packet receiver, which
// demuxes it to the appropriate audio stream.
MediaConduitErrorCode WebrtcAudioConduit::DeliverPacket(const void* data,
                                                        int len) {
  // Bug 1499796 - we need to get passed the time the packet was received
  webrtc::PacketReceiver::DeliveryStatus status =
      mCall->Call()->Receiver()->DeliverPacket(
          webrtc::MediaType::AUDIO, static_cast<const uint8_t*>(data), len,
          webrtc::PacketTime());

  if (status != webrtc::PacketReceiver::DELIVERY_OK) {
    CSFLogError(LOGTAG, "%s DeliverPacket Failed, %d", __FUNCTION__, status);
    return kMediaConduitRTPProcessingFailed;
  }

  return kMediaConduitNoError;
}
+
+MediaConduitErrorCode WebrtcAudioConduit::CreateChannels() {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if ((mRecvChannel = mPtrVoEBase->CreateChannel()) == -1) {
+ CSFLogError(LOGTAG, "%s VoiceEngine Channel creation failed", __FUNCTION__);
+ return kMediaConduitChannelError;
+ }
+ mRecvStreamConfig.voe_channel_id = mRecvChannel;
+
+ if ((mSendChannel = mPtrVoEBase->CreateChannel()) == -1) {
+ CSFLogError(LOGTAG, "%s VoiceEngine Channel creation failed", __FUNCTION__);
+ return kMediaConduitChannelError;
+ }
+ mSendStreamConfig.voe_channel_id = mSendChannel;
+
+ webrtc::VoiceEngineImpl* vei;
+ vei = static_cast<webrtc::VoiceEngineImpl*>(GetVoiceEngine());
+ mRecvChannelProxy = vei->GetChannelProxy(mRecvChannel);
+ if (!mRecvChannelProxy) {
+ CSFLogError(LOGTAG, "%s VoiceEngine Send ChannelProxy creation failed",
+ __FUNCTION__);
+ return kMediaConduitChannelError;
+ }
+
+ mRecvChannelProxy->SetRtpPacketObserver(this);
+ mRecvChannelProxy->SetRtcpEventObserver(this);
+ mRecvChannelProxy->RegisterTransport(this);
+
+ mSendChannelProxy = vei->GetChannelProxy(mSendChannel);
+ if (!mSendChannelProxy) {
+ CSFLogError(LOGTAG, "%s VoiceEngine ChannelProxy creation failed",
+ __FUNCTION__);
+ return kMediaConduitChannelError;
+ }
+ mSendChannelProxy->SetRtpPacketObserver(this);
+ mSendChannelProxy->RegisterTransport(this);
+
+ return kMediaConduitNoError;
+}
+
// Releases the proxies and deletes both VoiceEngine channels. Main thread
// only; caller must hold mMutex. Idempotent: -1 marks a channel as gone.
void WebrtcAudioConduit::DeleteChannels() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  if (mSendChannel != -1) {
    mSendChannelProxy = nullptr;
    mPtrVoEBase->DeleteChannel(mSendChannel);
    mSendChannel = -1;
  }

  if (mRecvChannel != -1) {
    // Drop our RTCP observer registration before the channel goes away.
    mRecvChannelProxy->SetRtcpEventObserver(nullptr);
    mRecvChannelProxy = nullptr;
    mPtrVoEBase->DeleteChannel(mRecvChannel);
    mRecvChannel = -1;
  }
}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/AudioConduit.h b/dom/media/webrtc/libwebrtcglue/AudioConduit.h
new file mode 100644
index 0000000000..b3673c6cee
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/AudioConduit.h
@@ -0,0 +1,382 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AUDIO_SESSION_H_
+#define AUDIO_SESSION_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
+
+#include "MediaConduitInterface.h"
+#include "common/MediaEngineWrapper.h"
+#include "RtpSourceObserver.h"
+#include "RtpPacketQueue.h"
+
+// Audio Engine Includes
+#include "webrtc/common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h"
+#include "webrtc/modules/audio_device/include/fake_audio_device.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/channel_proxy.h"
+
+/** This file hosts several structures identifying different aspects
+ * of a RTP Session.
+ */
+namespace mozilla {
+// Helper function
+
+DOMHighResTimeStamp NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow);
+
+/**
+ * Concrete class for Audio session. Hooks up
+ * - media-source and target to external transport
+ */
class WebrtcAudioConduit : public AudioSessionConduit,
                           public webrtc::Transport,
                           public webrtc::RtcpEventObserver,
                           public webrtc::RtpPacketObserver {
 public:
  // VoiceEngine defined constant for Payload Name Size.
  static const unsigned int CODEC_PLNAME_SIZE;

  /**
   * APIs used by the registered external transport to this Conduit to
   * feed in received RTP Frames to the VoiceEngine for decoding
   */
  MediaConduitErrorCode ReceivedRTPPacket(const void* data, int len,
                                          webrtc::RTPHeader& header) override;

  /**
   * APIs used by the registered external transport to this Conduit to
   * feed in received RTCP Frames to the VoiceEngine for decoding
   */
  MediaConduitErrorCode ReceivedRTCPPacket(const void* data, int len) override;
  Maybe<DOMHighResTimeStamp> LastRtcpReceived() const override;
  // Current timestamp, delegated to the shared call wrapper.
  DOMHighResTimeStamp GetNow() const override { return mCall->GetNow(); }

  MediaConduitErrorCode StopTransmitting() override;
  MediaConduitErrorCode StartTransmitting() override;
  MediaConduitErrorCode StopReceiving() override;
  MediaConduitErrorCode StartReceiving() override;

  // Variants of the above that require the caller to already hold mMutex.
  MediaConduitErrorCode StopTransmittingLocked();
  MediaConduitErrorCode StartTransmittingLocked();
  MediaConduitErrorCode StopReceivingLocked();
  MediaConduitErrorCode StartReceivingLocked();

  /**
   * Function to configure send codec for the audio session
   * @param sendSessionConfig: CodecConfiguration
   * @result: On Success, the audio engine is configured with passed in codec
   * for send On failure, audio engine transmit functionality is disabled. NOTE:
   * This API can be invoked multiple time. Invoking this API may involve
   * restarting transmission sub-system on the engine.
   */
  MediaConduitErrorCode ConfigureSendMediaCodec(
      const AudioCodecConfig* codecConfig) override;
  /**
   * Function to configure list of receive codecs for the audio session
   * @param sendSessionConfig: CodecConfiguration
   * @result: On Success, the audio engine is configured with passed in codec
   * for send Also the playout is enabled. On failure, audio engine transmit
   * functionality is disabled. NOTE: This API can be invoked multiple time.
   * Invoking this API may involve restarting transmission sub-system on the
   * engine.
   */
  MediaConduitErrorCode ConfigureRecvMediaCodecs(
      const std::vector<UniquePtr<AudioCodecConfig>>& codecConfigList) override;

  MediaConduitErrorCode SetLocalRTPExtensions(
      MediaSessionConduitLocalDirection aDirection,
      const RtpExtList& extensions) override;

  /**
   * Register External Transport to this Conduit. RTP and RTCP frames from the
   * VoiceEngine shall be passed to the registered transport for transporting
   * externally.
   */
  MediaConduitErrorCode SetTransmitterTransport(
      RefPtr<TransportInterface> aTransport) override;

  MediaConduitErrorCode SetReceiverTransport(
      RefPtr<TransportInterface> aTransport) override;

  /**
   * Function to deliver externally captured audio sample for encoding and
   * transport
   * @param audioData [in]: Pointer to array containing a frame of audio
   * @param lengthSamples [in]: Length of audio frame in samples in multiple of
   *                            10 milliseconds
   *                            Ex: Frame length is 160, 320, 441 for 16, 32,
   *                            44.1 kHz sampling rates respectively.
   *                            audioData[] should be of lengthSamples in
   *                            size say, for 16kz sampling rate,
   *                            audioData[] should contain 160 samples of
   *                            16-bits each for a 10ms audio frame.
   * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz
   *                             ( 16000, 32000 ...)
   * @param capture_delay [in]: Approx Delay from recording until it is
   *                            delivered to VoiceEngine in milliseconds.
   * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can
   * be invoked. This ensures the inserted audio-samples can be transmitted by
   * the conduit
   */
  MediaConduitErrorCode SendAudioFrame(const int16_t speechData[],
                                       int32_t lengthSamples,
                                       int32_t samplingFreqHz,
                                       uint32_t channels,
                                       int32_t capture_time) override;

  /**
   * Function to grab a decoded audio-sample from the media engine for
   * rendering / playout of length 10 milliseconds.
   *
   * @param speechData [out]: Pointer to an array to which a 10ms frame of
   *                          audio will be copied
   * @param samplingFreqHz [in]: Frequency of the sampling for playback in
   *                             Hertz (16000, 32000,..)
   * @param capture_delay [in]: Estimated Time between reading of the samples
   *                            to rendering/playback
   * @param lengthSamples [in]: Contain maximum length of speechData array.
   * @param numChannels [out]: Number of channels in the audio frame,
   *                           guaranteed to be non-zero.
   * @param lengthSamples [out]: Will contain length of the audio frame in
   *                             samples at return.
   *                             Ex: A value of 160 implies 160 samples each of
   *                             16-bits was copied into speechData
   * NOTE: This function should be invoked every 10 milliseconds for the best
   * performance
   * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can
   * be invoked
   * This ensures the decoded samples are ready for reading and playout is
   * enabled.
   */
  MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
                                      int32_t samplingFreqHz,
                                      int32_t capture_delay,
                                      size_t& numChannels,
                                      size_t& lengthSamples) override;

  /**
   * Webrtc transport implementation to send and receive RTP packet.
   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
   */
  bool SendRtp(const uint8_t* data, size_t len,
               const webrtc::PacketOptions& options) override;

  /**
   * Webrtc transport implementation to send and receive RTCP packet.
   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
   */
  bool SendRtcp(const uint8_t* data, size_t len) override;

  // Audio has no external codec plugin; always 0.
  uint64_t CodecPluginID() override { return 0; }
  void SetPCHandle(const std::string& aPCHandle) override {}
  MediaConduitErrorCode DeliverPacket(const void* data, int len) override;

  // No-op for audio.
  void DeleteStreams() override {}

  WebrtcAudioConduit(RefPtr<WebRtcCallWrapper> aCall,
                     nsCOMPtr<nsISerialEventTarget> aStsThread)
      : mTransportMonitor("WebrtcAudioConduit"),
        mTransmitterTransport(nullptr),
        mReceiverTransport(nullptr),
        mCall(aCall),
        mRecvStreamConfig(),
        mRecvStream(nullptr),
        mSendStreamConfig(
            this)  // 'this' is stored but not dereferenced in the constructor.
        ,
        mSendStream(nullptr),
        mRecvSSRC(0),
        mEngineTransmitting(false),
        mEngineReceiving(false),
        mRecvChannel(-1),
        mSendChannel(-1),
        mDtmfEnabled(false),
        mMutex("WebrtcAudioConduit::mMutex"),
        mRtpSourceObserver(new RtpSourceObserver(mCall->GetTimestampMaker())),
        mStsThread(aStsThread) {}

  virtual ~WebrtcAudioConduit();

  virtual MediaConduitErrorCode Init();

  int GetRecvChannel() { return mRecvChannel; }
  webrtc::VoiceEngine* GetVoiceEngine() {
    return mCall->Call()->voice_engine();
  }

  /* Set Local SSRC list.
   * Note: Until the refactor of the VoE into the call API is complete
   * this list should contain only a single ssrc.
   */
  bool SetLocalSSRCs(const std::vector<uint32_t>& aSSRCs,
                     const std::vector<uint32_t>& aRtxSSRCs) override;
  std::vector<uint32_t> GetLocalSSRCs() override;
  bool SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) override;
  bool UnsetRemoteSSRC(uint32_t ssrc) override { return true; }
  bool GetRemoteSSRC(uint32_t* ssrc) override;
  bool SetLocalCNAME(const char* cname) override;
  bool SetLocalMID(const std::string& mid) override;

  void SetSyncGroup(const std::string& group) override;

  bool GetSendPacketTypeStats(
      webrtc::RtcpPacketTypeCounter* aPacketCounts) override;

  bool GetRecvPacketTypeStats(
      webrtc::RtcpPacketTypeCounter* aPacketCounts) override;

  bool GetRTPReceiverStats(unsigned int* jitterMs,
                           unsigned int* cumulativeLost) override;
  bool GetRTCPReceiverReport(uint32_t* jitterMs, uint32_t* packetsReceived,
                             uint64_t* bytesReceived, uint32_t* cumulativeLost,
                             Maybe<double>* aOutRttSec) override;
  bool GetRTCPSenderReport(unsigned int* packetsSent, uint64_t* bytesSent,
                           DOMHighResTimeStamp* aRemoteTimestamp) override;
  Maybe<mozilla::dom::RTCBandwidthEstimationInternal> GetBandwidthEstimation()
      override;

  bool SetDtmfPayloadType(unsigned char type, int freq) override;

  bool InsertDTMFTone(int channel, int eventCode, bool outOfBand, int lengthMs,
                      int attenuationDb) override;

  void GetRtpSources(nsTArray<dom::RTCRtpSourceEntry>& outSources) override;

  // webrtc::RtpPacketObserver override; fed by the channel proxies.
  void OnRtpPacket(const webrtc::RTPHeader& aRtpHeader,
                   const int64_t aTimestamp, const uint32_t aJitter) override;

  // webrtc::RtcpEventObserver overrides.
  void OnRtcpBye() override;
  void OnRtcpTimeout() override;

  void SetRtcpEventObserver(mozilla::RtcpEventObserver* observer) override;

  // test-only: inserts fake CSRCs and audio level data
  void InsertAudioLevelForContributingSource(const uint32_t aCsrcSource,
                                             const int64_t aTimestamp,
                                             const uint32_t aRtpTimestamp,
                                             const bool aHasAudioLevel,
                                             const uint8_t aAudioLevel);

  bool IsSamplingFreqSupported(int freq) const override;

 protected:
  // These are protected so they can be accessed by unit tests

  // Written only on main thread. Accessed from audio thread.
  // Accessed from mStsThread during stats calls.
  // This is safe, provided audio and stats calls stop before we
  // destroy the AudioConduit.
  std::unique_ptr<webrtc::voe::ChannelProxy> mRecvChannelProxy = nullptr;

  // Written only on main thread. Accessed from mStsThread during stats calls.
  // This is safe, provided stats calls stop before we destroy the
  // AudioConduit.
  std::unique_ptr<webrtc::voe::ChannelProxy> mSendChannelProxy = nullptr;

 private:
  WebrtcAudioConduit(const WebrtcAudioConduit& other) = delete;
  void operator=(const WebrtcAudioConduit& other) = delete;

  // Function to convert between WebRTC and Conduit codec structures
  bool CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
                                webrtc::AudioSendStream::Config& config);

  // Generate block length, in samples, of a 10 ms frame for a given
  // sampling frequency
  unsigned int GetNum10msSamplesForFrequency(int samplingFreqHz) const;

  // Checks the codec to be applied
  MediaConduitErrorCode ValidateCodecConfig(const AudioCodecConfig* codecInfo,
                                            bool send);

  MediaConduitErrorCode CreateSendStream();
  void DeleteSendStream();
  MediaConduitErrorCode CreateRecvStream();
  void DeleteRecvStream();

  bool RecreateSendStreamIfExists();
  bool RecreateRecvStreamIfExists();

  MediaConduitErrorCode CreateChannels();
  virtual void DeleteChannels();

  mozilla::ReentrantMonitor mTransportMonitor;

  // Accessed on any thread under mTransportMonitor.
  RefPtr<TransportInterface> mTransmitterTransport;

  // Accessed on any thread under mTransportMonitor.
  RefPtr<TransportInterface> mReceiverTransport;

  // Accessed from main thread and audio threads. Used to create and destroy
  // channels and to send audio data. Access to channels is protected by
  // locking in channel.cc.
  ScopedCustomReleasePtr<webrtc::VoEBase> mPtrVoEBase;

  // Const so can be accessed on any thread. Most methods are called on
  // main thread.
  const RefPtr<WebRtcCallWrapper> mCall;

  // Written only on main thread. Guarded by mMutex, except for reads on main.
  webrtc::AudioReceiveStream::Config mRecvStreamConfig;

  // Written only on main thread. Guarded by mMutex, except for reads on main.
  webrtc::AudioReceiveStream* mRecvStream;

  // Written only on main thread. Guarded by mMutex, except for reads on main.
  webrtc::AudioSendStream::Config mSendStreamConfig;

  // Written only on main thread. Guarded by mMutex, except for reads on main.
  webrtc::AudioSendStream* mSendStream;

  // accessed on creation, and when receiving packets
  Atomic<uint32_t> mRecvSSRC;  // this can change during a stream!

  // Accessed only on mStsThread.
  RtpPacketQueue mRtpPacketQueue;

  // engine states of our interest
  mozilla::Atomic<bool>
      mEngineTransmitting;  // If true => VoiceEngine Send-subsystem is up
  mozilla::Atomic<bool>
      mEngineReceiving;  // If true => VoiceEngine Receive-subsystem is up
                         // and playout is enabled

  // Accessed only on main thread.
  int mRecvChannel;

  // Accessed on main thread and from audio thread.
  int mSendChannel;

  // Accessed only on main thread.
  bool mDtmfEnabled;

  Mutex mMutex;

  // Accessed from audio thread.
  webrtc::AudioFrame mAudioFrame;  // for output pulls

  // Accessed from both main and mStsThread. Uses locks internally.
  RefPtr<RtpSourceObserver> mRtpSourceObserver;

  // Socket transport service thread. Any thread.
  const nsCOMPtr<nsISerialEventTarget> mStsThread;

  // Accessed from mStsThread. Last successfully polled RTT
  Maybe<DOMHighResTimeStamp> mRttSec;

  // Accessed only on mStsThread
  Maybe<DOMHighResTimeStamp> mLastRtcpReceived;

  // Accessed only on main thread.
  mozilla::RtcpEventObserver* mRtcpEventObserver = nullptr;
};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/CodecConfig.h b/dom/media/webrtc/libwebrtcglue/CodecConfig.h
new file mode 100644
index 0000000000..0176d807d6
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/CodecConfig.h
@@ -0,0 +1,214 @@
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CODEC_CONFIG_H_
+#define CODEC_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "common/EncodingConstraints.h"
+
+namespace mozilla {
+
+/**
+ * Minimalistic Audio Codec Config Params
+ */
struct AudioCodecConfig {
  /*
   * The data-types for these properties mimic the
   * corresponding webrtc::CodecInst data-types.
   */
  int mType;          // RTP payload type
  std::string mName;  // codec name as it appears in SDP
  int mFreq;          // clock rate in Hz
  int mChannels;      // 1 (mono) or 2 (stereo)

  bool mFECEnabled;
  bool mDtmfEnabled = false;
  uint32_t mFrameSizeMs = 0;     // negotiated ptime; 0 = unset
  uint32_t mMaxFrameSizeMs = 0;  // negotiated maxptime; 0 = unset
  uint32_t mMinFrameSizeMs = 0;  // negotiated minptime; 0 = unset

  // OPUS-specific
  bool mDTXEnabled = false;
  uint32_t mMaxAverageBitrate = 0;  // 0 = unset
  int mMaxPlaybackRate = 0;         // 0 = unset
  bool mCbrEnabled = false;

  // Only the identity of the codec is taken at construction; all other
  // options default to off/unset and are filled in during negotiation.
  AudioCodecConfig(int type, std::string name, int freq, int channels,
                   bool FECEnabled)
      : mType(type),
        mName(name),
        mFreq(freq),
        mChannels(channels),
        mFECEnabled(FECEnabled) {}
};
+
+/*
+ * Minimalistic video codec configuration
+ * More to be added later depending on the use-case
+ */
+
+#define MAX_SPROP_LEN 128
+
+// used for holding SDP negotiation results
struct VideoCodecConfigH264 {
  // Raw value of the SDP sprop-parameter-sets fmtp attribute.
  char sprop_parameter_sets[MAX_SPROP_LEN];
  // H.264 packetization-mode from SDP fmtp.
  int packetization_mode;
  // Packed profile/constraints/level triple, one byte each, high to low
  // (consumed by VideoCodecConfig's constructor).
  int profile_level_id;
  // TIAS bandwidth attribute value. NOTE(review): presumably bits per
  // second -- confirm against the SDP parser that fills this in.
  int tias_bw;
};
+
+// class so the std::strings can get freed more easily/reliably
+class VideoCodecConfig {
+ public:
+ /*
+ * The data-types for these properties mimic the
+ * corresponding webrtc::VideoCodec data-types.
+ */
+ int mType; // payload type
+ std::string mName;
+
+ std::vector<std::string> mAckFbTypes;
+ std::vector<std::string> mNackFbTypes;
+ std::vector<std::string> mCcmFbTypes;
+ // Don't pass mOtherFbTypes from JsepVideoCodecDescription because we'd have
+ // to drag SdpRtcpFbAttributeList::Feedback along too.
+ bool mRembFbSet;
+ bool mFECFbSet;
+ bool mTransportCCFbSet;
+
+ int mULPFECPayloadType;
+ int mREDPayloadType;
+ int mREDRTXPayloadType;
+ int mRTXPayloadType;
+
+ uint32_t mTias;
+ EncodingConstraints mEncodingConstraints;
+ struct Encoding {
+ std::string rid;
+ EncodingConstraints constraints;
+ bool operator==(const Encoding& aOther) const {
+ return rid == aOther.rid && constraints == aOther.constraints;
+ }
+ };
+ std::vector<Encoding> mEncodings;
+ std::string mSpropParameterSets;
+ uint8_t mProfile;
+ uint8_t mConstraints;
+ uint8_t mLevel;
+ uint8_t mPacketizationMode;
+ // TODO: add external negotiated SPS/PPS
+
+ bool operator==(const VideoCodecConfig& aRhs) const {
+ if (mType != aRhs.mType || mName != aRhs.mName ||
+ mAckFbTypes != aRhs.mAckFbTypes || mNackFbTypes != aRhs.mNackFbTypes ||
+ mCcmFbTypes != aRhs.mCcmFbTypes || mRembFbSet != aRhs.mRembFbSet ||
+ mFECFbSet != aRhs.mFECFbSet ||
+ mTransportCCFbSet != aRhs.mTransportCCFbSet ||
+ mULPFECPayloadType != aRhs.mULPFECPayloadType ||
+ mREDPayloadType != aRhs.mREDPayloadType ||
+ mREDRTXPayloadType != aRhs.mREDRTXPayloadType ||
+ mRTXPayloadType != aRhs.mRTXPayloadType || mTias != aRhs.mTias ||
+ !(mEncodingConstraints == aRhs.mEncodingConstraints) ||
+ !(mEncodings == aRhs.mEncodings) ||
+ mSpropParameterSets != aRhs.mSpropParameterSets ||
+ mProfile != aRhs.mProfile || mConstraints != aRhs.mConstraints ||
+ mLevel != aRhs.mLevel ||
+ mPacketizationMode != aRhs.mPacketizationMode) {
+ return false;
+ }
+
+ return true;
+ }
+
+ VideoCodecConfig(int type, std::string name,
+ const EncodingConstraints& constraints,
+ const struct VideoCodecConfigH264* h264 = nullptr)
+ : mType(type),
+ mName(name),
+ mRembFbSet(false),
+ mFECFbSet(false),
+ mTransportCCFbSet(false),
+ mULPFECPayloadType(-1),
+ mREDPayloadType(-1),
+ mREDRTXPayloadType(-1),
+ mRTXPayloadType(-1),
+ mTias(0),
+ mEncodingConstraints(constraints),
+ mProfile(0x42),
+ mConstraints(0xE0),
+ mLevel(0x0C),
+ mPacketizationMode(1) {
+ if (h264) {
+ mProfile = (h264->profile_level_id & 0x00FF0000) >> 16;
+ mConstraints = (h264->profile_level_id & 0x0000FF00) >> 8;
+ mLevel = (h264->profile_level_id & 0x000000FF);
+ mPacketizationMode = h264->packetization_mode;
+ mSpropParameterSets = h264->sprop_parameter_sets;
+ }
+ }
+
+ bool ResolutionEquals(const VideoCodecConfig& aConfig) const {
+ if (mEncodings.size() != aConfig.mEncodings.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < mEncodings.size(); ++i) {
+ if (!mEncodings[i].constraints.ResolutionEquals(
+ aConfig.mEncodings[i].constraints)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Nothing seems to use this right now. Do we intend to support this
+ // someday?
+ bool RtcpFbAckIsSet(const std::string& type) const {
+ for (auto i = mAckFbTypes.begin(); i != mAckFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbNackIsSet(const std::string& type) const {
+ for (auto i = mNackFbTypes.begin(); i != mNackFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbCcmIsSet(const std::string& type) const {
+ for (auto i = mCcmFbTypes.begin(); i != mCcmFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbRembIsSet() const { return mRembFbSet; }
+
+ bool RtcpFbFECIsSet() const { return mFECFbSet; }
+
+ bool RtcpFbTransportCCIsSet() const { return mTransportCCFbSet; }
+
+ bool RtxPayloadTypeIsSet() const { return mRTXPayloadType != -1; }
+};
+} // namespace mozilla
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/CodecStatistics.cpp b/dom/media/webrtc/libwebrtcglue/CodecStatistics.cpp
new file mode 100644
index 0000000000..e5d9b88b1f
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/CodecStatistics.cpp
@@ -0,0 +1,167 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CodecStatistics.h"
+
+#include "CSFLog.h"
+#include "mozilla/Telemetry.h"
+
+using namespace mozilla;
+using namespace webrtc;
+
// use the same tag as VideoConduit so encoder/decoder statistics show up
// under the same log module
static const char* logTag = "WebrtcVideoSessionConduit";
+
// Takes a borrowed, non-null ViECodec pointer; `codec` must outlive this
// object since the destructor still uses it to deregister observers.
VideoCodecStatistics::VideoCodecStatistics(int channel, ViECodec* codec)
    : mChannel(channel),
      mSentRawFrames(0),
      mPtrViECodec(codec),
      mEncoderDroppedFrames(0),
      mDecoderDiscardedPackets(0),
      mRegisteredEncode(false),
      mRegisteredDecode(false),
      mReceiveState(kReceiveStateInitial),
      mRecoveredBeforeLoss(0),
      mRecoveredLosses(0) {
  MOZ_ASSERT(mPtrViECodec);
}
+
// Deregisters whichever observers Register() installed.
VideoCodecStatistics::~VideoCodecStatistics() {
  if (mRegisteredEncode) {
    mPtrViECodec->DeregisterEncoderObserver(mChannel);
  }
  if (mRegisteredDecode) {
    mPtrViECodec->DeregisterDecoderObserver(mChannel);
  }
}
+
+void VideoCodecStatistics::Register(bool encoder) {
+ if (encoder && !mRegisteredEncode) {
+ mPtrViECodec->RegisterEncoderObserver(mChannel, *this);
+ mRegisteredEncode = true;
+ } else if (!encoder && !mRegisteredDecode) {
+ mPtrViECodec->RegisterDecoderObserver(mChannel, *this);
+ mRegisteredDecode = true;
+ }
+}
+
+void VideoCodecStatistics::OutgoingRate(const int video_channel,
+ const uint32_t framerate,
+ const uint32_t bitrate) {
+ unsigned int keyFrames, deltaFrames;
+ mPtrViECodec->GetSendCodecStatistics(video_channel, keyFrames, deltaFrames);
+ uint32_t dropped = mSentRawFrames - (keyFrames + deltaFrames);
+ CSFLogDebug(
+ logTag,
+ "encoder statistics - framerate: %u, bitrate: %u, dropped frames: %u",
+ framerate, bitrate, dropped);
+ mEncoderBitRate.Push(bitrate);
+ mEncoderFps.Push(framerate);
+ mEncoderDroppedFrames += dropped;
+}
+
// ViEDecoderObserver callback: the incoming stream changed codec; log only.
void VideoCodecStatistics::IncomingCodecChanged(const int video_channel,
                                                const VideoCodec& video_codec) {
  CSFLogDebug(logTag, "channel %d change codec to \"%s\" ", video_channel,
              video_codec.plName);
}
+
// ViEDecoderObserver callback: records decoder bitrate/framerate samples and
// accumulates the decoder's discarded-packet count.
void VideoCodecStatistics::IncomingRate(const int video_channel,
                                        const unsigned int framerate,
                                        const unsigned int bitrate) {
  unsigned int discarded = mPtrViECodec->GetDiscardedPackets(video_channel);
  CSFLogDebug(
      logTag,
      "decoder statistics - framerate: %u, bitrate: %u, discarded packets %u",
      framerate, bitrate, discarded);
  mDecoderBitRate.Push(bitrate);
  mDecoderFps.Push(framerate);
  mDecoderDiscardedPackets += discarded;
}
+
// ViEDecoderObserver callback: tracks transitions of the decoder's receive
// state so we can measure how long error states last and how often NACK
// recovery avoided a visible error.
void VideoCodecStatistics::ReceiveStateChange(const int aChannel,
                                              VideoReceiveState aState) {
  CSFLogDebug(logTag, "New state for %d: %d (was %d)", aChannel, aState,
              mReceiveState);
  if (mFirstDecodeTime.IsNull()) {
    // First state callback marks the start of decoding for EndOfCallStats().
    mFirstDecodeTime = TimeStamp::Now();
  }
  /*
   * Invalid transitions:
   * WaitingKey -> PreemptiveNACK
   * DecodingWithErrors -> PreemptiveNACK
   */

  switch (mReceiveState) {
    case kReceiveStateNormal:
    case kReceiveStateInitial:
      // in a normal state
      if (aState != kReceiveStateNormal && aState != kReceiveStateInitial) {
        // no longer in a normal state
        if (aState != kReceiveStatePreemptiveNACK) {
          // Entering a real failure state; remember when it began.
          mReceiveFailureTime = TimeStamp::Now();
        }
      }  // else Normal<->Initial transition
      break;
    default:
      // not in a normal state
      if (aState == kReceiveStateNormal || aState == kReceiveStateInitial) {
        if (mReceiveState == kReceiveStatePreemptiveNACK) {
          mRecoveredBeforeLoss++;
          CSFLogError(logTag, "Video error avoided by NACK recovery");
        } else if (!mReceiveFailureTime.IsNull()) {  // safety
          TimeDuration timeDelta = TimeStamp::Now() - mReceiveFailureTime;
          CSFLogError(logTag, "Video error duration: %u ms",
                      static_cast<uint32_t>(timeDelta.ToMilliseconds()));
          Telemetry::Accumulate(
              Telemetry::WEBRTC_VIDEO_ERROR_RECOVERY_MS,
              static_cast<uint32_t>(timeDelta.ToMilliseconds()));

          mRecoveredLosses++;           // to calculate losses per minute
          mTotalLossTime += timeDelta;  // To calculate % time in recovery
        }
      }  // else non-Normal to different non-normal transition
      break;
  }

  mReceiveState = aState;
}
+
// Emits end-of-call telemetry: NACK recoveries per minute, errors recovered
// per minute, and the permille of call time spent in a decode-error state.
// No-op if decoding never started (mFirstDecodeTime never set).
void VideoCodecStatistics::EndOfCallStats() {
  if (!mFirstDecodeTime.IsNull()) {
    TimeDuration callDelta = TimeStamp::Now() - mFirstDecodeTime;
    if (callDelta.ToSeconds() != 0) {
      uint32_t recovered_per_min =
          mRecoveredBeforeLoss / (callDelta.ToSeconds() / 60);
      CSFLogError(logTag, "Video recovery before error per min %u",
                  recovered_per_min);
      Telemetry::Accumulate(
          Telemetry::WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN,
          recovered_per_min);
      uint32_t err_per_min = mRecoveredLosses / (callDelta.ToSeconds() / 60);
      CSFLogError(logTag, "Video recovery after error per min %u", err_per_min);
      Telemetry::Accumulate(
          Telemetry::WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN, err_per_min);
      // Percent is scaled by 10 below to report permille, matching the
      // PERMILLE telemetry probe's unit.
      float percent =
          (mTotalLossTime.ToSeconds() * 100) / callDelta.ToSeconds();
      CSFLogError(logTag, "Video error time percentage %f%%", percent);
      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE,
                            static_cast<uint32_t>(percent * 10));
    }
  }
}
+
// Counts a raw captured frame handed to the encoder; compared against the
// encoder's key+delta totals in OutgoingRate() to derive dropped frames.
void VideoCodecStatistics::SentFrame() { mSentRawFrames++; }
+
// Logs summary statistics for all four running measurements.
void VideoCodecStatistics::Dump() {
  Dump(mEncoderBitRate, "encoder bitrate");
  Dump(mEncoderFps, "encoder fps");
  Dump(mDecoderBitRate, "decoder bitrate");
  Dump(mDecoderFps, "decoder fps");
}
+
// Helper: logs one RunningStat's mean/variance/stddev under the given label.
void VideoCodecStatistics::Dump(RunningStat& s, const char* name) {
  CSFLogDebug(logTag, "%s, mean: %f, variance: %f, standard deviation: %f",
              name, s.Mean(), s.Variance(), s.StandardDeviation());
}
diff --git a/dom/media/webrtc/libwebrtcglue/CodecStatistics.h b/dom/media/webrtc/libwebrtcglue/CodecStatistics.h
new file mode 100644
index 0000000000..7b3400b6b0
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/CodecStatistics.h
@@ -0,0 +1,102 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef CODEC_STATISTICS_H_
+#define CODEC_STATISTICS_H_
+#include <math.h>
+
+#include "nsTArray.h"
+#include "nsISupportsImpl.h"
+#include "mozilla/TimeStamp.h"
+#include "webrtc/common_types.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "MediaEngineWrapper.h"
+#include "RunningStat.h"
+
+namespace mozilla {
+
+// Statistics-gathering observer for Video Encoder and Decoder
+
+class VideoCodecStatistics : public webrtc::ViEEncoderObserver,
+ public webrtc::ViEDecoderObserver {
+ public:
+ VideoCodecStatistics(int channel, webrtc::ViECodec* vieCodec);
+ ~VideoCodecStatistics();
+ void Register(bool encoder);
+
+ void SentFrame();
+ virtual void OutgoingRate(const int video_channel,
+ const unsigned int framerate,
+ const unsigned int bitrate) override;
+
+ virtual void IncomingCodecChanged(
+ const int video_channel, const webrtc::VideoCodec& video_codec) override;
+
+ virtual void IncomingRate(const int video_channel,
+ const unsigned int framerate,
+ const unsigned int bitrate) override;
+
+ void ReceiveStateChange(const int video_channel,
+ webrtc::VideoReceiveState state) override;
+
+ void EndOfCallStats();
+
+ virtual void RequestNewKeyFrame(const int video_channel) override{};
+
+ virtual void SuspendChange(int video_channel, bool is_suspended) override{};
+ virtual void DecoderTiming(int decode_ms, int max_decode_ms,
+ int current_delay_ms, int target_delay_ms,
+ int jitter_buffer_ms, int min_playout_delay_ms,
+ int render_delay_ms) override {}
+
+ bool GetEncoderStats(double* framerateMean, double* framerateStdDev,
+ double* bitrateMean, double* bitrateStdDev,
+ uint32_t* droppedFrames) {
+ *framerateMean = mEncoderFps.Mean();
+ *framerateStdDev = mEncoderFps.StandardDeviation();
+ *bitrateMean = mEncoderBitRate.Mean();
+ *bitrateStdDev = mEncoderBitRate.StandardDeviation();
+ *droppedFrames = mEncoderDroppedFrames;
+ return true;
+ }
+
+ bool GetDecoderStats(double* framerateMean, double* framerateStdDev,
+ double* bitrateMean, double* bitrateStdDev,
+ uint32_t* discardedPackets) {
+ *framerateMean = mDecoderFps.Mean();
+ *framerateStdDev = mDecoderFps.StandardDeviation();
+ *bitrateMean = mDecoderBitRate.Mean();
+ *bitrateStdDev = mDecoderBitRate.StandardDeviation();
+ *discardedPackets = mDecoderDiscardedPackets;
+ return true;
+ }
+
+ void Dump();
+
+ private:
+ void Dump(RunningStat& s, const char* name);
+
+ int mChannel;
+ uint32_t mSentRawFrames;
+ ScopedCustomReleasePtr<webrtc::ViECodec> mPtrViECodec; // back-pointer
+
+ RunningStat mEncoderBitRate;
+ RunningStat mEncoderFps;
+ uint32_t mEncoderDroppedFrames;
+ RunningStat mDecoderBitRate;
+ RunningStat mDecoderFps;
+ uint32_t mDecoderDiscardedPackets;
+ bool mRegisteredEncode;
+ bool mRegisteredDecode;
+
+ webrtc::VideoReceiveState mReceiveState;
+ TimeStamp mFirstDecodeTime;
+ TimeStamp mReceiveFailureTime;
+ TimeDuration mTotalLossTime;
+ uint32_t mRecoveredBeforeLoss;
+ uint32_t mRecoveredLosses;
+};
+
+} // namespace mozilla
+
+#endif // CODEC_STATISTICS_H_
diff --git a/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp b/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp
new file mode 100644
index 0000000000..1fb486c558
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp
@@ -0,0 +1,18 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcGmpVideoCodec.h"
+#include "GmpVideoCodec.h"
+
+namespace mozilla {
+
+WebrtcVideoEncoder* GmpVideoCodec::CreateEncoder() {
+ return new WebrtcVideoEncoderProxy(new WebrtcGmpVideoEncoder());
+}
+
+WebrtcVideoDecoder* GmpVideoCodec::CreateDecoder() {
+ return new WebrtcVideoDecoderProxy();
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h b/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h
new file mode 100644
index 0000000000..318a891d06
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GMPVIDEOCODEC_H_
+#define GMPVIDEOCODEC_H_
+
+#include "MediaConduitInterface.h"
+
+namespace mozilla {
+class GmpVideoCodec {
+ public:
+ static WebrtcVideoEncoder* CreateEncoder();
+ static WebrtcVideoDecoder* CreateDecoder();
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.cpp b/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.cpp
new file mode 100644
index 0000000000..14765ecc33
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.cpp
@@ -0,0 +1,41 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "common/browser_logging/CSFLog.h"
+#include "nspr.h"
+#include "mozilla/StaticPrefs_media.h"
+
+#include "WebrtcMediaCodecVP8VideoCodec.h"
+#include "MediaCodecVideoCodec.h"
+
+namespace mozilla {
+
+static const char* mcvcLogTag = "MediaCodecVideoCodec";
+#ifdef LOGTAG
+# undef LOGTAG
+#endif
+#define LOGTAG mcvcLogTag
+
+WebrtcVideoEncoder* MediaCodecVideoCodec::CreateEncoder(CodecType aCodecType) {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+ if (aCodecType == CODEC_VP8) {
+ if (StaticPrefs::
+ media_navigator_hardware_vp8_encode_acceleration_remote_enabled()) {
+ return new WebrtcMediaCodecVP8VideoRemoteEncoder();
+ } else {
+ return new WebrtcMediaCodecVP8VideoEncoder();
+ }
+ }
+ return nullptr;
+}
+
+WebrtcVideoDecoder* MediaCodecVideoCodec::CreateDecoder(CodecType aCodecType) {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+ if (aCodecType == CODEC_VP8) {
+ return new WebrtcMediaCodecVP8VideoDecoder();
+ }
+ return nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.h b/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.h
new file mode 100644
index 0000000000..3aeb5bcc03
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaCodecVideoCodec.h
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaCodecVideoCodec_h__
+#define MediaCodecVideoCodec_h__
+
+#include "MediaConduitInterface.h"
+
+namespace mozilla {
+class MediaCodecVideoCodec {
+ public:
+ enum CodecType {
+ CODEC_VP8,
+ };
+ /**
+ * Create encoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static WebrtcVideoEncoder* CreateEncoder(CodecType aCodecType);
+
+ /**
+ * Create decoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static WebrtcVideoDecoder* CreateDecoder(CodecType aCodecType);
+};
+
+} // namespace mozilla
+
+#endif // MediaCodecVideoCodec_h__
diff --git a/dom/media/webrtc/libwebrtcglue/MediaConduitErrors.h b/dom/media/webrtc/libwebrtcglue/MediaConduitErrors.h
new file mode 100644
index 0000000000..34487d77a0
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaConduitErrors.h
@@ -0,0 +1,46 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIA_SESSION_ERRORS_H_
+#define MEDIA_SESSION_ERRORS_H_
+
+namespace mozilla {
+enum MediaConduitErrorCode {
+  kMediaConduitNoError = 0,  // 0 for Success, greater than 0 implies error
+ kMediaConduitSessionNotInited =
+ 10100, // Session not initialized.10100 serves as
+ // base for the conduit errors
+ kMediaConduitMalformedArgument, // Malformed input to Conduit API
+ kMediaConduitCaptureError, // WebRTC capture APIs failed
+ kMediaConduitInvalidSendCodec, // Wrong Send codec
+ kMediaConduitInvalidReceiveCodec, // Wrong Recv Codec
+ kMediaConduitCodecInUse, // Already applied Codec
+ kMediaConduitInvalidRenderer, // Null or Wrong Renderer object
+ kMediaConduitRendererFail, // Add Render called multiple times
+  kMediaConduitSendingAlready,      // Engine already transmitting
+ kMediaConduitReceivingAlready, // Engine already receiving
+ kMediaConduitTransportRegistrationFail, // Null or wrong transport interface
+ kMediaConduitInvalidTransport, // Null or wrong transport interface
+ kMediaConduitChannelError, // Configuration Error
+ kMediaConduitSocketError, // Media Engine transport socket error
+ kMediaConduitRTPRTCPModuleError, // Couldn't start RTP/RTCP processing
+ kMediaConduitRTPProcessingFailed, // Processing incoming RTP frame failed
+ kMediaConduitUnknownError, // More information can be found in logs
+ kMediaConduitExternalRecordingError, // Couldn't start external recording
+ kMediaConduitRecordingError, // Runtime recording error
+ kMediaConduitExternalPlayoutError, // Couldn't start external playout
+ kMediaConduitPlayoutError, // Runtime playout error
+ kMediaConduitMTUError, // Can't set MTU
+ kMediaConduitRTCPStatusError, // Can't set RTCP mode
+ kMediaConduitKeyFrameRequestError, // Can't set KeyFrameRequest mode
+ kMediaConduitNACKStatusError, // Can't set NACK mode
+ kMediaConduitTMMBRStatusError, // Can't set TMMBR mode
+ kMediaConduitFECStatusError, // Can't set FEC mode
+ kMediaConduitHybridNACKFECStatusError, // Can't set Hybrid NACK / FEC mode
+ kMediaConduitVideoSendStreamError // WebRTC video send stream failure
+};
+
+}
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/MediaConduitInterface.h b/dom/media/webrtc/libwebrtcglue/MediaConduitInterface.h
new file mode 100644
index 0000000000..1ef7ab7443
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaConduitInterface.h
@@ -0,0 +1,618 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIA_CONDUIT_ABSTRACTION_
+#define MEDIA_CONDUIT_ABSTRACTION_
+
+#include "nsISupportsImpl.h"
+#include "nsXPCOM.h"
+#include "nsDOMNavigationTiming.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/UniquePtr.h"
+#include "RtpSourceObserver.h"
+#include "RtcpEventObserver.h"
+#include "CodecConfig.h"
+#include "VideoTypes.h"
+#include "MediaConduitErrors.h"
+#include "jsapi/RTCStatsReport.h"
+
+#include "ImageContainer.h"
+
+#include "webrtc/call/call.h"
+#include "webrtc/common_types.h"
+#include "webrtc/common_types.h"
+#include "webrtc/api/video/video_frame_buffer.h"
+#include "webrtc/logging/rtc_event_log/rtc_event_log.h"
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
+#include "webrtc/modules/audio_device/include/fake_audio_device.h"
+#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+
+#include <vector>
+#include <set>
+
+namespace webrtc {
+class VideoFrame;
+}
+
+namespace mozilla {
+
+enum class MediaSessionConduitLocalDirection : int { kSend, kRecv };
+
+class VideoSessionConduit;
+class AudioSessionConduit;
+class RtpRtcpConfig;
+
+using RtpExtList = std::vector<webrtc::RtpExtension>;
+
+/**
+ * Abstract Interface for transporting RTP packets - audio/video
+ * The consumers of this interface are responsible for passing in
+ * the RTPfied media packets
+ */
+class TransportInterface {
+ protected:
+ virtual ~TransportInterface() {}
+
+ public:
+ /**
+ * RTP Transport Function to be implemented by concrete transport
+ * implementation
+ * @param data : RTP Packet (audio/video) to be transported
+ * @param len : Length of the media packet
+ * @result : NS_OK on success, NS_ERROR_FAILURE otherwise
+ */
+ virtual nsresult SendRtpPacket(const uint8_t* data, size_t len) = 0;
+
+ /**
+ * RTCP Transport Function to be implemented by concrete transport
+ * implementation
+ * @param data : RTCP Packet to be transported
+ * @param len : Length of the RTCP packet
+ * @result : NS_OK on success, NS_ERROR_FAILURE otherwise
+ */
+ virtual nsresult SendRtcpPacket(const uint8_t* data, size_t len) = 0;
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TransportInterface)
+};
+
+/**
+ * 1. Abstract renderer for video data
+ * 2. This class acts as abstract interface between the video-engine and
+ * video-engine agnostic renderer implementation.
+ * 3. Concrete implementation of this interface is responsible for
+ * processing and/or rendering the obtained raw video frame to appropriate
+ * output , say, <video>
+ */
+class VideoRenderer {
+ protected:
+ virtual ~VideoRenderer() {}
+
+ public:
+ /**
+   * Callback Function reporting any change in the video-frame dimensions
+ * @param width: current width of the video @ decoder
+ * @param height: current height of the video @ decoder
+ */
+ virtual void FrameSizeChange(unsigned int width, unsigned int height) = 0;
+
+ /**
+ * Callback Function reporting decoded frame for processing.
+ * @param buffer: reference to decoded video frame
+ * @param buffer_size: size of the decoded frame
+ * @param time_stamp: Decoder timestamp, typically 90KHz as per RTP
+ * @render_time: Wall-clock time at the decoder for synchronization
+ * purposes in milliseconds
+ * NOTE: If decoded video frame is passed through buffer , it is the
+ * responsibility of the concrete implementations of this class to own copy
+ * of the frame if needed for time longer than scope of this callback.
+ * Such implementations should be quick in processing the frames and return
+ * immediately.
+ */
+ virtual void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
+ uint32_t time_stamp, int64_t render_time) = 0;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
+};
+
+/**
+ * Generic Interface for representing Audio/Video Session
+ * MediaSession conduit is identified by 2 main components
+ * 1. Attached Transport Interface for inbound and outbound RTP transport
+ * 2. Attached Renderer Interface for rendering media data off the network
+ * This class hides specifics of Media-Engine implementation from the consumers
+ * of this interface.
+ * Also provides codec configuration API for the media sent and received
+ */
+class MediaSessionConduit {
+ protected:
+ virtual ~MediaSessionConduit() {}
+
+ public:
+ enum Type { AUDIO, VIDEO };
+
+ static std::string LocalDirectionToString(
+ const MediaSessionConduitLocalDirection aDirection) {
+ return aDirection == MediaSessionConduitLocalDirection::kSend ? "send"
+ : "receive";
+ }
+
+ virtual Type type() const = 0;
+
+ /**
+ * Function triggered on Incoming RTP packet from the remote
+ * endpoint by the transport implementation.
+ * @param data : RTP Packet (audio/video) to be processed
+ * @param len : Length of the media packet
+ * Obtained packets are passed to the Media-Engine for further
+ * processing , say, decoding
+ */
+ virtual MediaConduitErrorCode ReceivedRTPPacket(
+ const void* data, int len, webrtc::RTPHeader& header) = 0;
+
+ /**
+ * Function triggered on Incoming RTCP packet from the remote
+ * endpoint by the transport implementation.
+ * @param data : RTCP Packet (audio/video) to be processed
+ * @param len : Length of the media packet
+ * Obtained packets are passed to the Media-Engine for further
+ * processing , say, decoding
+ */
+ virtual MediaConduitErrorCode ReceivedRTCPPacket(const void* data,
+ int len) = 0;
+
+ virtual Maybe<DOMHighResTimeStamp> LastRtcpReceived() const = 0;
+ virtual DOMHighResTimeStamp GetNow() const = 0;
+
+ virtual MediaConduitErrorCode StopTransmitting() = 0;
+ virtual MediaConduitErrorCode StartTransmitting() = 0;
+ virtual MediaConduitErrorCode StopReceiving() = 0;
+ virtual MediaConduitErrorCode StartReceiving() = 0;
+
+ /**
+ * Function to attach transmitter transport end-point of the Media conduit.
+   * @param aTransport: Reference to the concrete transport implementation
+ * When nullptr, unsets the transmitter transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+ * Note: This transport is used for RTP, and RTCP if no receiver transport is
+ * set. In the future, we should ensure that RTCP sender reports use this
+ * regardless of whether the receiver transport is set.
+ */
+ virtual MediaConduitErrorCode SetTransmitterTransport(
+ RefPtr<TransportInterface> aTransport) = 0;
+
+ /**
+ * Function to attach receiver transport end-point of the Media conduit.
+   * @param aTransport: Reference to the concrete transport implementation
+ * When nullptr, unsets the receiver transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+ * Note: This transport is used for RTCP.
+ * Note: In the future, we should avoid using this for RTCP sender reports.
+ */
+ virtual MediaConduitErrorCode SetReceiverTransport(
+ RefPtr<TransportInterface> aTransport) = 0;
+
+ /* Sets the local SSRCs
+ * @return true iff the local ssrcs == aSSRCs upon return
+ * Note: this is an ordered list and {a,b,c} != {b,a,c}
+ */
+ virtual bool SetLocalSSRCs(const std::vector<uint32_t>& aSSRCs,
+ const std::vector<uint32_t>& aRtxSSRCs) = 0;
+ virtual std::vector<uint32_t> GetLocalSSRCs() = 0;
+
+ /**
+ * Adds negotiated RTP header extensions to the the conduit. Unknown
+ * extensions are ignored.
+ * @param aDirection the local direction to set the RTP header extensions for
+ * @param aExtensions the RTP header extensions to set
+ * @return if all extensions were set it returns a success code,
+ * if an extension fails to set it may immediately return an error
+ * code
+ * TODO webrtc.org 64 update: make return type void again
+ */
+ virtual MediaConduitErrorCode SetLocalRTPExtensions(
+ MediaSessionConduitLocalDirection aDirection,
+ const RtpExtList& aExtensions) = 0;
+
+ virtual bool GetRemoteSSRC(uint32_t* ssrc) = 0;
+ virtual bool SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) = 0;
+ virtual bool UnsetRemoteSSRC(uint32_t ssrc) = 0;
+ virtual bool SetLocalCNAME(const char* cname) = 0;
+
+ virtual bool SetLocalMID(const std::string& mid) = 0;
+
+ virtual void SetSyncGroup(const std::string& group) = 0;
+
+ /**
+ * Functions returning stats needed by w3c stats model.
+ */
+
+ virtual bool GetSendPacketTypeStats(
+ webrtc::RtcpPacketTypeCounter* aPacketCounts) = 0;
+
+ virtual bool GetRecvPacketTypeStats(
+ webrtc::RtcpPacketTypeCounter* aPacketCounts) = 0;
+
+ virtual bool GetRTPReceiverStats(unsigned int* jitterMs,
+ unsigned int* cumulativeLost) = 0;
+ virtual bool GetRTCPReceiverReport(uint32_t* jitterMs,
+ uint32_t* packetsReceived,
+ uint64_t* bytesReceived,
+ uint32_t* cumulativeLost,
+ Maybe<double>* aOutRttMs) = 0;
+ virtual bool GetRTCPSenderReport(unsigned int* packetsSent,
+ uint64_t* bytesSent,
+ DOMHighResTimeStamp* aRemoteTimestamp) = 0;
+
+ virtual Maybe<mozilla::dom::RTCBandwidthEstimationInternal>
+ GetBandwidthEstimation() = 0;
+
+ virtual void GetRtpSources(nsTArray<dom::RTCRtpSourceEntry>& outSources) = 0;
+
+ virtual uint64_t CodecPluginID() = 0;
+
+ virtual void SetPCHandle(const std::string& aPCHandle) = 0;
+
+ virtual MediaConduitErrorCode DeliverPacket(const void* data, int len) = 0;
+
+ virtual void DeleteStreams() = 0;
+
+ virtual Maybe<RefPtr<VideoSessionConduit>> AsVideoSessionConduit() = 0;
+
+ virtual void SetRtcpEventObserver(RtcpEventObserver* observer) = 0;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSessionConduit)
+};
+
+// Wrap the webrtc.org Call class adding mozilla add/ref support.
+class WebRtcCallWrapper : public RefCounted<WebRtcCallWrapper> {
+ public:
+ typedef webrtc::Call::Config Config;
+
+ static RefPtr<WebRtcCallWrapper> Create(
+ const dom::RTCStatsTimestampMaker& aTimestampMaker) {
+ return new WebRtcCallWrapper(aTimestampMaker);
+ }
+
+ static RefPtr<WebRtcCallWrapper> Create(UniquePtr<webrtc::Call>&& aCall) {
+ return new WebRtcCallWrapper(std::move(aCall));
+ }
+
+ // Don't allow copying/assigning.
+ WebRtcCallWrapper(const WebRtcCallWrapper&) = delete;
+ void operator=(const WebRtcCallWrapper&) = delete;
+
+ webrtc::Call* Call() const { return mCall.get(); }
+
+ virtual ~WebRtcCallWrapper() {
+ if (mCall->voice_engine()) {
+ webrtc::VoiceEngine* voice_engine = mCall->voice_engine();
+ mCall.reset(nullptr); // Force it to release the voice engine reference
+ // Delete() must be after all refs are released
+ webrtc::VoiceEngine::Delete(voice_engine);
+ } else {
+ // Must ensure it's destroyed *before* the EventLog!
+ mCall.reset(nullptr);
+ }
+ }
+
+ bool UnsetRemoteSSRC(uint32_t ssrc) {
+ for (auto conduit : mConduits) {
+ if (!conduit->UnsetRemoteSSRC(ssrc)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void RegisterConduit(MediaSessionConduit* conduit) {
+ mConduits.insert(conduit);
+ }
+
+ void UnregisterConduit(MediaSessionConduit* conduit) {
+ mConduits.erase(conduit);
+ }
+
+ DOMHighResTimeStamp GetNow() const { return mTimestampMaker.GetNow(); }
+
+ const dom::RTCStatsTimestampMaker& GetTimestampMaker() const {
+ return mTimestampMaker;
+ }
+
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(WebRtcCallWrapper)
+
+ rtc::scoped_refptr<webrtc::AudioDecoderFactory> mDecoderFactory;
+
+ private:
+ explicit WebRtcCallWrapper(const dom::RTCStatsTimestampMaker& aTimestampMaker)
+ : mTimestampMaker(aTimestampMaker) {
+ auto voice_engine = webrtc::VoiceEngine::Create();
+ mDecoderFactory = webrtc::CreateBuiltinAudioDecoderFactory();
+
+ webrtc::AudioState::Config audio_state_config;
+ audio_state_config.voice_engine = voice_engine;
+ audio_state_config.audio_mixer = webrtc::AudioMixerImpl::Create();
+ audio_state_config.audio_processing = webrtc::AudioProcessing::Create();
+ mFakeAudioDeviceModule.reset(new webrtc::FakeAudioDeviceModule());
+ auto voe_base = webrtc::VoEBase::GetInterface(voice_engine);
+ voe_base->Init(mFakeAudioDeviceModule.get(),
+ audio_state_config.audio_processing.get(), mDecoderFactory);
+ voe_base->Release();
+ auto audio_state = webrtc::AudioState::Create(audio_state_config);
+ webrtc::Call::Config config(&mEventLog);
+ config.audio_state = audio_state;
+ mCall.reset(webrtc::Call::Create(config));
+ }
+
+ explicit WebRtcCallWrapper(UniquePtr<webrtc::Call>&& aCall) {
+ MOZ_ASSERT(aCall);
+ mCall = std::move(aCall);
+ }
+
+ UniquePtr<webrtc::Call> mCall;
+ UniquePtr<webrtc::FakeAudioDeviceModule> mFakeAudioDeviceModule;
+ webrtc::RtcEventLogNullImpl mEventLog;
+ // Allows conduits to know about one another, to avoid remote SSRC
+ // collisions.
+ std::set<MediaSessionConduit*> mConduits;
+ dom::RTCStatsTimestampMaker mTimestampMaker;
+};
+
+// Abstract base classes for external encoder/decoder.
+class CodecPluginID {
+ public:
+ virtual ~CodecPluginID() {}
+
+ virtual uint64_t PluginID() const = 0;
+};
+
+class VideoEncoder : public CodecPluginID {
+ public:
+ virtual ~VideoEncoder() {}
+};
+
+class VideoDecoder : public CodecPluginID {
+ public:
+ virtual ~VideoDecoder() {}
+};
+
+/**
+ * MediaSessionConduit for video
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class VideoSessionConduit : public MediaSessionConduit {
+ public:
+ /**
+ * Factory function to create and initialize a Video Conduit Session
+ * @param webrtc::Call instance shared by paired audio and video
+ * media conduits
+ * @result Concrete VideoSessionConduitObject or nullptr in the case
+ * of failure
+ */
+ static RefPtr<VideoSessionConduit> Create(
+ RefPtr<WebRtcCallWrapper> aCall,
+ nsCOMPtr<nsISerialEventTarget> aStsThread);
+
+ enum FrameRequestType {
+ FrameRequestNone,
+ FrameRequestFir,
+ FrameRequestPli,
+ FrameRequestUnknown
+ };
+
+ VideoSessionConduit()
+ : mFrameRequestMethod(FrameRequestNone),
+ mUsingNackBasic(false),
+ mUsingTmmbr(false),
+ mUsingFEC(false) {}
+
+ virtual ~VideoSessionConduit() {}
+
+ Type type() const override { return VIDEO; }
+
+ /**
+ * Function to attach Renderer end-point of the Media-Video conduit.
+ * @param aRenderer : Reference to the concrete Video renderer implementation
+ * Note: Multiple invocations of this API shall remove an existing renderer
+ * and attaches the new to the Conduit.
+ */
+ virtual MediaConduitErrorCode AttachRenderer(
+ RefPtr<mozilla::VideoRenderer> aRenderer) = 0;
+ virtual void DetachRenderer() = 0;
+
+ virtual void DisableSsrcChanges() = 0;
+
+ bool SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) override = 0;
+ bool UnsetRemoteSSRC(uint32_t ssrc) override = 0;
+
+ /**
+ * Function to deliver a capture video frame for encoding and transport.
+   * If the frame's timestamp is 0, it will be automatically generated.
+ *
+ * NOTE: ConfigureSendMediaCodec() must be called before this function can
+ * be invoked. This ensures the inserted video-frames can be
+ * transmitted by the conduit.
+ */
+ virtual MediaConduitErrorCode SendVideoFrame(
+ const webrtc::VideoFrame& frame) = 0;
+
+ virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) = 0;
+
+ /**
+ * Function to configure send codec for the video session
+ * @param sendSessionConfig: CodecConfiguration
+ * @result: On Success, the video engine is configured with passed in codec
+ * for send. On failure, video engine transmit functionality is
+ * disabled.
+ * NOTE: This API can be invoked multiple time. Invoking this API may involve
+ * restarting transmission sub-system on the engine
+ *
+ */
+ virtual MediaConduitErrorCode ConfigureSendMediaCodec(
+ const VideoCodecConfig* sendSessionConfig,
+ const RtpRtcpConfig& aRtpRtcpConfig) = 0;
+
+ /**
+   * Function to configure the list of receive codecs for the video session
+   * @param recvCodecConfigList: list of codec configurations
+ * NOTE: This API can be invoked multiple time. Invoking this API may involve
+ * restarting reception sub-system on the engine
+ *
+ */
+ virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+ const std::vector<UniquePtr<VideoCodecConfig>>& recvCodecConfigList,
+ const RtpRtcpConfig& aRtpRtcpConfig) = 0;
+
+ /**
+ * These methods allow unit tests to double-check that the
+ * rtcp-fb settings are as expected.
+ */
+ FrameRequestType FrameRequestMethod() const { return mFrameRequestMethod; }
+
+ bool UsingNackBasic() const { return mUsingNackBasic; }
+
+ bool UsingTmmbr() const { return mUsingTmmbr; }
+
+ bool UsingFEC() const { return mUsingFEC; }
+
+ virtual bool GetVideoEncoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean, double* bitrateStdDev,
+ uint32_t* droppedFrames,
+ uint32_t* framesEncoded,
+ Maybe<uint64_t>* qpSum) = 0;
+ virtual bool GetVideoDecoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean, double* bitrateStdDev,
+ uint32_t* discardedPackets,
+ uint32_t* framesDecoded) = 0;
+
+ virtual void RecordTelemetry() const = 0;
+
+ virtual bool AddFrameHistory(
+ dom::Sequence<dom::RTCVideoFrameHistoryInternal>* outHistories) const = 0;
+
+ protected:
+ /* RTCP feedback settings, for unit testing purposes */
+ FrameRequestType mFrameRequestMethod;
+ bool mUsingNackBasic;
+ bool mUsingTmmbr;
+ bool mUsingFEC;
+};
+
+/**
+ * MediaSessionConduit for audio
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class AudioSessionConduit : public MediaSessionConduit {
+ public:
+ /**
+ * Factory function to create and initialize an Audio Conduit Session
+ * @param webrtc::Call instance shared by paired audio and video
+ * media conduits
+ * @result Concrete AudioSessionConduitObject or nullptr in the case
+ * of failure
+ */
+ static RefPtr<AudioSessionConduit> Create(
+ RefPtr<WebRtcCallWrapper> aCall,
+ nsCOMPtr<nsISerialEventTarget> aStsThread);
+
+ virtual ~AudioSessionConduit() {}
+
+ Type type() const override { return AUDIO; }
+
+ Maybe<RefPtr<VideoSessionConduit>> AsVideoSessionConduit() override {
+ return Nothing();
+ }
+
+ /**
+ * Function to deliver externally captured audio sample for encoding and
+ * transport
+ * @param audioData [in]: Pointer to array containing a frame of audio
+ * @param lengthSamples [in]: Length of audio frame in samples in multiple of
+ * 10 milliseconds
+ * Ex: Frame length is 160, 320, 440 for 16, 32,
+ * 44 kHz sampling rates respectively.
+ * audioData[] is lengthSamples in size
+ * say, for 16kz sampling rate, audioData[]
+ * should contain 160 samples of 16-bits each
+ * for a 10m audio frame.
+ * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz ( 16000,
+ * 32000 ...)
+ * @param capture_delay [in]: Approx Delay from recording until it is
+ * delivered to VoiceEngine in milliseconds.
+ * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can
+ * be invoked. This ensures the inserted audio-samples can be transmitted by
+ * the conduit.
+ *
+ */
+ virtual MediaConduitErrorCode SendAudioFrame(const int16_t audioData[],
+ int32_t lengthSamples,
+ int32_t samplingFreqHz,
+ uint32_t channels,
+ int32_t capture_delay) = 0;
+
+ /**
+ * Function to grab a decoded audio-sample from the media engine for rendering
+   * / playout of length 10 milliseconds.
+ *
+ * @param speechData [in]: Pointer to a array to which a 10ms frame of audio
+ * will be copied
+ * @param samplingFreqHz [in]: Frequency of the sampling for playback in
+ * Hertz (16000, 32000,..)
+ * @param capture_delay [in]: Estimated Time between reading of the samples
+ * to rendering/playback
+ * @param numChannels [out]: Number of channels in the audio frame,
+ * guaranteed to be non-zero.
+ * @param lengthSamples [out]: Will contain length of the audio frame in
+ * samples at return.
+ * Ex: A value of 160 implies 160 samples each of
+ * 16-bits was copied into speechData
+ * NOTE: This function should be invoked every 10 milliseconds for the best
+   * performance
+ * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can
+ * be invoked. This ensures the decoded samples are ready for reading.
+ *
+ */
+ virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
+ int32_t samplingFreqHz,
+ int32_t capture_delay,
+ size_t& numChannels,
+ size_t& lengthSamples) = 0;
+
+ /**
+ * Checks if given sampling frequency is supported
+ * @param freq: Sampling rate (in Hz) to check
+ */
+ virtual bool IsSamplingFreqSupported(int freq) const = 0;
+
+ /**
+ * Function to configure send codec for the audio session
+   * @param sendCodecConfig: CodecConfiguration
+ * NOTE: See VideoConduit for more information
+ */
+ virtual MediaConduitErrorCode ConfigureSendMediaCodec(
+ const AudioCodecConfig* sendCodecConfig) = 0;
+
+ /**
+ * Function to configure list of receive codecs for the audio session
+   * @param recvCodecConfigList: list of codec configurations
+ * NOTE: See VideoConduit for more information
+ */
+ virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+ const std::vector<UniquePtr<AudioCodecConfig>>& recvCodecConfigList) = 0;
+
+ virtual bool SetDtmfPayloadType(unsigned char type, int freq) = 0;
+
+ virtual bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
+ int lengthMs, int attenuationDb) = 0;
+};
+} // namespace mozilla
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp b/dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp
new file mode 100644
index 0000000000..7824324325
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp
@@ -0,0 +1,67 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaDataCodec.h"
+
+#include "PDMFactory.h"
+#include "WebrtcGmpVideoCodec.h"
+#include "WebrtcMediaDataDecoderCodec.h"
+#include "WebrtcMediaDataEncoderCodec.h"
+#include "mozilla/StaticPrefs_media.h"
+
+namespace mozilla {
+
+/* static */
+WebrtcVideoEncoder* MediaDataCodec::CreateEncoder(
+    webrtc::VideoCodecType aCodecType) {
+  // Platform (MediaData) encoding is only compiled in for Apple-media and
+  // Android builds, and only wired up for H.264.
+#if defined(MOZ_APPLEMEDIA) || defined(MOZ_WIDGET_ANDROID)
+  if (aCodecType == webrtc::VideoCodecType::kVideoCodecH264) {
+    // Caller takes ownership of the returned encoder proxy.
+    return new WebrtcVideoEncoderProxy(new WebrtcMediaDataEncoder());
+  }
+#endif
+  // Other codec types (or other platforms): no platform encoder available.
+  return nullptr;
+}
+
+/* static */
+WebrtcVideoDecoder* MediaDataCodec::CreateDecoder(
+    webrtc::VideoCodecType aCodecType) {
+  // Resolve the codec type to its MIME string, bailing out early when the
+  // corresponding media-data-decoder pref is disabled or the codec type is
+  // not one we handle.
+  nsAutoCString codec;
+  switch (aCodecType) {
+    case webrtc::VideoCodecType::kVideoCodecVP8:
+      if (!StaticPrefs::media_navigator_mediadatadecoder_vpx_enabled()) {
+        return nullptr;
+      }
+      codec = "video/vp8";
+      break;
+    case webrtc::VideoCodecType::kVideoCodecVP9:
+      if (!StaticPrefs::media_navigator_mediadatadecoder_vpx_enabled()) {
+        return nullptr;
+      }
+      codec = "video/vp9";
+      break;
+    case webrtc::VideoCodecType::kVideoCodecH264:
+      if (!StaticPrefs::media_navigator_mediadatadecoder_h264_enabled()) {
+        return nullptr;
+      }
+      codec = "video/avc";
+      break;
+    default:
+      return nullptr;
+  }
+
+  // Only hand back a decoder if the platform decoder module chain actually
+  // supports this MIME type (second arg: no DecoderDoctorDiagnostics needed).
+  RefPtr<PDMFactory> pdm = new PDMFactory();
+  if (!pdm->SupportsMimeType(codec, nullptr)) {
+    return nullptr;
+  }
+
+  // Caller takes ownership of the returned decoder.
+  return new WebrtcMediaDataDecoder(codec);
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/MediaDataCodec.h b/dom/media/webrtc/libwebrtcglue/MediaDataCodec.h
new file mode 100644
index 0000000000..f9c59ba971
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/MediaDataCodec.h
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIA_DATA_CODEC_H_
+#define MEDIA_DATA_CODEC_H_
+
+#include "MediaConduitInterface.h"
+#include "webrtc/common_types.h"
+
+namespace mozilla {
+
+class WebrtcVideoDecoder;
+class WebrtcVideoEncoder;
+class MediaDataCodec {
+ public:
+  /**
+   * Create encoder object for codec type |aCodecType|. Return |nullptr| when
+   * failed.
+   * The caller takes ownership of the returned raw pointer.
+   */
+  static WebrtcVideoEncoder* CreateEncoder(webrtc::VideoCodecType aCodecType);
+
+  /**
+   * Create decoder object for codec type |aCodecType|. Return |nullptr| when
+   * failed.
+   * Also returns nullptr when the relevant media-data-decoder prefs are
+   * disabled or the PDM cannot handle the codec's MIME type.
+   * The caller takes ownership of the returned raw pointer.
+   */
+  static WebrtcVideoDecoder* CreateDecoder(webrtc::VideoCodecType aCodecType);
+};
+} // namespace mozilla
+
+#endif // MEDIA_DATA_CODEC_H_
diff --git a/dom/media/webrtc/libwebrtcglue/RtcpEventObserver.h b/dom/media/webrtc/libwebrtcglue/RtcpEventObserver.h
new file mode 100644
index 0000000000..d30feb8d3e
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RtcpEventObserver.h
@@ -0,0 +1,20 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef RTCP_EVENT_OBSERVER_
+#define RTCP_EVENT_OBSERVER_
+
+namespace mozilla {
+/**
+ * This provides an interface to allow for receiving notifications
+ * of rtcp bye packets and timeouts.
+ */
+class RtcpEventObserver {
+ public:
+  // Called when an RTCP BYE packet is received from the remote end.
+  virtual void OnRtcpBye() = 0;
+  // Called when RTCP reception has timed out.
+  virtual void OnRtcpTimeout() = 0;
+  // Virtual destructor: this is a polymorphic interface, so deleting an
+  // implementation through a RtcpEventObserver* without it would be
+  // undefined behavior.
+  virtual ~RtcpEventObserver() = default;
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/RtpPacketQueue.h b/dom/media/webrtc/libwebrtcglue/RtpPacketQueue.h
new file mode 100644
index 0000000000..b22142f18c
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RtpPacketQueue.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef RtpPacketQueue_h
+#define RtpPacketQueue_h
+
+#include "MediaConduitInterface.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+
+class RtpPacketQueue {
+ public:
+  // Drop all queued packets and deactivate the queue.
+  void Clear() {
+    mQueuedPackets.Clear();
+    mQueueActive = false;
+  }
+
+  // Deliver every queued packet to |conduit| in arrival order, then empty
+  // and deactivate the queue. Delivery errors are deliberately ignored so
+  // that one bad packet does not strand the rest of the queue.
+  void DequeueAll(MediaSessionConduit* conduit) {
+    // SSRC is set; insert queued packets
+    for (auto& packet : mQueuedPackets) {
+      if (conduit->DeliverPacket(packet->mData, packet->mLen) !=
+          kMediaConduitNoError) {
+        // Keep delivering and then clear the queue
+      }
+    }
+    mQueuedPackets.Clear();
+    mQueueActive = false;
+  }
+
+  // Copy |len| bytes at |data| into an owned packet and mark the queue
+  // active.
+  void Enqueue(const void* data, int len) {
+    UniquePtr<QueuedPacket> packet(new QueuedPacket(data, len));
+    mQueuedPackets.AppendElement(std::move(packet));
+    mQueueActive = true;
+  }
+
+  bool IsQueueActive() { return mQueueActive; }
+
+ private:
+  bool mQueueActive = false;
+  // An owned, heap-allocated copy of one RTP packet.
+  struct QueuedPacket {
+    const int mLen;
+    uint8_t* mData;
+
+    QueuedPacket(const void* aData, size_t aLen) : mLen(aLen) {
+      mData = new uint8_t[mLen];
+      memcpy(mData, aData, mLen);
+    }
+
+    // mData was allocated with new[], so it must be released with delete[];
+    // a plain `delete` here would be undefined behavior.
+    ~QueuedPacket() { delete[] mData; }
+
+    // Not copyable: a shallow copy would double-free mData.
+    QueuedPacket(const QueuedPacket&) = delete;
+    QueuedPacket& operator=(const QueuedPacket&) = delete;
+  };
+  nsTArray<UniquePtr<QueuedPacket>> mQueuedPackets;
+};
+
+} // namespace mozilla
+
+#endif // RtpPacketQueue_h
diff --git a/dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h b/dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h
new file mode 100644
index 0000000000..7cfe279c9b
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h
@@ -0,0 +1,20 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __RTPRTCP_CONFIG_H__
+#define __RTPRTCP_CONFIG_H__
+#include "webrtc/common_types.h"
+
+namespace mozilla {
+class RtpRtcpConfig {
+ public:
+  // A mode must always be supplied; there is no meaningful default.
+  RtpRtcpConfig() = delete;
+  explicit RtpRtcpConfig(const webrtc::RtcpMode aMode) : mRtcpMode(aMode) {}
+  // The immutable RTCP mode this configuration was constructed with.
+  webrtc::RtcpMode GetRtcpMode() const { return mRtcpMode; }
+
+ private:
+  webrtc::RtcpMode mRtcpMode;
+};
+} // namespace mozilla
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.cpp b/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.cpp
new file mode 100644
index 0000000000..f47e89a3c3
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.cpp
@@ -0,0 +1,197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "RtpSourceObserver.h"
+#include "nsThreadUtils.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace mozilla {
+
+using EntryType = dom::RTCRtpSourceEntryType;
+
+double RtpSourceObserver::RtpSourceEntry::ToLinearAudioLevel() const {
+  // The spec maps the floor value 127 directly to 0 (silence).
+  if (audioLevel == 127) {
+    return 0;
+  }
+  // Otherwise convert the dB-style level to a linear scale: 10^(-level/20).
+  return std::pow(10.0, -(static_cast<double>(audioLevel) / 20.0));
+}
+
+RtpSourceObserver::RtpSourceObserver(
+    const dom::RTCStatsTimestampMaker& aTimestampMaker)
+    // mMaxJitterWindow starts at 0 and widens as jitter is observed
+    // (see OnRtpPacket).
+    : mMaxJitterWindow(0), mTimestampMaker(aTimestampMaker) {}
+
+void RtpSourceObserver::OnRtpPacket(const webrtc::RTPHeader& aHeader,
+                                    const uint32_t aJitter) {
+  // Sample "now" on the calling thread so the recorded time reflects packet
+  // arrival, not when the main-thread runnable eventually executes.
+  DOMHighResTimeStamp jsNow = mTimestampMaker.GetNow();
+
+  RefPtr<Runnable> runnable = NS_NewRunnableFunction(
+      "RtpSourceObserver::OnRtpPacket",
+      // `self` keeps this observer alive until the runnable has run.
+      [this, self = RefPtr<RtpSourceObserver>(this), aHeader, aJitter,
+       jsNow]() {
+        mMaxJitterWindow =
+            std::max(mMaxJitterWindow, static_cast<int64_t>(aJitter) * 2);
+        // We are supposed to report the time at which this packet was played
+        // out, but we have only just received the packet. We try to guess when
+        // it will be played out.
+        // TODO: We need to move where we update these stats to MediaPipeline,
+        // where we send frames to the media track graph. In order to do that,
+        // we will need to have the ssrc (and csrc) for decoded frames, but we
+        // don't have that right now. Once we move this to the correct place, we
+        // will no longer need to keep anything but the most recent data.
+        const auto predictedPlayoutTime = jsNow + aJitter;
+        auto& hist =
+            mRtpSources[GetKey(aHeader.ssrc, EntryType::Synchronization)];
+        hist.Prune(jsNow);
+        // ssrc-audio-level handling
+        hist.Insert(jsNow, predictedPlayoutTime, aHeader.timestamp,
+                    aHeader.extension.hasAudioLevel,
+                    aHeader.extension.audioLevel);
+
+        // csrc-audio-level handling
+        const auto& list = aHeader.extension.csrcAudioLevels;
+        for (uint8_t i = 0; i < aHeader.numCSRCs; i++) {
+          const uint32_t& csrc = aHeader.arrOfCSRCs[i];
+          // Note: intentionally shadows the ssrc history above; this is the
+          // per-csrc history.
+          auto& hist = mRtpSources[GetKey(csrc, EntryType::Contributing)];
+          hist.Prune(jsNow);
+          // A csrc index beyond the audio-level list has no level data.
+          bool hasLevel = i < list.numAudioLevels;
+          uint8_t level = hasLevel ? list.arrOfAudioLevels[i] : 0;
+          hist.Insert(jsNow, predictedPlayoutTime, aHeader.timestamp, hasLevel,
+                      level);
+        }
+      });
+
+  if (NS_IsMainThread()) {
+    // Code-path for gtests; everything happens on main, and there's no event
+    // loop.
+    runnable->Run();
+  } else {
+    NS_DispatchToMainThread(runnable);
+  }
+}
+
+void RtpSourceObserver::GetRtpSources(
+    nsTArray<dom::RTCRtpSourceEntry>& outSources) const {
+  MOZ_ASSERT(NS_IsMainThread());
+  outSources.Clear();
+  // For each known source, report the newest entry whose predicted playout
+  // time is not in the future; sources with only future entries are omitted.
+  for (const auto& it : mRtpSources) {
+    const RtpSourceEntry* entry =
+        it.second.FindClosestNotAfter(mTimestampMaker.GetNow());
+    if (entry) {
+      dom::RTCRtpSourceEntry domEntry;
+      domEntry.mSource = GetSourceFromKey(it.first);
+      domEntry.mSourceType = GetTypeFromKey(it.first);
+      domEntry.mTimestamp = entry->predictedPlayoutTime;
+      domEntry.mRtpTimestamp = entry->rtpTimestamp;
+      // Audio level is optional; only constructed when the packet carried one.
+      if (entry->hasAudioLevel) {
+        domEntry.mAudioLevel.Construct(entry->ToLinearAudioLevel());
+      }
+      outSources.AppendElement(std::move(domEntry));
+    }
+  }
+}
+
+const RtpSourceObserver::RtpSourceEntry*
+RtpSourceObserver::RtpSourceHistory::FindClosestNotAfter(int64_t aTime) const {
+  MOZ_ASSERT(NS_IsMainThread());
+  // This method scans the history for the entry whose timestamp is closest to a
+  // given timestamp but no greater. Because it is scanning forward, it keeps
+  // track of the closest entry it has found so far in case it overshoots.
+  // There is no before map.begin() which complicates things, so found tracks
+  // if something was really found.
+  auto lastFound = mDetailedHistory.cbegin();
+  bool found = false;
+  for (const auto& it : mDetailedHistory) {
+    if (it.second.predictedPlayoutTime > aTime) {
+      break;
+    }
+    // lastFound can't start before begin, so the first inc must be skipped
+    if (found) {
+      lastFound++;
+    }
+    found = true;
+  }
+  if (found) {
+    // The returned pointer is owned by mDetailedHistory and is invalidated
+    // by any mutation of this history.
+    return &lastFound->second;
+  }
+  // Nothing in the detailed window; fall back to the most recently evicted
+  // entry if it is old enough to have been played out already.
+  if (HasEvicted() && aTime >= mLatestEviction.predictedPlayoutTime) {
+    return &mLatestEviction;
+  }
+  return nullptr;
+}
+
+void RtpSourceObserver::RtpSourceHistory::Prune(const int64_t aTimeNow) {
+  MOZ_ASSERT(NS_IsMainThread());
+  // Entries at or before aTimeT have aged out of the jitter window; anything
+  // older than aTimePrehistory has aged out of the 10-second history too.
+  const auto aTimeT = aTimeNow - mMaxJitterWindow;
+  const auto aTimePrehistory = aTimeNow - kHistoryWindow;
+  bool found = false;
+  // New lower bound of the map
+  auto lower = mDetailedHistory.begin();
+  // Walk until the first entry still inside the jitter window. `lower`
+  // trails the iteration by one step (the first pass skips the increment),
+  // so it ends on the last entry at or before aTimeT.
+  for (auto& it : mDetailedHistory) {
+    if (it.second.predictedPlayoutTime > aTimeT) {
+      found = true;
+      break;
+    }
+    if (found) {
+      lower++;
+    }
+    found = true;
+  }
+  if (found) {
+    if (lower->second.predictedPlayoutTime > aTimePrehistory) {
+      // Keep the newest evictee; it may still be reported for up to
+      // kHistoryWindow ms (e.g. while the stream is stopped or paused).
+      mLatestEviction = lower->second;
+      mHasEvictedEntry = true;
+    }
+    // Erase everything up to and including `lower`.
+    lower++;
+    mDetailedHistory.erase(mDetailedHistory.begin(), lower);
+  }
+  if (HasEvicted() &&
+      (mLatestEviction.predictedPlayoutTime + kHistoryWindow) < aTimeNow) {
+    // The evicted entry has aged out of the full history window.
+    mHasEvictedEntry = false;
+  }
+}
+
+void RtpSourceObserver::RtpSourceHistory::Insert(const int64_t aTimeNow,
+                                                 const int64_t aTimestamp,
+                                                 const uint32_t aRtpTimestamp,
+                                                 const bool aHasAudioLevel,
+                                                 const uint8_t aAudioLevel) {
+  MOZ_ASSERT(NS_IsMainThread());
+  // Locate (or fabricate) the slot for this playout time, then fill it in;
+  // the private Insert() may return a discard slot for too-old entries.
+  Insert(aTimeNow, aTimestamp)
+      .Update(aTimestamp, aRtpTimestamp, aHasAudioLevel, aAudioLevel);
+}
+
+RtpSourceObserver::RtpSourceEntry& RtpSourceObserver::RtpSourceHistory::Insert(
+    const int64_t aTimeNow, const int64_t aTimestamp) {
+  MOZ_ASSERT(NS_IsMainThread());
+  // Time T is the oldest time inside the jitter window (now - jitter)
+  // Time J is the newest time inside the jitter window (now + jitter)
+  // Time x is the jitter adjusted entry time
+  // Time Z is the time of the long term storage element
+  // Times A, B, C are times of entries in the jitter window buffer
+  // x-axis: time
+  //   x or x          T   J
+  // |------Z-----|ABC| -> |------Z-----|ABC|
+  // Too old to keep at all, or older than the current evictee: discard into
+  // the throwaway slot.
+  if ((aTimestamp + kHistoryWindow) < aTimeNow ||
+      aTimestamp < mLatestEviction.predictedPlayoutTime) {
+    return mPrehistory;  // A.K.A. /dev/null
+  }
+  // A timestamp this far in the future implies at least that much jitter;
+  // widen the window accordingly.
+  mMaxJitterWindow = std::max(mMaxJitterWindow, (aTimestamp - aTimeNow) * 2);
+  const int64_t aTimeT = aTimeNow - mMaxJitterWindow;
+  //           x       T   J
+  // |------Z-----|ABC| -> |--------x---|ABC|
+  // Within the 10s history but older than the jitter window: it becomes the
+  // single long-term ("evicted") entry.
+  if (aTimestamp < aTimeT) {
+    mHasEvictedEntry = true;
+    return mLatestEviction;
+  }
+  //                 T  X J
+  // |------Z-----|AB-C| -> |--------x---|ABXC|
+  // Inside the jitter window: find or create the map slot keyed by time.
+  return mDetailedHistory[aTimestamp];
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.h b/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.h
new file mode 100644
index 0000000000..4fbf2fafe9
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RtpSourceObserver.h
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AUDIOLEVELOBSERVER_H
+#define AUDIOLEVELOBSERVER_H
+
+#include <vector>
+#include <map>
+
+#include "nsISupportsImpl.h"
+#include "mozilla/dom/RTCRtpSourcesBinding.h"
+#include "webrtc/common_types.h"
+#include "jsapi/RTCStatsReport.h"
+
+// Unit Test class
+namespace test {
+class RtpSourcesTest;
+}
+
+namespace mozilla {
+
+/* Observes reception of RTP packets and tabulates data about the
+ * most recent arrival times by source (csrc or ssrc) and audio level information
+ * * csrc-audio-level RTP header extension
+ * * ssrc-audio-level RTP header extension
+ */
+class RtpSourceObserver {
+ public:
+  explicit RtpSourceObserver(
+      const dom::RTCStatsTimestampMaker& aTimestampMaker);
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RtpSourceObserver)
+
+  // Tabulates arrival-time and audio-level data from an incoming packet's
+  // header; the bookkeeping itself runs on the main thread. aJitter is added
+  // to the current wallclock time (ms) to predict playout time, so it is
+  // presumably in milliseconds -- confirm against the caller.
+  void OnRtpPacket(const webrtc::RTPHeader& aHeader, const uint32_t aJitter);
+
+  /*
+   * Get the most recent 10 second window of CSRC and SSRC sources.
+   * @param outSources will be populated with source entries
+   * Note: this takes jitter into account when calculating the window so
+   * the window is actually [time - jitter - 10 sec .. time - jitter]
+   */
+  void GetRtpSources(nsTArray<dom::RTCRtpSourceEntry>& outSources) const;
+
+ private:
+  virtual ~RtpSourceObserver() = default;
+
+  // One observation for a single source (ssrc or csrc).
+  struct RtpSourceEntry {
+    RtpSourceEntry() = default;
+    void Update(const int64_t aTimestamp, const uint32_t aRtpTimestamp,
+                const bool aHasAudioLevel, const uint8_t aAudioLevel) {
+      predictedPlayoutTime = aTimestamp;
+      rtpTimestamp = aRtpTimestamp;
+      // Audio level range is 0 - 127 inclusive; a set high bit marks the
+      // level as invalid.
+      hasAudioLevel = aHasAudioLevel && !(aAudioLevel & 0x80);
+      audioLevel = aAudioLevel;
+    }
+    // Sets the audio level nullable according to the linear scale
+    // outlined in the webrtc-pc spec.
+    double ToLinearAudioLevel() const;
+    // Time this information was received + jitter
+    int64_t predictedPlayoutTime = 0;
+    // The original RTP timestamp in the received packet
+    uint32_t rtpTimestamp = 0;
+    bool hasAudioLevel = false;
+    uint8_t audioLevel = 0;
+  };
+
+  /* Why this is needed:
+   * We are supposed to only report source stats for packets that have already
+   * been rendered. Unfortunately, we only know when these packets are
+   * _received_ right now. So, we need to make a guess at when each packet will
+   * be rendered, and hide its statistics until the clock reaches that estimate.
+   */
+  /* Maintains a history of packets for reporting with getContributingSources
+   * and getSynchronizationSources. It is expected that entries will not always
+   * be observed in chronological order, and that the correct entry for a query
+   * not be the most recently added item. Many times the query time is expected
+   * to fall within [now - Jitter window .. now + Jitter Window]. A full history
+   * is kept within the jitter window, and only the most recent to fall out of
+   * the window is stored for the full 10 seconds. This value is only likely to
+   * be returned when the stream is stopped or paused.
+   * x-axis: time (non-linear scale)
+   * let J = now + Jitter Window
+   * let T = now - Jitter Window
+   * now - 10 seconds    T      now      J
+   * |-----------------Z--------------------------|-AB--CDEFG-HI--J|
+   *  ^Latest evicted        ^Jitter buffer entries
+   * Ex Query Time  ^Q0        ^Q1    ^Q2     ^Q3     ^Q4
+   * Query result:
+   * Q0: Nothing
+   * Q1: Z
+   * Q2: B
+   * Q3: E
+   * Q4: I
+   */
+  class RtpSourceHistory {
+   public:
+    RtpSourceHistory() = default;
+    // Finds the closest entry to a time, and passes that value to a closure
+    // Note: the pointer is invalidated by any operation on the history
+    // Note: the pointer is owned by the RtpSourceHistory
+    const RtpSourceEntry* FindClosestNotAfter(int64_t aTime) const;
+    // Inserts data into the history, may silently drop data if it is too old
+    void Insert(const int64_t aTimeNow, const int64_t aTimestamp,
+                const uint32_t aRtpTimestamp, const bool aHasAudioLevel,
+                const uint8_t aAudioLevel);
+    // Removes aged out from the jitter window
+    void Prune(const int64_t aTimeNow);
+    // Set Source
+    // NOTE(review): declared here but no definition is visible in
+    // RtpSourceObserver.cpp -- confirm it is implemented where used.
+    void SetSource(uint32_t aSource, dom::RTCRtpSourceEntryType aType);
+
+   private:
+    // Finds a place to insert data and returns a reference to it
+    RtpSourceObserver::RtpSourceEntry& Insert(const int64_t aTimeNow,
+                                              const int64_t aTimestamp);
+    // Is the history buffer empty?
+    bool Empty() const { return !mDetailedHistory.size(); }
+    // Is there an evicted entry
+    bool HasEvicted() const { return mHasEvictedEntry; }
+
+    // Minimum amount of time (ms) to store a complete packet history
+    constexpr static int64_t kMinJitterWindow = 1000;
+    // Size of the history window (ms)
+    constexpr static int64_t kHistoryWindow = 10000;
+    // This is 2 x the maximum observed jitter or the min which ever is higher
+    int64_t mMaxJitterWindow = kMinJitterWindow;
+    // The least old entry to be kicked from the buffer.
+    RtpSourceEntry mLatestEviction;
+    // Is there an evicted entry?
+    bool mHasEvictedEntry = false;
+    std::map<int64_t, RtpSourceEntry> mDetailedHistory;
+    // Entry before history; discard slot for entries too old to keep
+    RtpSourceEntry mPrehistory;
+    // Unit test
+    friend test::RtpSourcesTest;
+  };
+
+  // Do not copy or assign
+  RtpSourceObserver(const RtpSourceObserver&) = delete;
+  RtpSourceObserver& operator=(RtpSourceObserver const&) = delete;
+  // Returns a key for a source and a type; bit 32 distinguishes
+  // synchronization (ssrc) keys from contributing (csrc) keys.
+  static uint64_t GetKey(const uint32_t id,
+                         const dom::RTCRtpSourceEntryType aType) {
+    return (aType == dom::RTCRtpSourceEntryType::Synchronization)
+               ? (static_cast<uint64_t>(id) |
+                  (static_cast<uint64_t>(0x1) << 32))
+               : (static_cast<uint64_t>(id));
+  }
+  // Returns the source from a key
+  static uint32_t GetSourceFromKey(const uint64_t aKey) {
+    return static_cast<uint32_t>(aKey & ~(static_cast<uint64_t>(0x1) << 32));
+  }
+  // Returns the type from a key
+  static dom::RTCRtpSourceEntryType GetTypeFromKey(const uint64_t aKey) {
+    return (aKey & (static_cast<uint64_t>(0x1) << 32))
+               ? dom::RTCRtpSourceEntryType::Synchronization
+               : dom::RTCRtpSourceEntryType::Contributing;
+  }
+  // Maps a source key (see GetKey) to its RtpSourceHistory
+  std::map<uint64_t, RtpSourceHistory> mRtpSources;
+  // 2 x the largest observed jitter (see OnRtpPacket)
+  int64_t mMaxJitterWindow;
+  dom::RTCStatsTimestampMaker mTimestampMaker;
+
+  // Unit test
+  friend test::RtpSourcesTest;
+
+  // Testing only
+  // Inserts additional csrc audio levels for mochitests
+  friend void InsertAudioLevelForContributingSource(
+      RtpSourceObserver& observer, const uint32_t aCsrcSource,
+      const int64_t aTimestamp, const uint32_t aRtpTimestamp,
+      const bool aHasAudioLevel, const uint8_t aAudioLevel);
+};
+} // namespace mozilla
+#undef NG
+#endif // AUDIOLEVELOBSERVER_H
diff --git a/dom/media/webrtc/libwebrtcglue/RunningStat.h b/dom/media/webrtc/libwebrtcglue/RunningStat.h
new file mode 100644
index 0000000000..7a0e88f193
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/RunningStat.h
@@ -0,0 +1,48 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+/* Adapted from "Accurately computing running variance - John D. Cook"
+ http://www.johndcook.com/standard_deviation.html */
+
+#ifndef RUNNING_STAT_H_
+#define RUNNING_STAT_H_
+#include <math.h>
+
+namespace mozilla {
+
+class RunningStat {
+ public:
+  RunningStat() : mN(0), mOldM(0.0), mNewM(0.0), mOldS(0.0), mNewS(0.0) {}
+
+  // Forget all samples; subsequent Push() calls start a fresh series.
+  void Clear() { mN = 0; }
+
+  // Accumulate one sample using Welford's online algorithm (Knuth TAOCP
+  // vol 2, 3rd edition, page 232), which is numerically stable.
+  void Push(double x) {
+    ++mN;
+
+    if (mN == 1) {
+      // First sample: the mean is the sample itself and the variance
+      // accumulator is zero.
+      mOldM = x;
+      mNewM = x;
+      mOldS = 0.0;
+      return;
+    }
+
+    const double delta = x - mOldM;
+    mNewM = mOldM + delta / mN;
+    mNewS = mOldS + delta * (x - mNewM);
+
+    // Carry the updated state into the next iteration.
+    mOldM = mNewM;
+    mOldS = mNewS;
+  }
+
+  // Number of samples pushed since the last Clear().
+  int NumDataValues() const { return mN; }
+
+  // Running mean, or 0.0 when no samples have been pushed.
+  double Mean() const { return (mN > 0) ? mNewM : 0.0; }
+
+  // Unbiased sample variance, or 0.0 with fewer than two samples.
+  double Variance() const { return (mN > 1) ? mNewS / (mN - 1) : 0.0; }
+
+  double StandardDeviation() const { return sqrt(Variance()); }
+
+ private:
+  int mN;
+  double mOldM, mNewM, mOldS, mNewS;
+};
+} // namespace mozilla
+#endif // RUNNING_STAT_H_
diff --git a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
new file mode 100644
index 0000000000..18bcca75c5
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
@@ -0,0 +1,2524 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "common/browser_logging/CSFLog.h"
+#include "nspr.h"
+#include "plstr.h"
+
+#include "AudioConduit.h"
+#include "RtpRtcpConfig.h"
+#include "VideoConduit.h"
+#include "VideoStreamFactory.h"
+#include "common/YuvStamper.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "mozilla/TemplateLib.h"
+#include "mozilla/media/MediaUtils.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/UniquePtr.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIPrefBranch.h"
+#include "nsIGfxInfo.h"
+#include "nsIPrefService.h"
+#include "nsServiceManagerUtils.h"
+
+#include "nsThreadUtils.h"
+
+#include "pk11pub.h"
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/engine/encoder_simulcast_proxy.h"
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/media/base/mediaconstants.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
+#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
+
+#include "mozilla/Unused.h"
+
+#if defined(MOZ_WIDGET_ANDROID)
+# include "VideoEngine.h"
+#endif
+
+#include "GmpVideoCodec.h"
+
+#ifdef MOZ_WEBRTC_MEDIACODEC
+# include "MediaCodecVideoCodec.h"
+#endif
+#include "WebrtcGmpVideoCodec.h"
+
+#include "MediaDataCodec.h"
+
+// for ntohs
+#ifdef _MSC_VER
+# include "Winsock2.h"
+#else
+# include <netinet/in.h>
+#endif
+
+#include <algorithm>
+#include <math.h>
+#include <cinttypes>
+
+#define DEFAULT_VIDEO_MAX_FRAMERATE 30
+#define INVALID_RTP_PAYLOAD 255 // valid payload types are 0 to 127
+
+namespace mozilla {
+
+static const char* vcLogTag = "WebrtcVideoSessionConduit";
+#ifdef LOGTAG
+# undef LOGTAG
+#endif
+#define LOGTAG vcLogTag
+
+using LocalDirection = MediaSessionConduitLocalDirection;
+
+// Sentinel payload type meaning "not configured".
+static const int kNullPayloadType = -1;
+static const char* kUlpFecPayloadName = "ulpfec";
+static const char* kRedPayloadName = "red";
+
+// The number of frame buffers WebrtcVideoConduit may create before returning
+// errors.
+// Sometimes these are released synchronously but they can be forwarded all the
+// way to the encoder for asynchronous encoding. With a pool size of 5,
+// we allow 1 buffer for the current conversion, and 4 buffers to be queued at
+// the encoder.
+#define SCALER_BUFFER_POOL_SIZE 5
+
+// The pixel alignment to use for the highest resolution layer when simulcast
+// is active and one or more layers are being scaled.
+#define SIMULCAST_RESOLUTION_ALIGNMENT 16
+
+// 32 bytes is what WebRTC CodecInst expects
+const unsigned int WebrtcVideoConduit::CODEC_PLNAME_SIZE = 32;
+
+template <typename T>
+T MinIgnoreZero(const T& a, const T& b) {
+  // Returns the smaller of a and b, except that a zero value means
+  // "unconstrained" and defers to the other argument (0, 0 -> 0).
+  return std::min(a ? a : b, b ? b : a);
+}
+
+// Shrinks *width x *height by the smallest exact common divisor d such that
+// the resulting area fits within max_fs (frame-size budget; presumably in
+// the same pixel units as width*height -- confirm at call sites). Zeroes
+// both dimensions when no divisor below min(width, height) satisfies the
+// budget, signalling failure.
+template <class t>
+static void ConstrainPreservingAspectRatioExact(uint32_t max_fs, t* width,
+                                                t* height) {
+  // We could try to pick a better starting divisor, but it won't make any real
+  // performance difference.
+  for (size_t d = 1; d < std::min(*width, *height); ++d) {
+    if ((*width % d) || (*height % d)) {
+      continue;  // Not divisible
+    }
+
+    if (((*width) * (*height)) / (d * d) <= max_fs) {
+      *width /= d;
+      *height /= d;
+      return;
+    }
+  }
+
+  // No exact divisor keeps the area within budget.
+  *width = 0;
+  *height = 0;
+}
+
+// Scales *width x *height down (never up) so it fits within
+// max_width x max_height, approximately preserving the aspect ratio with
+// integer arithmetic.
+template <class t>
+static void ConstrainPreservingAspectRatio(uint16_t max_width,
+                                           uint16_t max_height, t* width,
+                                           t* height) {
+  // Already within bounds; nothing to do.
+  if (((*width) <= max_width) && ((*height) <= max_height)) {
+    return;
+  }
+
+  // Scale along whichever dimension is proportionally the most oversized.
+  if ((*width) * max_height > max_width * (*height)) {
+    (*height) = max_width * (*height) / (*width);
+    (*width) = max_width;
+  } else {
+    (*width) = max_height * (*width) / (*height);
+    (*height) = max_height;
+  }
+}
+
+/**
+ * Function to select and change the encoding frame rate based on incoming frame
+ * rate and max-mbps setting.
+ * @param codecConfig negotiated codec (may be null); supplies the max-mbps
+ *                    and max-fps constraints
+ * @param old_framerate current framerate, returned unchanged when no
+ *                      constraint applies
+ * @param sending_width/sending_height current send resolution, used to
+ *        compute the frame size in 16x16 macroblocks
+ * @result new framerate
+ */
+static unsigned int SelectSendFrameRate(const VideoCodecConfig* codecConfig,
+                                        unsigned int old_framerate,
+                                        unsigned short sending_width,
+                                        unsigned short sending_height) {
+  unsigned int new_framerate = old_framerate;
+
+  // Limit frame rate based on max-mbps
+  if (codecConfig && codecConfig->mEncodingConstraints.maxMbps) {
+    unsigned int cur_fs, mb_width, mb_height;
+
+    // Round dimensions up to whole 16-pixel macroblocks.
+    mb_width = (sending_width + 15) >> 4;
+    mb_height = (sending_height + 15) >> 4;
+
+    cur_fs = mb_width * mb_height;
+    if (cur_fs > 0) {  // in case no frames have been sent
+      // Highest frame rate that stays within the macroblocks-per-second
+      // budget, additionally capped by max-fps when that is set (non-zero).
+      new_framerate = codecConfig->mEncodingConstraints.maxMbps / cur_fs;
+
+      new_framerate = MinIgnoreZero(new_framerate,
+                                    codecConfig->mEncodingConstraints.maxFps);
+    }
+  }
+  return new_framerate;
+}
+
+/**
+ * Perform validation on the codecConfig to be applied: non-null config and a
+ * payload name that fits within CODEC_PLNAME_SIZE.
+ */
+static MediaConduitErrorCode ValidateCodecConfig(
+    const VideoCodecConfig* codecInfo) {
+  if (!codecInfo) {
+    CSFLogError(LOGTAG, "%s Null CodecConfig ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Name must be non-empty and shorter than CODEC_PLNAME_SIZE (leaving room
+  // for a terminator).
+  if ((codecInfo->mName.empty()) ||
+      (codecInfo->mName.length() >= WebrtcVideoConduit::CODEC_PLNAME_SIZE)) {
+    CSFLogError(LOGTAG, "%s Invalid Payload Name Length ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  return kMediaConduitNoError;
+}
+
+void WebrtcVideoConduit::CallStatistics::Update(
+    const webrtc::Call::Stats& aStats) {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Cache the full stats snapshot for Stats()/RttSec() accessors.
+  mStats = Some(aStats);
+  const auto rtt = aStats.rtt_ms;
+  if (rtt > static_cast<decltype(aStats.rtt_ms)>(INT32_MAX)) {
+    // Bogus RTT (larger than an RTCP RTT can be): clear the cached value.
+    // The error is only logged in debug builds.
+#ifdef DEBUG
+    CSFLogError(LOGTAG,
+                "%s for VideoConduit:%p RTT is larger than the"
+                " maximum size of an RTCP RTT.",
+                __FUNCTION__, this);
+#endif
+    mRttSec = Nothing();
+  } else {
+    if (mRttSec && rtt < 0) {
+      // Negative RTT after previously having a valid one: log and drop the
+      // cached value.
+      CSFLogError(LOGTAG,
+                  "%s for VideoConduit:%p RTT returned an error after "
+                  " previously succeeding.",
+                  __FUNCTION__, this);
+      mRttSec = Nothing();
+    }
+    if (rtt >= 0) {
+      // Store the RTT converted from milliseconds to seconds.
+      mRttSec = Some(static_cast<DOMHighResTimeStamp>(rtt) / 1000.0);
+    }
+  }
+}
+
+Maybe<DOMHighResTimeStamp> WebrtcVideoConduit::CallStatistics::RttSec() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Round-trip time in seconds, or Nothing() when no valid RTT is known.
+  return mRttSec;
+}
+
+Maybe<mozilla::dom::RTCBandwidthEstimationInternal>
+WebrtcVideoConduit::CallStatistics::Stats() const {
+  ASSERT_ON_THREAD(mStatsThread);
+  if (mStats.isNothing()) {
+    return Nothing();
+  }
+  const auto& stats = mStats.value();
+  dom::RTCBandwidthEstimationInternal bw;
+  // The webrtc fields are named *_bps (bits); the division by 8 converts to
+  // bytes -- NOTE(review): confirm the intended unit of the dictionary's
+  // *Bps members.
+  bw.mSendBandwidthBps.Construct(stats.send_bandwidth_bps / 8);
+  bw.mMaxPaddingBps.Construct(stats.max_padding_bitrate_bps / 8);
+  bw.mReceiveBandwidthBps.Construct(stats.recv_bandwidth_bps / 8);
+  bw.mPacerDelayMs.Construct(stats.pacer_delay_ms);
+  // A negative rtt_ms signals "unknown"; leave the member unset in that case.
+  if (stats.rtt_ms >= 0) {
+    bw.mRttMs.Construct(stats.rtt_ms);
+  }
+  return Some(std::move(bw));
+}
+
+void WebrtcVideoConduit::StreamStatistics::Update(
+    const double aFrameRate, const double aBitrate,
+    const webrtc::RtcpPacketTypeCounter& aPacketCounts) {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Feed the running mean/stddev trackers and keep the most recent
+  // packet-type counter snapshot.
+  mFrameRate.Push(aFrameRate);
+  mBitRate.Push(aBitrate);
+  mPacketCounts = aPacketCounts;
+}
+
+bool WebrtcVideoConduit::StreamStatistics::GetVideoStreamStats(
+    double& aOutFrMean, double& aOutFrStdDev, double& aOutBrMean,
+    double& aOutBrStdDev) const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Only report once both trackers have at least one sample; otherwise the
+  // out-params are left untouched and false is returned.
+  if (mFrameRate.NumDataValues() && mBitRate.NumDataValues()) {
+    aOutFrMean = mFrameRate.Mean();
+    aOutFrStdDev = mFrameRate.StandardDeviation();
+    aOutBrMean = mBitRate.Mean();
+    aOutBrStdDev = mBitRate.StandardDeviation();
+    return true;
+  }
+  return false;
+}
+
+void WebrtcVideoConduit::StreamStatistics::RecordTelemetry() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // No-op unless the stream is active.
+  if (!mActive) {
+    return;
+  }
+  using namespace Telemetry;
+  // Bitrate histograms are in kbps; the framerate stddev is scaled by 10
+  // (see the 10X histogram names) to retain a decimal digit of precision.
+  Accumulate(IsSend() ? WEBRTC_VIDEO_ENCODER_BITRATE_AVG_PER_CALL_KBPS
+                      : WEBRTC_VIDEO_DECODER_BITRATE_AVG_PER_CALL_KBPS,
+             mBitRate.Mean() / 1000);
+  Accumulate(IsSend() ? WEBRTC_VIDEO_ENCODER_BITRATE_STD_DEV_PER_CALL_KBPS
+                      : WEBRTC_VIDEO_DECODER_BITRATE_STD_DEV_PER_CALL_KBPS,
+             mBitRate.StandardDeviation() / 1000);
+  Accumulate(IsSend() ? WEBRTC_VIDEO_ENCODER_FRAMERATE_AVG_PER_CALL
+                      : WEBRTC_VIDEO_DECODER_FRAMERATE_AVG_PER_CALL,
+             mFrameRate.Mean());
+  Accumulate(IsSend() ? WEBRTC_VIDEO_ENCODER_FRAMERATE_10X_STD_DEV_PER_CALL
+                      : WEBRTC_VIDEO_DECODER_FRAMERATE_10X_STD_DEV_PER_CALL,
+             mFrameRate.StandardDeviation() * 10);
+}
+
+const webrtc::RtcpPacketTypeCounter&
+WebrtcVideoConduit::StreamStatistics::PacketCounts() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Most recent packet-type counter snapshot (see Update()).
+  return mPacketCounts;
+}
+
+bool WebrtcVideoConduit::StreamStatistics::Active() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Whether this stream is currently marked active (see SetActive()).
+  return mActive;
+}
+
+void WebrtcVideoConduit::StreamStatistics::SetActive(bool aActive) {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Gates RecordTelemetry(); inactive streams are not reported.
+  mActive = aActive;
+}
+
+uint32_t WebrtcVideoConduit::SendStreamStatistics::DroppedFrames() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Frames delivered to the encoder but never encoded (see Update()).
+  return mDroppedFrames;
+}
+
+uint32_t WebrtcVideoConduit::SendStreamStatistics::FramesEncoded() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Total key + delta frames encoded (see Update()).
+  return mFramesEncoded;
+}
+
+void WebrtcVideoConduit::SendStreamStatistics::FrameDeliveredToEncoder() {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // Counts frames handed to the encoder; used to derive dropped frames.
+  ++mFramesDeliveredToEncoder;
+}
+
+bool WebrtcVideoConduit::SendStreamStatistics::SsrcFound() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // True when the configured ssrc was present in the last stats update.
+  return mSsrcFound;
+}
+
+uint32_t WebrtcVideoConduit::SendStreamStatistics::JitterMs() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // RTCP-reported jitter, already converted to milliseconds in Update().
+  return mJitterMs;
+}
+
+uint32_t WebrtcVideoConduit::SendStreamStatistics::PacketsLost() const {
+  ASSERT_ON_THREAD(mStatsThread);
+
+  // RTCP-reported packet loss for the configured ssrc (see Update()).
+  return mPacketsLost;
+}
+
// Media payload bytes the remote received from us (RTCP RR view).  STS only.
uint64_t WebrtcVideoConduit::SendStreamStatistics::BytesReceived() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mBytesReceived;
}
+
// Packets the remote received from us (transmitted counter).  STS only.
uint32_t WebrtcVideoConduit::SendStreamStatistics::PacketsReceived() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mPacketsReceived;
}
+
// Sum of encoded-frame QP values, if the encoder reported one.  STS only.
Maybe<uint64_t> WebrtcVideoConduit::SendStreamStatistics::QpSum() const {
  ASSERT_ON_THREAD(mStatsThread);
  return mQpSum;
}
+
// Ingest a fresh webrtc::VideoSendStream::Stats snapshot for the substream
// matching aConfiguredSsrc, refreshing the running bitrate/framerate stats
// and the cached counters served by the getters above.  STS thread only.
void WebrtcVideoConduit::SendStreamStatistics::Update(
    const webrtc::VideoSendStream::Stats& aStats, uint32_t aConfiguredSsrc) {
  ASSERT_ON_THREAD(mStatsThread);

  // Assume the SSRC is absent until we actually find it below.
  mSsrcFound = false;

  if (aStats.substreams.empty()) {
    CSFLogVerbose(LOGTAG, "%s stats.substreams is empty", __FUNCTION__);
    return;
  }

  auto ind = aStats.substreams.find(aConfiguredSsrc);
  if (ind == aStats.substreams.end()) {
    CSFLogError(LOGTAG,
                "%s for VideoConduit:%p ssrc not found in SendStream stats.",
                __FUNCTION__, this);
    return;
  }

  mSsrcFound = true;

  StreamStatistics::Update(aStats.encode_frame_rate, aStats.media_bitrate_bps,
                           ind->second.rtcp_packet_type_counts);
  if (aStats.qp_sum) {
    mQpSum = Some(aStats.qp_sum.value());
  } else {
    mQpSum = Nothing();
  }

  const webrtc::FrameCounts& fc = ind->second.frame_counts;
  mFramesEncoded = fc.key_frames + fc.delta_frames;
  CSFLogVerbose(
      LOGTAG, "%s: framerate: %u, bitrate: %u, dropped frames delta: %u",
      __FUNCTION__, aStats.encode_frame_rate, aStats.media_bitrate_bps,
      mFramesDeliveredToEncoder - mFramesEncoded - mDroppedFrames);
  // A frame delivered to the encoder but never encoded counts as dropped.
  mDroppedFrames = mFramesDeliveredToEncoder - mFramesEncoded;
  // Convert jitter from RTP clock ticks to ms using the video payload clock.
  mJitterMs = ind->second.rtcp_stats.jitter /
              (webrtc::kVideoPayloadTypeFrequency / 1000);
  mPacketsLost = ind->second.rtcp_stats.packets_lost;
  mBytesReceived = ind->second.rtp_stats.MediaPayloadBytes();
  mPacketsReceived = ind->second.rtp_stats.transmitted.packets;
}
+
// Octets the remote sender reported sending (RTCP SR).  STS thread only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::BytesSent() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mBytesSent;
}
+
// Packets discarded by the receive pipeline as of the last Update().
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::DiscardedPackets() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mDiscardedPackets;
}
+
// Total decoded frames (key + delta) as of the last Update().  STS only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::FramesDecoded() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mFramesDecoded;
}
+
// Receive-side jitter, converted to milliseconds in Update().  STS only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::JitterMs() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mJitterMs;
}
+
// Cumulative packets lost on the receive stream.  STS thread only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::PacketsLost() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mPacketsLost;
}
+
// Packets the remote sender reported sending (RTCP SR).  STS thread only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::PacketsSent() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mPacketsSent;
}
+
// SSRC of the receive stream as of the last Update().  STS thread only.
uint32_t WebrtcVideoConduit::ReceiveStreamStatistics::Ssrc() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mSsrc;
}
+
// NTP timestamp (in ms) of the remote's last sender report.  STS thread only.
DOMHighResTimeStamp
WebrtcVideoConduit::ReceiveStreamStatistics::RemoteTimestamp() const {
  ASSERT_ON_THREAD(mStatsThread);

  return mRemoteTimestamp;
}
+
// Ingest a fresh webrtc::VideoReceiveStream::Stats snapshot, refreshing the
// running bitrate/framerate stats and the cached counters served by the
// getters above.  STS thread only.
void WebrtcVideoConduit::ReceiveStreamStatistics::Update(
    const webrtc::VideoReceiveStream::Stats& aStats) {
  ASSERT_ON_THREAD(mStatsThread);

  CSFLogVerbose(LOGTAG, "%s ", __FUNCTION__);
  StreamStatistics::Update(aStats.decode_frame_rate, aStats.total_bitrate_bps,
                           aStats.rtcp_packet_type_counts);
  mBytesSent = aStats.rtcp_sender_octets_sent;
  mDiscardedPackets = aStats.discarded_packets;
  mFramesDecoded =
      aStats.frame_counts.key_frames + aStats.frame_counts.delta_frames;
  // Convert jitter from RTP clock ticks to ms using the video payload clock.
  mJitterMs =
      aStats.rtcp_stats.jitter / (webrtc::kVideoPayloadTypeFrequency / 1000);
  mPacketsLost = aStats.rtcp_stats.packets_lost;
  mPacketsSent = aStats.rtcp_sender_packets_sent;
  mRemoteTimestamp = aStats.rtcp_sender_ntp_timestamp.ToMs();
  mSsrc = aStats.ssrc;
}
+
+/**
+ * Factory Method for VideoConduit
+ */
+RefPtr<VideoSessionConduit> VideoSessionConduit::Create(
+ RefPtr<WebRtcCallWrapper> aCall,
+ nsCOMPtr<nsISerialEventTarget> aStsThread) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aCall, "missing required parameter: aCall");
+ CSFLogVerbose(LOGTAG, "%s", __FUNCTION__);
+
+ if (!aCall) {
+ return nullptr;
+ }
+
+ auto obj = MakeRefPtr<WebrtcVideoConduit>(aCall, aStsThread);
+ if (obj->Init() != kMediaConduitNoError) {
+ CSFLogError(LOGTAG, "%s VideoConduit Init Failed ", __FUNCTION__);
+ return nullptr;
+ }
+ CSFLogVerbose(LOGTAG, "%s Successfully created VideoConduit ", __FUNCTION__);
+ return obj.forget();
+}
+
+WebrtcVideoConduit::WebrtcVideoConduit(
+ RefPtr<WebRtcCallWrapper> aCall, nsCOMPtr<nsISerialEventTarget> aStsThread)
+ : mTransportMonitor("WebrtcVideoConduit"),
+ mStsThread(aStsThread),
+ mMutex("WebrtcVideoConduit::mMutex"),
+ mVideoAdapter(MakeUnique<cricket::VideoAdapter>()),
+ mBufferPool(false, SCALER_BUFFER_POOL_SIZE),
+ mEngineTransmitting(false),
+ mEngineReceiving(false),
+ mSendStreamStats(aStsThread),
+ mRecvStreamStats(aStsThread),
+ mCallStats(aStsThread),
+ mSendingFramerate(DEFAULT_VIDEO_MAX_FRAMERATE),
+ mActiveCodecMode(webrtc::kRealtimeVideo),
+ mCodecMode(webrtc::kRealtimeVideo),
+ mCall(aCall),
+ mSendStreamConfig(
+ this) // 'this' is stored but not dereferenced in the constructor.
+ ,
+ mRecvStreamConfig(
+ this) // 'this' is stored but not dereferenced in the constructor.
+ ,
+ mRecvSSRC(0),
+ mRemoteSSRC(0),
+ mVideoStatsTimer(NS_NewTimer()),
+ mRtpSourceObserver(new RtpSourceObserver(mCall->GetTimestampMaker())) {
+ mCall->RegisterConduit(this);
+ mRecvStreamConfig.renderer = this;
+ mRecvStreamConfig.rtcp_event_observer = this;
+}
+
// Destructor: unregisters from the shared Call.  Streams must already have
// been torn down via DeleteStreams().  Main thread only.
WebrtcVideoConduit::~WebrtcVideoConduit() {
  MOZ_ASSERT(NS_IsMainThread());

  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
  mCall->UnregisterConduit(this);

  // NOTE(review): this comment mentions AudioConduit but this is the video
  // conduit — presumably copied from AudioConduit.cpp; confirm intent.
  // Release AudioConduit first by dropping reference on MainThread, where it
  // expects to be
  MOZ_ASSERT(!mSendStream && !mRecvStream,
             "Call DeleteStreams prior to ~WebrtcVideoConduit.");
}
+
+MediaConduitErrorCode WebrtcVideoConduit::SetLocalRTPExtensions(
+ LocalDirection aDirection, const RtpExtList& aExtensions) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ auto& extList = aDirection == LocalDirection::kSend
+ ? mSendStreamConfig.rtp.extensions
+ : mRecvStreamConfig.rtp.extensions;
+ extList = aExtensions;
+ return kMediaConduitNoError;
+}
+
// Swap the local send (and RTX) SSRCs.  If they change while transmitting,
// the send stream is stopped, destroyed, and restarted so the new SSRCs take
// effect.  Returns false if stop/start fails.  Main thread only.
bool WebrtcVideoConduit::SetLocalSSRCs(
    const std::vector<unsigned int>& aSSRCs,
    const std::vector<unsigned int>& aRtxSSRCs) {
  MOZ_ASSERT(NS_IsMainThread());

  // Special case: the local SSRCs are the same - do nothing.
  if (mSendStreamConfig.rtp.ssrcs == aSSRCs &&
      mSendStreamConfig.rtp.rtx.ssrcs == aRtxSSRCs) {
    return true;
  }

  {
    MutexAutoLock lock(mMutex);
    // Update the value of the ssrcs in the config structure.
    mSendStreamConfig.rtp.ssrcs = aSSRCs;
    mSendStreamConfig.rtp.rtx.ssrcs = aRtxSSRCs;

    bool wasTransmitting = mEngineTransmitting;
    if (StopTransmittingLocked() != kMediaConduitNoError) {
      return false;
    }

    // On the next StartTransmitting() or ConfigureSendMediaCodec, force
    // building a new SendStream to switch SSRCs.
    DeleteSendStream();

    if (wasTransmitting) {
      if (StartTransmittingLocked() != kMediaConduitNoError) {
        return false;
      }
    }
  }

  return true;
}
+
// Snapshot of the configured local send SSRCs (copied under the lock).
std::vector<unsigned int> WebrtcVideoConduit::GetLocalSSRCs() {
  MutexAutoLock lock(mMutex);

  return mSendStreamConfig.rtp.ssrcs;
}
+
// Set the RTCP CNAME used for the send stream.  Main thread only.
bool WebrtcVideoConduit::SetLocalCNAME(const char* cname) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mMutex);

  mSendStreamConfig.rtp.c_name = cname;
  return true;
}
+
// Set the RTP MID header-extension value for the send stream.  Main thread.
bool WebrtcVideoConduit::SetLocalMID(const std::string& mid) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mMutex);

  mSendStreamConfig.rtp.mid = mid;
  return true;
}
+
// Set the A/V sync group for the receive stream.
// NOTE(review): unlike the neighboring setters, this takes no mutex and has
// no thread assertion — confirm callers only invoke it from the main thread
// before streams exist.
void WebrtcVideoConduit::SetSyncGroup(const std::string& group) {
  mRecvStreamConfig.sync_group = group;
}
+
+MediaConduitErrorCode WebrtcVideoConduit::ConfigureCodecMode(
+ webrtc::VideoCodecMode mode) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ CSFLogVerbose(LOGTAG, "%s ", __FUNCTION__);
+ if (mode == webrtc::VideoCodecMode::kRealtimeVideo ||
+ mode == webrtc::VideoCodecMode::kScreensharing) {
+ mCodecMode = mode;
+ if (mVideoStreamFactory) {
+ mVideoStreamFactory->SetCodecMode(mCodecMode);
+ }
+ return kMediaConduitNoError;
+ }
+
+ return kMediaConduitMalformedArgument;
+}
+
// Tear down the send stream (and its encoder) if one exists.  Caller must
// hold mMutex; main thread only.
void WebrtcVideoConduit::DeleteSendStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  if (mSendStream) {
    mCall->Call()->DestroyVideoSendStream(mSendStream);
    mSendStream = nullptr;
    mEncoder = nullptr;
  }
}
+
+webrtc::VideoCodecType SupportedCodecType(webrtc::VideoCodecType aType) {
+ switch (aType) {
+ case webrtc::VideoCodecType::kVideoCodecVP8:
+ case webrtc::VideoCodecType::kVideoCodecVP9:
+ case webrtc::VideoCodecType::kVideoCodecH264:
+ return aType;
+ default:
+ return webrtc::VideoCodecType::kVideoCodecUnknown;
+ }
+ // NOTREACHED
+}
+
// Build the webrtc::VideoSendStream from the current send config and encoder
// config: records codec telemetry, creates the encoder, and attaches this
// conduit as the frame source.  Caller holds mMutex; main thread only.
MediaConduitErrorCode WebrtcVideoConduit::CreateSendStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  nsAutoString codecName;
  codecName.AssignASCII(
      mSendStreamConfig.encoder_settings.payload_name.c_str());
  Telemetry::ScalarAdd(Telemetry::ScalarID::WEBRTC_VIDEO_SEND_CODEC_USED,
                       codecName, 1);

  webrtc::VideoCodecType encoder_type =
      SupportedCodecType(webrtc::PayloadStringToCodecType(
          mSendStreamConfig.encoder_settings.payload_name));
  if (encoder_type == webrtc::VideoCodecType::kVideoCodecUnknown) {
    return kMediaConduitInvalidSendCodec;
  }

  std::unique_ptr<webrtc::VideoEncoder> encoder(CreateEncoder(encoder_type));
  if (!encoder) {
    return kMediaConduitInvalidSendCodec;
  }

  // The config holds a raw pointer; we retain ownership in mEncoder below.
  mSendStreamConfig.encoder_settings.encoder = encoder.get();

  MOZ_ASSERT(
      mSendStreamConfig.rtp.ssrcs.size() == mEncoderConfig.number_of_streams,
      "Each video substream must have a corresponding ssrc.");

  mSendStream = mCall->Call()->CreateVideoSendStream(mSendStreamConfig.Copy(),
                                                     mEncoderConfig.Copy());

  if (!mSendStream) {
    return kMediaConduitVideoSendStreamError;
  }
  mSendStream->SetSource(
      this, webrtc::VideoSendStream::DegradationPreference::kBalanced);

  mEncoder = std::move(encoder);

  // Remember which codec mode this stream was built with so reconfiguration
  // can detect a mode change (see ConfigureSendMediaCodec).
  mActiveCodecMode = mCodecMode;

  return kMediaConduitNoError;
}
+
// Tear down the receive stream (and its decoders) if one exists, detaching
// our secondary RTP sink first.  Caller holds mMutex; main thread only.
void WebrtcVideoConduit::DeleteRecvStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  if (mRecvStream) {
    mRecvStream->RemoveSecondarySink(this);
    mCall->Call()->DestroyVideoReceiveStream(mRecvStream);
    mRecvStream = nullptr;
    mDecoders.clear();
  }
}
+
// Build the webrtc::VideoReceiveStream: create a decoder per negotiated
// codec (skipping unsupported/failed ones), record codec telemetry, and
// attach this conduit as a secondary RTP sink for source tracking.
// Caller holds mMutex; main thread only.
MediaConduitErrorCode WebrtcVideoConduit::CreateRecvStream() {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  webrtc::VideoReceiveStream::Decoder decoder_desc;
  std::unique_ptr<webrtc::VideoDecoder> decoder;
  webrtc::VideoCodecType decoder_type;

  mRecvStreamConfig.decoders.clear();
  for (auto& config : mRecvCodecList) {
    nsAutoString codecName;
    codecName.AssignASCII(config->mName.c_str());
    Telemetry::ScalarAdd(Telemetry::ScalarID::WEBRTC_VIDEO_RECV_CODEC_USED,
                         codecName, 1);

    decoder_type =
        SupportedCodecType(webrtc::PayloadStringToCodecType(config->mName));
    if (decoder_type == webrtc::VideoCodecType::kVideoCodecUnknown) {
      CSFLogError(LOGTAG, "%s Unknown decoder type: %s", __FUNCTION__,
                  config->mName.c_str());
      continue;
    }

    decoder = CreateDecoder(decoder_type);

    if (!decoder) {
      // This really should never happen unless something went wrong
      // in the negotiation code
      NS_ASSERTION(decoder, "Failed to create video decoder");
      CSFLogError(LOGTAG, "Failed to create decoder of type %s (%d)",
                  config->mName.c_str(), decoder_type);
      // don't stop
      continue;
    }

    // The config holds a raw pointer; ownership stays in mDecoders.
    decoder_desc.decoder = decoder.get();
    mDecoders.push_back(std::move(decoder));
    decoder_desc.payload_name = config->mName;
    decoder_desc.payload_type = config->mType;
    // XXX Ok, add:
    // Set decoder_desc.codec_params (fmtp)
    mRecvStreamConfig.decoders.push_back(decoder_desc);
  }

  mRecvStream =
      mCall->Call()->CreateVideoReceiveStream(mRecvStreamConfig.Copy());
  if (!mRecvStream) {
    mDecoders.clear();
    return kMediaConduitUnknownError;
  }

  // Add RTPPacketSinkInterface for synchronization source tracking
  mRecvStream->AddSecondarySink(this);

  CSFLogDebug(LOGTAG, "Created VideoReceiveStream %p for SSRC %u (0x%x)",
              mRecvStream, mRecvStreamConfig.rtp.remote_ssrc,
              mRecvStreamConfig.rtp.remote_ssrc);

  return kMediaConduitNoError;
}
+
// Build the codec-specific encoder settings (H264/VP8/VP9) for the given
// codec config, honoring screencast mode: screencast disables denoising,
// frame dropping and automatic resize.  Returns nullptr for codecs with no
// specific settings.  Main thread only.
static rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
ConfigureVideoEncoderSettings(const VideoCodecConfig* aConfig,
                              const WebrtcVideoConduit* aConduit) {
  MOZ_ASSERT(NS_IsMainThread());

  bool is_screencast =
      aConduit->CodecMode() == webrtc::VideoCodecMode::kScreensharing;
  // No automatic resizing when using simulcast or screencast.
  bool automatic_resize = !is_screencast && aConfig->mEncodings.size() <= 1;
  bool frame_dropping = !is_screencast;
  bool denoising;
  bool codec_default_denoising = false;
  if (is_screencast) {
    denoising = false;
  } else {
    // Use codec default if video_noise_reduction is unset.
    denoising = aConduit->Denoising();
    codec_default_denoising = !denoising;
  }

  if (aConfig->mName == "H264") {
    webrtc::VideoCodecH264 h264_settings =
        webrtc::VideoEncoder::GetDefaultH264Settings();
    h264_settings.frameDroppingOn = frame_dropping;
    h264_settings.packetizationMode = aConfig->mPacketizationMode;
    return new rtc::RefCountedObject<
        webrtc::VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings);
  }
  if (aConfig->mName == "VP8") {
    webrtc::VideoCodecVP8 vp8_settings =
        webrtc::VideoEncoder::GetDefaultVp8Settings();
    vp8_settings.automaticResizeOn = automatic_resize;
    // VP8 denoising is enabled by default.
    vp8_settings.denoisingOn = codec_default_denoising ? true : denoising;
    vp8_settings.frameDroppingOn = frame_dropping;
    return new rtc::RefCountedObject<
        webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
  }
  if (aConfig->mName == "VP9") {
    webrtc::VideoCodecVP9 vp9_settings =
        webrtc::VideoEncoder::GetDefaultVp9Settings();
    if (is_screencast) {
      // TODO(asapersson): Set to 2 for now since there is a DCHECK in
      // VideoSendStream::ReconfigureVideoEncoder.
      vp9_settings.numberOfSpatialLayers = 2;
    } else {
      vp9_settings.numberOfSpatialLayers = aConduit->SpatialLayers();
    }
    // VP9 denoising is disabled by default.
    vp9_settings.denoisingOn = codec_default_denoising ? false : denoising;
    vp9_settings.frameDroppingOn = true;  // This must be true for VP9
    return new rtc::RefCountedObject<
        webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
  }
  return nullptr;
}
+
+// Compare lists of codecs
+static bool CodecsDifferent(const nsTArray<UniquePtr<VideoCodecConfig>>& a,
+ const nsTArray<UniquePtr<VideoCodecConfig>>& b) {
+ // return a != b;
+ // would work if UniquePtr<> operator== compared contents!
+ auto len = a.Length();
+ if (len != b.Length()) {
+ return true;
+ }
+
+ // XXX std::equal would work, if we could use it on this - fails for the
+ // same reason as above. c++14 would let us pass a comparator function.
+ for (uint32_t i = 0; i < len; ++i) {
+ if (!(*a[i] == *b[i])) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
 * Note: Setting the send-codec on the Video Engine will restart the encoder,
 * sets up new SSRC and reset RTP_RTCP module with the new codec setting.
 *
 * Note: this is called from MainThread, and the codec settings are read on
 * videoframe delivery threads (i.e in SendVideoFrame(). With
 * renegotiation/reconfiguration, this now needs a lock! Alternatively
 * changes could be queued until the next frame is delivered using an
 * Atomic pointer and swaps.
 */
MediaConduitErrorCode WebrtcVideoConduit::ConfigureSendMediaCodec(
    const VideoCodecConfig* codecConfig, const RtpRtcpConfig& aRtpRtcpConfig) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mMutex);
  mUpdateResolution = true;

  CSFLogDebug(LOGTAG, "%s for %s", __FUNCTION__,
              codecConfig ? codecConfig->mName.c_str() : "<null>");

  MediaConduitErrorCode condError = kMediaConduitNoError;

  // validate basic params
  if ((condError = ValidateCodecConfig(codecConfig)) != kMediaConduitNoError) {
    return condError;
  }

  // Clamp the number of simulcast streams to what libwebrtc supports.
  size_t streamCount = std::min(codecConfig->mEncodings.size(),
                                (size_t)webrtc::kMaxSimulcastStreams);
  // The smallest scaleDownBy factor identifies the highest-resolution layer.
  size_t highestResolutionIndex = 0;
  for (size_t i = 1; i < streamCount; ++i) {
    if (codecConfig->mEncodings[i].constraints.scaleDownBy <
        codecConfig->mEncodings[highestResolutionIndex]
            .constraints.scaleDownBy) {
      highestResolutionIndex = i;
    }
  }

  MOZ_RELEASE_ASSERT(streamCount >= 1, "streamCount should be at least one");

  CSFLogDebug(LOGTAG, "%s for VideoConduit:%p stream count:%zu", __FUNCTION__,
              this, streamCount);

  mSendingFramerate = 0;
  mSendStreamConfig.rtp.rids.clear();

  int max_framerate;
  if (codecConfig->mEncodingConstraints.maxFps > 0) {
    max_framerate = codecConfig->mEncodingConstraints.maxFps;
  } else {
    max_framerate = DEFAULT_VIDEO_MAX_FRAMERATE;
  }
  // apply restrictions from maxMbps/etc
  mSendingFramerate =
      SelectSendFrameRate(codecConfig, max_framerate, mLastWidth, mLastHeight);

  // So we can comply with b=TIAS/b=AS/maxbr=X when input resolution changes
  mNegotiatedMaxBitrate = codecConfig->mTias;

  if (mLastWidth == 0 && mMinBitrateEstimate != 0) {
    // Only do this at the start; use "have we send a frame" as a reasonable
    // stand-in. min <= start <= max (which can be -1, note!)
    webrtc::Call::Config::BitrateConfig config;
    config.min_bitrate_bps = mMinBitrateEstimate;
    if (config.start_bitrate_bps < mMinBitrateEstimate) {
      config.start_bitrate_bps = mMinBitrateEstimate;
    }
    if (config.max_bitrate_bps > 0 &&
        config.max_bitrate_bps < mMinBitrateEstimate) {
      config.max_bitrate_bps = mMinBitrateEstimate;
    }
    mCall->Call()->SetBitrateConfig(config);
  }

  mVideoStreamFactory = new rtc::RefCountedObject<VideoStreamFactory>(
      *codecConfig, mCodecMode, mMinBitrate, mStartBitrate, mPrefMaxBitrate,
      mNegotiatedMaxBitrate, mSendingFramerate);
  mEncoderConfig.video_stream_factory = mVideoStreamFactory.get();

  // Reset the VideoAdapter. SelectResolution will ensure limits are set.
  mVideoAdapter = MakeUnique<cricket::VideoAdapter>(
      streamCount > 1 ? SIMULCAST_RESOLUTION_ALIGNMENT : 1);
  mVideoAdapter->OnScaleResolutionBy(
      codecConfig->mEncodings[highestResolutionIndex].constraints.scaleDownBy >
              1.0
          ? rtc::Optional<float>(codecConfig->mEncodings[highestResolutionIndex]
                                     .constraints.scaleDownBy)
          : rtc::Optional<float>());

  // XXX parse the encoded SPS/PPS data and set spsData/spsLen/ppsData/ppsLen
  mEncoderConfig.encoder_specific_settings =
      ConfigureVideoEncoderSettings(codecConfig, this);

  mEncoderConfig.content_type =
      mCodecMode == webrtc::kRealtimeVideo
          ? webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo
          : webrtc::VideoEncoderConfig::ContentType::kScreen;
  // for the GMP H.264 encoder/decoder!!
  mEncoderConfig.min_transmit_bitrate_bps = 0;
  // Expected max number of encodings
  mEncoderConfig.number_of_streams = streamCount;

  // If only encoder stream attibutes have been changed, there is no need to
  // stop, create a new webrtc::VideoSendStream, and restart. Recreating on
  // PayloadType change may be overkill, but is safe.
  if (mSendStream) {
    if (!RequiresNewSendStream(*codecConfig) &&
        mActiveCodecMode == mCodecMode) {
      mCurSendCodecConfig->mEncodingConstraints =
          codecConfig->mEncodingConstraints;
      mCurSendCodecConfig->mEncodings = codecConfig->mEncodings;
      mSendStream->ReconfigureVideoEncoder(mEncoderConfig.Copy());
      return kMediaConduitNoError;
    }

    condError = StopTransmittingLocked();
    if (condError != kMediaConduitNoError) {
      return condError;
    }

    // This will cause a new encoder to be created by StartTransmitting()
    DeleteSendStream();
  }

  mSendStreamConfig.encoder_settings.payload_name = codecConfig->mName;
  mSendStreamConfig.encoder_settings.payload_type = codecConfig->mType;
  mSendStreamConfig.rtp.rtcp_mode = aRtpRtcpConfig.GetRtcpMode();
  mSendStreamConfig.rtp.max_packet_size = kVideoMtu;
  if (codecConfig->RtxPayloadTypeIsSet()) {
    mSendStreamConfig.rtp.rtx.payload_type = codecConfig->mRTXPayloadType;
  } else {
    mSendStreamConfig.rtp.rtx.payload_type = -1;
    mSendStreamConfig.rtp.rtx.ssrcs.clear();
  }

  // See Bug 1297058, enabling FEC when basic NACK is to be enabled in H.264 is
  // problematic
  if (codecConfig->RtcpFbFECIsSet() &&
      !(codecConfig->mName == "H264" && codecConfig->RtcpFbNackIsSet(""))) {
    mSendStreamConfig.rtp.ulpfec.ulpfec_payload_type =
        codecConfig->mULPFECPayloadType;
    mSendStreamConfig.rtp.ulpfec.red_payload_type =
        codecConfig->mREDPayloadType;
    mSendStreamConfig.rtp.ulpfec.red_rtx_payload_type =
        codecConfig->mREDRTXPayloadType;
  } else {
    // Reset to defaults
    mSendStreamConfig.rtp.ulpfec.ulpfec_payload_type = -1;
    mSendStreamConfig.rtp.ulpfec.red_payload_type = -1;
    mSendStreamConfig.rtp.ulpfec.red_rtx_payload_type = -1;
  }

  mSendStreamConfig.rtp.nack.rtp_history_ms =
      codecConfig->RtcpFbNackIsSet("") ? 1000 : 0;

  // Copy the applied config for future reference.
  mCurSendCodecConfig = MakeUnique<VideoCodecConfig>(*codecConfig);

  mSendStreamConfig.rtp.rids.clear();
  bool has_rid = false;
  for (size_t idx = 0; idx < streamCount; idx++) {
    auto& encoding = mCurSendCodecConfig->mEncodings[idx];
    if (encoding.rid[0]) {
      has_rid = true;
      break;
    }
  }
  if (has_rid) {
    // NOTE(review): RIDs are pushed in reverse encoding order — presumably to
    // match libwebrtc's simulcast-stream ordering; confirm before changing.
    for (size_t idx = streamCount; idx > 0; idx--) {
      auto& encoding = mCurSendCodecConfig->mEncodings[idx - 1];
      mSendStreamConfig.rtp.rids.push_back(encoding.rid);
    }
  }

  return condError;
}
+
+static uint32_t GenerateRandomSSRC() {
+ uint32_t ssrc;
+ do {
+ SECStatus rv = PK11_GenerateRandom(reinterpret_cast<unsigned char*>(&ssrc),
+ sizeof(ssrc));
+ if (rv != SECSuccess) {
+ CSFLogError(LOGTAG, "%s: PK11_GenerateRandom failed with error %d",
+ __FUNCTION__, rv);
+ return 0;
+ }
+ } while (ssrc == 0); // webrtc.org code has fits if you select an SSRC of 0
+
+ return ssrc;
+}
+
// Public wrapper: take the lock and forward to SetRemoteSSRCLocked.
bool WebrtcVideoConduit::SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mMutex);

  return SetRemoteSSRCLocked(ssrc, rtxSsrc);
}
+
// Switch the expected remote (and RTX) SSRC.  Stops receiving, asks other
// conduits to release the SSRC, rebuilds the receive stream lazily, and
// restarts receiving if it was running.  Caller holds mMutex; main thread.
bool WebrtcVideoConduit::SetRemoteSSRCLocked(uint32_t ssrc, uint32_t rtxSsrc) {
  MOZ_ASSERT(NS_IsMainThread());
  mMutex.AssertCurrentThreadOwns();

  if (mRecvStreamConfig.rtp.remote_ssrc == ssrc &&
      mRecvStreamConfig.rtp.rtx_ssrc == rtxSsrc) {
    return true;
  }

  bool wasReceiving = mEngineReceiving;
  if (NS_WARN_IF(StopReceivingLocked() != kMediaConduitNoError)) {
    return false;
  }

  {
    CSFLogDebug(LOGTAG, "%s: SSRC %u (0x%x)", __FUNCTION__, ssrc, ssrc);
    // Drop our lock while calling into other conduits to avoid lock-order
    // problems; UnsetRemoteSSRC may take their locks.
    MutexAutoUnlock unlock(mMutex);
    if (!mCall->UnsetRemoteSSRC(ssrc)) {
      CSFLogError(LOGTAG,
                  "%s: Failed to unset SSRC %u (0x%x) on other conduits,"
                  " bailing",
                  __FUNCTION__, ssrc, ssrc);
      return false;
    }
  }

  mRemoteSSRC = ssrc;
  mRecvStreamConfig.rtp.remote_ssrc = ssrc;
  mRecvStreamConfig.rtp.rtx_ssrc = rtxSsrc;
  // mWaitingForInitialSsrc lives on the STS thread; clear it there, then
  // release our self-reference back on main.
  mStsThread->Dispatch(NS_NewRunnableFunction(
      "WebrtcVideoConduit::WaitingForInitialSsrcNoMore",
      [this, self = RefPtr<WebrtcVideoConduit>(this)]() mutable {
        mWaitingForInitialSsrc = false;
        NS_ReleaseOnMainThread(
            "WebrtcVideoConduit::WaitingForInitialSsrcNoMore", self.forget());
      }));
  // On the next StartReceiving() or ConfigureRecvMediaCodec, force
  // building a new RecvStream to switch SSRCs.
  DeleteRecvStream();

  if (wasReceiving) {
    if (StartReceivingLocked() != kMediaConduitNoError) {
      return false;
    }
  }

  return true;
}
+
// Called when another conduit claims `ssrc`: if we are currently configured
// for it (as main or RTX SSRC), pick a fresh random remote SSRC instead so
// the two conduits don't fight over incoming packets.  Main thread only.
bool WebrtcVideoConduit::UnsetRemoteSSRC(uint32_t ssrc) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mMutex);

  if (mRecvStreamConfig.rtp.remote_ssrc != ssrc &&
      mRecvStreamConfig.rtp.rtx_ssrc != ssrc) {
    return true;
  }

  mRecvStreamConfig.rtp.rtx_ssrc = 0;

  uint32_t our_ssrc = 0;
  do {
    our_ssrc = GenerateRandomSSRC();
    if (our_ssrc == 0) {
      return false;
    }
  } while (our_ssrc == ssrc);

  // There is a (tiny) chance that this new random ssrc will collide with some
  // other conduit's remote ssrc, in which case that conduit will choose a new
  // one.
  SetRemoteSSRCLocked(our_ssrc, 0);
  return true;
}
+
// Report the current remote SSRC.  On the main thread this fails when no
// receive stream exists yet; on other threads it returns the cached value
// unconditionally.
// NOTE(review): mRemoteSSRC is read here without mMutex — presumably a benign
// race on a 32-bit value; confirm.
bool WebrtcVideoConduit::GetRemoteSSRC(uint32_t* ssrc) {
  if (NS_IsMainThread()) {
    if (!mRecvStream) {
      return false;
    }
  }
  // libwebrtc uses 0 to mean a lack of SSRC. That is not to spec.
  *ssrc = mRemoteSSRC;
  return true;
}
+
// Copy send-side RTCP packet-type counters; false if the send stream is not
// active.  STS thread only.
bool WebrtcVideoConduit::GetSendPacketTypeStats(
    webrtc::RtcpPacketTypeCounter* aPacketCounts) {
  ASSERT_ON_THREAD(mStsThread);

  MutexAutoLock lock(mMutex);
  if (!mSendStreamStats.Active()) {
    return false;
  }
  *aPacketCounts = mSendStreamStats.PacketCounts();
  return true;
}
+
// Copy receive-side RTCP packet-type counters; false if the receive stream is
// not active.  STS thread only.  (Note: unlike the send variant, no mMutex.)
bool WebrtcVideoConduit::GetRecvPacketTypeStats(
    webrtc::RtcpPacketTypeCounter* aPacketCounts) {
  ASSERT_ON_THREAD(mStsThread);

  if (!mRecvStreamStats.Active()) {
    return false;
  }
  *aPacketCounts = mRecvStreamStats.PacketCounts();
  return true;
}
+
// Timer callback (main thread): snapshot send/receive/call stats from
// libwebrtc, then dispatch to the STS thread where the per-stream statistics
// objects are updated in order (call stats first, then stream stats).
void WebrtcVideoConduit::PollStats() {
  MOZ_ASSERT(NS_IsMainThread());

  nsTArray<RefPtr<Runnable>> runnables(2);
  if (mEngineTransmitting) {
    MOZ_RELEASE_ASSERT(mSendStream);
    if (!mSendStreamConfig.rtp.ssrcs.empty()) {
      // Send stats are tracked against the first configured SSRC.
      uint32_t ssrc = mSendStreamConfig.rtp.ssrcs.front();
      webrtc::VideoSendStream::Stats stats = mSendStream->GetStats();
      runnables.AppendElement(NS_NewRunnableFunction(
          "WebrtcVideoConduit::SendStreamStatistics::Update",
          [this, self = RefPtr<WebrtcVideoConduit>(this),
           stats = std::move(stats),
           ssrc]() { mSendStreamStats.Update(stats, ssrc); }));
    }
  }
  if (mEngineReceiving) {
    MOZ_RELEASE_ASSERT(mRecvStream);
    webrtc::VideoReceiveStream::Stats stats = mRecvStream->GetStats();
    runnables.AppendElement(NS_NewRunnableFunction(
        "WebrtcVideoConduit::RecvStreamStatistics::Update",
        [this, self = RefPtr<WebrtcVideoConduit>(this),
         stats = std::move(stats)]() { mRecvStreamStats.Update(stats); }));
  }
  webrtc::Call::Stats stats = mCall->Call()->GetStats();
  mStsThread->Dispatch(NS_NewRunnableFunction(
      "WebrtcVideoConduit::UpdateStreamStatistics",
      [this, self = RefPtr<WebrtcVideoConduit>(this), stats = std::move(stats),
       runnables = std::move(runnables)]() mutable {
        mCallStats.Update(stats);
        for (const auto& runnable : runnables) {
          runnable->Run();
        }
        // Self-reference must be dropped on main, not STS.
        NS_ReleaseOnMainThread("WebrtcVideoConduit::UpdateStreamStatistics",
                               self.forget());
      }));
}
+
// Sync the stats machinery with the current transmit/receive state: mark the
// stream-stats objects active/inactive on STS, and start or cancel the 1 s
// PollStats timer as needed.  Main thread only.
void WebrtcVideoConduit::UpdateVideoStatsTimer() {
  MOZ_ASSERT(NS_IsMainThread());

  bool transmitting = mEngineTransmitting;
  bool receiving = mEngineReceiving;
  mStsThread->Dispatch(NS_NewRunnableFunction(
      "WebrtcVideoConduit::SetSendStreamStatsActive",
      [this, self = RefPtr<WebrtcVideoConduit>(this), transmitting,
       receiving]() mutable {
        mSendStreamStats.SetActive(transmitting);
        mRecvStreamStats.SetActive(receiving);
        // Self-reference must be dropped on main, not STS.
        NS_ReleaseOnMainThread("WebrtcVideoConduit::SetSendStreamStatsActive",
                               self.forget());
      }));

  bool shouldBeActive = transmitting || receiving;
  if (mVideoStatsTimerActive == shouldBeActive) {
    return;
  }
  mVideoStatsTimerActive = shouldBeActive;
  if (shouldBeActive) {
    nsTimerCallbackFunc callback = [](nsITimer*, void* aClosure) {
      CSFLogDebug(LOGTAG, "StreamStats polling scheduled for VideoConduit: %p",
                  aClosure);
      static_cast<WebrtcVideoConduit*>(aClosure)->PollStats();
    };
    mVideoStatsTimer->InitWithNamedFuncCallback(
        callback, this, 1000, nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP,
        "WebrtcVideoConduit::SendStreamStatsUpdater");
  } else {
    mVideoStatsTimer->Cancel();
  }
}
+
// Fill encoder-side statistics (rates, drops, QP sum) from the cached send
// stream stats; false when not transmitting.  STS thread only.
bool WebrtcVideoConduit::GetVideoEncoderStats(
    double* framerateMean, double* framerateStdDev, double* bitrateMean,
    double* bitrateStdDev, uint32_t* droppedFrames, uint32_t* framesEncoded,
    Maybe<uint64_t>* qpSum) {
  ASSERT_ON_THREAD(mStsThread);

  MutexAutoLock lock(mMutex);
  if (!mEngineTransmitting || !mSendStream) {
    return false;
  }
  mSendStreamStats.GetVideoStreamStats(*framerateMean, *framerateStdDev,
                                       *bitrateMean, *bitrateStdDev);
  *droppedFrames = mSendStreamStats.DroppedFrames();
  *framesEncoded = mSendStreamStats.FramesEncoded();
  *qpSum = mSendStreamStats.QpSum();
  return true;
}
+
// Fill decoder-side statistics from the cached receive stream stats; false
// when not receiving.  STS thread only.
bool WebrtcVideoConduit::GetVideoDecoderStats(double* framerateMean,
                                              double* framerateStdDev,
                                              double* bitrateMean,
                                              double* bitrateStdDev,
                                              uint32_t* discardedPackets,
                                              uint32_t* framesDecoded) {
  ASSERT_ON_THREAD(mStsThread);

  MutexAutoLock lock(mMutex);
  if (!mEngineReceiving || !mRecvStream) {
    return false;
  }
  mRecvStreamStats.GetVideoStreamStats(*framerateMean, *framerateStdDev,
                                       *bitrateMean, *bitrateStdDev);
  *discardedPackets = mRecvStreamStats.DiscardedPackets();
  *framesDecoded = mRecvStreamStats.FramesDecoded();
  return true;
}
+
// Local receiver-side jitter/loss from the cached receive stream stats;
// false when no receive stream exists.  STS thread only.
bool WebrtcVideoConduit::GetRTPReceiverStats(uint32_t* jitterMs,
                                             uint32_t* packetsLost) {
  ASSERT_ON_THREAD(mStsThread);

  CSFLogVerbose(LOGTAG, "%s for VideoConduit:%p", __FUNCTION__, this);
  MutexAutoLock lock(mMutex);
  if (!mRecvStream) {
    return false;
  }

  *jitterMs = mRecvStreamStats.JitterMs();
  *packetsLost = mRecvStreamStats.PacketsLost();
  return true;
}
+
// Remote receiver-report view of our send stream (what the peer received
// from us) plus the current RTT estimate.  False when the send stream is
// inactive or its SSRC was not found in the last stats poll.  STS thread.
bool WebrtcVideoConduit::GetRTCPReceiverReport(uint32_t* jitterMs,
                                               uint32_t* packetsReceived,
                                               uint64_t* bytesReceived,
                                               uint32_t* cumulativeLost,
                                               Maybe<double>* aOutRttSec) {
  ASSERT_ON_THREAD(mStsThread);

  CSFLogVerbose(LOGTAG, "%s for VideoConduit:%p", __FUNCTION__, this);
  aOutRttSec->reset();
  if (!mSendStreamStats.Active()) {
    return false;
  }
  if (!mSendStreamStats.SsrcFound()) {
    return false;
  }
  *jitterMs = mSendStreamStats.JitterMs();
  *packetsReceived = mSendStreamStats.PacketsReceived();
  *bytesReceived = mSendStreamStats.BytesReceived();
  *cumulativeLost = mSendStreamStats.PacketsLost();
  *aOutRttSec = mCallStats.RttSec();
  return true;
}
+
// Remote sender-report view of the receive stream (what the peer says it
// sent to us).  False when the receive stream is inactive.  STS thread only.
bool WebrtcVideoConduit::GetRTCPSenderReport(
    unsigned int* packetsSent, uint64_t* bytesSent,
    DOMHighResTimeStamp* aRemoteTimestamp) {
  ASSERT_ON_THREAD(mStsThread);

  CSFLogVerbose(LOGTAG, "%s for VideoConduit:%p", __FUNCTION__, this);

  if (!mRecvStreamStats.Active()) {
    return false;
  }

  *packetsSent = mRecvStreamStats.PacketsSent();
  *bytesSent = mRecvStreamStats.BytesSent();
  *aRemoteTimestamp = mRecvStreamStats.RemoteTimestamp();
  return true;
}
+
// Latest bandwidth-estimation snapshot from the call-level stats.  STS only.
Maybe<mozilla::dom::RTCBandwidthEstimationInternal>
WebrtcVideoConduit::GetBandwidthEstimation() {
  ASSERT_ON_THREAD(mStsThread);
  return mCallStats.Stats();
}
+
// Forward RTP source (SSRC/CSRC) entries from the observer.  Main thread.
void WebrtcVideoConduit::GetRtpSources(
    nsTArray<dom::RTCRtpSourceEntry>& outSources) {
  MOZ_ASSERT(NS_IsMainThread());
  return mRtpSourceObserver->GetRtpSources(outSources);
}
+
+// Reads the video-related preferences that configure this conduit (latency
+// testing, bitrate bounds, SVC layer counts, denoising, scaling lock) and,
+// on Android, sets up the platform objects the video engine needs.
+// Returns kMediaConduitNoError on success.
+MediaConduitErrorCode WebrtcVideoConduit::InitMain() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  nsresult rv;
+  nsCOMPtr<nsIPrefService> prefs =
+      do_GetService("@mozilla.org/preferences-service;1", &rv);
+  if (!NS_WARN_IF(NS_FAILED(rv))) {
+    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
+
+    if (branch) {
+      int32_t temp;
+      // Fix: this pref was previously fetched twice in a row with identical
+      // arguments; a single read is sufficient.
+      Unused << NS_WARN_IF(NS_FAILED(branch->GetBoolPref(
+          "media.video.test_latency", &mVideoLatencyTestEnable)));
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.min_bitrate", &temp)))) {
+        if (temp >= 0) {
+          mMinBitrate = KBPS(temp);
+        }
+      }
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.start_bitrate", &temp)))) {
+        if (temp >= 0) {
+          mStartBitrate = KBPS(temp);
+        }
+      }
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.max_bitrate", &temp)))) {
+        if (temp >= 0) {
+          mPrefMaxBitrate = KBPS(temp);
+        }
+      }
+      // Clamp the configured bounds into a consistent ordering:
+      // engine floor <= min <= start <= max (when max is set).
+      if (mMinBitrate != 0 && mMinBitrate < kViEMinCodecBitrate_bps) {
+        mMinBitrate = kViEMinCodecBitrate_bps;
+      }
+      if (mStartBitrate < mMinBitrate) {
+        mStartBitrate = mMinBitrate;
+      }
+      if (mPrefMaxBitrate && mStartBitrate > mPrefMaxBitrate) {
+        mStartBitrate = mPrefMaxBitrate;
+      }
+      // XXX We'd love if this was a live param for testing adaptation/etc
+      // in automation
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.min_bitrate_estimate", &temp)))) {
+        if (temp >= 0) {
+          mMinBitrateEstimate = temp;  // bps!
+        }
+      }
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.svc.spatial", &temp)))) {
+        if (temp >= 0) {
+          mSpatialLayers = temp;
+        }
+      }
+      if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref(
+              "media.peerconnection.video.svc.temporal", &temp)))) {
+        if (temp >= 0) {
+          mTemporalLayers = temp;
+        }
+      }
+      Unused << NS_WARN_IF(NS_FAILED(branch->GetBoolPref(
+          "media.peerconnection.video.denoising", &mDenoising)));
+      Unused << NS_WARN_IF(NS_FAILED(branch->GetBoolPref(
+          "media.peerconnection.video.lock_scaling", &mLockScaling)));
+    }
+  }
+#ifdef MOZ_WIDGET_ANDROID
+  if (mozilla::camera::VideoEngine::SetAndroidObjects() != 0) {
+    CSFLogError(LOGTAG, "%s: could not set Android objects", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+#endif  // MOZ_WIDGET_ANDROID
+  return kMediaConduitNoError;
+}
+
+/**
+ * Performs initialization of the MANDATORY components of the Video Engine.
+ * Must run on the main thread; forwards any InitMain() failure to the caller.
+ */
+MediaConduitErrorCode WebrtcVideoConduit::Init() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s this=%p", __FUNCTION__, this);
+  const MediaConduitErrorCode result = InitMain();
+  if (result != kMediaConduitNoError) {
+    return result;
+  }
+
+  CSFLogDebug(LOGTAG, "%s Initialization Done", __FUNCTION__);
+  return kMediaConduitNoError;
+}
+
+// Tears down both the send and receive streams under the conduit mutex.
+// Main-thread only.
+void WebrtcVideoConduit::DeleteStreams() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // We can't delete the VideoEngine until all these are released!
+  // And we can't use a Scoped ptr, since the order is arbitrary
+
+  MutexAutoLock lock(mMutex);
+  DeleteSendStream();
+  DeleteRecvStream();
+}
+
+// Installs the renderer that decoded frames are delivered to.
+// Returns kMediaConduitInvalidRenderer for a null renderer.
+MediaConduitErrorCode WebrtcVideoConduit::AttachRenderer(
+    RefPtr<mozilla::VideoRenderer> aVideoRenderer) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s", __FUNCTION__);
+
+  if (!aVideoRenderer) {
+    // A null renderer is a caller bug; assert in debug builds.
+    CSFLogError(LOGTAG, "%s NULL Renderer", __FUNCTION__);
+    MOZ_ASSERT(false);
+    return kMediaConduitInvalidRenderer;
+  }
+
+  // This function is called only from main, so we only need to protect
+  // against modifying mRenderer while any webrtc.org code is trying to use
+  // it.
+  {
+    ReentrantMonitorAutoEnter enter(mTransportMonitor);
+    mRenderer = std::move(aVideoRenderer);
+    // Tell the new renderer the current resolution up front.
+    mRenderer->FrameSizeChange(mReceivingWidth, mReceivingHeight);
+  }
+
+  return kMediaConduitNoError;
+}
+
+void WebrtcVideoConduit::DetachRenderer() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // Clear under the transport monitor so webrtc.org render callbacks cannot
+  // race with the teardown. Assigning nullptr to an already-null RefPtr is a
+  // no-op, so no separate null check is needed.
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  mRenderer = nullptr;
+}
+
+// Installs the transport used for outbound RTP/RTCP on the send side.
+MediaConduitErrorCode WebrtcVideoConduit::SetTransmitterTransport(
+    RefPtr<TransportInterface> aTransport) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  // Swap the transport under the monitor so in-flight SendRtp/SendRtcp
+  // callbacks observe a consistent pointer.
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  mTransmitterTransport = std::move(aTransport);
+  return kMediaConduitNoError;
+}
+
+// Installs the transport used for RTCP sent on behalf of the receive side.
+MediaConduitErrorCode WebrtcVideoConduit::SetReceiverTransport(
+    RefPtr<TransportInterface> aTransport) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  // Swap the transport under the monitor so concurrent SendRtcp callbacks
+  // observe a consistent pointer.
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  mReceiverTransport = std::move(aTransport);
+  return kMediaConduitNoError;
+}
+
+// Applies a new set of receive codecs. Validates each codec config, derives
+// the union of RTCP feedback settings (NACK, TMMBR, REMB, FEC, transport-cc,
+// keyframe-request method), and recreates the receive stream only when the
+// effective configuration actually changed. Main-thread only.
+MediaConduitErrorCode WebrtcVideoConduit::ConfigureRecvMediaCodecs(
+    const std::vector<UniquePtr<VideoCodecConfig>>& codecConfigList,
+    const RtpRtcpConfig& aRtpRtcpConfig) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  MediaConduitErrorCode condError = kMediaConduitNoError;
+  std::string payloadName;
+
+  if (codecConfigList.empty()) {
+    CSFLogError(LOGTAG, "%s Zero number of codecs to configure", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  webrtc::KeyFrameRequestMethod kf_request_method = webrtc::kKeyFrameReqPliRtcp;
+  bool kf_request_enabled = false;
+  bool use_nack_basic = false;
+  bool use_tmmbr = false;
+  bool use_remb = false;
+  bool use_fec = false;
+  bool use_transport_cc = false;
+  int ulpfec_payload_type = kNullPayloadType;
+  int red_payload_type = kNullPayloadType;
+  bool configuredH264 = false;
+  nsTArray<UniquePtr<VideoCodecConfig>> recv_codecs;
+
+  // Try Applying the codecs in the list
+  // we treat as success if at least one codec was applied and reception was
+  // started successfully.
+  std::set<unsigned int> codec_types_seen;
+  for (const auto& codec_config : codecConfigList) {
+    // Invalid codecs are skipped, not fatal; we only need one usable codec.
+    if ((condError = ValidateCodecConfig(codec_config.get())) !=
+        kMediaConduitNoError) {
+      CSFLogError(LOGTAG, "%s Invalid config for %s decoder: %i", __FUNCTION__,
+                  codec_config ? codec_config->mName.c_str() : "<null>",
+                  condError);
+      continue;
+    }
+    if (codec_config->mName == "H264") {
+      // TODO(bug 1200768): We can only handle configuring one recv H264 codec
+      if (configuredH264) {
+        continue;
+      }
+      configuredH264 = true;
+    }
+
+    // ULPFEC/RED are not real decoders; record their payload types and move
+    // on so they never end up in recv_codecs.
+    if (codec_config->mName == kUlpFecPayloadName) {
+      ulpfec_payload_type = codec_config->mType;
+      continue;
+    }
+
+    if (codec_config->mName == kRedPayloadName) {
+      red_payload_type = codec_config->mType;
+      continue;
+    }
+
+    // Check for the keyframe request type: PLI is preferred
+    // over FIR, and FIR is preferred over none.
+    // XXX (See upstream issue
+    // https://bugs.chromium.org/p/webrtc/issues/detail?id=7002): There is no
+    // 'none' option in webrtc.org
+    if (codec_config->RtcpFbNackIsSet("pli")) {
+      kf_request_enabled = true;
+      kf_request_method = webrtc::kKeyFrameReqPliRtcp;
+    } else if (!kf_request_enabled && codec_config->RtcpFbCcmIsSet("fir")) {
+      kf_request_enabled = true;
+      kf_request_method = webrtc::kKeyFrameReqFirRtcp;
+    }
+
+    // What if codec A has Nack and REMB, and codec B has TMMBR, and codec C has
+    // none? In practice, that's not a useful configuration, and
+    // VideoReceiveStream::Config can't represent that, so simply union the
+    // (boolean) settings
+    use_nack_basic |= codec_config->RtcpFbNackIsSet("");
+    use_tmmbr |= codec_config->RtcpFbCcmIsSet("tmmbr");
+    use_remb |= codec_config->RtcpFbRembIsSet();
+    use_fec |= codec_config->RtcpFbFECIsSet();
+    use_transport_cc |= codec_config->RtcpFbTransportCCIsSet();
+
+    recv_codecs.AppendElement(new VideoCodecConfig(*codec_config));
+  }
+
+  if (!recv_codecs.Length()) {
+    CSFLogError(LOGTAG, "%s Found no valid receive codecs", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  // Now decide if we need to recreate the receive stream, or can keep it
+  if (!mRecvStream || CodecsDifferent(recv_codecs, mRecvCodecList) ||
+      mRecvStreamConfig.rtp.nack.rtp_history_ms !=
+          (use_nack_basic ? 1000 : 0) ||
+      mRecvStreamConfig.rtp.remb != use_remb ||
+      mRecvStreamConfig.rtp.transport_cc != use_transport_cc ||
+      mRecvStreamConfig.rtp.tmmbr != use_tmmbr ||
+      mRecvStreamConfig.rtp.keyframe_method != kf_request_method ||
+      (use_fec &&
+       (mRecvStreamConfig.rtp.ulpfec_payload_type != ulpfec_payload_type ||
+        mRecvStreamConfig.rtp.red_payload_type != red_payload_type))) {
+    MutexAutoLock lock(mMutex);
+
+    condError = StopReceivingLocked();
+    if (condError != kMediaConduitNoError) {
+      return condError;
+    }
+
+    // If we fail after here things get ugly
+    mRecvStreamConfig.rtp.rtcp_mode = aRtpRtcpConfig.GetRtcpMode();
+    mRecvStreamConfig.rtp.nack.rtp_history_ms = use_nack_basic ? 1000 : 0;
+    mRecvStreamConfig.rtp.remb = use_remb;
+    mRecvStreamConfig.rtp.transport_cc = use_transport_cc;
+    mRecvStreamConfig.rtp.tmmbr = use_tmmbr;
+    mRecvStreamConfig.rtp.keyframe_method = kf_request_method;
+
+    if (use_fec) {
+      mRecvStreamConfig.rtp.ulpfec_payload_type = ulpfec_payload_type;
+      mRecvStreamConfig.rtp.red_payload_type = red_payload_type;
+    } else {
+      // Reset to defaults
+      mRecvStreamConfig.rtp.ulpfec_payload_type = -1;
+      mRecvStreamConfig.rtp.red_payload_type = -1;
+    }
+
+    // Rebuild the RTX payload-type map from the surviving codecs.
+    mRecvStreamConfig.rtp.rtx_associated_payload_types.clear();
+    for (auto& codec : recv_codecs) {
+      if (codec->RtxPayloadTypeIsSet()) {
+        mRecvStreamConfig.rtp.AddRtxBinding(codec->mRTXPayloadType,
+                                            codec->mType);
+      }
+    }
+    // SetRemoteSSRC should have populated this already
+    mRecvSSRC = mRecvStreamConfig.rtp.remote_ssrc;
+
+    // XXX ugh! same SSRC==0 problem that webrtc.org has
+    if (mRecvSSRC == 0) {
+      // Handle un-signalled SSRCs by creating a random one and then when it
+      // actually gets set, we'll destroy and recreate. Simpler than trying to
+      // unwind all the logic that assumes the receive stream is created and
+      // started when we ConfigureRecvMediaCodecs()
+      uint32_t ssrc = GenerateRandomSSRC();
+      if (ssrc == 0) {
+        // webrtc.org code has fits if you select an SSRC of 0, so that's how
+        // we signal an error.
+        return kMediaConduitUnknownError;
+      }
+
+      mRecvStreamConfig.rtp.remote_ssrc = ssrc;
+      mRecvSSRC = ssrc;
+    }
+
+    // 0 isn't allowed. Would be best to ask for a random SSRC from the
+    // RTP code. Would need to call rtp_sender.cc -- GenerateNewSSRC(),
+    // which isn't exposed. It's called on collision, or when we decide to
+    // send. it should be called on receiver creation. Here, we're
+    // generating the SSRC value - but this causes ssrc_forced in set in
+    // rtp_sender, which locks us into the SSRC - even a collision won't
+    // change it!!!
+    MOZ_ASSERT(!mSendStreamConfig.rtp.ssrcs.empty());
+    auto ssrc = mSendStreamConfig.rtp.ssrcs.front();
+    Unused << NS_WARN_IF(ssrc == mRecvStreamConfig.rtp.remote_ssrc);
+
+    // The local SSRC must not collide with the remote one; re-roll until it
+    // differs (GenerateRandomSSRC() returning 0 signals failure).
+    while (ssrc == mRecvStreamConfig.rtp.remote_ssrc) {
+      ssrc = GenerateRandomSSRC();
+      if (ssrc == 0) {
+        return kMediaConduitUnknownError;
+      }
+    }
+
+    mRecvStreamConfig.rtp.local_ssrc = ssrc;
+    CSFLogDebug(LOGTAG,
+                "%s (%p): Local SSRC 0x%08x (of %u), remote SSRC 0x%08x",
+                __FUNCTION__, (void*)this, ssrc,
+                (uint32_t)mSendStreamConfig.rtp.ssrcs.size(),
+                mRecvStreamConfig.rtp.remote_ssrc);
+
+    // XXX Copy over those that are the same and don't rebuild them
+    mRecvCodecList = std::move(recv_codecs);
+
+    DeleteRecvStream();
+    return StartReceivingLocked();
+  }
+  return kMediaConduitNoError;
+}
+
+// Creates a video decoder for the given codec type. Preference order:
+// MediaDataDecoder (platform), then per-codec external/hardware decoders,
+// then the software webrtc.org implementation. May return nullptr for
+// unsupported types. Main-thread only.
+std::unique_ptr<webrtc::VideoDecoder> WebrtcVideoConduit::CreateDecoder(
+    webrtc::VideoCodecType aType) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  std::unique_ptr<webrtc::VideoDecoder> decoder = nullptr;
+  mRecvCodecPluginID = 0;
+
+#ifdef MOZ_WEBRTC_MEDIACODEC
+  bool enabled = false;
+#endif
+
+  // Attempt to create a decoder using MediaDataDecoder.
+  decoder.reset(MediaDataCodec::CreateDecoder(aType));
+  if (decoder) {
+    return decoder;
+  }
+
+  switch (aType) {
+    case webrtc::VideoCodecType::kVideoCodecH264:
+      // get an external decoder
+      decoder.reset(GmpVideoCodec::CreateDecoder());
+      if (decoder) {
+        // Remember the GMP plugin id so errors can be routed back to the PC.
+        mRecvCodecPluginID =
+            static_cast<WebrtcVideoDecoder*>(decoder.get())->PluginID();
+      }
+      break;
+
+    case webrtc::VideoCodecType::kVideoCodecVP8:
+#ifdef MOZ_WEBRTC_MEDIACODEC
+      // attempt to get a decoder
+      enabled = mozilla::Preferences::GetBool(
+          "media.navigator.hardware.vp8_decode.acceleration_enabled", false);
+      if (enabled) {
+        nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
+        if (gfxInfo) {
+          int32_t status;
+          nsCString discardFailureId;
+
+          // Only use hardware decode if gfx has it whitelisted.
+          if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(
+                  nsIGfxInfo::FEATURE_WEBRTC_HW_ACCELERATION_DECODE,
+                  discardFailureId, &status))) {
+            if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
+              NS_WARNING(
+                  "VP8 decoder hardware is not whitelisted: disabling.\n");
+            } else {
+              decoder = MediaCodecVideoCodec::CreateDecoder(
+                  MediaCodecVideoCodec::CodecType::CODEC_VP8);
+            }
+          }
+        }
+      }
+#endif
+      // Use a software VP8 decoder as a fallback.
+      if (!decoder) {
+        decoder = webrtc::VP8Decoder::Create();
+      }
+      break;
+
+    case webrtc::VideoCodecType::kVideoCodecVP9:
+      MOZ_ASSERT(webrtc::VP9Decoder::IsSupported());
+      decoder = webrtc::VP9Decoder::Create();
+      break;
+
+    default:
+      break;
+  }
+
+  return decoder;
+}
+
+// Creates a video encoder for the given codec type. Tries the platform
+// encoder first (behind a pref), then per-codec external or webrtc.org
+// implementations. VP8 goes through EncoderSimulcastProxy, which calls back
+// into CreateVideoEncoder() below. May return nullptr. Main-thread only.
+std::unique_ptr<webrtc::VideoEncoder> WebrtcVideoConduit::CreateEncoder(
+    webrtc::VideoCodecType aType) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  std::unique_ptr<webrtc::VideoEncoder> encoder = nullptr;
+  mSendCodecPluginID = 0;
+
+#ifdef MOZ_WEBRTC_MEDIACODEC
+  bool enabled = false;
+#endif
+
+  if (StaticPrefs::media_webrtc_platformencoder()) {
+    encoder.reset(MediaDataCodec::CreateEncoder(aType));
+    if (encoder) {
+      return encoder;
+    }
+  }
+
+  switch (aType) {
+    case webrtc::VideoCodecType::kVideoCodecH264:
+      // get an external encoder
+      encoder.reset(GmpVideoCodec::CreateEncoder());
+      if (encoder) {
+        // Remember the GMP plugin id so errors can be routed back to the PC.
+        mSendCodecPluginID =
+            static_cast<WebrtcVideoEncoder*>(encoder.get())->PluginID();
+      }
+      break;
+
+    case webrtc::VideoCodecType::kVideoCodecVP8:
+      // The proxy picks between single-stream and simulcast encoders and
+      // calls our CreateVideoEncoder() factory hook for the actual codec.
+      encoder.reset(new webrtc::EncoderSimulcastProxy(
+          this, webrtc::SdpVideoFormat(cricket::kVp8CodecName)));
+      break;
+
+    case webrtc::VideoCodecType::kVideoCodecVP9:
+      encoder = webrtc::VP9Encoder::Create();
+      break;
+
+    default:
+      break;
+  }
+  return encoder;
+}
+
+// EncoderFactory interface requirement; not expected to be reached in
+// practice, so it asserts and returns a minimal VP8-only list.
+std::vector<webrtc::SdpVideoFormat> WebrtcVideoConduit::GetSupportedFormats()
+    const {
+  MOZ_ASSERT_UNREACHABLE("Unexpected call");
+  CSFLogError(LOGTAG, "Unexpected call to GetSupportedFormats()");
+  std::vector<webrtc::SdpVideoFormat> formats;
+  formats.emplace_back("VP8");
+  return formats;
+}
+
+// EncoderFactory interface requirement; not expected to be reached in
+// practice. Reports a plain software encoder with no internal source.
+WebrtcVideoConduit::CodecInfo WebrtcVideoConduit::QueryVideoEncoder(
+    const webrtc::SdpVideoFormat& format) const {
+  MOZ_ASSERT_UNREACHABLE("Unexpected call");
+  CSFLogError(LOGTAG, "Unexpected call to QueryVideoEncoder()");
+  CodecInfo info;
+  info.has_internal_source = false;
+  info.is_hardware_accelerated = false;
+  return info;
+}
+
+// Factory hook invoked by EncoderSimulcastProxy (see CreateEncoder above).
+// Only VP8 is expected. Tries the MediaCodec hardware encoder when enabled
+// and whitelisted, otherwise falls back to the software VP8 encoder.
+std::unique_ptr<webrtc::VideoEncoder> WebrtcVideoConduit::CreateVideoEncoder(
+    const webrtc::SdpVideoFormat& format) {
+  MOZ_ASSERT(format.name == "VP8");
+  std::unique_ptr<webrtc::VideoEncoder> encoder = nullptr;
+#ifdef MOZ_WEBRTC_MEDIACODEC
+  // Fix: `enabled` was referenced without a declaration in this function
+  // (it only exists in CreateEncoder()), which broke MOZ_WEBRTC_MEDIACODEC
+  // builds.
+  bool enabled = mozilla::Preferences::GetBool(
+      "media.navigator.hardware.vp8_encode.acceleration_enabled", false);
+  if (enabled) {
+    nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
+    if (gfxInfo) {
+      int32_t status;
+      nsCString discardFailureId;
+
+      if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(
+              nsIGfxInfo::FEATURE_WEBRTC_HW_ACCELERATION_ENCODE,
+              discardFailureId, &status))) {
+        if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
+          NS_WARNING("VP8 encoder hardware is not whitelisted: disabling.\n");
+        } else {
+          encoder = MediaCodecVideoCodec::CreateEncoder(
+              MediaCodecVideoCodec::CodecType::CODEC_VP8);
+        }
+      }
+    }
+  }
+#endif
+  // Use a software VP8 encoder as a fallback. Fix: only fall back when no
+  // hardware encoder was created above — previously this unconditionally
+  // overwrote any MediaCodec encoder, unlike the parallel CreateDecoder()
+  // logic.
+  if (!encoder) {
+    encoder = webrtc::VP8Encoder::Create();
+  }
+  return encoder;
+}
+
+// XXX we need to figure out how to feed back changes in preferred capture
+// resolution to the getUserMedia source.
+// Applies codec constraints (max width/height, max-fs) and sink wants to the
+// video adapter, and updates the sending framerate if it changed. Caller
+// must hold mMutex.
+void WebrtcVideoConduit::SelectSendResolution(unsigned short width,
+                                              unsigned short height) {
+  mMutex.AssertCurrentThreadOwns();
+  // XXX This will do bandwidth-resolution adaptation as well - bug 877954
+
+  // Enforce constraints
+  if (mCurSendCodecConfig) {
+    uint16_t max_width = mCurSendCodecConfig->mEncodingConstraints.maxWidth;
+    uint16_t max_height = mCurSendCodecConfig->mEncodingConstraints.maxHeight;
+    if (max_width || max_height) {
+      // A zero constraint means "unbounded" on that axis.
+      max_width = max_width ? max_width : UINT16_MAX;
+      max_height = max_height ? max_height : UINT16_MAX;
+      ConstrainPreservingAspectRatio(max_width, max_height, &width, &height);
+    }
+
+    int max_fs = mSinkWantsPixelCount;
+    // Limit resolution to max-fs
+    if (mCurSendCodecConfig->mEncodingConstraints.maxFs) {
+      // max-fs is in macroblocks, convert to pixels
+      max_fs = std::min(
+          max_fs,
+          static_cast<int>(mCurSendCodecConfig->mEncodingConstraints.maxFs *
+                           (16 * 16)));
+    }
+    mVideoAdapter->OnResolutionFramerateRequest(
+        rtc::Optional<int>(), max_fs, std::numeric_limits<int>::max());
+  }
+
+  unsigned int framerate = SelectSendFrameRate(
+      mCurSendCodecConfig.get(), mSendingFramerate, width, height);
+  if (mSendingFramerate != framerate) {
+    CSFLogDebug(LOGTAG, "%s: framerate changing to %u (from %u)", __FUNCTION__,
+                framerate, mSendingFramerate);
+    mSendingFramerate = framerate;
+    // Propagate the new framerate to the stream factory.
+    mVideoStreamFactory->SetSendingFramerate(mSendingFramerate);
+  }
+}
+
+// Registers a frame sink (or updates its wants) with the broadcaster.
+// Off-main-thread callers are bounced to main; only sinks that are still
+// registered by then are updated, to avoid a use-after-free on a sink that
+// was removed in the interim. Main-thread callers must hold mMutex.
+void WebrtcVideoConduit::AddOrUpdateSink(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+    const rtc::VideoSinkWants& wants) {
+  if (!NS_IsMainThread()) {
+    // This may be called off main thread, but only to update an already added
+    // sink. If we add it after the dispatch we're at risk of a UAF.
+    NS_DispatchToMainThread(
+        NS_NewRunnableFunction("WebrtcVideoConduit::UpdateSink",
+                               [this, self = RefPtr<WebrtcVideoConduit>(this),
+                                sink, wants = std::move(wants)]() {
+                                 if (mRegisteredSinks.Contains(sink)) {
+                                   AddOrUpdateSinkNotLocked(sink, wants);
+                                 }
+                               }));
+    return;
+  }
+
+  mMutex.AssertCurrentThreadOwns();
+  if (!mRegisteredSinks.Contains(sink)) {
+    mRegisteredSinks.AppendElement(sink);
+  }
+  mVideoBroadcaster.AddOrUpdateSink(sink, wants);
+  // Recompute the aggregated wants across all sinks.
+  OnSinkWantsChanged(mVideoBroadcaster.wants());
+}
+
+// Convenience wrapper for callers that do not already hold mMutex.
+void WebrtcVideoConduit::AddOrUpdateSinkNotLocked(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+    const rtc::VideoSinkWants& wants) {
+  MutexAutoLock guard(mMutex);
+  AddOrUpdateSink(sink, wants);
+}
+
+// Unregisters a frame sink and lets the aggregated wants be recomputed.
+// Main-thread only; caller must hold mMutex.
+void WebrtcVideoConduit::RemoveSink(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  mRegisteredSinks.RemoveElement(sink);
+  mVideoBroadcaster.RemoveSink(sink);
+  OnSinkWantsChanged(mVideoBroadcaster.wants());
+}
+
+// Convenience wrapper for callers that do not already hold mMutex.
+void WebrtcVideoConduit::RemoveSinkNotLocked(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  MutexAutoLock guard(mMutex);
+  RemoveSink(sink);
+}
+
+// Records the aggregated sink wants (max pixel count) and flags the send
+// resolution for recalculation on the next frame. No-op when scaling is
+// locked by pref or no send codec is configured yet. Main-thread only;
+// caller must hold mMutex.
+void WebrtcVideoConduit::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mLockScaling) {
+    return;
+  }
+
+  CSFLogDebug(LOGTAG, "%s (send SSRC %u (0x%x)) - wants pixels = %d",
+              __FUNCTION__, mSendStreamConfig.rtp.ssrcs.front(),
+              mSendStreamConfig.rtp.ssrcs.front(), wants.max_pixel_count);
+
+  if (!mCurSendCodecConfig) {
+    return;
+  }
+
+  mSinkWantsPixelCount = wants.max_pixel_count;
+  // Picked up by SendVideoFrame() on the next frame.
+  mUpdateResolution = true;
+}
+
+// Entry point for captured frames on the send side. Recomputes the send
+// resolution when the frame size (or sink wants) changed, asks the video
+// adapter whether/how to scale the frame, crops+scales if needed, and hands
+// the result to the broadcaster for the registered sinks/encoder.
+MediaConduitErrorCode WebrtcVideoConduit::SendVideoFrame(
+    const webrtc::VideoFrame& frame) {
+  // XXX Google uses a "timestamp_aligner" to translate timestamps from the
+  // camera via TranslateTimestamp(); we should look at doing the same. This
+  // avoids sampling error when capturing frames, but google had to deal with
+  // some broken cameras, include Logitech c920's IIRC.
+
+  int cropWidth;
+  int cropHeight;
+  int adaptedWidth;
+  int adaptedHeight;
+  {
+    // mMutex is held only for the resolution/adapter bookkeeping; the actual
+    // crop/scale below happens outside the lock.
+    MutexAutoLock lock(mMutex);
+    CSFLogVerbose(LOGTAG, "WebrtcVideoConduit %p %s (send SSRC %u (0x%x))",
+                  this, __FUNCTION__, mSendStreamConfig.rtp.ssrcs.front(),
+                  mSendStreamConfig.rtp.ssrcs.front());
+
+    if (mUpdateResolution || frame.width() != mLastWidth ||
+        frame.height() != mLastHeight) {
+      // See if we need to recalculate what we're sending.
+      CSFLogVerbose(LOGTAG, "%s: call SelectSendResolution with %ux%u",
+                    __FUNCTION__, frame.width(), frame.height());
+      MOZ_ASSERT(frame.width() != 0 && frame.height() != 0);
+      // Note coverity will flag this since it thinks they can be 0
+      MOZ_ASSERT(mCurSendCodecConfig);
+
+      mLastWidth = frame.width();
+      mLastHeight = frame.height();
+      mUpdateResolution = false;
+      SelectSendResolution(frame.width(), frame.height());
+    }
+
+    // adapt input video to wants of sink
+    if (!mVideoBroadcaster.frame_wanted()) {
+      return kMediaConduitNoError;
+    }
+
+    if (!mVideoAdapter->AdaptFrameResolution(
+            frame.width(), frame.height(),
+            frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec, &cropWidth,
+            &cropHeight, &adaptedWidth, &adaptedHeight)) {
+      // VideoAdapter dropped the frame.
+      return kMediaConduitNoError;
+    }
+  }
+
+  // If we have zero width or height, drop the frame here. Attempting to send
+  // it will cause all sorts of problems in the webrtc.org code.
+  if (cropWidth == 0 || cropHeight == 0) {
+    return kMediaConduitNoError;
+  }
+
+  // Center the crop region within the source frame.
+  int cropX = (frame.width() - cropWidth) / 2;
+  int cropY = (frame.height() - cropHeight) / 2;
+
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer;
+  if (adaptedWidth == frame.width() && adaptedHeight == frame.height()) {
+    // No adaption - optimized path.
+    buffer = frame.video_frame_buffer();
+  } else {
+    // Adapted I420 frame.
+    rtc::scoped_refptr<webrtc::I420Buffer> i420Buffer =
+        mBufferPool.CreateBuffer(adaptedWidth, adaptedHeight);
+    if (!i420Buffer) {
+      CSFLogWarn(LOGTAG, "Creating a buffer for scaling failed, pool is empty");
+      return kMediaConduitNoError;
+    }
+    i420Buffer->CropAndScaleFrom(*frame.video_frame_buffer()->GetI420().get(),
+                                 cropX, cropY, cropWidth, cropHeight);
+    buffer = i420Buffer;
+  }
+
+  mVideoBroadcaster.OnFrame(webrtc::VideoFrame(
+      buffer, frame.timestamp(), frame.render_time_ms(), frame.rotation()));
+
+  // Stats bookkeeping lives on the STS thread; the self-reference is released
+  // back on main to keep destruction on the right thread.
+  mStsThread->Dispatch(NS_NewRunnableFunction(
+      "SendStreamStatistics::FrameDeliveredToEncoder",
+      [self = RefPtr<WebrtcVideoConduit>(this), this]() mutable {
+        mSendStreamStats.FrameDeliveredToEncoder();
+        NS_ReleaseOnMainThread("SendStreamStatistics::FrameDeliveredToEncoder",
+                               self.forget());
+      }));
+  return kMediaConduitNoError;
+}
+
+// Transport Layer Callbacks
+
+// Hands a raw RTP/RTCP packet to the webrtc.org packet receiver.
+// STS-thread only.
+MediaConduitErrorCode WebrtcVideoConduit::DeliverPacket(const void* data,
+                                                        int len) {
+  ASSERT_ON_THREAD(mStsThread);
+
+  // Bug 1499796 - we need to get passed the time the packet was received
+  const webrtc::PacketReceiver::DeliveryStatus status =
+      mCall->Call()->Receiver()->DeliverPacket(
+          webrtc::MediaType::VIDEO, static_cast<const uint8_t*>(data), len,
+          webrtc::PacketTime());
+
+  if (status == webrtc::PacketReceiver::DELIVERY_OK) {
+    return kMediaConduitNoError;
+  }
+
+  CSFLogError(LOGTAG, "%s DeliverPacket Failed, %d", __FUNCTION__, status);
+  return kMediaConduitRTPProcessingFailed;
+}
+
+// Handles an inbound RTP packet on the STS thread. When SSRC changes are
+// allowed (or the initial SSRC is still unknown), packets for a new SSRC are
+// queued while the receive stream is rebuilt on main via SetRemoteSSRC();
+// queued packets are then replayed on STS. Otherwise the packet is delivered
+// directly.
+MediaConduitErrorCode WebrtcVideoConduit::ReceivedRTPPacket(
+    const void* data, int len, webrtc::RTPHeader& header) {
+  ASSERT_ON_THREAD(mStsThread);
+
+  if (mAllowSsrcChange || mWaitingForInitialSsrc) {
+    // Handle the unknown ssrc (and ssrc-not-signaled case).
+    // We can't just do this here; it has to happen on MainThread :-(
+    // We also don't want to drop the packet, nor stall this thread, so we hold
+    // the packet (and any following) for inserting once the SSRC is set.
+    if (mRtpPacketQueue.IsQueueActive()) {
+      mRtpPacketQueue.Enqueue(data, len);
+      return kMediaConduitNoError;
+    }
+
+    bool switchRequired = mRecvSSRC != header.ssrc;
+    if (switchRequired) {
+      // We need to check that the newly received ssrc is not already
+      // associated with ulpfec or rtx. This is how webrtc.org handles
+      // things, see https://codereview.webrtc.org/1226093002.
+      MutexAutoLock lock(mMutex);
+      const webrtc::VideoReceiveStream::Config::Rtp& rtp =
+          mRecvStreamConfig.rtp;
+      switchRequired =
+          rtp.rtx_associated_payload_types.find(header.payloadType) ==
+              rtp.rtx_associated_payload_types.end() &&
+          rtp.ulpfec_payload_type != header.payloadType;
+    }
+
+    if (switchRequired) {
+      // a new switch needs to be done
+      // any queued packets are from a previous switch that hasn't completed
+      // yet; drop them and only process the latest SSRC
+      mRtpPacketQueue.Clear();
+      mRtpPacketQueue.Enqueue(data, len);
+
+      CSFLogDebug(LOGTAG, "%s: switching from SSRC %u to %u", __FUNCTION__,
+                  static_cast<uint32_t>(mRecvSSRC), header.ssrc);
+      // we "switch" here immediately, but buffer until the queue is released
+      mRecvSSRC = header.ssrc;
+
+      // Ensure lamba captures refs
+      NS_DispatchToMainThread(NS_NewRunnableFunction(
+          "WebrtcVideoConduit::WebrtcGmpPCHandleSetter",
+          [this, self = RefPtr<WebrtcVideoConduit>(this),
+           ssrc = header.ssrc]() mutable {
+            // Normally this is done in CreateOrUpdateMediaPipeline() for
+            // initial creation and renegotiation, but here we're rebuilding the
+            // Receive channel at a lower level. This is needed whenever we're
+            // creating a GMPVideoCodec (in particular, H264) so it can
+            // communicate errors to the PC.
+            WebrtcGmpPCHandleSetter setter(mPCHandle);
+            // TODO: This is problematic with rtx enabled, we don't know if
+            // new ssrc is for rtx or not. This is fixed in a later patch in
+            // this series.
+            SetRemoteSSRC(
+                ssrc, 0);  // this will likely re-create the VideoReceiveStream
+            // We want to unblock the queued packets on the original thread
+            mStsThread->Dispatch(NS_NewRunnableFunction(
+                "WebrtcVideoConduit::QueuedPacketsHandler",
+                [this, self = RefPtr<WebrtcVideoConduit>(this),
+                 ssrc]() mutable {
+                  if (ssrc != mRecvSSRC) {
+                    // this is an intermediate switch; another is in-flight
+                    return;
+                  }
+                  mRtpPacketQueue.DequeueAll(this);
+                  NS_ReleaseOnMainThread(
+                      "WebrtcVideoConduit::QueuedPacketsHandler",
+                      self.forget());
+                }));
+          }));
+      return kMediaConduitNoError;
+    }
+  }
+
+  // NOTE(review): the seq#/SSRC below are read from fixed RTP header offsets
+  // in the raw packet (bytes 2-3 and 8-11).
+  CSFLogVerbose(LOGTAG, "%s: seq# %u, Len %d, SSRC %u (0x%x) ", __FUNCTION__,
+                (uint16_t)ntohs(((uint16_t*)data)[1]), len,
+                (uint32_t)ntohl(((uint32_t*)data)[2]),
+                (uint32_t)ntohl(((uint32_t*)data)[2]));
+
+  if (DeliverPacket(data, len) != kMediaConduitNoError) {
+    CSFLogError(LOGTAG, "%s RTP Processing Failed", __FUNCTION__);
+    return kMediaConduitRTPProcessingFailed;
+  }
+  return kMediaConduitNoError;
+}
+
+// Handles an inbound RTCP packet on the STS thread and records the receive
+// time for stats.
+MediaConduitErrorCode WebrtcVideoConduit::ReceivedRTCPPacket(const void* data,
+                                                             int len) {
+  ASSERT_ON_THREAD(mStsThread);
+
+  CSFLogVerbose(LOGTAG, " %s Len %d ", __FUNCTION__, len);
+
+  const MediaConduitErrorCode err = DeliverPacket(data, len);
+  if (err != kMediaConduitNoError) {
+    CSFLogError(LOGTAG, "%s RTCP Processing Failed", __FUNCTION__);
+    return kMediaConduitRTPProcessingFailed;
+  }
+
+  // TODO(bug 1496533): We will need to keep separate timestamps for each SSRC,
+  // and for each SSRC we will need to keep a timestamp for SR and RR.
+  mLastRtcpReceived = Some(GetNow());
+  return kMediaConduitNoError;
+}
+
+// TODO(bug 1496533): We will need to add a type (ie; SR or RR) param here, or
+// perhaps break this function into two functions, one for each type.
+// STS-thread accessor for the timestamp of the last RTCP packet received.
+Maybe<DOMHighResTimeStamp> WebrtcVideoConduit::LastRtcpReceived() const {
+  ASSERT_ON_THREAD(mStsThread);
+  return mLastRtcpReceived;
+}
+
+// Public entry point: take the state mutex, then defer to the locked variant.
+MediaConduitErrorCode WebrtcVideoConduit::StopTransmitting() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MutexAutoLock guard(mMutex);
+  return StopTransmittingLocked();
+}
+
+// Public entry point: take the state mutex, then defer to the locked variant.
+MediaConduitErrorCode WebrtcVideoConduit::StartTransmitting() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MutexAutoLock guard(mMutex);
+  return StartTransmittingLocked();
+}
+
+// Public entry point: take the state mutex, then defer to the locked variant.
+MediaConduitErrorCode WebrtcVideoConduit::StopReceiving() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MutexAutoLock guard(mMutex);
+  return StopReceivingLocked();
+}
+
+// Public entry point: take the state mutex, then defer to the locked variant.
+MediaConduitErrorCode WebrtcVideoConduit::StartReceiving() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MutexAutoLock guard(mMutex);
+  return StartReceivingLocked();
+}
+
+// Stops the send stream (if any) and clears the transmitting flag.
+// Main-thread only; caller must hold mMutex. Idempotent.
+MediaConduitErrorCode WebrtcVideoConduit::StopTransmittingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (!mEngineTransmitting) {
+    return kMediaConduitNoError;
+  }
+
+  if (mSendStream) {
+    CSFLogDebug(LOGTAG, "%s Engine Already Sending. Attemping to Stop ",
+                __FUNCTION__);
+    mSendStream->Stop();
+  }
+
+  mEngineTransmitting = false;
+  UpdateVideoStatsTimer();
+  return kMediaConduitNoError;
+}
+
+// Creates the send stream if needed, starts it, and signals the network as
+// up. Main-thread only; caller must hold mMutex. Idempotent when already
+// transmitting.
+MediaConduitErrorCode WebrtcVideoConduit::StartTransmittingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineTransmitting) {
+    return kMediaConduitNoError;
+  }
+
+  CSFLogDebug(LOGTAG, "%s Attemping to start... ", __FUNCTION__);
+  // Start Transmitting on the video engine
+  if (!mSendStream) {
+    MediaConduitErrorCode rval = CreateSendStream();
+    if (rval != kMediaConduitNoError) {
+      CSFLogError(LOGTAG, "%s Start Send Error %d ", __FUNCTION__, rval);
+      return rval;
+    }
+  }
+
+  mSendStream->Start();
+  // XXX File a bug to consider hooking this up to the state of mtransport
+  mCall->Call()->SignalChannelNetworkState(webrtc::MediaType::VIDEO,
+                                           webrtc::kNetworkUp);
+  mEngineTransmitting = true;
+  UpdateVideoStatsTimer();
+
+  return kMediaConduitNoError;
+}
+
+// Stops the receive stream (if running) and clears the receiving flag.
+// Main-thread only; caller must hold mMutex.
+MediaConduitErrorCode WebrtcVideoConduit::StopReceivingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  // Are we receiving already? If so, stop receiving and playout
+  // since we can't apply new recv codec when the engine is playing.
+  const bool needStop = mEngineReceiving && mRecvStream;
+  if (needStop) {
+    CSFLogDebug(LOGTAG, "%s Engine Already Receiving . Attemping to Stop ",
+                __FUNCTION__);
+    mRecvStream->Stop();
+  }
+
+  mEngineReceiving = false;
+  UpdateVideoStatsTimer();
+  return kMediaConduitNoError;
+}
+
+// Creates the receive stream if needed, starts it, and signals the network
+// as up. Main-thread only; caller must hold mMutex. Idempotent when already
+// receiving.
+MediaConduitErrorCode WebrtcVideoConduit::StartReceivingLocked() {
+  MOZ_ASSERT(NS_IsMainThread());
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mEngineReceiving) {
+    return kMediaConduitNoError;
+  }
+
+  CSFLogDebug(LOGTAG, "%s Attemping to start... (SSRC %u (0x%x))", __FUNCTION__,
+              static_cast<uint32_t>(mRecvSSRC),
+              static_cast<uint32_t>(mRecvSSRC));
+  // Start Receiving on the video engine
+  if (!mRecvStream) {
+    MediaConduitErrorCode rval = CreateRecvStream();
+    if (rval != kMediaConduitNoError) {
+      CSFLogError(LOGTAG, "%s Start Receive Error %d ", __FUNCTION__, rval);
+      return rval;
+    }
+  }
+
+  mRecvStream->Start();
+  // XXX File a bug to consider hooking this up to the state of mtransport
+  mCall->Call()->SignalChannelNetworkState(webrtc::MediaType::VIDEO,
+                                           webrtc::kNetworkUp);
+  mEngineReceiving = true;
+  UpdateVideoStatsTimer();
+
+  return kMediaConduitNoError;
+}
+
+// WebRTC::RTP Callback Implementation
+// Called on MTG thread
+// Forwards an outbound RTP packet to the transmitter transport. Reports the
+// send time back to the Call for send-side BWE when a packet id is present.
+// Returns false if no transport is attached or the send failed.
+bool WebrtcVideoConduit::SendRtp(const uint8_t* packet, size_t length,
+                                 const webrtc::PacketOptions& options) {
+  // NOTE(review): the seq#/SSRC logged here are read from fixed RTP header
+  // offsets in the raw packet (bytes 2-3 and 8-11).
+  CSFLogVerbose(LOGTAG, "%s Sent RTP Packet seq %d, len %lu, SSRC %u (0x%x)",
+                __FUNCTION__, (uint16_t)ntohs(*((uint16_t*)&packet[2])),
+                (unsigned long)length,
+                (uint32_t)ntohl(*((uint32_t*)&packet[8])),
+                (uint32_t)ntohl(*((uint32_t*)&packet[8])));
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if (!mTransmitterTransport ||
+      NS_FAILED(mTransmitterTransport->SendRtpPacket(packet, length))) {
+    CSFLogError(LOGTAG, "%s RTP Packet Send Failed ", __FUNCTION__);
+    return false;
+  }
+  if (options.packet_id >= 0) {
+    // PR_Now() is microseconds; the Call wants milliseconds.
+    int64_t now_ms = PR_Now() / 1000;
+    mCall->Call()->OnSentPacket({options.packet_id, now_ms});
+  }
+  return true;
+}
+
+// Called from multiple threads including webrtc Process thread
+// Forwards an outbound RTCP packet, preferring the receiver transport and
+// falling back to the transmitter transport. Returns false if neither send
+// succeeds.
+bool WebrtcVideoConduit::SendRtcp(const uint8_t* packet, size_t length) {
+  CSFLogVerbose(LOGTAG, "%s : len %lu ", __FUNCTION__, (unsigned long)length);
+  // We come here if we have only one pipeline/conduit setup,
+  // such as for unidirectional streams.
+  // We also end up here if we are receiving
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if (mReceiverTransport &&
+      NS_SUCCEEDED(mReceiverTransport->SendRtcpPacket(packet, length))) {
+    // Might be a sender report, might be a receiver report, we don't know.
+    CSFLogDebug(LOGTAG, "%s Sent RTCP Packet ", __FUNCTION__);
+    return true;
+  }
+  if (mTransmitterTransport &&
+      NS_SUCCEEDED(mTransmitterTransport->SendRtcpPacket(packet, length))) {
+    return true;
+  }
+
+  CSFLogError(LOGTAG, "%s RTCP Packet Send Failed ", __FUNCTION__);
+  return false;
+}
+
+// Delivery point for decoded frames on the receive side. Notifies the
+// renderer of size changes, maintains the received-frame history (keyed on
+// size/rotation/SSRC changes), optionally decodes a latency timestamp
+// stamped into the pixels, and hands the frame to the renderer.
+void WebrtcVideoConduit::OnFrame(const webrtc::VideoFrame& video_frame) {
+  CSFLogVerbose(LOGTAG, "%s: recv SSRC %u (0x%x), size %ux%u", __FUNCTION__,
+                static_cast<uint32_t>(mRecvSSRC),
+                static_cast<uint32_t>(mRecvSSRC), video_frame.width(),
+                video_frame.height());
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+
+  if (!mRenderer) {
+    CSFLogError(LOGTAG, "%s Renderer is NULL ", __FUNCTION__);
+    return;
+  }
+
+  bool needsNewHistoryElement = !mReceivedFrameHistory.mEntries.Length();
+
+  if (mReceivingWidth != video_frame.width() ||
+      mReceivingHeight != video_frame.height()) {
+    mReceivingWidth = video_frame.width();
+    mReceivingHeight = video_frame.height();
+    mRenderer->FrameSizeChange(mReceivingWidth, mReceivingHeight);
+    needsNewHistoryElement = true;
+  }
+
+  uint32_t remoteSsrc;
+  if (!GetRemoteSSRC(&remoteSsrc) && needsNewHistoryElement) {
+    // Frame was decoded after the connection ended
+    return;
+  }
+
+  // A new history entry is also needed when rotation or either SSRC changed
+  // since the last recorded entry.
+  if (!needsNewHistoryElement) {
+    auto& currentEntry = mReceivedFrameHistory.mEntries.LastElement();
+    needsNewHistoryElement =
+        currentEntry.mRotationAngle !=
+            static_cast<unsigned long>(video_frame.rotation()) ||
+        currentEntry.mLocalSsrc != mRecvSSRC ||
+        currentEntry.mRemoteSsrc != remoteSsrc;
+  }
+
+  // Record frame history
+  const auto historyNow = mCall->GetNow();
+  if (needsNewHistoryElement) {
+    dom::RTCVideoFrameHistoryEntryInternal frameHistoryElement;
+    frameHistoryElement.mConsecutiveFrames = 0;
+    frameHistoryElement.mWidth = video_frame.width();
+    frameHistoryElement.mHeight = video_frame.height();
+    frameHistoryElement.mRotationAngle =
+        static_cast<unsigned long>(video_frame.rotation());
+    frameHistoryElement.mFirstFrameTimestamp = historyNow;
+    frameHistoryElement.mLocalSsrc = mRecvSSRC;
+    frameHistoryElement.mRemoteSsrc = remoteSsrc;
+    if (!mReceivedFrameHistory.mEntries.AppendElement(frameHistoryElement,
+                                                      fallible)) {
+      mozalloc_handle_oom(0);
+    }
+  }
+  auto& currentEntry = mReceivedFrameHistory.mEntries.LastElement();
+
+  currentEntry.mConsecutiveFrames++;
+  currentEntry.mLastFrameTimestamp = historyNow;
+  // Attempt to retrieve an timestamp encoded in the image pixels if enabled.
+  if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+    uint64_t now = PR_Now();
+    uint64_t timestamp = 0;
+    uint8_t* data = const_cast<uint8_t*>(
+        video_frame.video_frame_buffer()->GetI420()->DataY());
+    bool ok = YuvStamper::Decode(
+        mReceivingWidth, mReceivingHeight, mReceivingWidth, data,
+        reinterpret_cast<unsigned char*>(&timestamp), sizeof(timestamp), 0, 0);
+    if (ok) {
+      VideoLatencyUpdate(now - timestamp);
+    }
+  }
+
+  mRenderer->RenderVideoFrame(*video_frame.video_frame_buffer(),
+                              video_frame.timestamp(),
+                              video_frame.render_time_ms());
+}
+
+// Appends a snapshot of this conduit's received-frame history (maintained by
+// OnFrame under mTransportMonitor) to the caller's output sequence.
+// Returns false only if the fallible append fails; mozalloc_handle_oom is
+// invoked first to report the OOM condition.
+bool WebrtcVideoConduit::AddFrameHistory(
+    dom::Sequence<dom::RTCVideoFrameHistoryInternal>* outHistories) const {
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if (!outHistories->AppendElement(mReceivedFrameHistory, fallible)) {
+    mozalloc_handle_oom(0);
+    return false;
+  }
+  return true;
+}
+
+// Debug helper: logs name, payload type and encoding constraints for every
+// currently-configured receive codec.
+void WebrtcVideoConduit::DumpCodecDB() const {
+  // mRecvCodecList is main-thread only.
+  MOZ_ASSERT(NS_IsMainThread());
+
+  for (auto& entry : mRecvCodecList) {
+    CSFLogDebug(LOGTAG, "Payload Name: %s", entry->mName.c_str());
+    CSFLogDebug(LOGTAG, "Payload Type: %d", entry->mType);
+    CSFLogDebug(LOGTAG, "Payload Max Frame Size: %d",
+                entry->mEncodingConstraints.maxFs);
+    CSFLogDebug(LOGTAG, "Payload Max Frame Rate: %d",
+                entry->mEncodingConstraints.maxFps);
+  }
+}
+
+// Folds a new latency sample (a PR_Now() delta, i.e. NSPR microseconds) into
+// the running average. mVideoLatencyAvg is kept scaled by sRoundingPadding
+// (1024) so this integer EWMA (decay sAlphaNum/sAlphaDen = 7/8) retains
+// sub-unit precision; MozVideoLatencyAvg() removes the scale on read.
+// At steady state the stored value converges to sRoundingPadding * sample.
+void WebrtcVideoConduit::VideoLatencyUpdate(uint64_t newSample) {
+  mTransportMonitor.AssertCurrentThreadIn();
+
+  mVideoLatencyAvg =
+      (sRoundingPadding * newSample + sAlphaNum * mVideoLatencyAvg) / sAlphaDen;
+}
+
+// Returns the running video-latency average, removing the fixed-point scale
+// factor applied by VideoLatencyUpdate().
+uint64_t WebrtcVideoConduit::MozVideoLatencyAvg() {
+  mTransportMonitor.AssertCurrentThreadIn();
+
+  return mVideoLatencyAvg / sRoundingPadding;
+}
+
+// webrtc::RtpPacketSinkInterface override, STS thread only. Feeds each parsed
+// received RTP packet into the RTP source observer (for RTCRtpSource stats),
+// tagging it with the receive stream's current jitter.
+void WebrtcVideoConduit::OnRtpPacket(const webrtc::RtpPacketReceived& aPacket) {
+  ASSERT_ON_THREAD(mStsThread);
+  webrtc::RTPHeader header;
+  aPacket.GetHeader(&header);
+  // Audio-level extensions make no sense on a video packet; skip source
+  // tracking for such packets rather than record bogus data.
+  if (header.extension.hasAudioLevel ||
+      header.extension.csrcAudioLevels.numAudioLevels) {
+    CSFLogDebug(LOGTAG,
+                "Video packet has audio level extension."
+                "RTP source tracking ignored for this packet.");
+    return;
+  }
+  mRtpSourceObserver->OnRtpPacket(header, mRecvStreamStats.JitterMs());
+}
+
+// webrtc::RtcpEventObserver override: an RTCP BYE was received. The observer
+// is main-thread only (see SetRtcpEventObserver), so bounce the notification
+// to main; the RefPtr keeps this conduit alive across the dispatch.
+void WebrtcVideoConduit::OnRtcpBye() {
+  RefPtr<WebrtcVideoConduit> self = this;
+  NS_DispatchToMainThread(media::NewRunnableFrom([self]() mutable {
+    MOZ_ASSERT(NS_IsMainThread());
+    if (self->mRtcpEventObserver) {
+      self->mRtcpEventObserver->OnRtcpBye();
+    }
+    return NS_OK;
+  }));
+}
+
+// webrtc::RtcpEventObserver override: the remote end stopped sending RTCP.
+// Forwarded to the main thread for the same reasons as OnRtcpBye above.
+void WebrtcVideoConduit::OnRtcpTimeout() {
+  RefPtr<WebrtcVideoConduit> self = this;
+  NS_DispatchToMainThread(media::NewRunnableFrom([self]() mutable {
+    MOZ_ASSERT(NS_IsMainThread());
+    if (self->mRtcpEventObserver) {
+      self->mRtcpEventObserver->OnRtcpTimeout();
+    }
+    return NS_OK;
+  }));
+}
+
+// Installs (or clears, with nullptr) the observer notified of RTCP BYE and
+// RTCP timeout events. Main thread only; the observer is also invoked on main.
+void WebrtcVideoConduit::SetRtcpEventObserver(
+    mozilla::RtcpEventObserver* observer) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mRtcpEventObserver = observer;
+}
+
+// Returns the plugin ID of the external codec in use, preferring the send
+// side over the receive side, or 0 when neither side uses a plugin codec.
+uint64_t WebrtcVideoConduit::CodecPluginID() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (mSendCodecPluginID) {
+    return mSendCodecPluginID;
+  }
+  if (mRecvCodecPluginID) {
+    return mRecvCodecPluginID;
+  }
+
+  return 0;
+}
+
+// Returns true when switching to newConfig cannot be done by reconfiguring
+// the existing send stream: no stream exists yet, or the codec name, payload
+// type, NACK setting or FEC setting differs from the current config.
+bool WebrtcVideoConduit::RequiresNewSendStream(
+    const VideoCodecConfig& newConfig) const {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  return !mCurSendCodecConfig ||
+         mCurSendCodecConfig->mName != newConfig.mName ||
+         mCurSendCodecConfig->mType != newConfig.mType ||
+         mCurSendCodecConfig->RtcpFbNackIsSet("") !=
+             newConfig.RtcpFbNackIsSet("") ||
+         mCurSendCodecConfig->RtcpFbFECIsSet() != newConfig.RtcpFbFECIsSet()
+#if 0
+         // XXX Do we still want/need to do this?
+         // (Disabled H264-parameter compatibility check kept for reference.)
+         || (newConfig.mName == "H264" &&
+             !CompatibleH264Config(mEncoderSpecificH264, newConfig))
+#endif
+      ;
+}
+
+// Queries gfxInfo for the H264 hardware-acceleration feature status.
+// Returns true only when the feature status is FEATURE_STATUS_OK; any
+// failure to obtain the service or the status is treated as "no hardware".
+bool WebrtcVideoConduit::HasH264Hardware() {
+  nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
+  if (!gfxInfo) {
+    return false;
+  }
+  int32_t status;
+  nsCString discardFailureId;  // Failure id is not surfaced to callers.
+  return NS_SUCCEEDED(gfxInfo->GetFeatureStatus(
+             nsIGfxInfo::FEATURE_WEBRTC_HW_ACCELERATION_H264, discardFailureId,
+             &status)) &&
+         status == nsIGfxInfo::FEATURE_STATUS_OK;
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/VideoConduit.h b/dom/media/webrtc/libwebrtcglue/VideoConduit.h
new file mode 100644
index 0000000000..0528d06501
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/VideoConduit.h
@@ -0,0 +1,687 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef VIDEO_SESSION_H_
+#define VIDEO_SESSION_H_
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/UniquePtr.h"
+#include "nsITimer.h"
+
+#include "MediaConduitInterface.h"
+#include "common/MediaEngineWrapper.h"
+#include "RunningStat.h"
+#include "RtpPacketQueue.h"
+#include "transport/runnable_utils.h"
+
+// conflicts with #include of scoped_ptr.h
+#undef FF
+// Video Engine Includes
+#include "api/video_codecs/video_encoder_factory.h"
+#include "webrtc/call/call.h"
+#include "webrtc/common_types.h"
+#ifdef FF
+# undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h.
+#endif
+#include "webrtc/api/video_codecs/video_decoder.h"
+#include "webrtc/api/video_codecs/video_encoder.h"
+#include "webrtc/api/video_codecs/sdp_video_format.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/media/base/videosinkinterface.h"
+#include "webrtc/media/base/videoadapter.h"
+#include "webrtc/media/base/videobroadcaster.h"
+#include <functional>
+#include <memory>
+/** This file hosts several structures identifying different aspects
+ * of a RTP Session.
+ */
+
+namespace mozilla {
+
+// Convert (SI) kilobits/sec to (SI) bits/sec.
+// Argument and expansion are parenthesized so expression arguments
+// (e.g. KBPS(a + b)) and surrounding operators (e.g. x / KBPS(y)) expand
+// correctly; all existing call sites pass plain literals, so behavior for
+// current callers is unchanged.
+#define KBPS(kbps) ((kbps)*1000)
+
+// Floor for the codec bitrate, in bits/sec.
+const int kViEMinCodecBitrate_bps = KBPS(30);
+// Maximum RTP payload size used for video, in bytes.
+const unsigned int kVideoMtu = 1200;
+// Upper bound on the encoder quantization parameter.
+const int kQpMax = 56;
+
+// Returns the smaller of a and b, treating zero as "unset" (defined in
+// VideoConduit.cpp).
+template <typename T>
+T MinIgnoreZero(const T& a, const T& b);
+
+class VideoStreamFactory;
+class WebrtcAudioConduit;
+
+// Interface of external video encoder for WebRTC.
+class WebrtcVideoEncoder : public VideoEncoder, public webrtc::VideoEncoder {};
+
+// Interface of external video decoder for WebRTC.
+class WebrtcVideoDecoder : public VideoDecoder, public webrtc::VideoDecoder {};
+
+/**
+ * Concrete class for Video session. Hooks up
+ * - media-source and target to external transport
+ */
+class WebrtcVideoConduit
+    : public VideoSessionConduit,
+      public webrtc::RtcpEventObserver,
+      public webrtc::RtpPacketSinkInterface,
+      public webrtc::Transport,
+      public webrtc::VideoEncoderFactory,
+      public rtc::VideoSinkInterface<webrtc::VideoFrame>,
+      public rtc::VideoSourceInterface<webrtc::VideoFrame> {
+ public:
+  // VoiceEngine defined constant for Payload Name Size.
+  static const unsigned int CODEC_PLNAME_SIZE;
+
+  // Returns true when both encoder and decoder are HW accelerated.
+  static bool HasH264Hardware();
+
+  MediaConduitErrorCode SetLocalRTPExtensions(
+      MediaSessionConduitLocalDirection aDirection,
+      const RtpExtList& aExtensions) override;
+
+  /**
+   * Function to attach Renderer end-point for the Media-Video conduit.
+   * @param aRenderer : Reference to the concrete mozilla Video renderer
+   * implementation Note: Multiple invocations of this API shall remove an
+   * existing renderer and attaches the new to the Conduit.
+   */
+  MediaConduitErrorCode AttachRenderer(
+      RefPtr<mozilla::VideoRenderer> aVideoRenderer) override;
+  void DetachRenderer() override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTP Frames to the VideoEngine for decoding
+   */
+  MediaConduitErrorCode ReceivedRTPPacket(const void* data, int len,
+                                          webrtc::RTPHeader& header) override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTCP Frames to the VideoEngine for decoding
+   */
+  MediaConduitErrorCode ReceivedRTCPPacket(const void* data, int len) override;
+  Maybe<DOMHighResTimeStamp> LastRtcpReceived() const override;
+  DOMHighResTimeStamp GetNow() const override { return mCall->GetNow(); }
+
+  MediaConduitErrorCode StopTransmitting() override;
+  MediaConduitErrorCode StartTransmitting() override;
+  MediaConduitErrorCode StopReceiving() override;
+  MediaConduitErrorCode StartReceiving() override;
+
+  // NOTE(review): the *Locked variants appear to assume the caller already
+  // holds the relevant lock (presumably mMutex) -- confirm against
+  // VideoConduit.cpp before calling directly.
+  MediaConduitErrorCode StopTransmittingLocked();
+  MediaConduitErrorCode StartTransmittingLocked();
+  MediaConduitErrorCode StopReceivingLocked();
+  MediaConduitErrorCode StartReceivingLocked();
+
+  /**
+   * Function to configure sending codec mode for different content
+   */
+  MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) override;
+
+  /**
+   * Function to configure send codec for the video session
+   * @param sendSessionConfig: CodecConfiguration
+   * @result: On Success, the video engine is configured with passed in codec
+   * for send
+   * On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple time. Invoking this API may involve
+   * restarting transmission sub-system on the engine.
+   */
+  MediaConduitErrorCode ConfigureSendMediaCodec(
+      const VideoCodecConfig* codecInfo,
+      const RtpRtcpConfig& aRtpRtcpConfig) override;
+
+  /**
+   * Function to configure list of receive codecs for the video session
+   * @param sendSessionConfig: CodecConfiguration
+   * @result: On Success, the video engine is configured with passed in codec
+   * for send
+   * Also the playout is enabled.
+   * On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple time. Invoking this API may involve
+   * restarting transmission sub-system on the engine.
+   */
+  MediaConduitErrorCode ConfigureRecvMediaCodecs(
+      const std::vector<UniquePtr<VideoCodecConfig>>& codecConfigList,
+      const RtpRtcpConfig& aRtpRtcpConfig) override;
+
+  /**
+   * Register Transport for this Conduit. RTP and RTCP frames from the
+   * VideoEngine shall be passed to the registered transport for transporting
+   * externally.
+   */
+  MediaConduitErrorCode SetTransmitterTransport(
+      RefPtr<TransportInterface> aTransport) override;
+
+  MediaConduitErrorCode SetReceiverTransport(
+      RefPtr<TransportInterface> aTransport) override;
+
+  /**
+   * Function to select and change the encoding resolution based on incoming
+   * frame size and current available bandwidth.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  void SelectSendResolution(unsigned short width, unsigned short height);
+
+  /**
+   * Function to deliver a capture video frame for encoding and transport.
+   * If the frame's timestamp is 0, it will be automatically generated.
+   *
+   * NOTE: ConfigureSendMediaCodec() must be called before this function can
+   *       be invoked. This ensures the inserted video-frames can be
+   *       transmitted by the conduit.
+   */
+  MediaConduitErrorCode SendVideoFrame(
+      const webrtc::VideoFrame& frame) override;
+
+  /**
+   * webrtc::Transport method implementation
+   * ---------------------------------------
+   * Webrtc transport implementation to send and receive RTP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoStream
+   */
+  bool SendRtp(const uint8_t* packet, size_t length,
+               const webrtc::PacketOptions& options) override;
+
+  /**
+   * webrtc::Transport method implementation
+   * ---------------------------------------
+   * Webrtc transport implementation to send and receive RTCP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoEngine
+   */
+  bool SendRtcp(const uint8_t* packet, size_t length) override;
+
+  /*
+   * webrtc:VideoSinkInterface implementation
+   * -------------------------------
+   */
+  void OnFrame(const webrtc::VideoFrame& frame) override;
+
+  /*
+   * webrtc:VideoSourceInterface implementation
+   * -------------------------------
+   */
+  void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+                       const rtc::VideoSinkWants& wants) override;
+  void AddOrUpdateSinkNotLocked(
+      rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+      const rtc::VideoSinkWants& wants);
+
+  void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+  void RemoveSinkNotLocked(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink);
+
+  void OnSinkWantsChanged(const rtc::VideoSinkWants& wants);
+
+  uint64_t CodecPluginID() override;
+
+  void SetPCHandle(const std::string& aPCHandle) override {
+    MOZ_ASSERT(NS_IsMainThread());
+    mPCHandle = aPCHandle;
+  }
+
+  void DeleteStreams() override;
+
+  bool Denoising() const { return mDenoising; }
+
+  uint8_t SpatialLayers() const { return mSpatialLayers; }
+
+  uint8_t TemporalLayers() const { return mTemporalLayers; }
+
+  webrtc::VideoCodecMode CodecMode() const {
+    MOZ_ASSERT(NS_IsMainThread());
+    return mCodecMode;
+  }
+
+  WebrtcVideoConduit(RefPtr<WebRtcCallWrapper> aCall,
+                     nsCOMPtr<nsISerialEventTarget> aStsThread);
+  virtual ~WebrtcVideoConduit();
+
+  MediaConduitErrorCode InitMain();
+  virtual MediaConduitErrorCode Init();
+
+  std::vector<uint32_t> GetLocalSSRCs() override;
+  bool SetLocalSSRCs(const std::vector<uint32_t>& ssrcs,
+                     const std::vector<uint32_t>& rtxSsrcs) override;
+  // Can be called from any thread
+  bool GetRemoteSSRC(uint32_t* ssrc) override;
+  bool SetRemoteSSRC(uint32_t ssrc, uint32_t rtxSsrc) override;
+  bool UnsetRemoteSSRC(uint32_t ssrc) override;
+  bool SetLocalCNAME(const char* cname) override;
+  bool SetLocalMID(const std::string& mid) override;
+
+  void SetSyncGroup(const std::string& group) override;
+
+  bool SetRemoteSSRCLocked(uint32_t ssrc, uint32_t rtxSsrc);
+
+  bool GetSendPacketTypeStats(
+      webrtc::RtcpPacketTypeCounter* aPacketCounts) override;
+
+  bool GetRecvPacketTypeStats(
+      webrtc::RtcpPacketTypeCounter* aPacketCounts) override;
+
+  void PollStats();
+  void UpdateVideoStatsTimer();
+  bool GetVideoEncoderStats(double* framerateMean, double* framerateStdDev,
+                            double* bitrateMean, double* bitrateStdDev,
+                            uint32_t* droppedFrames, uint32_t* framesEncoded,
+                            Maybe<uint64_t>* qpSum) override;
+  bool GetVideoDecoderStats(double* framerateMean, double* framerateStdDev,
+                            double* bitrateMean, double* bitrateStdDev,
+                            uint32_t* discardedPackets,
+                            uint32_t* framesDecoded) override;
+  bool GetRTPReceiverStats(unsigned int* jitterMs,
+                           unsigned int* cumulativeLost) override;
+  bool GetRTCPReceiverReport(uint32_t* jitterMs, uint32_t* packetsReceived,
+                             uint64_t* bytesReceived, uint32_t* cumulativeLost,
+                             Maybe<double>* aOutRttSec) override;
+  bool GetRTCPSenderReport(unsigned int* packetsSent, uint64_t* bytesSent,
+                           DOMHighResTimeStamp* aRemoteTimestamp) override;
+
+  Maybe<mozilla::dom::RTCBandwidthEstimationInternal> GetBandwidthEstimation()
+      override;
+
+  void GetRtpSources(nsTArray<dom::RTCRtpSourceEntry>& outSources) override;
+  bool AddFrameHistory(dom::Sequence<dom::RTCVideoFrameHistoryInternal>*
+                           outHistories) const override;
+
+  uint64_t MozVideoLatencyAvg();
+
+  void DisableSsrcChanges() override {
+    ASSERT_ON_THREAD(mStsThread);
+    mAllowSsrcChange = false;
+  }
+
+  /**
+   * Callback from libwebrtc with the parsed packet for synchronization
+   * source tracking. STS thread only.
+   */
+  void OnRtpPacket(const webrtc::RtpPacketReceived& packet) override;
+
+  Maybe<RefPtr<VideoSessionConduit>> AsVideoSessionConduit() override {
+    return Some(RefPtr<VideoSessionConduit>(this));
+  }
+
+  void RecordTelemetry() const override {
+    ASSERT_ON_THREAD(mStsThread);
+    mSendStreamStats.RecordTelemetry();
+    mRecvStreamStats.RecordTelemetry();
+  }
+
+  void OnRtcpBye() override;
+
+  void OnRtcpTimeout() override;
+
+  void SetRtcpEventObserver(mozilla::RtcpEventObserver* observer) override;
+
+ private:
+  // Don't allow copying/assigning.
+  WebrtcVideoConduit(const WebrtcVideoConduit&) = delete;
+  void operator=(const WebrtcVideoConduit&) = delete;
+
+  /**
+   * Statistics for the Call associated with this VideoConduit.
+   * Single threaded.
+   */
+  class CallStatistics {
+   public:
+    explicit CallStatistics(nsCOMPtr<nsISerialEventTarget> aStatsThread)
+        : mStatsThread(aStatsThread) {}
+    void Update(const webrtc::Call::Stats& aStats);
+    Maybe<mozilla::dom::RTCBandwidthEstimationInternal> Stats() const;
+    Maybe<DOMHighResTimeStamp> RttSec() const;
+
+   protected:
+    const nsCOMPtr<nsISerialEventTarget> mStatsThread;
+
+   private:
+    Maybe<webrtc::Call::Stats> mStats = Nothing();
+    Maybe<DOMHighResTimeStamp> mRttSec = Nothing();
+  };
+
+  /**
+   * Shared statistics for receive and transmit video streams.
+   * Single threaded.
+   */
+  class StreamStatistics {
+   public:
+    explicit StreamStatistics(nsCOMPtr<nsISerialEventTarget> aStatsThread)
+        : mStatsThread(aStatsThread) {}
+    void Update(const double aFrameRate, const double aBitrate,
+                const webrtc::RtcpPacketTypeCounter& aPacketCounts);
+    /**
+     * Returns gathered stream statistics
+     * @param aOutFrMean: mean framerate
+     * @param aOutFrStdDev: standard deviation of framerate
+     * @param aOutBrMean: mean bitrate
+     * @param aOutBrStdDev: standard deviation of bitrate
+     */
+    bool GetVideoStreamStats(double& aOutFrMean, double& aOutFrStdDev,
+                             double& aOutBrMean, double& aOutBrStdDev) const;
+
+    /**
+     * Accumulates video quality telemetry
+     */
+    void RecordTelemetry() const;
+    const webrtc::RtcpPacketTypeCounter& PacketCounts() const;
+    bool Active() const;
+    void SetActive(bool aActive);
+    virtual bool IsSend() const { return false; };
+
+   protected:
+    const nsCOMPtr<nsISerialEventTarget> mStatsThread;
+
+   private:
+    bool mActive = false;
+    RunningStat mFrameRate;
+    RunningStat mBitRate;
+    webrtc::RtcpPacketTypeCounter mPacketCounts;
+  };
+
+  /**
+   * Statistics for sending streams. Single threaded.
+   */
+  class SendStreamStatistics : public StreamStatistics {
+   public:
+    explicit SendStreamStatistics(nsCOMPtr<nsISerialEventTarget> aStatsThread)
+        : StreamStatistics(
+              std::forward<nsCOMPtr<nsISerialEventTarget>>(aStatsThread)) {}
+    /**
+     * Returns the calculate number of dropped frames
+     */
+    uint32_t DroppedFrames() const;
+    /**
+     * Returns the number of frames that have been encoded so far
+     */
+    uint32_t FramesEncoded() const;
+    void Update(const webrtc::VideoSendStream::Stats& aStats,
+                uint32_t aConfiguredSsrc);
+    /**
+     * Call once for every frame delivered for encoding
+     */
+    void FrameDeliveredToEncoder();
+
+    bool SsrcFound() const;
+    uint32_t JitterMs() const;
+    uint32_t PacketsLost() const;
+    uint64_t BytesReceived() const;
+    uint32_t PacketsReceived() const;
+    Maybe<uint64_t> QpSum() const;
+    bool IsSend() const override { return true; };
+
+   private:
+    uint32_t mDroppedFrames = 0;
+    uint32_t mFramesEncoded = 0;
+    int32_t mFramesDeliveredToEncoder = 0;
+
+    bool mSsrcFound = false;
+    uint32_t mJitterMs = 0;
+    uint32_t mPacketsLost = 0;
+    uint64_t mBytesReceived = 0;
+    uint32_t mPacketsReceived = 0;
+    Maybe<uint64_t> mQpSum;
+  };
+
+  /**
+   * Statistics for receiving streams. Single threaded.
+   */
+  class ReceiveStreamStatistics : public StreamStatistics {
+   public:
+    explicit ReceiveStreamStatistics(
+        nsCOMPtr<nsISerialEventTarget> aStatsThread)
+        : StreamStatistics(
+              std::forward<nsCOMPtr<nsISerialEventTarget>>(aStatsThread)) {}
+    uint32_t BytesSent() const;
+    /**
+     * Returns the number of discarded packets
+     */
+    uint32_t DiscardedPackets() const;
+    /**
+     * Returns the number of frames decoded
+     */
+    uint32_t FramesDecoded() const;
+    uint32_t JitterMs() const;
+    uint32_t PacketsLost() const;
+    uint32_t PacketsSent() const;
+    uint32_t Ssrc() const;
+    DOMHighResTimeStamp RemoteTimestamp() const;
+    void Update(const webrtc::VideoReceiveStream::Stats& aStats);
+
+   private:
+    uint32_t mBytesSent = 0;
+    uint32_t mDiscardedPackets = 0;
+    uint32_t mFramesDecoded = 0;
+    uint32_t mJitterMs = 0;
+    uint32_t mPacketsLost = 0;
+    uint32_t mPacketsSent = 0;
+    uint32_t mSsrc = 0;
+    DOMHighResTimeStamp mRemoteTimestamp = 0;
+  };
+
+  // Utility function to dump recv codec database
+  void DumpCodecDB() const;
+
+  // Video Latency Test averaging filter
+  void VideoLatencyUpdate(uint64_t new_sample);
+
+  MediaConduitErrorCode CreateSendStream();
+  void DeleteSendStream();
+  MediaConduitErrorCode CreateRecvStream();
+  void DeleteRecvStream();
+
+  std::unique_ptr<webrtc::VideoDecoder> CreateDecoder(
+      webrtc::VideoCodecType aType);
+  std::unique_ptr<webrtc::VideoEncoder> CreateEncoder(
+      webrtc::VideoCodecType aType);
+
+  // webrtc::VideoEncoderFactory
+  std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
+
+  CodecInfo QueryVideoEncoder(
+      const webrtc::SdpVideoFormat& format) const override;
+
+  std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(
+      const webrtc::SdpVideoFormat& format) override;
+
+  MediaConduitErrorCode DeliverPacket(const void* data, int len) override;
+
+  bool RequiresNewSendStream(const VideoCodecConfig& newConfig) const;
+
+  mutable mozilla::ReentrantMonitor mTransportMonitor;
+
+  // Accessed on any thread under mTransportMonitor.
+  RefPtr<TransportInterface> mTransmitterTransport;
+
+  // Accessed on any thread under mTransportMonitor.
+  RefPtr<TransportInterface> mReceiverTransport;
+
+  // Accessed on any thread under mTransportMonitor.
+  RefPtr<mozilla::VideoRenderer> mRenderer;
+
+  // Accessed on any thread under mTransportMonitor.
+  unsigned short mReceivingWidth = 0;
+
+  // Accessed on any thread under mTransportMonitor.
+  unsigned short mReceivingHeight = 0;
+
+  // Socket transport service thread that runs stats queries against us. Any
+  // thread.
+  const nsCOMPtr<nsISerialEventTarget> mStsThread;
+
+  // Guards the members below that are documented as "Guarded by mMutex".
+  // NOTE(review): lock ordering relative to mTransportMonitor is not visible
+  // in this header -- confirm against VideoConduit.cpp before nesting them.
+  Mutex mMutex;
+
+  // Adapter handling resolution constraints from signaling and sinks.
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  UniquePtr<cricket::VideoAdapter> mVideoAdapter;
+
+  // Our own record of the sinks added to mVideoBroadcaster so we can support
+  // dispatching updates to sinks from off-main-thread. Main thread only.
+  AutoTArray<rtc::VideoSinkInterface<webrtc::VideoFrame>*, 1> mRegisteredSinks;
+
+  // Broadcaster that distributes our frames to all registered sinks.
+  // Sinks can only be added, updated and removed on main thread.
+  // Frames can be passed in on any thread.
+  rtc::VideoBroadcaster mVideoBroadcaster;
+
+  // Buffer pool used for scaling frames.
+  // Accessed on the frame-feeding thread only.
+  webrtc::I420BufferPool mBufferPool;
+
+  // Engine state we are concerned with. Written on main thread and read
+  // anywhere.
+  mozilla::Atomic<bool>
+      mEngineTransmitting;  // If true ==> Transmit Subsystem is up and running
+  mozilla::Atomic<bool>
+      mEngineReceiving;  // if true ==> Receive Subsystem up and running
+
+  // Local database of currently applied receive codecs. Main thread only.
+  nsTArray<UniquePtr<VideoCodecConfig>> mRecvCodecList;
+
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  UniquePtr<VideoCodecConfig> mCurSendCodecConfig;
+
+  // NOTE(review): these two carry no guarded-by note in the original; they
+  // appear related to sink-wants driven reconfiguration -- confirm their
+  // threading rules against VideoConduit.cpp.
+  bool mUpdateResolution = false;
+  int mSinkWantsPixelCount = std::numeric_limits<int>::max();
+
+  // Bookkeeping of send stream stats. Sts thread only.
+  SendStreamStatistics mSendStreamStats;
+
+  // Bookkeeping of send stream stats. Sts thread only.
+  ReceiveStreamStatistics mRecvStreamStats;
+
+  // Bookkeeping of call stats. Sts thread only.
+  CallStatistics mCallStats;
+
+  // Must call webrtc::Call::DestroyVideoReceive/SendStream to delete this.
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  webrtc::VideoReceiveStream* mRecvStream = nullptr;
+
+  // Must call webrtc::Call::DestroyVideoReceive/SendStream to delete this.
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  webrtc::VideoSendStream* mSendStream = nullptr;
+
+  // Written on the frame feeding thread.
+  // Guarded by mMutex, except for reads on the frame feeding thread.
+  unsigned short mLastWidth = 0;
+
+  // Written on the frame feeding thread.
+  // Guarded by mMutex, except for reads on the frame feeding thread.
+  unsigned short mLastHeight = 0;
+
+  // Accessed under mMutex.
+  unsigned int mSendingFramerate;
+
+  // Written on main thread at creation,
+  // then written or read on any thread under mTransportMonitor.
+  bool mVideoLatencyTestEnable = false;
+
+  // Accessed from any thread under mTransportMonitor.
+  uint64_t mVideoLatencyAvg = 0;
+
+  // All in bps.
+  // All written on main thread and guarded by mMutex, except for reads on main.
+  int mMinBitrate = 0;
+  int mStartBitrate = 0;
+  int mPrefMaxBitrate = 0;
+  int mNegotiatedMaxBitrate = 0;
+  int mMinBitrateEstimate = 0;
+
+  // Set to true to force denoising on.
+  // Written at creation, then read anywhere.
+  bool mDenoising = false;
+
+  // Set to true to ignore sink wants (scaling due to bwe and cpu usage).
+  // Written at creation, then read anywhere.
+  bool mLockScaling = false;
+
+  // Written at creation, then read anywhere.
+  uint8_t mSpatialLayers = 1;
+
+  // Written at creation, then read anywhere.
+  uint8_t mTemporalLayers = 1;
+
+  // EWMA parameters for the video-latency filter; see VideoLatencyUpdate().
+  static const unsigned int sAlphaNum = 7;
+  static const unsigned int sAlphaDen = 8;
+  static const unsigned int sRoundingPadding = 1024;
+
+  // Main thread only.
+  RefPtr<WebrtcAudioConduit> mSyncedTo;
+
+  // Main thread only.
+  webrtc::VideoCodecMode mActiveCodecMode;
+  webrtc::VideoCodecMode mCodecMode;
+
+  // WEBRTC.ORG Call API
+  // Const so can be accessed on any thread. Most methods are called on
+  // main thread, though Receiver() is called on STS. This seems fine.
+  const RefPtr<WebRtcCallWrapper> mCall;
+
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  webrtc::VideoSendStream::Config mSendStreamConfig;
+
+  // Main thread only.
+  webrtc::VideoEncoderConfig mEncoderConfig;
+
+  // Written only on main thread. Guarded by mMutex, except for reads on main.
+  // Calls can happen on any thread.
+  RefPtr<rtc::RefCountedObject<VideoStreamFactory>> mVideoStreamFactory;
+
+  // Main thread only.
+  webrtc::VideoReceiveStream::Config mRecvStreamConfig;
+
+  // Are SSRC changes without signaling allowed or not.
+  // Accessed only on mStsThread.
+  bool mAllowSsrcChange = true;
+
+  // Accessed only on mStsThread.
+  bool mWaitingForInitialSsrc = true;
+
+  // Accessed during configuration/signaling (main),
+  // and when receiving packets (sts).
+  Atomic<uint32_t> mRecvSSRC;  // this can change during a stream!
+  // Accessed from both the STS and main thread for a variety of things
+  // Set when receiving packets
+  Atomic<uint32_t> mRemoteSSRC;  // this can change during a stream!
+
+  // Accessed only on mStsThread.
+  RtpPacketQueue mRtpPacketQueue;
+
+  // The lifetime of these codecs are maintained by the VideoConduit instance.
+  // They are passed to the webrtc::VideoSendStream or VideoReceiveStream,
+  // on construction.
+  std::unique_ptr<webrtc::VideoEncoder> mEncoder;  // only one encoder for now
+  std::vector<std::unique_ptr<webrtc::VideoDecoder>> mDecoders;
+  // Main thread only
+  uint64_t mSendCodecPluginID = 0;
+  // Main thread only
+  uint64_t mRecvCodecPluginID = 0;
+
+  // Timer that updates video stats periodically. Main thread only.
+  nsCOMPtr<nsITimer> mVideoStatsTimer;
+  // True if mVideoStatsTimer is running. Main thread only.
+  bool mVideoStatsTimerActive = false;
+
+  // Main thread only
+  std::string mPCHandle;
+
+  // Accessed only on mStsThread
+  Maybe<DOMHighResTimeStamp> mLastRtcpReceived;
+
+  // Accessed only on main thread.
+  mozilla::RtcpEventObserver* mRtcpEventObserver = nullptr;
+
+  // Accessed from main and mStsThread. Uses locks internally.
+  RefPtr<RtpSourceObserver> mRtpSourceObserver;
+  // Tracking the attributes of received frames over time
+  // Protected by mTransportMonitor
+  dom::RTCVideoFrameHistoryInternal mReceivedFrameHistory;
+};
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
new file mode 100644
index 0000000000..c1bf9cb62d
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "VideoStreamFactory.h"
+
+#include "common/browser_logging/CSFLog.h"
+#include "nsThreadUtils.h"
+#include "VideoConduit.h"
+
+namespace mozilla {
+
+#ifdef LOGTAG
+# undef LOGTAG
+#endif
+#define LOGTAG "WebrtcVideoSessionConduit"
+
+#define MB_OF(w, h) \
+ ((unsigned int)((((w + 15) >> 4)) * ((unsigned int)((h + 15) >> 4))))
+// For now, try to set the max rates well above the knee in the curve.
+// Chosen somewhat arbitrarily; it's hard to find good data oriented for
+// realtime interactive/talking-head recording. These rates assume
+// 30fps.
+
+// XXX Populate this based on a pref (which we should consider sorting because
+// people won't assume they need to).
+static VideoStreamFactory::ResolutionAndBitrateLimits
+ kResolutionAndBitrateLimits[] = {
+ // clang-format off
+ {MB_OF(1920, 1200), KBPS(1500), KBPS(2000), KBPS(10000)}, // >HD (3K, 4K, etc)
+ {MB_OF(1280, 720), KBPS(1200), KBPS(1500), KBPS(5000)}, // HD ~1080-1200
+ {MB_OF(800, 480), KBPS(200), KBPS(800), KBPS(2500)}, // HD ~720
+ {MB_OF(480, 270), KBPS(150), KBPS(500), KBPS(2000)}, // WVGA
+ {tl::Max<MB_OF(400, 240), MB_OF(352, 288)>::value, KBPS(125), KBPS(300), KBPS(1300)}, // VGA
+ {MB_OF(176, 144), KBPS(100), KBPS(150), KBPS(500)}, // WQVGA, CIF
+ {0 , KBPS(40), KBPS(80), KBPS(250)} // QCIF and below
+ // clang-format on
+};
+
+static VideoStreamFactory::ResolutionAndBitrateLimits GetLimitsFor(
+ unsigned int aWidth, unsigned int aHeight, int aCapBps = 0) {
+ // max bandwidth should be proportional (not linearly!) to resolution, and
+ // proportional (perhaps linearly, or close) to current frame rate.
+ int fs = MB_OF(aWidth, aHeight);
+
+ for (const auto& resAndLimits : kResolutionAndBitrateLimits) {
+ if (fs > resAndLimits.resolution_in_mb &&
+ // pick the highest range where at least start rate is within cap
+ // (or if we're at the end of the array).
+ (aCapBps == 0 || resAndLimits.start_bitrate_bps <= aCapBps ||
+ resAndLimits.resolution_in_mb == 0)) {
+ return resAndLimits;
+ }
+ }
+
+ MOZ_CRASH("Loop should have handled fallback");
+}
+
+/**
+ * Function to set the encoding bitrate limits based on incoming frame size and
+ * rate
+ * @param width, height: dimensions of the frame
+ * @param min: minimum bitrate in bps
+ * @param start: bitrate in bps that the encoder should start with
+ * @param cap: user-enforced max bitrate, or 0
+ * @param pref_cap: cap enforced by prefs
+ * @param negotiated_cap: cap negotiated through SDP
+ * @param aVideoStream stream to apply bitrates to
+ */
+static void SelectBitrates(unsigned short width, unsigned short height, int min,
+ int start, int cap, int pref_cap, int negotiated_cap,
+ webrtc::VideoStream& aVideoStream) {
+ int& out_min = aVideoStream.min_bitrate_bps;
+ int& out_start = aVideoStream.target_bitrate_bps;
+ int& out_max = aVideoStream.max_bitrate_bps;
+
+ VideoStreamFactory::ResolutionAndBitrateLimits resAndLimits =
+ GetLimitsFor(width, height);
+ out_min = MinIgnoreZero(resAndLimits.min_bitrate_bps, cap);
+ out_start = MinIgnoreZero(resAndLimits.start_bitrate_bps, cap);
+ out_max = MinIgnoreZero(resAndLimits.max_bitrate_bps, cap);
+
+ // Note: negotiated_cap is the max transport bitrate - it applies to
+ // a single codec encoding, but should also apply to the sum of all
+ // simulcast layers in this encoding! So sum(layers.maxBitrate) <=
+ // negotiated_cap
+  // NOTE(review): pref_cap is asserted below but never applied here.
+ out_max = MinIgnoreZero(negotiated_cap, out_max);
+ out_min = std::min(out_min, out_max);
+ out_start = std::min(out_start, out_max);
+
+ if (min && min > out_min) {
+ out_min = min;
+ }
+ // If we try to set a minimum bitrate that is too low, ViE will reject it.
+ out_min = std::max(kViEMinCodecBitrate_bps, out_min);
+ out_max = std::max(kViEMinCodecBitrate_bps, out_max);
+ if (start && start > out_start) {
+ out_start = start;
+ }
+
+ // Ensure that min <= start <= max
+ if (out_min > out_max) {
+ out_min = out_max;
+ }
+ out_start = std::min(out_max, std::max(out_start, out_min));
+
+ MOZ_ASSERT(pref_cap == 0 || out_max <= pref_cap);
+}
+
+void VideoStreamFactory::SetCodecMode(webrtc::VideoCodecMode aCodecMode) {
+ MOZ_ASSERT(NS_IsMainThread());
+ mCodecMode = aCodecMode;
+}
+
+void VideoStreamFactory::SetSendingFramerate(unsigned int aSendingFramerate) {
+ MOZ_ASSERT(NS_IsMainThread());
+ mSendingFramerate = aSendingFramerate;
+}
+
+std::vector<webrtc::VideoStream> VideoStreamFactory::CreateEncoderStreams(
+ int width, int height, const webrtc::VideoEncoderConfig& config) {
+ // We only allow one layer when screensharing
+ const size_t streamCount =
+ mCodecMode == webrtc::VideoCodecMode::kScreensharing
+ ? 1
+ : config.number_of_streams;
+
+ MOZ_RELEASE_ASSERT(streamCount >= 1, "Should request at least one stream");
+
+ std::vector<webrtc::VideoStream> streams;
+ streams.reserve(streamCount);
+
+ // Find the highest-resolution stream
+ int highestResolutionIndex = 0;
+ for (size_t i = 1; i < streamCount; ++i) {
+ if (mCodecConfig.mEncodings[i].constraints.scaleDownBy <
+ mCodecConfig.mEncodings[highestResolutionIndex]
+ .constraints.scaleDownBy) {
+ highestResolutionIndex = i;
+ }
+ }
+
+ // This ensures all simulcast layers will be of the same aspect ratio as the
+ // input.
+ mSimulcastAdapter->OnOutputFormatRequest(
+ cricket::VideoFormat(width, height, 0, 0));
+
+ for (int idx = streamCount - 1; idx >= 0; --idx) {
+ webrtc::VideoStream video_stream;
+ auto& encoding = mCodecConfig.mEncodings[idx];
+ MOZ_ASSERT(encoding.constraints.scaleDownBy >= 1.0);
+
+ // All streams' dimensions must retain the aspect ratio of the input stream.
+ // Note that the first stream might already have been scaled by us.
+ // Webrtc.org doesn't know this, so we have to adjust lower layers manually.
+ int unusedCropWidth, unusedCropHeight, outWidth, outHeight;
+ if (idx == highestResolutionIndex) {
+ // This is the highest-resolution stream. We avoid calling
+ // AdaptFrameResolution on this because precision errors in VideoAdapter
+ // can cause the out-resolution to be an odd pixel smaller than the
+ // source (1920x1419 has caused this). We shortcut this instead.
+ outWidth = width;
+ outHeight = height;
+ } else {
+ float effectiveScaleDownBy =
+ encoding.constraints.scaleDownBy /
+ mCodecConfig.mEncodings[highestResolutionIndex]
+ .constraints.scaleDownBy;
+ MOZ_ASSERT(effectiveScaleDownBy >= 1.0);
+ mSimulcastAdapter->OnScaleResolutionBy(
+ effectiveScaleDownBy > 1.0
+ ? rtc::Optional<float>(effectiveScaleDownBy)
+ : rtc::Optional<float>());
+ bool rv = mSimulcastAdapter->AdaptFrameResolution(
+ width, height,
+ 0, // Ok, since we don't request an output format with an interval
+ &unusedCropWidth, &unusedCropHeight, &outWidth, &outHeight);
+
+ if (!rv) {
+ // The only thing that can make AdaptFrameResolution fail in this case
+ // is if this layer is scaled so far down that it has less than one
+ // pixel.
+ outWidth = 0;
+ outHeight = 0;
+ }
+ }
+
+ if (outWidth == 0 || outHeight == 0) {
+ CSFLogInfo(LOGTAG,
+ "%s Stream with RID %s ignored because of no resolution.",
+ __FUNCTION__, encoding.rid.c_str());
+ continue;
+ }
+
+ MOZ_ASSERT(outWidth > 0);
+ MOZ_ASSERT(outHeight > 0);
+ video_stream.width = outWidth;
+ video_stream.height = outHeight;
+
+    CSFLogInfo(LOGTAG, "%s Input frame %dx%d, RID %s scaling to %zux%zu",
+ __FUNCTION__, width, height, encoding.rid.c_str(),
+ video_stream.width, video_stream.height);
+
+ if (video_stream.width * height != width * video_stream.height) {
+ CSFLogInfo(LOGTAG,
+ "%s Stream with RID %s ignored because of bad aspect ratio.",
+ __FUNCTION__, encoding.rid.c_str());
+ continue;
+ }
+
+ // We want to ensure this picks up the current framerate, so indirect
+ video_stream.max_framerate = mSendingFramerate;
+
+ SelectBitrates(video_stream.width, video_stream.height, mMinBitrate,
+ mStartBitrate, encoding.constraints.maxBr, mPrefMaxBitrate,
+ mNegotiatedMaxBitrate, video_stream);
+
+ video_stream.max_qp = kQpMax;
+ video_stream.SetRid(encoding.rid);
+
+ // leave vector temporal_layer_thresholds_bps empty for non-simulcast
+ video_stream.temporal_layer_thresholds_bps.clear();
+ if (streamCount > 1) {
+ // XXX Note: in simulcast.cc in upstream code, the array value is
+ // 3(-1) for all streams, though it's in an array, except for screencasts,
+ // which use 1 (i.e 2 layers).
+
+ // Oddly, though this is a 'bps' array, nothing really looks at the
+ // values for normal video, just the size of the array to know the
+ // number of temporal layers.
+ // For VideoEncoderConfig::ContentType::kScreen, though, in
+ // video_codec_initializer.cc it uses [0] to set the target bitrate
+ // for the screenshare.
+ if (mCodecMode == webrtc::VideoCodecMode::kScreensharing) {
+ video_stream.temporal_layer_thresholds_bps.push_back(
+ video_stream.target_bitrate_bps);
+ } else {
+ video_stream.temporal_layer_thresholds_bps.resize(2);
+ }
+ // XXX Bug 1390215 investigate using more of
+ // simulcast.cc:GetSimulcastConfig() or our own algorithm to replace it
+ }
+
+ if (mCodecConfig.mName == "H264") {
+ if (mCodecConfig.mEncodingConstraints.maxMbps > 0) {
+ // Not supported yet!
+ CSFLogError(LOGTAG, "%s H.264 max_mbps not supported yet",
+ __FUNCTION__);
+ }
+ }
+ streams.push_back(video_stream);
+ }
+
+ MOZ_RELEASE_ASSERT(streams.size(), "Should configure at least one stream");
+ return streams;
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.h b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.h
new file mode 100644
index 0000000000..86f8170336
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef VideoStreamFactory_h
+#define VideoStreamFactory_h
+
+#include "CodecConfig.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/UniquePtr.h"
+#include "webrtc/media/base/videoadapter.h"
+#include "call/video_config.h"
+
+namespace mozilla {
+
+// Factory class for VideoStreams... vie_encoder.cc will call this to
+// reconfigure.
+class VideoStreamFactory
+ : public webrtc::VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ struct ResolutionAndBitrateLimits {
+ int resolution_in_mb;
+ int min_bitrate_bps;
+ int start_bitrate_bps;
+ int max_bitrate_bps;
+ };
+
+ VideoStreamFactory(VideoCodecConfig aConfig,
+ webrtc::VideoCodecMode aCodecMode, int aMinBitrate,
+ int aStartBitrate, int aPrefMaxBitrate,
+ int aNegotiatedMaxBitrate, unsigned int aSendingFramerate)
+ : mCodecMode(aCodecMode),
+ mSendingFramerate(aSendingFramerate),
+        mCodecConfig(std::move(aConfig)),
+ mMinBitrate(aMinBitrate),
+ mStartBitrate(aStartBitrate),
+ mPrefMaxBitrate(aPrefMaxBitrate),
+ mNegotiatedMaxBitrate(aNegotiatedMaxBitrate),
+ mSimulcastAdapter(MakeUnique<cricket::VideoAdapter>()) {}
+
+ void SetCodecMode(webrtc::VideoCodecMode aCodecMode);
+ void SetSendingFramerate(unsigned int aSendingFramerate);
+
+ // This gets called off-main thread and may hold internal webrtc.org
+ // locks. May *NOT* lock the conduit's mutex, to avoid deadlocks.
+ std::vector<webrtc::VideoStream> CreateEncoderStreams(
+ int width, int height, const webrtc::VideoEncoderConfig& config) override;
+
+ private:
+ // Used to limit number of streams for screensharing.
+ Atomic<webrtc::VideoCodecMode> mCodecMode;
+
+ // The framerate we're currently sending at.
+ Atomic<unsigned int> mSendingFramerate;
+
+ // The current send codec config, containing simulcast layer configs.
+ const VideoCodecConfig mCodecConfig;
+
+ // Bitrate limits in bps.
+ const int mMinBitrate = 0;
+ const int mStartBitrate = 0;
+ const int mPrefMaxBitrate = 0;
+ const int mNegotiatedMaxBitrate = 0;
+
+ // Adapter for simulcast layers. We use this to handle scaleResolutionDownBy
+ // for layers. It's separate from the conduit's mVideoAdapter to not affect
+ // scaling settings for incoming frames.
+ UniquePtr<cricket::VideoAdapter> mSimulcastAdapter;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/VideoTypes.h b/dom/media/webrtc/libwebrtcglue/VideoTypes.h
new file mode 100644
index 0000000000..e6602de5b6
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/VideoTypes.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VIDEO_TYPE_
+#define VIDEO_TYPE_
+
+namespace mozilla {
+/*
+ * Enumeration for different video types supported by the
+ * video-engine. If more types are supported in the future,
+ * newer ones shall be appended to the bottom of the list.
+ */
+enum VideoType {
+ kVideoI420 = 0,
+ kVideoYV12 = 1,
+ kVideoYUY2 = 2,
+ kVideoUYVY = 3,
+ kVideoIYUV = 4,
+ kVideoARGB = 5,
+ kVideoRGB24 = 6,
+ kVideoRGB565 = 7,
+ kVideoARGB4444 = 8,
+ kVideoARGB1555 = 9,
+ kVideoMJPEG = 10,
+ kVideoNV12 = 11,
+ kVideoNV21 = 12,
+ kVideoBGRA = 13,
+ kVideoUnknown = 99
+};
+} // namespace mozilla
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp
new file mode 100644
index 0000000000..6328c43fa1
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp
@@ -0,0 +1,993 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcGmpVideoCodec.h"
+
+#include <utility>
+#include <vector>
+
+#include "GMPLog.h"
+#include "MainThreadUtils.h"
+#include "VideoConduit.h"
+#include "gmp-video-frame-encoded.h"
+#include "gmp-video-frame-i420.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/SyncRunnable.h"
+#include "nsServiceManagerUtils.h"
+#include "transport/runnable_utils.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
+#include "webrtc/rtc_base/bind.h"
+
+namespace mozilla {
+
+WebrtcGmpPCHandleSetter::WebrtcGmpPCHandleSetter(const std::string& aPCHandle) {
+ if (!NS_IsMainThread()) {
+ MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+ return;
+ }
+ MOZ_ASSERT(sCurrentHandle.empty());
+ sCurrentHandle = aPCHandle;
+}
+
+WebrtcGmpPCHandleSetter::~WebrtcGmpPCHandleSetter() {
+ if (!NS_IsMainThread()) {
+ MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+ return;
+ }
+
+ sCurrentHandle.clear();
+}
+
+/* static */ std::string WebrtcGmpPCHandleSetter::GetCurrentHandle() {
+ if (!NS_IsMainThread()) {
+ MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+ return "";
+ }
+
+ return sCurrentHandle;
+}
+
+std::string WebrtcGmpPCHandleSetter::sCurrentHandle;
+
+// Encoder.
+WebrtcGmpVideoEncoder::WebrtcGmpVideoEncoder()
+ : mGMP(nullptr),
+ mInitting(false),
+ mHost(nullptr),
+ mMaxPayloadSize(0),
+ mCallbackMutex("WebrtcGmpVideoEncoder encoded callback mutex"),
+ mCallback(nullptr),
+ mCachedPluginId(0) {
+ mCodecParams.mGMPApiVersion = 0;
+ mCodecParams.mCodecType = kGMPVideoCodecInvalid;
+ mCodecParams.mPLType = 0;
+ mCodecParams.mWidth = 0;
+ mCodecParams.mHeight = 0;
+ mCodecParams.mStartBitrate = 0;
+ mCodecParams.mMaxBitrate = 0;
+ mCodecParams.mMinBitrate = 0;
+ mCodecParams.mMaxFramerate = 0;
+ mCodecParams.mFrameDroppingOn = false;
+ mCodecParams.mKeyFrameInterval = 0;
+ mCodecParams.mQPMax = 0;
+ mCodecParams.mNumberOfSimulcastStreams = 0;
+ mCodecParams.mMode = kGMPCodecModeInvalid;
+ if (mPCHandle.empty()) {
+ mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle();
+ }
+ MOZ_ASSERT(!mPCHandle.empty());
+}
+
+WebrtcGmpVideoEncoder::~WebrtcGmpVideoEncoder() {
+ // We should not have been destroyed if we never closed our GMP
+ MOZ_ASSERT(!mGMP);
+}
+
+static int WebrtcFrameTypeToGmpFrameType(webrtc::FrameType aIn,
+ GMPVideoFrameType* aOut) {
+ MOZ_ASSERT(aOut);
+ switch (aIn) {
+ case webrtc::kVideoFrameKey:
+ *aOut = kGMPKeyFrame;
+ break;
+ case webrtc::kVideoFrameDelta:
+ *aOut = kGMPDeltaFrame;
+ break;
+ case webrtc::kEmptyFrame:
+ *aOut = kGMPSkipFrame;
+ break;
+ default:
+ MOZ_CRASH("Unexpected webrtc::FrameType");
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+static int GmpFrameTypeToWebrtcFrameType(GMPVideoFrameType aIn,
+ webrtc::FrameType* aOut) {
+ MOZ_ASSERT(aOut);
+ switch (aIn) {
+ case kGMPKeyFrame:
+ *aOut = webrtc::kVideoFrameKey;
+ break;
+ case kGMPDeltaFrame:
+ *aOut = webrtc::kVideoFrameDelta;
+ break;
+ case kGMPSkipFrame:
+ *aOut = webrtc::kEmptyFrame;
+ break;
+ default:
+ MOZ_CRASH("Unexpected GMPVideoFrameType");
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcGmpVideoEncoder::InitEncode(
+ const webrtc::VideoCodec* aCodecSettings, int32_t aNumberOfCores,
+ size_t aMaxPayloadSize) {
+ if (!mMPS) {
+ mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+ }
+ MOZ_ASSERT(mMPS);
+
+ if (!mGMPThread) {
+ if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+
+ // Bug XXXXXX: transfer settings from codecSettings to codec.
+ GMPVideoCodec codecParams;
+ memset(&codecParams, 0, sizeof(codecParams));
+
+ codecParams.mGMPApiVersion = 33;
+ codecParams.mStartBitrate = aCodecSettings->startBitrate;
+ codecParams.mMinBitrate = aCodecSettings->minBitrate;
+ codecParams.mMaxBitrate = aCodecSettings->maxBitrate;
+ codecParams.mMaxFramerate = aCodecSettings->maxFramerate;
+ mMaxPayloadSize = aMaxPayloadSize;
+
+ memset(&mCodecSpecificInfo.codecSpecific, 0,
+ sizeof(mCodecSpecificInfo.codecSpecific));
+ mCodecSpecificInfo.codecType = webrtc::kVideoCodecH264;
+ mCodecSpecificInfo.codecSpecific.H264.packetization_mode =
+ aCodecSettings->H264().packetizationMode == 1
+ ? webrtc::H264PacketizationMode::NonInterleaved
+ : webrtc::H264PacketizationMode::SingleNalUnit;
+
+ if (mCodecSpecificInfo.codecSpecific.H264.packetization_mode ==
+ webrtc::H264PacketizationMode::NonInterleaved) {
+ mMaxPayloadSize = 0; // No limit, use FUAs
+ }
+
+ if (aCodecSettings->mode == webrtc::kScreensharing) {
+ codecParams.mMode = kGMPScreensharing;
+ } else {
+ codecParams.mMode = kGMPRealtimeVideo;
+ }
+
+ codecParams.mWidth = aCodecSettings->width;
+ codecParams.mHeight = aCodecSettings->height;
+
+ RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle));
+ mGMPThread->Dispatch(
+ WrapRunnableNM(WebrtcGmpVideoEncoder::InitEncode_g,
+ RefPtr<WebrtcGmpVideoEncoder>(this), codecParams,
+ aNumberOfCores, aMaxPayloadSize, initDone),
+ NS_DISPATCH_NORMAL);
+
+ // Since init of the GMP encoder is a multi-step async dispatch (including
+ // dispatches to main), and since this function is invoked on main, there's
+ // no safe way to block until this init is done. If an error occurs, we'll
+ // handle it later.
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+/* static */
+void WebrtcGmpVideoEncoder::InitEncode_g(
+ const RefPtr<WebrtcGmpVideoEncoder>& aThis,
+ const GMPVideoCodec& aCodecParams, int32_t aNumberOfCores,
+ uint32_t aMaxPayloadSize, const RefPtr<GmpInitDoneRunnable>& aInitDone) {
+ nsTArray<nsCString> tags;
+ tags.AppendElement("h264"_ns);
+ UniquePtr<GetGMPVideoEncoderCallback> callback(
+ new InitDoneCallback(aThis, aInitDone, aCodecParams, aMaxPayloadSize));
+ aThis->mInitting = true;
+ nsresult rv = aThis->mMPS->GetGMPVideoEncoder(nullptr, &tags, ""_ns,
+ std::move(callback));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ GMP_LOG_DEBUG("GMP Encode: GetGMPVideoEncoder failed");
+ aThis->Close_g();
+ aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
+ "GMP Encode: GetGMPVideoEncoder failed");
+ }
+}
+
+int32_t WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP,
+ GMPVideoHost* aHost,
+ std::string* aErrorOut) {
+ if (!mInitting || !aGMP || !aHost) {
+ *aErrorOut =
+ "GMP Encode: Either init was aborted, "
+ "or init failed to supply either a GMP Encoder or GMP host.";
+ if (aGMP) {
+ // This could destroy us, since aGMP may be the last thing holding a ref
+ // Return immediately.
+ aGMP->Close();
+ }
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mInitting = false;
+
+ if (mGMP && mGMP != aGMP) {
+ Close_g();
+ }
+
+ mGMP = aGMP;
+ mHost = aHost;
+ mCachedPluginId = mGMP->GetPluginId();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP,
+ GMPVideoHost* aHost,
+ const GMPVideoCodec& aCodecParams,
+ uint32_t aMaxPayloadSize,
+ std::string* aErrorOut) {
+ int32_t r = GmpInitDone(aGMP, aHost, aErrorOut);
+ if (r != WEBRTC_VIDEO_CODEC_OK) {
+ // We might have been destroyed if GmpInitDone failed.
+ // Return immediately.
+ return r;
+ }
+ mCodecParams = aCodecParams;
+ return InitEncoderForSize(aCodecParams.mWidth, aCodecParams.mHeight,
+ aErrorOut);
+}
+
+void WebrtcGmpVideoEncoder::Close_g() {
+ GMPVideoEncoderProxy* gmp(mGMP);
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+
+ if (gmp) {
+ // Do this last, since this could cause us to be destroyed
+ gmp->Close();
+ }
+}
+
+int32_t WebrtcGmpVideoEncoder::InitEncoderForSize(unsigned short aWidth,
+ unsigned short aHeight,
+ std::string* aErrorOut) {
+ mCodecParams.mWidth = aWidth;
+ mCodecParams.mHeight = aHeight;
+ // Pass dummy codecSpecific data for now...
+ nsTArray<uint8_t> codecSpecific;
+
+ GMPErr err =
+ mGMP->InitEncode(mCodecParams, codecSpecific, this, 1, mMaxPayloadSize);
+ if (err != GMPNoErr) {
+ *aErrorOut = "GMP Encode: InitEncode failed";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcGmpVideoEncoder::Encode(
+ const webrtc::VideoFrame& aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::FrameType>* aFrameTypes) {
+ MOZ_ASSERT(aInputImage.width() >= 0 && aInputImage.height() >= 0);
+ if (!aFrameTypes) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // It is safe to copy aInputImage here because the frame buffer is held by
+ // a refptr.
+ mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::Encode_g,
+ RefPtr<WebrtcGmpVideoEncoder>(this),
+ aInputImage, *aFrameTypes),
+ NS_DISPATCH_NORMAL);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void WebrtcGmpVideoEncoder::RegetEncoderForResolutionChange(
+ uint32_t aWidth, uint32_t aHeight,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone) {
+ Close_g();
+
+ UniquePtr<GetGMPVideoEncoderCallback> callback(
+ new InitDoneForResolutionChangeCallback(this, aInitDone, aWidth,
+ aHeight));
+
+ // OpenH264 codec (at least) can't handle dynamic input resolution changes
+ // re-init the plugin when the resolution changes
+ // XXX allow codec to indicate it doesn't need re-init!
+ nsTArray<nsCString> tags;
+ tags.AppendElement("h264"_ns);
+ mInitting = true;
+ if (NS_WARN_IF(NS_FAILED(mMPS->GetGMPVideoEncoder(nullptr, &tags, ""_ns,
+ std::move(callback))))) {
+ aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
+ "GMP Encode: GetGMPVideoEncoder failed");
+ }
+}
+
+void WebrtcGmpVideoEncoder::Encode_g(
+ const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
+ webrtc::VideoFrame aInputImage,
+ std::vector<webrtc::FrameType> aFrameTypes) {
+ if (!aEncoder->mGMP) {
+ // destroyed via Terminate(), failed to init, or just not initted yet
+ GMP_LOG_DEBUG("GMP Encode: not initted yet");
+ return;
+ }
+ MOZ_ASSERT(aEncoder->mHost);
+
+ if (static_cast<uint32_t>(aInputImage.width()) !=
+ aEncoder->mCodecParams.mWidth ||
+ static_cast<uint32_t>(aInputImage.height()) !=
+ aEncoder->mCodecParams.mHeight) {
+ GMP_LOG_DEBUG("GMP Encode: resolution change from %ux%u to %dx%d",
+ aEncoder->mCodecParams.mWidth, aEncoder->mCodecParams.mHeight,
+ aInputImage.width(), aInputImage.height());
+
+ RefPtr<GmpInitDoneRunnable> initDone(
+ new GmpInitDoneRunnable(aEncoder->mPCHandle));
+ aEncoder->RegetEncoderForResolutionChange(aInputImage.width(),
+ aInputImage.height(), initDone);
+ if (!aEncoder->mGMP) {
+ // We needed to go async to re-get the encoder. Bail.
+ return;
+ }
+ }
+
+ GMPVideoFrame* ftmp = nullptr;
+ GMPErr err = aEncoder->mHost->CreateFrame(kGMPI420VideoFrame, &ftmp);
+ if (err != GMPNoErr) {
+ GMP_LOG_DEBUG("GMP Encode: failed to create frame on host");
+ return;
+ }
+ GMPUniquePtr<GMPVideoi420Frame> frame(static_cast<GMPVideoi420Frame*>(ftmp));
+ rtc::scoped_refptr<webrtc::I420BufferInterface> input_image =
+ aInputImage.video_frame_buffer()->GetI420();
+ // check for overflow of stride * height
+ CheckedInt32 ysize =
+ CheckedInt32(input_image->StrideY()) * input_image->height();
+ MOZ_RELEASE_ASSERT(ysize.isValid());
+  // Assume that if the Y plane size doesn't overflow, the other planes
+  // won't either - YUV 4:2:0 has U/V widths <= Y, even with alignment issues.
+ err = frame->CreateFrame(
+ ysize.value(), input_image->DataY(),
+ input_image->StrideU() * ((input_image->height() + 1) / 2),
+ input_image->DataU(),
+ input_image->StrideV() * ((input_image->height() + 1) / 2),
+ input_image->DataV(), input_image->width(), input_image->height(),
+ input_image->StrideY(), input_image->StrideU(), input_image->StrideV());
+ if (err != GMPNoErr) {
+ GMP_LOG_DEBUG("GMP Encode: failed to create frame");
+ return;
+ }
+ frame->SetTimestamp((aInputImage.timestamp() * 1000ll) /
+ 90); // note: rounds down!
+ // frame->SetDuration(1000000ll/30); // XXX base duration on measured current
+ // FPS - or don't bother
+
+ // Bug XXXXXX: Set codecSpecific info
+ GMPCodecSpecificInfo info;
+ memset(&info, 0, sizeof(info));
+ info.mCodecType = kGMPVideoCodecH264;
+ nsTArray<uint8_t> codecSpecificInfo;
+ codecSpecificInfo.AppendElements((uint8_t*)&info,
+ sizeof(GMPCodecSpecificInfo));
+
+ nsTArray<GMPVideoFrameType> gmp_frame_types;
+ for (auto it = aFrameTypes.begin(); it != aFrameTypes.end(); ++it) {
+ GMPVideoFrameType ft;
+
+ int32_t ret = WebrtcFrameTypeToGmpFrameType(*it, &ft);
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ GMP_LOG_DEBUG(
+ "GMP Encode: failed to map webrtc frame type to gmp frame type");
+ return;
+ }
+
+ gmp_frame_types.AppendElement(ft);
+ }
+
+ GMP_LOG_DEBUG("GMP Encode: %llu", (aInputImage.timestamp() * 1000ll) / 90);
+ err = aEncoder->mGMP->Encode(std::move(frame), codecSpecificInfo,
+ gmp_frame_types);
+ if (err != GMPNoErr) {
+ GMP_LOG_DEBUG("GMP Encode: failed to encode frame");
+ }
+}
+
+int32_t WebrtcGmpVideoEncoder::RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* aCallback) {
+ MutexAutoLock lock(mCallbackMutex);
+ mCallback = aCallback;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+/* static */
+void WebrtcGmpVideoEncoder::ReleaseGmp_g(
+ const RefPtr<WebrtcGmpVideoEncoder>& aEncoder) {
+ aEncoder->Close_g();
+}
+
+int32_t WebrtcGmpVideoEncoder::Shutdown() {
+ GMP_LOG_DEBUG("GMP Released:");
+ if (mGMPThread) {
+ mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::ReleaseGmp_g,
+ RefPtr<WebrtcGmpVideoEncoder>(this)),
+ NS_DISPATCH_NORMAL);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcGmpVideoEncoder::SetChannelParameters(uint32_t aPacketLoss,
+ int64_t aRTT) {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcGmpVideoEncoder::SetRates(uint32_t aNewBitRate,
+ uint32_t aFrameRate) {
+ MOZ_ASSERT(mGMPThread);
+ if (aFrameRate == 0) {
+ aFrameRate = 30; // Assume 30fps if we don't know the rate
+ }
+ mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::SetRates_g,
+ RefPtr<WebrtcGmpVideoEncoder>(this),
+ aNewBitRate, aFrameRate),
+ NS_DISPATCH_NORMAL);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+/* static */
+int32_t WebrtcGmpVideoEncoder::SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis,
+ uint32_t aNewBitRate,
+ uint32_t aFrameRate) {
+ if (!aThis->mGMP) {
+ // destroyed via Terminate()
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ GMPErr err = aThis->mGMP->SetRates(aNewBitRate, aFrameRate);
+ if (err != GMPNoErr) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMPVideoEncoderCallback virtual functions.
+void WebrtcGmpVideoEncoder::Terminated() {
+ GMP_LOG_DEBUG("GMP Encoder Terminated: %p", (void*)this);
+
+ mGMP->Close();
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+ // Could now notify that it's dead
+}
+
// GMPVideoEncoderCallbackProxy entry point: the GMP plugin delivers one
// encoded frame here (on the GMP thread). Parses the plugin's
// length-prefixed NAL layout into a webrtc::RTPFragmentationHeader and
// forwards the frame to the registered webrtc::EncodedImageCallback.
// mCallback is guarded by mCallbackMutex.
void WebrtcGmpVideoEncoder::Encoded(
    GMPVideoEncodedFrame* aEncodedFrame,
    const nsTArray<uint8_t>& aCodecSpecificInfo) {
  MutexAutoLock lock(mCallbackMutex);
  if (mCallback) {
    webrtc::FrameType ft;
    GmpFrameTypeToWebrtcFrameType(aEncodedFrame->FrameType(), &ft);
    // GMP timestamps are microseconds; convert to 90 kHz RTP ticks,
    // rounding up (the decoder performs the inverse, rounding down).
    uint32_t timestamp = (aEncodedFrame->TimeStamp() * 90ll + 999) / 1000;

    GMP_LOG_DEBUG("GMP Encoded: %" PRIu64 ", type %d, len %d",
                  aEncodedFrame->TimeStamp(), aEncodedFrame->BufferType(),
                  aEncodedFrame->Size());

    // Right now makes one Encoded() callback per unit
    // XXX convert to FragmentationHeader format (array of offsets and sizes
    // plus a buffer) in combination with H264 packetization changes in
    // webrtc/trunk code
    uint8_t* buffer = aEncodedFrame->Buffer();

    if (!buffer) {
      GMP_LOG_ERROR("GMP plugin returned null buffer");
      return;
    }

    uint8_t* end = aEncodedFrame->Buffer() + aEncodedFrame->Size();
    // Width in bytes of the per-NAL length prefix, determined by the
    // buffer framing type the plugin used.
    size_t size_bytes;
    switch (aEncodedFrame->BufferType()) {
      case GMP_BufferSingle:
        size_bytes = 0;
        break;
      case GMP_BufferLength8:
        size_bytes = 1;
        break;
      case GMP_BufferLength16:
        size_bytes = 2;
        break;
      case GMP_BufferLength24:
        size_bytes = 3;
        break;
      case GMP_BufferLength32:
        size_bytes = 4;
        break;
      default:
        // Really that it's not in the enum
        GMP_LOG_ERROR("GMP plugin returned incorrect type (%d)",
                      aEncodedFrame->BufferType());
        // XXX Bug 1041232 - need a better API for interfacing to the
        // plugin so we can kill it here
        return;
    }

    struct nal_entry {
      uint32_t offset;
      uint32_t size;
    };
    AutoTArray<nal_entry, 1> nals;
    uint32_t size = 0;
    // make sure we don't read past the end of the buffer getting the size
    while (buffer + size_bytes < end) {
      switch (aEncodedFrame->BufferType()) {
        case GMP_BufferSingle:
          // One NAL occupying the whole buffer, no length prefix.
          size = aEncodedFrame->Size();
          break;
        case GMP_BufferLength8:
          size = *buffer++;
          break;
        case GMP_BufferLength16:
// The plugin is expected to encode data in native byte order
#if MOZ_LITTLE_ENDIAN()
          size = LittleEndian::readUint16(buffer);
#else
          size = BigEndian::readUint16(buffer);
#endif
          buffer += 2;
          break;
        case GMP_BufferLength24:
          // 24-bits is a pain, since byte-order issues make things painful
          // I'm going to define 24-bit as little-endian always; big-endian must
          // convert
          size = ((uint32_t)*buffer) | (((uint32_t) * (buffer + 1)) << 8) |
                 (((uint32_t) * (buffer + 2)) << 16);
          buffer += 3;
          break;
        case GMP_BufferLength32:
// The plugin is expected to encode data in native byte order
#if MOZ_LITTLE_ENDIAN()
          size = LittleEndian::readUint32(buffer);
#else
          size = BigEndian::readUint32(buffer);
#endif
          buffer += 4;
          break;
        default:
          MOZ_CRASH("GMP_BufferType already handled in switch above");
      }

      // OpenH264 1.8.1 occasionally generates a size of 0x01000000.
      // This is a magic value in the NAL which should be replaced with a
      // valid size, but for some reason this is not always happening.
      // If we return early here, encoding will continue to work as expected.
      // See Bug 1533001.
      if (size == 0x01000000) {
        return;
      }

      MOZ_ASSERT(size != 0 &&
                 buffer + size <=
                     end);  // in non-debug code, don't crash in this case
      if (size == 0 || buffer + size > end) {
        // XXX see above - should we kill the plugin for returning extra bytes?
        // Probably
        GMP_LOG_ERROR(
            "GMP plugin returned badly formatted encoded "
            "data: buffer=%p, size=%d, end=%p",
            buffer, size, end);
        return;
      }
      // XXX optimize by making buffer an offset
      nal_entry nal = {((uint32_t)(buffer - aEncodedFrame->Buffer())),
                       (uint32_t)size};
      nals.AppendElement(nal);
      buffer += size;
      // on last one, buffer == end normally
    }
    if (buffer != end) {
      // At most 3 bytes can be left over, depending on buffertype
      GMP_LOG_DEBUG("GMP plugin returned %td extra bytes", end - buffer);
    }

    size_t num_nals = nals.Length();
    if (num_nals > 0) {
      webrtc::RTPFragmentationHeader fragmentation;
      fragmentation.VerifyAndAllocateFragmentationHeader(num_nals);
      for (size_t i = 0; i < num_nals; i++) {
        fragmentation.fragmentationOffset[i] = nals[i].offset;
        fragmentation.fragmentationLength[i] = nals[i].size;
      }

      // NOTE(review): |size| here is the value parsed for the *last* NAL in
      // the loop above; it equals the full frame size only in the
      // GMP_BufferSingle case. Confirm multi-NAL frames are intended to be
      // sized this way.
      webrtc::EncodedImage unit(aEncodedFrame->Buffer(), size, size);
      unit._frameType = ft;
      unit._timeStamp = timestamp;
      // Ensure we ignore this when calculating RTCP timestamps
      unit.capture_time_ms_ = -1;
      unit._completeFrame = true;

      // TODO: Currently the OpenH264 codec does not preserve any codec
      // specific info passed into it and just returns default values.
      // If this changes in the future, it would be nice to get rid of
      // mCodecSpecificInfo.
      mCallback->OnEncodedImage(unit, &mCodecSpecificInfo, &fragmentation);
    }
  }
}
+
+// Decoder.
// Construct an idle decoder; all GMP plugin setup is deferred to
// InitDecode(). Safe to construct on any thread.
WebrtcGmpVideoDecoder::WebrtcGmpVideoDecoder()
    : mGMP(nullptr),
      mInitting(false),
      mHost(nullptr),
      mCallbackMutex("WebrtcGmpVideoDecoder decoded callback mutex"),
      mCallback(nullptr),
      mCachedPluginId(0),
      mDecoderStatus(GMPNoErr) {
  // Capture the PeerConnection handle installed by WebrtcGmpPCHandleSetter
  // so async init errors can be routed back to the right PeerConnection.
  if (mPCHandle.empty()) {
    mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle();
  }
  MOZ_ASSERT(!mPCHandle.empty());
}
+
WebrtcGmpVideoDecoder::~WebrtcGmpVideoDecoder() {
  // We should not have been destroyed if we never closed our GMP
  // (ReleaseGmp()/Close_g() must run first so the plugin is shut down).
  MOZ_ASSERT(!mGMP);
}
+
// webrtc entry point: kick off asynchronous creation of the GMP decoder on
// the GMP thread. Returns OK immediately unless the GMP thread cannot be
// obtained; real init failures are reported asynchronously through the
// GmpInitDoneRunnable (to the PeerConnection identified by mPCHandle).
int32_t WebrtcGmpVideoDecoder::InitDecode(
    const webrtc::VideoCodec* aCodecSettings, int32_t aNumberOfCores) {
  if (!mMPS) {
    mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
  }
  MOZ_ASSERT(mMPS);

  if (!mGMPThread) {
    if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // Runnable that delivers the async init result on the main thread.
  RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle));
  mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoDecoder::InitDecode_g,
                                      RefPtr<WebrtcGmpVideoDecoder>(this),
                                      aCodecSettings, aNumberOfCores, initDone),
                       NS_DISPATCH_NORMAL);

  return WEBRTC_VIDEO_CODEC_OK;
}
+
/* static */
// Runs on the GMP thread: request an H.264-capable GMP video decoder.
// Completion is delivered via InitDoneCallback -> GmpInitDone(); only a
// synchronous GetGMPVideoDecoder failure is reported here.
void WebrtcGmpVideoDecoder::InitDecode_g(
    const RefPtr<WebrtcGmpVideoDecoder>& aThis,
    const webrtc::VideoCodec* aCodecSettings, int32_t aNumberOfCores,
    const RefPtr<GmpInitDoneRunnable>& aInitDone) {
  nsTArray<nsCString> tags;
  tags.AppendElement("h264"_ns);
  UniquePtr<GetGMPVideoDecoderCallback> callback(
      new InitDoneCallback(aThis, aInitDone));
  // Mark init in-flight so frames arriving early get queued (see Decode_g)
  // and so a racing Release can abort us (see GmpInitDone).
  aThis->mInitting = true;
  nsresult rv = aThis->mMPS->GetGMPVideoDecoder(nullptr, &tags, ""_ns,
                                                std::move(callback));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    GMP_LOG_DEBUG("GMP Decode: GetGMPVideoDecoder failed");
    aThis->Close_g();
    aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
                        "GMP Decode: GetGMPVideoDecoder failed.");
  }
}
+
// Called (on the GMP thread) when async decoder creation completes. Adopts
// the new plugin proxy and host, initializes the decoder, then drains any
// frames that were queued while init was in flight. Returns a webrtc error
// code; the string form of any failure goes out via aErrorOut.
int32_t WebrtcGmpVideoDecoder::GmpInitDone(GMPVideoDecoderProxy* aGMP,
                                           GMPVideoHost* aHost,
                                           std::string* aErrorOut) {
  if (!mInitting || !aGMP || !aHost) {
    *aErrorOut =
        "GMP Decode: Either init was aborted, "
        "or init failed to supply either a GMP decoder or GMP host.";
    if (aGMP) {
      // This could destroy us, since aGMP may be the last thing holding a ref
      // Return immediately.
      aGMP->Close();
    }
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  mInitting = false;

  // If we were re-initted while an old plugin was live, close the old one.
  if (mGMP && mGMP != aGMP) {
    Close_g();
  }

  mGMP = aGMP;
  mHost = aHost;
  // Cache the id so PluginID() works even after the plugin goes away.
  mCachedPluginId = mGMP->GetPluginId();
  // Bug XXXXXX: transfer settings from codecSettings to codec.
  GMPVideoCodec codec;
  memset(&codec, 0, sizeof(codec));
  codec.mGMPApiVersion = 33;

  // XXX this is currently a hack
  // GMPVideoCodecUnion codecSpecific;
  // memset(&codecSpecific, 0, sizeof(codecSpecific));
  nsTArray<uint8_t> codecSpecific;
  nsresult rv = mGMP->InitDecode(codec, codecSpecific, this, 1);
  if (NS_FAILED(rv)) {
    *aErrorOut = "GMP Decode: InitDecode failed";
    mQueuedFrames.Clear();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // now release any frames that got queued waiting for InitDone
  if (!mQueuedFrames.IsEmpty()) {
    // So we're safe to call Decode_g(), which asserts it's empty
    nsTArray<UniquePtr<GMPDecodeData>> temp = std::move(mQueuedFrames);
    for (auto& queued : temp) {
      Decode_g(RefPtr<WebrtcGmpVideoDecoder>(this), std::move(queued));
    }
  }

  // This is an ugly solution to asynchronous decoding errors
  // from Decode_g() not being returned to the synchronous Decode() method.
  // If we don't return an error code at this point, our caller ultimately won't
  // know to request a PLI and the video stream will remain frozen unless an IDR
  // happens to arrive for other reasons. Bug 1492852 tracks implementing a
  // proper solution.
  if (mDecoderStatus != GMPNoErr) {
    GMP_LOG_ERROR("%s: Decoder status is bad (%u)!", __PRETTY_FUNCTION__,
                  static_cast<unsigned>(mDecoderStatus));
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  return WEBRTC_VIDEO_CODEC_OK;
}
+
+void WebrtcGmpVideoDecoder::Close_g() {
+ GMPVideoDecoderProxy* gmp(mGMP);
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+
+ if (gmp) {
+ // Do this last, since this could cause us to be destroyed
+ gmp->Close();
+ }
+}
+
// webrtc entry point (never the main thread): deep-copy the encoded image
// (the caller recycles its buffer on return — see GMPDecodeData) and hand
// it to the GMP thread for the actual decode.
int32_t WebrtcGmpVideoDecoder::Decode(
    const webrtc::EncodedImage& aInputImage, bool aMissingFrames,
    const webrtc::RTPFragmentationHeader* aFragmentation,
    const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
    int64_t aRenderTimeMs) {
  MOZ_ASSERT(mGMPThread);
  MOZ_ASSERT(!NS_IsMainThread());
  if (!aInputImage._length) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  auto decodeData =
      MakeUnique<GMPDecodeData>(aInputImage, aMissingFrames, aRenderTimeMs);

  mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoDecoder::Decode_g,
                                      RefPtr<WebrtcGmpVideoDecoder>(this),
                                      std::move(decodeData)),
                       NS_DISPATCH_NORMAL);

  // This is an ugly solution to asynchronous decoding errors
  // from Decode_g() not being returned to the synchronous Decode() method.
  // If we don't return an error code at this point, our caller ultimately won't
  // know to request a PLI and the video stream will remain frozen unless an IDR
  // happens to arrive for other reasons. Bug 1492852 tracks implementing a
  // proper solution.
  if (mDecoderStatus != GMPNoErr) {
    GMP_LOG_ERROR("%s: Decoder status is bad (%u)!", __PRETTY_FUNCTION__,
                  static_cast<unsigned>(mDecoderStatus));
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  return WEBRTC_VIDEO_CODEC_OK;
}
+
/* static */
// Actual decode work, on the GMP thread. Copies the encoded payload into a
// GMP-owned (shmem-backed) frame and submits it to the plugin. Errors are
// latched into mDecoderStatus, which the next synchronous Decode() call
// reports back to webrtc (see the Bug 1492852 note in Decode()).
void WebrtcGmpVideoDecoder::Decode_g(const RefPtr<WebrtcGmpVideoDecoder>& aThis,
                                     UniquePtr<GMPDecodeData>&& aDecodeData) {
  if (!aThis->mGMP) {
    if (aThis->mInitting) {
      // InitDone hasn't been called yet (race)
      aThis->mQueuedFrames.AppendElement(std::move(aDecodeData));
      return;
    }
    // destroyed via Terminate(), failed to init, or just not initted yet
    GMP_LOG_DEBUG("GMP Decode: not initted yet");

    aThis->mDecoderStatus = GMPDecodeErr;
    return;
  }

  MOZ_ASSERT(aThis->mQueuedFrames.IsEmpty());
  MOZ_ASSERT(aThis->mHost);

  GMPVideoFrame* ftmp = nullptr;
  GMPErr err = aThis->mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
  if (err != GMPNoErr) {
    GMP_LOG_ERROR("%s: CreateFrame failed (%u)!", __PRETTY_FUNCTION__,
                  static_cast<unsigned>(err));
    aThis->mDecoderStatus = err;
    return;
  }

  GMPUniquePtr<GMPVideoEncodedFrame> frame(
      static_cast<GMPVideoEncodedFrame*>(ftmp));
  err = frame->CreateEmptyFrame(aDecodeData->mImage._length);
  if (err != GMPNoErr) {
    GMP_LOG_ERROR("%s: CreateEmptyFrame failed (%u)!", __PRETTY_FUNCTION__,
                  static_cast<unsigned>(err));
    aThis->mDecoderStatus = err;
    return;
  }

  // XXX At this point, we only will get mode1 data (a single length and a
  // buffer) Session_info.cc/etc code needs to change to support mode 0.
  // Overwrite the first 4 bytes (presumably the Annex-B start code /
  // length field of the incoming data) with the buffer length, matching
  // the GMP_BufferLength32 framing set below.
  *(reinterpret_cast<uint32_t*>(frame->Buffer())) = frame->Size();

  // XXX It'd be wonderful not to have to memcpy the encoded data!
  memcpy(frame->Buffer() + 4, aDecodeData->mImage._buffer + 4,
         frame->Size() - 4);

  frame->SetEncodedWidth(aDecodeData->mImage._encodedWidth);
  frame->SetEncodedHeight(aDecodeData->mImage._encodedHeight);
  // Convert 90 kHz RTP ticks to microseconds for GMP.
  frame->SetTimeStamp((aDecodeData->mImage._timeStamp * 1000ll) /
                      90);  // rounds down
  frame->SetCompleteFrame(aDecodeData->mImage._completeFrame);
  frame->SetBufferType(GMP_BufferLength32);

  GMPVideoFrameType ft;
  int32_t ret =
      WebrtcFrameTypeToGmpFrameType(aDecodeData->mImage._frameType, &ft);
  if (ret != WEBRTC_VIDEO_CODEC_OK) {
    GMP_LOG_ERROR("%s: WebrtcFrameTypeToGmpFrameType failed (%u)!",
                  __PRETTY_FUNCTION__, static_cast<unsigned>(ret));
    aThis->mDecoderStatus = GMPDecodeErr;
    return;
  }

  // Bug XXXXXX: Set codecSpecific info
  GMPCodecSpecificInfo info;
  memset(&info, 0, sizeof(info));
  info.mCodecType = kGMPVideoCodecH264;
  info.mCodecSpecific.mH264.mSimulcastIdx = 0;
  nsTArray<uint8_t> codecSpecificInfo;
  codecSpecificInfo.AppendElements((uint8_t*)&info,
                                   sizeof(GMPCodecSpecificInfo));

  GMP_LOG_DEBUG("GMP Decode: %" PRIu64 ", len %zu%s", frame->TimeStamp(),
                aDecodeData->mImage._length,
                ft == kGMPKeyFrame ? ", KeyFrame" : "");

  nsresult rv =
      aThis->mGMP->Decode(std::move(frame), aDecodeData->mMissingFrames,
                          codecSpecificInfo, aDecodeData->mRenderTimeMs);
  if (NS_FAILED(rv)) {
    GMP_LOG_ERROR("%s: Decode failed (rv=%u)!", __PRETTY_FUNCTION__,
                  static_cast<unsigned>(rv));
    aThis->mDecoderStatus = GMPDecodeErr;
    return;
  }

  // Success: clear any previously latched error.
  aThis->mDecoderStatus = GMPNoErr;
}
+
+int32_t WebrtcGmpVideoDecoder::RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback* aCallback) {
+ MutexAutoLock lock(mCallbackMutex);
+ mCallback = aCallback;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
/* static */
// GMP-thread trampoline for ReleaseGmp(): performs the actual teardown.
void WebrtcGmpVideoDecoder::ReleaseGmp_g(
    const RefPtr<WebrtcGmpVideoDecoder>& aDecoder) {
  aDecoder->Close_g();
}
+
+int32_t WebrtcGmpVideoDecoder::ReleaseGmp() {
+ GMP_LOG_DEBUG("GMP Released:");
+ RegisterDecodeCompleteCallback(nullptr);
+
+ if (mGMPThread) {
+ mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoDecoder::ReleaseGmp_g,
+ RefPtr<WebrtcGmpVideoDecoder>(this)),
+ NS_DISPATCH_NORMAL);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void WebrtcGmpVideoDecoder::Terminated() {
+ GMP_LOG_DEBUG("GMP Decoder Terminated: %p", (void*)this);
+
+ mGMP->Close();
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+ // Could now notify that it's dead
+}
+
// Deleter handed to rtc::Bind in Decoded(): frees the copied I420 pixel
// buffer once webrtc is done with the wrapped frame.
static void DeleteBuffer(uint8_t* aBuffer) { delete[] aBuffer; }
+
// GMPVideoDecoderCallbackProxy entry point (GMP thread): the plugin
// produced a decoded I420 frame. Copies the three planes out of the
// plugin's shmem into a heap buffer, wraps that buffer as a
// webrtc::VideoFrame, and delivers it to the registered callback.
void WebrtcGmpVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame) {
  // we have two choices here: wrap the frame with a callback that frees
  // the data later (risking running out of shmems), or copy the data out
  // always. Also, we can only Destroy() the frame on the gmp thread, so
  // copying is simplest if expensive.
  // I420 size including rounding...
  CheckedInt32 length =
      (CheckedInt32(aDecodedFrame->Stride(kGMPYPlane)) *
       aDecodedFrame->Height()) +
      (aDecodedFrame->Stride(kGMPVPlane) + aDecodedFrame->Stride(kGMPUPlane)) *
          ((aDecodedFrame->Height() + 1) / 2);
  int32_t size = length.value();
  MOZ_RELEASE_ASSERT(length.isValid() && size > 0);
  // Fallible: a huge or corrupt frame must not OOM-crash the process.
  auto buffer = MakeUniqueFallible<uint8_t[]>(size);
  if (buffer) {
    // This is 3 separate buffers currently anyways, no use in trying to
    // see if we can use a single memcpy.
    uint8_t* buffer_y = buffer.get();
    memcpy(buffer_y, aDecodedFrame->Buffer(kGMPYPlane),
           aDecodedFrame->Stride(kGMPYPlane) * aDecodedFrame->Height());
    // Should this be aligned, making it non-contiguous? Assume no, this is
    // already factored into the strides.
    uint8_t* buffer_u =
        buffer_y + aDecodedFrame->Stride(kGMPYPlane) * aDecodedFrame->Height();
    memcpy(buffer_u, aDecodedFrame->Buffer(kGMPUPlane),
           aDecodedFrame->Stride(kGMPUPlane) *
               ((aDecodedFrame->Height() + 1) / 2));
    uint8_t* buffer_v = buffer_u + aDecodedFrame->Stride(kGMPUPlane) *
                                       ((aDecodedFrame->Height() + 1) / 2);
    memcpy(buffer_v, aDecodedFrame->Buffer(kGMPVPlane),
           aDecodedFrame->Stride(kGMPVPlane) *
               ((aDecodedFrame->Height() + 1) / 2));

    MutexAutoLock lock(mCallbackMutex);
    if (mCallback) {
      // WrappedI420Buffer takes ownership of the planes via the bound
      // DeleteBuffer, which frees them when webrtc drops the frame.
      rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
          new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
              aDecodedFrame->Width(), aDecodedFrame->Height(), buffer_y,
              aDecodedFrame->Stride(kGMPYPlane), buffer_u,
              aDecodedFrame->Stride(kGMPUPlane), buffer_v,
              aDecodedFrame->Stride(kGMPVPlane),
              rtc::Bind(&DeleteBuffer, buffer.release())));

      webrtc::VideoFrame image(video_frame_buffer, 0, 0,
                               webrtc::kVideoRotation_0);
      // Convert GMP microseconds back to 90 kHz RTP ticks.
      image.set_timestamp((aDecodedFrame->Timestamp() * 90ll + 999) /
                          1000);  // round up

      GMP_LOG_DEBUG("GMP Decoded: %" PRIu64, aDecodedFrame->Timestamp());
      mCallback->Decoded(image);
    }
  }
  // Must happen on the GMP thread (see comment above), and even if the
  // buffer allocation failed.
  aDecodedFrame->Destroy();
}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
new file mode 100644
index 0000000000..0b1b5d9468
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2012, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WEBRTCGMPVIDEOCODEC_H_
+#define WEBRTCGMPVIDEOCODEC_H_
+
+#include <queue>
+#include <string>
+
+#include "nsThreadUtils.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Mutex.h"
+
+#include "mozIGeckoMediaPluginService.h"
+#include "MediaConduitInterface.h"
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+
+#include "gmp-video-host.h"
+#include "GMPVideoDecoderProxy.h"
+#include "GMPVideoEncoderProxy.h"
+
+#include "jsapi/PeerConnectionImpl.h"
+
+namespace mozilla {
+
+// Class that allows code on the other side of webrtc.org to tell
+// WebrtcGmpVideoEncoder/Decoder what PC they should send errors to.
+// This is necessary because webrtc.org gives us no way to plumb the handle
+// through, nor does it give us any way to inform it of an error that will
+// make it back to the PC that cares (except for errors encountered
+// synchronously in functions like InitEncode/Decode, which will not happen
+// because GMP init is async).
+// Right now, this is used in MediaPipelineFactory.
class WebrtcGmpPCHandleSetter {
 public:
  // RAII scope: while alive, encoders/decoders constructed on this thread
  // pick up aPCHandle via GetCurrentHandle().
  explicit WebrtcGmpPCHandleSetter(const std::string& aPCHandle);

  ~WebrtcGmpPCHandleSetter();

  // Returns the handle installed by the innermost live setter (empty if
  // none). NOTE(review): sCurrentHandle is a plain static — presumably
  // only used from a single thread; confirm against callers.
  static std::string GetCurrentHandle();

 private:
  static std::string sCurrentHandle;
};
+
+class GmpInitDoneRunnable : public Runnable {
+ public:
+ explicit GmpInitDoneRunnable(const std::string& aPCHandle)
+ : Runnable("GmpInitDoneRunnable"),
+ mResult(WEBRTC_VIDEO_CODEC_OK),
+ mPCHandle(aPCHandle) {}
+
+ NS_IMETHOD Run() override {
+ if (mResult == WEBRTC_VIDEO_CODEC_OK) {
+ // Might be useful to notify the PeerConnection about successful init
+ // someday.
+ return NS_OK;
+ }
+
+ PeerConnectionWrapper wrapper(mPCHandle);
+ if (wrapper.impl()) {
+ wrapper.impl()->OnMediaError(mError);
+ }
+ return NS_OK;
+ }
+
+ void Dispatch(int32_t aResult, const std::string& aError = "") {
+ mResult = aResult;
+ mError = aError;
+ nsCOMPtr<nsIThread> mainThread(do_GetMainThread());
+ if (mainThread) {
+ // For some reason, the compiler on CI is treating |this| as a const
+ // pointer, despite the fact that we're in a non-const function. And,
+ // interestingly enough, correcting this doesn't require a const_cast.
+ mainThread->Dispatch(do_AddRef(static_cast<nsIRunnable*>(this)),
+ NS_DISPATCH_NORMAL);
+ }
+ }
+
+ int32_t Result() { return mResult; }
+
+ private:
+ int32_t mResult;
+ std::string mPCHandle;
+ std::string mError;
+};
+
+// Hold a frame for later decode
// Deep copy of one encoded frame. Needed because the caller of Decode()
// recycles its buffer on return, while we queue frames during init and
// hand them across threads.
class GMPDecodeData {
 public:
  GMPDecodeData(const webrtc::EncodedImage& aInputImage, bool aMissingFrames,
                int64_t aRenderTimeMs)
      : mImage(aInputImage),
        mMissingFrames(aMissingFrames),
        mRenderTimeMs(aRenderTimeMs) {
    // We want to use this for queuing, and the calling code recycles the
    // buffer on return from Decode()
    MOZ_RELEASE_ASSERT(aInputImage._length <
                       (std::numeric_limits<size_t>::max() >> 1));
    // Own our copy of the payload, padded as webrtc's H.264 path expects.
    mImage._length = aInputImage._length;
    mImage._size =
        aInputImage._length +
        webrtc::EncodedImage::GetBufferPaddingBytes(webrtc::kVideoCodecH264);
    mImage._buffer = new uint8_t[mImage._size];
    memcpy(mImage._buffer, aInputImage._buffer, aInputImage._length);
  }

  ~GMPDecodeData() { delete[] mImage._buffer; }

  webrtc::EncodedImage mImage;  // owns mImage._buffer (freed in dtor)
  bool mMissingFrames;
  int64_t mRenderTimeMs;
};
+
// Refcounted analogue of webrtc's VideoEncoder interface. webrtc's
// Release() collides with nsRefPtr's, so shutdown is spelled Shutdown()
// and WebrtcVideoEncoderProxy adapts between the two.
class RefCountedWebrtcVideoEncoder {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedWebrtcVideoEncoder);

  // Implement sort of WebrtcVideoEncoder interface and support refcounting.
  // (We cannot use |Release|, since that's needed for nsRefPtr)
  virtual uint64_t PluginID() const = 0;

  virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
                             int32_t aNumberOfCores,
                             size_t aMaxPayloadSize) = 0;

  virtual int32_t Encode(const webrtc::VideoFrame& aInputImage,
                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                         const std::vector<webrtc::FrameType>* aFrameTypes) = 0;

  virtual int32_t RegisterEncodeCompleteCallback(
      webrtc::EncodedImageCallback* aCallback) = 0;

  // Takes the place of webrtc::VideoEncoder::Release().
  virtual int32_t Shutdown() = 0;

  virtual int32_t SetChannelParameters(uint32_t aPacketLoss, int64_t aRTT) = 0;

  virtual int32_t SetRates(uint32_t aNewBitRate, uint32_t aFrameRate) = 0;

 protected:
  virtual ~RefCountedWebrtcVideoEncoder() = default;
};
+
// H.264 encoder backed by a Gecko Media Plugin (e.g. OpenH264). Public
// entry points dispatch to the GMP thread; encoded output arrives via the
// GMPVideoEncoderCallbackProxy overrides.
class WebrtcGmpVideoEncoder : public GMPVideoEncoderCallbackProxy,
                              public RefCountedWebrtcVideoEncoder {
 public:
  WebrtcGmpVideoEncoder();

  // Implement VideoEncoder interface, sort of.
  // (We cannot use |Release|, since that's needed for nsRefPtr)
  uint64_t PluginID() const override { return mCachedPluginId; }

  int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
                     int32_t aNumberOfCores, size_t aMaxPayloadSize) override;

  int32_t Encode(const webrtc::VideoFrame& aInputImage,
                 const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                 const std::vector<webrtc::FrameType>* aFrameTypes) override;

  int32_t RegisterEncodeCompleteCallback(
      webrtc::EncodedImageCallback* aCallback) override;

  int32_t Shutdown() override;

  int32_t SetChannelParameters(uint32_t aPacketLoss, int64_t aRTT) override;

  int32_t SetRates(uint32_t aNewBitRate, uint32_t aFrameRate) override;

  // GMPVideoEncoderCallback virtual functions.
  virtual void Terminated() override;

  virtual void Encoded(GMPVideoEncodedFrame* aEncodedFrame,
                       const nsTArray<uint8_t>& aCodecSpecificInfo) override;

  virtual void Error(GMPErr aError) override {}

 private:
  virtual ~WebrtcGmpVideoEncoder();

  // GMP-thread halves of the public entry points.
  static void InitEncode_g(const RefPtr<WebrtcGmpVideoEncoder>& aThis,
                           const GMPVideoCodec& aCodecParams,
                           int32_t aNumberOfCores, uint32_t aMaxPayloadSize,
                           const RefPtr<GmpInitDoneRunnable>& aInitDone);
  int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost,
                      const GMPVideoCodec& aCodecParams,
                      uint32_t aMaxPayloadSize, std::string* aErrorOut);
  int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost,
                      std::string* aErrorOut);
  int32_t InitEncoderForSize(unsigned short aWidth, unsigned short aHeight,
                             std::string* aErrorOut);
  static void ReleaseGmp_g(const RefPtr<WebrtcGmpVideoEncoder>& aEncoder);
  void Close_g();

  // Completes async encoder creation for the initial InitEncode() path.
  class InitDoneCallback : public GetGMPVideoEncoderCallback {
   public:
    InitDoneCallback(const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
                     const RefPtr<GmpInitDoneRunnable>& aInitDone,
                     const GMPVideoCodec& aCodecParams,
                     uint32_t aMaxPayloadSize)
        : mEncoder(aEncoder),
          mInitDone(aInitDone),
          mCodecParams(aCodecParams),
          mMaxPayloadSize(aMaxPayloadSize) {}

    virtual void Done(GMPVideoEncoderProxy* aGMP,
                      GMPVideoHost* aHost) override {
      std::string errorOut;
      int32_t result = mEncoder->GmpInitDone(aGMP, aHost, mCodecParams,
                                             mMaxPayloadSize, &errorOut);

      mInitDone->Dispatch(result, errorOut);
    }

   private:
    RefPtr<WebrtcGmpVideoEncoder> mEncoder;
    RefPtr<GmpInitDoneRunnable> mInitDone;
    GMPVideoCodec mCodecParams;
    uint32_t mMaxPayloadSize;
  };

  static void Encode_g(const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
                       webrtc::VideoFrame aInputImage,
                       std::vector<webrtc::FrameType> aFrameTypes);
  // Re-creates the encoder when the input resolution changes mid-stream.
  void RegetEncoderForResolutionChange(
      uint32_t aWidth, uint32_t aHeight,
      const RefPtr<GmpInitDoneRunnable>& aInitDone);

  // Completes async encoder re-creation after a resolution change; also
  // re-runs InitEncoderForSize with the new dimensions.
  class InitDoneForResolutionChangeCallback
      : public GetGMPVideoEncoderCallback {
   public:
    InitDoneForResolutionChangeCallback(
        const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
        const RefPtr<GmpInitDoneRunnable>& aInitDone, uint32_t aWidth,
        uint32_t aHeight)
        : mEncoder(aEncoder),
          mInitDone(aInitDone),
          mWidth(aWidth),
          mHeight(aHeight) {}

    virtual void Done(GMPVideoEncoderProxy* aGMP,
                      GMPVideoHost* aHost) override {
      std::string errorOut;
      int32_t result = mEncoder->GmpInitDone(aGMP, aHost, &errorOut);
      if (result != WEBRTC_VIDEO_CODEC_OK) {
        mInitDone->Dispatch(result, errorOut);
        return;
      }

      result = mEncoder->InitEncoderForSize(mWidth, mHeight, &errorOut);
      mInitDone->Dispatch(result, errorOut);
    }

   private:
    RefPtr<WebrtcGmpVideoEncoder> mEncoder;
    RefPtr<GmpInitDoneRunnable> mInitDone;
    uint32_t mWidth;
    uint32_t mHeight;
  };

  static int32_t SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis,
                            uint32_t aNewBitRate, uint32_t aFrameRate);

  nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
  nsCOMPtr<nsIThread> mGMPThread;
  GMPVideoEncoderProxy* mGMP;
  // Used to handle a race where Release() is called while init is in progress
  bool mInitting;
  GMPVideoHost* mHost;
  GMPVideoCodec mCodecParams;
  uint32_t mMaxPayloadSize;
  webrtc::CodecSpecificInfo mCodecSpecificInfo;
  // Protects mCallback
  Mutex mCallbackMutex;
  webrtc::EncodedImageCallback* mCallback;
  // Plugin id cached so PluginID() works after the plugin goes away.
  uint64_t mCachedPluginId;
  // PeerConnection handle for routing async errors (see
  // WebrtcGmpPCHandleSetter).
  std::string mPCHandle;
};
+
+// Basically a strong ref to a RefCountedWebrtcVideoEncoder, that also
+// translates from Release() to RefCountedWebrtcVideoEncoder::Shutdown(),
+// since we need RefCountedWebrtcVideoEncoder::Release() for managing the
+// refcount. The webrtc.org code gets one of these, so it doesn't unilaterally
+// delete the "real" encoder.
+class WebrtcVideoEncoderProxy : public WebrtcVideoEncoder {
+ public:
+ explicit WebrtcVideoEncoderProxy(
+ RefPtr<RefCountedWebrtcVideoEncoder> aEncoder)
+ : mEncoderImpl(std::move(aEncoder)) {}
+
+ virtual ~WebrtcVideoEncoderProxy() {
+ RegisterEncodeCompleteCallback(nullptr);
+ }
+
+ uint64_t PluginID() const override { return mEncoderImpl->PluginID(); }
+
+ int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores, size_t aMaxPayloadSize) override {
+ return mEncoderImpl->InitEncode(aCodecSettings, aNumberOfCores,
+ aMaxPayloadSize);
+ }
+
+ int32_t Encode(const webrtc::VideoFrame& aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::FrameType>* aFrameTypes) override {
+ return mEncoderImpl->Encode(aInputImage, aCodecSpecificInfo, aFrameTypes);
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* aCallback) override {
+ return mEncoderImpl->RegisterEncodeCompleteCallback(aCallback);
+ }
+
+ int32_t Release() override { return mEncoderImpl->Shutdown(); }
+
+ int32_t SetChannelParameters(uint32_t aPacketLoss, int64_t aRTT) override {
+ return mEncoderImpl->SetChannelParameters(aPacketLoss, aRTT);
+ }
+
+ int32_t SetRates(uint32_t aNewBitRate, uint32_t aFrameRate) override {
+ return mEncoderImpl->SetRates(aNewBitRate, aFrameRate);
+ }
+
+ private:
+ RefPtr<RefCountedWebrtcVideoEncoder> mEncoderImpl;
+};
+
// H.264 decoder backed by a Gecko Media Plugin. Public entry points
// dispatch to the GMP thread; decoded frames arrive via the
// GMPVideoDecoderCallbackProxy overrides.
class WebrtcGmpVideoDecoder : public GMPVideoDecoderCallbackProxy {
 public:
  WebrtcGmpVideoDecoder();
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcGmpVideoDecoder);

  // Implement VideoEncoder interface, sort of.
  // (We cannot use |Release|, since that's needed for nsRefPtr)
  virtual uint64_t PluginID() const { return mCachedPluginId; }

  virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
                             int32_t aNumberOfCores);
  virtual int32_t Decode(const webrtc::EncodedImage& aInputImage,
                         bool aMissingFrames,
                         const webrtc::RTPFragmentationHeader* aFragmentation,
                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                         int64_t aRenderTimeMs);
  virtual int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* aCallback);

  // Takes the place of webrtc Release(); see WebrtcVideoDecoderProxy.
  virtual int32_t ReleaseGmp();

  // GMPVideoDecoderCallbackProxy
  virtual void Terminated() override;

  virtual void Decoded(GMPVideoi420Frame* aDecodedFrame) override;

  // Reference-frame callbacks are not expected from the plugins we use.
  virtual void ReceivedDecodedReferenceFrame(
      const uint64_t aPictureId) override {
    MOZ_CRASH();
  }

  virtual void ReceivedDecodedFrame(const uint64_t aPictureId) override {
    MOZ_CRASH();
  }

  virtual void InputDataExhausted() override {}

  virtual void DrainComplete() override {}

  virtual void ResetComplete() override {}

  // Latch async plugin errors; reported by the next Decode() call.
  virtual void Error(GMPErr aError) override { mDecoderStatus = aError; }

 private:
  virtual ~WebrtcGmpVideoDecoder();

  // GMP-thread halves of the public entry points.
  static void InitDecode_g(const RefPtr<WebrtcGmpVideoDecoder>& aThis,
                           const webrtc::VideoCodec* aCodecSettings,
                           int32_t aNumberOfCores,
                           const RefPtr<GmpInitDoneRunnable>& aInitDone);
  int32_t GmpInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost,
                      std::string* aErrorOut);
  static void ReleaseGmp_g(const RefPtr<WebrtcGmpVideoDecoder>& aDecoder);
  void Close_g();

  // Completes async decoder creation by forwarding to GmpInitDone().
  class InitDoneCallback : public GetGMPVideoDecoderCallback {
   public:
    explicit InitDoneCallback(const RefPtr<WebrtcGmpVideoDecoder>& aDecoder,
                              const RefPtr<GmpInitDoneRunnable>& aInitDone)
        : mDecoder(aDecoder), mInitDone(aInitDone) {}

    virtual void Done(GMPVideoDecoderProxy* aGMP,
                      GMPVideoHost* aHost) override {
      std::string errorOut;
      int32_t result = mDecoder->GmpInitDone(aGMP, aHost, &errorOut);

      mInitDone->Dispatch(result, errorOut);
    }

   private:
    RefPtr<WebrtcGmpVideoDecoder> mDecoder;
    RefPtr<GmpInitDoneRunnable> mInitDone;
  };

  static void Decode_g(const RefPtr<WebrtcGmpVideoDecoder>& aThis,
                       UniquePtr<GMPDecodeData>&& aDecodeData);

  nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
  nsCOMPtr<nsIThread> mGMPThread;
  GMPVideoDecoderProxy* mGMP;  // Addref is held for us
  // Used to handle a race where Release() is called while init is in progress
  bool mInitting;
  // Frames queued for decode while mInitting is true
  nsTArray<UniquePtr<GMPDecodeData>> mQueuedFrames;
  GMPVideoHost* mHost;
  // Protects mCallback
  Mutex mCallbackMutex;
  webrtc::DecodedImageCallback* mCallback;
  // Cached so PluginID() works after the plugin goes away.
  Atomic<uint64_t> mCachedPluginId;
  // Last async decode status; see the Bug 1492852 note in Decode().
  Atomic<GMPErr, ReleaseAcquire> mDecoderStatus;
  std::string mPCHandle;
};
+
+// Basically a strong ref to a WebrtcGmpVideoDecoder, that also translates
+// from Release() to WebrtcGmpVideoDecoder::ReleaseGmp(), since we need
+// WebrtcGmpVideoDecoder::Release() for managing the refcount.
+// The webrtc.org code gets one of these, so it doesn't unilaterally delete
+// the "real" encoder.
+class WebrtcVideoDecoderProxy : public WebrtcVideoDecoder {
+ public:
+ WebrtcVideoDecoderProxy() : mDecoderImpl(new WebrtcGmpVideoDecoder) {}
+
+ virtual ~WebrtcVideoDecoderProxy() {
+ RegisterDecodeCompleteCallback(nullptr);
+ }
+
+ uint64_t PluginID() const override { return mDecoderImpl->PluginID(); }
+
+ int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores) override {
+ return mDecoderImpl->InitDecode(aCodecSettings, aNumberOfCores);
+ }
+
+ int32_t Decode(const webrtc::EncodedImage& aInputImage, bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs) override {
+ return mDecoderImpl->Decode(aInputImage, aMissingFrames, aFragmentation,
+ aCodecSpecificInfo, aRenderTimeMs);
+ }
+
+ int32_t RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback* aCallback) override {
+ return mDecoderImpl->RegisterDecodeCompleteCallback(aCallback);
+ }
+
+ int32_t Release() override { return mDecoderImpl->ReleaseGmp(); }
+
+ private:
+ RefPtr<WebrtcGmpVideoDecoder> mDecoderImpl;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h b/dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h
new file mode 100644
index 0000000000..8e0b1af441
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcImageBuffer_h__
+#define WebrtcImageBuffer_h__
+
+#include "webrtc/common_video/include/video_frame_buffer.h"
+#include "webrtc/rtc_base/keep_ref_until_done.h"
+
+namespace mozilla {
+namespace layers {
+class Image;
+}
+
+// webrtc::VideoFrameBuffer wrapper around a mozilla layers::Image, exposed
+// to webrtc as a native buffer. ToI420() hands out the underlying YUV
+// planes without copying when the image is planar YCbCr.
+class ImageBuffer : public webrtc::VideoFrameBuffer {
+ public:
+  explicit ImageBuffer(RefPtr<layers::Image>&& aImage)
+      : mImage(std::move(aImage)) {}
+
+  rtc::scoped_refptr<webrtc::I420BufferInterface> ToI420() override {
+    RefPtr<layers::PlanarYCbCrImage> yuvImage = mImage->AsPlanarYCbCrImage();
+    MOZ_ASSERT(yuvImage);
+    if (!yuvImage) {
+      // TODO. YUV420 ReadBack, Image only provides a RGB readback.
+      return nullptr;
+    }
+    const layers::PlanarYCbCrData* planes = yuvImage->GetData();
+    // Wrap the existing planes; KeepRefUntilDone holds a reference to the
+    // image for the lifetime of the wrapped buffer.
+    return new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
+        planes->mPicSize.width, planes->mPicSize.height, planes->mYChannel,
+        planes->mYStride, planes->mCbChannel, planes->mCbCrStride,
+        planes->mCrChannel, planes->mCbCrStride,
+        rtc::KeepRefUntilDone(yuvImage.get()));
+  }
+
+  Type type() const override { return Type::kNative; }
+
+  int width() const override { return mImage->GetSize().width; }
+
+  int height() const override { return mImage->GetSize().height; }
+
+  RefPtr<layers::Image> GetNativeImage() const { return mImage; }
+
+ private:
+  const RefPtr<layers::Image> mImage;
+};
+
+} // namespace mozilla
+
+#endif // WebrtcImageBuffer_h__
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.cpp
new file mode 100644
index 0000000000..5becd5bfb7
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.cpp
@@ -0,0 +1,1291 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdio>
+#include <queue>
+
+#include "common/browser_logging/CSFLog.h"
+#include "nspr.h"
+
+#include "JavaCallbacksSupport.h"
+#include "MediaCodec.h"
+#include "WebrtcMediaCodecVP8VideoCodec.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Monitor.h"
+#include "transport/runnable_utils.h"
+#include "MediaResult.h"
+
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "libyuv/convert_from.h"
+#include "libyuv/convert.h"
+#include "libyuv/row.h"
+
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
+
+#include "webrtc/api/video/i420_buffer.h"
+#include <webrtc/common_video/libyuv/include/webrtc_libyuv.h>
+
+using namespace mozilla;
+
+static const int32_t DECODER_TIMEOUT = 10 * PR_USEC_PER_MSEC; // 10ms
+static const char MEDIACODEC_VIDEO_MIME_VP8[] = "video/x-vnd.on2.vp8";
+
+namespace mozilla {
+
+static const char* wmcLogTag = "WebrtcMediaCodecVP8VideoCodec";
+#ifdef LOGTAG
+# undef LOGTAG
+#endif
+#define LOGTAG wmcLogTag
+
+// Receives asynchronous callbacks from the Java CodecProxy used by
+// WebrtcMediaCodecVP8VideoRemoteEncoder and repackages each output sample
+// into a webrtc::EncodedImage delivered to mCallback.
+class CallbacksSupport final : public JavaCallbacksSupport {
+ public:
+  // aCallback: sink for encoded frames; must outlive this object.
+  explicit CallbacksSupport(webrtc::EncodedImageCallback* aCallback)
+      : mCallback(aCallback), mPictureId(0) {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+    memset(&mEncodedImage, 0, sizeof(mEncodedImage));
+  }
+
+  ~CallbacksSupport() {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+    // mEncodedImage._buffer is owned here; free it on teardown.
+    if (mEncodedImage._size) {
+      delete[] mEncodedImage._buffer;
+      mEncodedImage._buffer = nullptr;
+      mEncodedImage._size = 0;
+    }
+  }
+
+  // Grow mEncodedImage's backing store to at least minimumSize bytes. Unlike
+  // WebrtcMediaCodecVP8VideoEncoder::VerifyAndAllocate, previous contents
+  // are NOT copied over -- HandleOutput rewrites the whole buffer.
+  void VerifyAndAllocate(const uint32_t minimumSize) {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+    if (minimumSize > mEncodedImage._size) {
+      uint8_t* newBuffer = new uint8_t[minimumSize];
+      MOZ_RELEASE_ASSERT(newBuffer);
+
+      if (mEncodedImage._buffer) {
+        delete[] mEncodedImage._buffer;
+      }
+      mEncodedImage._buffer = newBuffer;
+      mEncodedImage._size = minimumSize;
+    }
+  }
+
+  void HandleInput(jlong aTimestamp, bool aProcessed) override {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+  }
+
+  void HandleOutputFormatChanged(
+      java::sdk::MediaFormat::Param aFormat) override {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+  }
+
+  // Copy one encoded sample out of the Java buffer and forward it, with
+  // synthesized VP8 codec-specific info, to mCallback.
+  void HandleOutput(java::Sample::Param aSample,
+                    java::SampleBuffer::Param aBuffer) override {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+    java::sdk::BufferInfo::LocalRef info = aSample->Info();
+
+    int32_t size;
+    bool ok = NS_SUCCEEDED(info->Size(&size));
+    MOZ_RELEASE_ASSERT(ok);
+
+    if (size > 0) {
+      // mEncodedImage is reused across frames; serialize access.
+      rtc::CritScope lock(&mCritSect);
+      VerifyAndAllocate(size);
+
+      int64_t presentationTimeUs;
+      ok = NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
+      MOZ_RELEASE_ASSERT(ok);
+
+      // presentationTimeUs is microseconds; stored as milliseconds here.
+      // NOTE(review): webrtc's _timeStamp is conventionally in 90kHz RTP
+      // units -- confirm what downstream consumers expect.
+      mEncodedImage._timeStamp = presentationTimeUs / PR_USEC_PER_MSEC;
+      mEncodedImage.capture_time_ms_ = mEncodedImage._timeStamp;
+
+      int32_t flags;
+      ok = NS_SUCCEEDED(info->Flags(&flags));
+      MOZ_ASSERT(ok);
+
+      // NOTE(review): exact equality, not a bitwise test; a sync frame
+      // combined with any other flag bit would be classified as a delta
+      // frame -- confirm this is intended.
+      if (flags == java::sdk::MediaCodec::BUFFER_FLAG_SYNC_FRAME) {
+        mEncodedImage._frameType = webrtc::kVideoFrameKey;
+      } else {
+        mEncodedImage._frameType = webrtc::kVideoFrameDelta;
+      }
+      mEncodedImage._completeFrame = true;
+      mEncodedImage._length = size;
+
+      jni::ByteBuffer::LocalRef dest =
+          jni::ByteBuffer::New(mEncodedImage._buffer, size);
+      aBuffer->WriteToByteBuffer(dest, 0, size);
+
+      // NOTE(review): this local shadows the BufferInfo also named `info`
+      // above; consider renaming one of them.
+      webrtc::CodecSpecificInfo info;
+      info.codecType = webrtc::kVideoCodecVP8;
+      // 15-bit picture id, incremented per delivered frame.
+      info.codecSpecific.VP8.pictureId = mPictureId;
+      mPictureId = (mPictureId + 1) & 0x7FFF;
+      info.codecSpecific.VP8.tl0PicIdx = -1;
+      info.codecSpecific.VP8.keyIdx = -1;
+      info.codecSpecific.VP8.temporalIdx = 1;
+      info.codecSpecific.VP8.simulcastIdx = 0;
+
+      // Single-fragment RTP fragmentation header covering the whole frame.
+      webrtc::RTPFragmentationHeader header;
+      memset(&header, 0, sizeof(header));
+      header.VerifyAndAllocateFragmentationHeader(1);
+      header.fragmentationLength[0] = mEncodedImage._length;
+
+      MOZ_RELEASE_ASSERT(mCallback);
+      mCallback->OnEncodedImage(mEncodedImage, &info, &header);
+    }
+  }
+
+  void HandleError(const MediaResult& aError) override {
+    CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+  }
+
+  friend class WebrtcMediaCodecVP8VideoRemoteEncoder;
+
+ private:
+  webrtc::EncodedImageCallback* mCallback;
+  // NOTE(review): never set or read in this class -- appears vestigial.
+  Atomic<bool> mCanceled;
+  // Reused across frames; guarded by mCritSect in HandleOutput.
+  webrtc::EncodedImage mEncodedImage;
+  rtc::CriticalSection mCritSect;
+  uint32_t mPictureId;
+};
+
+// Create a platform MediaCodec decoder for the given mime type. Returns a
+// null ref when aMimeType is null or no decoder is available.
+static java::sdk::MediaCodec::LocalRef CreateDecoder(const char* aMimeType) {
+  java::sdk::MediaCodec::LocalRef codec;
+  if (aMimeType) {
+    java::sdk::MediaCodec::CreateDecoderByType(aMimeType, &codec);
+  }
+  return codec;
+}
+
+// Create a platform MediaCodec encoder for the given mime type. Returns a
+// null ref when aMimeType is null or no encoder is available.
+static java::sdk::MediaCodec::LocalRef CreateEncoder(const char* aMimeType) {
+  java::sdk::MediaCodec::LocalRef codec;
+  if (aMimeType) {
+    java::sdk::MediaCodec::CreateEncoderByType(aMimeType, &codec);
+  }
+  return codec;
+}
+
+// Shut down a drain thread; dispatched to the main thread by
+// MediaCodecOutputDrain::Stop() rather than run on the thread itself.
+static void ShutdownThread(const nsCOMPtr<nsIThread>& aThread) {
+  aThread->Shutdown();
+}
+
+// Base runnable class to repeatedly pull MediaCodec output buffers in a
+// separate thread. How to use:
+// - implementing DrainOutput() to get output. Remember to return false to tell
+// drain not to pop input queue.
+// - call QueueInput() to schedule a run to drain output. The input, aFrame,
+// should contain corresponding info such as image size and timestamps for
+// DrainOutput() implementation to construct data needed by encoded/decoded
+// callbacks.
+class MediaCodecOutputDrain : public Runnable {
+ public:
+  // Lazily create the drain thread and (re)dispatch this runnable to it.
+  void Start() {
+    MonitorAutoLock lock(mMonitor);
+    if (mThread == nullptr) {
+      NS_NewNamedThread("OutputDrain", getter_AddRefs(mThread));
+    }
+    mEnding = false;
+    mThread->Dispatch(this, NS_DISPATCH_NORMAL);
+  }
+
+  // Ask Run() to exit and hand the nsIThread shutdown off to the main
+  // thread (see ShutdownThread above).
+  void Stop() {
+    MonitorAutoLock lock(mMonitor);
+    mEnding = true;
+    lock.NotifyAll();  // In case Run() is waiting.
+
+    if (mThread != nullptr) {
+      // Drop the monitor while dispatching so Run() can wake up and exit.
+      MonitorAutoUnlock unlock(mMonitor);
+      NS_DispatchToMainThread(
+          WrapRunnableNM(&ShutdownThread, nsCOMPtr<nsIThread>(mThread)));
+      mThread = nullptr;
+    }
+  }
+
+  // Queue descriptive info for one submitted input frame and wake Run().
+  void QueueInput(const EncodedFrame& aFrame) {
+    MonitorAutoLock lock(mMonitor);
+
+    MOZ_ASSERT(mThread);
+
+    mInputFrames.push(aFrame);
+    // Notify Run() about queued input and it can start working.
+    lock.NotifyAll();
+  }
+
+  NS_IMETHOD Run() override {
+    MOZ_ASSERT(mThread);
+
+    MonitorAutoLock lock(mMonitor);
+    while (true) {
+      if (mInputFrames.empty()) {
+        // Wait for new input.
+        // NOTE(review): the wait is not re-checked in a loop; a wakeup with
+        // an empty queue while mEnding is still false would trip the assert
+        // below -- confirm spurious wakeups cannot occur here.
+        lock.Wait();
+      }
+
+      if (mEnding) {
+        // Stop draining.
+        break;
+      }
+
+      MOZ_ASSERT(!mInputFrames.empty());
+      {
+        // Release monitor while draining because it's blocking.
+        MonitorAutoUnlock unlock(mMonitor);
+        DrainOutput();
+      }
+    }
+
+    return NS_OK;
+  }
+
+ protected:
+  MediaCodecOutputDrain()
+      : Runnable("MediaCodecOutputDrain"),
+        mMonitor("MediaCodecOutputDrain monitor"),
+        mEnding(false) {}
+
+  // Drain output buffer for input frame queue mInputFrames.
+  // mInputFrames contains info such as size and time of the input frames.
+  // We have to give a queue to handle encoder frame skips - we can input 10
+  // frames and get one back. NOTE: any access of mInputFrames MUST be
+  // preceded by locking mMonitor!
+
+  // Blocks waiting for decoded buffers, but for a limited period because
+  // we need to check for shutdown.
+  virtual bool DrainOutput() = 0;
+
+ protected:
+  // This monitor protects all things below it, and is also used to
+  // wait/notify queued input.
+  Monitor mMonitor;
+  std::queue<EncodedFrame> mInputFrames;
+
+ private:
+  // also protected by mMonitor
+  nsCOMPtr<nsIThread> mThread;
+  bool mEnding;
+};
+
+// Thin owner of an Android MediaCodec (via the Java SDK bindings) shared by
+// the VP8 encoder and decoder wrappers in this file. The wrappers are
+// friends and poke at isStarted and the cached buffer arrays directly.
+class WebrtcAndroidMediaCodec {
+ public:
+  WebrtcAndroidMediaCodec()
+      : mEncoderCallback(nullptr),
+        mDecoderCallback(nullptr),
+        // Initialize the JNI buffer arrays and dimensions explicitly: they
+        // were previously left uninitialized, and GetInputBuffers()/
+        // GetOutputBuffers() read them before the first assignment.
+        mInputBuffers(nullptr),
+        mOutputBuffers(nullptr),
+        mWidth(0),
+        mHeight(0),
+        isStarted(false),
+        mEnding(false) {
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  }
+
+  // Create and configure the underlying codec for |mime|. Only the first
+  // call does anything; later calls return NS_OK without reconfiguring.
+  nsresult Configure(uint32_t width, uint32_t height, const jobject aSurface,
+                     uint32_t flags, const char* mime, bool encoder) {
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+    nsresult res = NS_OK;
+
+    if (!mCoder) {
+      mWidth = width;
+      mHeight = height;
+
+      java::sdk::MediaFormat::LocalRef format;
+
+      res = java::sdk::MediaFormat::CreateVideoFormat(nsCString(mime), mWidth,
+                                                      mHeight, &format);
+
+      if (NS_FAILED(res)) {
+        CSFLogDebug(
+            LOGTAG,
+            "WebrtcAndroidMediaCodec::%s, CreateVideoFormat failed err = %d",
+            __FUNCTION__, (int)res);
+        return NS_ERROR_FAILURE;
+      }
+
+      if (encoder) {
+        mCoder = CreateEncoder(mime);
+        // CreateEncoder reports failure via a null codec ref; |res| still
+        // holds the CreateVideoFormat result, so checking it here (as the
+        // old code did) could never detect a creation failure.
+        if (!mCoder) {
+          CSFLogDebug(LOGTAG,
+                      "WebrtcAndroidMediaCodec::%s, CreateEncoderByType failed",
+                      __FUNCTION__);
+          return NS_ERROR_FAILURE;
+        }
+
+        // Fixed encoder tuning: 300kbps, CBR-ish mode 2, NV12 (color format
+        // 21), 30fps, keyframe interval 100s.
+        res = format->SetInteger(java::sdk::MediaFormat::KEY_BIT_RATE,
+                                 1000 * 300);
+        res = format->SetInteger(java::sdk::MediaFormat::KEY_BITRATE_MODE, 2);
+        res = format->SetInteger(java::sdk::MediaFormat::KEY_COLOR_FORMAT, 21);
+        res = format->SetInteger(java::sdk::MediaFormat::KEY_FRAME_RATE, 30);
+        res = format->SetInteger(java::sdk::MediaFormat::KEY_I_FRAME_INTERVAL,
+                                 100);
+
+      } else {
+        mCoder = CreateDecoder(mime);
+        if (!mCoder) {
+          CSFLogDebug(LOGTAG,
+                      "WebrtcAndroidMediaCodec::%s, CreateDecoderByType failed",
+                      __FUNCTION__);
+          return NS_ERROR_FAILURE;
+        }
+      }
+      res = mCoder->Configure(format, nullptr, nullptr, flags);
+      if (NS_FAILED(res)) {
+        CSFLogDebug(LOGTAG, "WebrtcAndroidMediaCodec::%s, err = %d",
+                    __FUNCTION__, (int)res);
+      }
+    }
+
+    return res;
+  }
+
+  // Start the configured codec. Fails if Configure has not succeeded.
+  nsresult Start() {
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+    if (!mCoder) {
+      return NS_ERROR_FAILURE;
+    }
+
+    mEnding = false;
+
+    nsresult res;
+    res = mCoder->Start();
+    if (NS_FAILED(res)) {
+      CSFLogDebug(
+          LOGTAG,
+          "WebrtcAndroidMediaCodec::%s, mCoder->start() return err = %d",
+          __FUNCTION__, (int)res);
+      return res;
+    }
+    isStarted = true;
+    return NS_OK;
+  }
+
+  // Stop the drain thread (if any) and the codec. Safe to call even when
+  // Configure() never succeeded.
+  nsresult Stop() {
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+    mEnding = true;
+
+    if (mOutputDrain != nullptr) {
+      mOutputDrain->Stop();
+      mOutputDrain = nullptr;
+    }
+
+    // Guard: mCoder is null when Configure() failed or was never called.
+    if (mCoder) {
+      mCoder->Stop();
+      mCoder->Release();
+    }
+    isStarted = false;
+    return NS_OK;
+  }
+
+  // Convert one decoded NV12 output buffer into an I420 webrtc::VideoFrame
+  // stored in mVideoFrame.
+  void GenerateVideoFrame(size_t width, size_t height, uint32_t timeStamp,
+                          void* decoded, int color_format) {
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+    // TODO: eliminate extra pixel copy/color conversion
+    size_t widthUV = (width + 1) / 2;
+    rtc::scoped_refptr<webrtc::I420Buffer> buffer;
+    buffer = webrtc::I420Buffer::Create(width, height, width, widthUV, widthUV);
+
+    uint8_t* src_nv12 = static_cast<uint8_t*>(decoded);
+    int src_nv12_y_size = width * height;
+
+    uint8_t* dstY = buffer->MutableDataY();
+    uint8_t* dstU = buffer->MutableDataU();
+    uint8_t* dstV = buffer->MutableDataV();
+
+    libyuv::NV12ToI420(src_nv12, width, src_nv12 + src_nv12_y_size,
+                       (width + 1) & ~1, dstY, width, dstU, (width + 1) / 2,
+                       dstV, (width + 1) / 2, width, height);
+
+    mVideoFrame.reset(
+        new webrtc::VideoFrame(buffer, timeStamp, 0, webrtc::kVideoRotation_0));
+  }
+
+  // Feed one encoded frame to the decoder and queue its metadata for the
+  // output drain thread. Returns the input buffer index, or -1 on failure.
+  int32_t FeedMediaCodecInput(const webrtc::EncodedImage& inputImage,
+                              int64_t renderTimeMs) {
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    uint32_t time = PR_IntervalNow();
+    CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+#endif
+
+    int inputIndex = DequeueInputBuffer(DECODER_TIMEOUT);
+    if (inputIndex == -1) {
+      CSFLogError(LOGTAG, "%s equeue input buffer failed", __FUNCTION__);
+      return inputIndex;
+    }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    CSFLogDebug(LOGTAG, "%s dequeue input buffer took %u ms", __FUNCTION__,
+                PR_IntervalToMilliseconds(PR_IntervalNow() - time));
+    time = PR_IntervalNow();
+#endif
+
+    size_t size = inputImage._length;
+
+    JNIEnv* const env = jni::GetEnvForThread();
+    jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex);
+    void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+    PodCopy((uint8_t*)directBuffer, inputImage._buffer, size);
+
+    if (inputIndex >= 0) {
+      CSFLogError(LOGTAG, "%s queue input buffer inputIndex = %d", __FUNCTION__,
+                  inputIndex);
+      QueueInputBuffer(inputIndex, 0, size, renderTimeMs, 0);
+
+      {
+        // Lazily spin up the drain thread on the first queued input.
+        if (mOutputDrain == nullptr) {
+          mOutputDrain = new OutputDrain(this);
+          mOutputDrain->Start();
+        }
+        EncodedFrame frame;
+        frame.width_ = mWidth;
+        frame.height_ = mHeight;
+        frame.timeStamp_ = inputImage._timeStamp;
+        frame.decode_timestamp_ = renderTimeMs;
+        mOutputDrain->QueueInput(frame);
+      }
+      env->DeleteLocalRef(buffer);
+    }
+
+    return inputIndex;
+  }
+
+  // One drain pass: pull a single output buffer from the codec, convert it
+  // and deliver it to mDecoderCallback. Called repeatedly on the drain
+  // thread; aInputFrames/aMonitor belong to the OutputDrain runnable.
+  nsresult DrainOutput(std::queue<EncodedFrame>& aInputFrames,
+                       Monitor& aMonitor) {
+    MOZ_ASSERT(mCoder != nullptr);
+    if (mCoder == nullptr) {
+      return NS_ERROR_FAILURE;
+    }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    uint32_t time = PR_IntervalNow();
+#endif
+    nsresult res;
+    java::sdk::BufferInfo::LocalRef bufferInfo;
+    res = java::sdk::BufferInfo::New(&bufferInfo);
+    if (NS_FAILED(res)) {
+      CSFLogDebug(
+          LOGTAG,
+          "WebrtcAndroidMediaCodec::%s, BufferInfo::New return err = %d",
+          __FUNCTION__, (int)res);
+      return res;
+    }
+    int32_t outputIndex = DequeueOutputBuffer(bufferInfo);
+
+    if (outputIndex == java::sdk::MediaCodec::INFO_TRY_AGAIN_LATER) {
+      // Not an error: output not available yet. Try later.
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer try again:%d", __FUNCTION__,
+                  outputIndex);
+    } else if (outputIndex ==
+               java::sdk::MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
+      // handle format change
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer format changed:%d",
+                  __FUNCTION__, outputIndex);
+    } else if (outputIndex ==
+               java::sdk::MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer changed:%d", __FUNCTION__,
+                  outputIndex);
+      // Refresh our cached global refs to the Java output buffer array.
+      GetOutputBuffers();
+    } else if (outputIndex < 0) {
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer unknow error:%d",
+                  __FUNCTION__, outputIndex);
+      // Give up on the oldest queued frame so the queue cannot grow without
+      // bound on persistent errors.
+      MonitorAutoLock lock(aMonitor);
+      aInputFrames.pop();
+    } else {
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+      CSFLogDebug(LOGTAG,
+                  "%s dequeue output buffer# return status is %d took %u ms",
+                  __FUNCTION__, outputIndex,
+                  PR_IntervalToMilliseconds(PR_IntervalNow() - time));
+#endif
+      EncodedFrame frame;
+      {
+        MonitorAutoLock lock(aMonitor);
+        frame = aInputFrames.front();
+        aInputFrames.pop();
+      }
+
+      if (mEnding) {
+        ReleaseOutputBuffer(outputIndex, false);
+        return NS_OK;
+      }
+
+      JNIEnv* const env = jni::GetEnvForThread();
+      jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex);
+      if (buffer) {
+        // The buffer will be null on Android L if we are decoding to a Surface
+        void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+        int color_format = 0;
+
+        CSFLogDebug(
+            LOGTAG,
+            "%s generate video frame, width = %d, height = %d, timeStamp_ = %d",
+            __FUNCTION__, frame.width_, frame.height_, frame.timeStamp_);
+        GenerateVideoFrame(frame.width_, frame.height_, frame.timeStamp_,
+                           directBuffer, color_format);
+        mDecoderCallback->Decoded(*mVideoFrame);
+
+        ReleaseOutputBuffer(outputIndex, false);
+        env->DeleteLocalRef(buffer);
+      }
+    }
+    return NS_OK;
+  }
+
+  // Dequeue an input buffer index, or -1 on failure/timeout.
+  int32_t DequeueInputBuffer(int64_t time) {
+    nsresult res;
+    int32_t inputIndex;
+    res = mCoder->DequeueInputBuffer(time, &inputIndex);
+
+    if (NS_FAILED(res)) {
+      CSFLogDebug(LOGTAG,
+                  "WebrtcAndroidMediaCodec::%s, mCoder->DequeueInputBuffer() "
+                  "return err = %d",
+                  __FUNCTION__, (int)res);
+      return -1;
+    }
+    return inputIndex;
+  }
+
+  void QueueInputBuffer(int32_t inputIndex, int32_t offset, size_t size,
+                        int64_t renderTimes, int32_t flags) {
+    nsresult res = NS_OK;
+    res =
+        mCoder->QueueInputBuffer(inputIndex, offset, size, renderTimes, flags);
+
+    if (NS_FAILED(res)) {
+      CSFLogDebug(LOGTAG,
+                  "WebrtcAndroidMediaCodec::%s, mCoder->QueueInputBuffer() "
+                  "return err = %d",
+                  __FUNCTION__, (int)res);
+    }
+  }
+
+  // Dequeue an output buffer; returns the MediaCodec status/index, or -1 on
+  // a failed JNI call.
+  int32_t DequeueOutputBuffer(java::sdk::BufferInfo::Param aInfo) {
+    nsresult res;
+
+    int32_t outputStatus;
+    res = mCoder->DequeueOutputBuffer(aInfo, DECODER_TIMEOUT, &outputStatus);
+
+    if (NS_FAILED(res)) {
+      CSFLogDebug(LOGTAG,
+                  "WebrtcAndroidMediaCodec::%s, mCoder->DequeueOutputBuffer() "
+                  "return err = %d",
+                  __FUNCTION__, (int)res);
+      return -1;
+    }
+
+    return outputStatus;
+  }
+
+  void ReleaseOutputBuffer(int32_t index, bool flag) {
+    mCoder->ReleaseOutputBuffer(index, flag);
+  }
+
+  // Cache a global ref to the codec's Java input buffer array, releasing any
+  // previous one. Returns null when the JNI call fails.
+  jobjectArray GetInputBuffers() {
+    JNIEnv* const env = jni::GetEnvForThread();
+
+    if (mInputBuffers) {
+      env->DeleteGlobalRef(mInputBuffers);
+    }
+
+    nsresult res;
+    jni::ObjectArray::LocalRef inputBuffers;
+    res = mCoder->GetInputBuffers(&inputBuffers);
+    mInputBuffers = (jobjectArray)env->NewGlobalRef(inputBuffers.Get());
+    if (NS_FAILED(res)) {
+      CSFLogDebug(
+          LOGTAG,
+          "WebrtcAndroidMediaCodec::%s, GetInputBuffers return err = %d",
+          __FUNCTION__, (int)res);
+      return nullptr;
+    }
+
+    return mInputBuffers;
+  }
+
+  // Same as GetInputBuffers, for the output buffer array.
+  jobjectArray GetOutputBuffers() {
+    JNIEnv* const env = jni::GetEnvForThread();
+
+    if (mOutputBuffers) {
+      env->DeleteGlobalRef(mOutputBuffers);
+    }
+
+    nsresult res;
+    jni::ObjectArray::LocalRef outputBuffers;
+    res = mCoder->GetOutputBuffers(&outputBuffers);
+    mOutputBuffers = (jobjectArray)env->NewGlobalRef(outputBuffers.Get());
+    if (NS_FAILED(res)) {
+      CSFLogDebug(
+          LOGTAG,
+          "WebrtcAndroidMediaCodec::%s, GetOutputBuffers return err = %d",
+          __FUNCTION__, (int)res);
+      return nullptr;
+    }
+
+    return mOutputBuffers;
+  }
+
+  void SetDecoderCallback(webrtc::DecodedImageCallback* aCallback) {
+    mDecoderCallback = aCallback;
+  }
+
+  void SetEncoderCallback(webrtc::EncodedImageCallback* aCallback) {
+    mEncoderCallback = aCallback;
+  }
+
+ protected:
+  virtual ~WebrtcAndroidMediaCodec() {}
+
+ private:
+  // Drain runnable bound back to this codec instance (raw pointer: the
+  // owning wrapper stops the drain before dropping the codec).
+  class OutputDrain : public MediaCodecOutputDrain {
+   public:
+    explicit OutputDrain(WebrtcAndroidMediaCodec* aMediaCodec)
+        : MediaCodecOutputDrain(), mMediaCodec(aMediaCodec) {}
+
+   protected:
+    virtual bool DrainOutput() override {
+      return (mMediaCodec->DrainOutput(mInputFrames, mMonitor) == NS_OK);
+    }
+
+   private:
+    WebrtcAndroidMediaCodec* mMediaCodec;
+  };
+
+  friend class WebrtcMediaCodecVP8VideoEncoder;
+  friend class WebrtcMediaCodecVP8VideoDecoder;
+
+  java::sdk::MediaCodec::GlobalRef mCoder;
+  webrtc::EncodedImageCallback* mEncoderCallback;
+  webrtc::DecodedImageCallback* mDecoderCallback;
+  std::unique_ptr<webrtc::VideoFrame> mVideoFrame;
+
+  // Global refs to the Java ByteBuffer arrays; refreshed when the codec
+  // reports INFO_OUTPUT_BUFFERS_CHANGED.
+  jobjectArray mInputBuffers;
+  jobjectArray mOutputBuffers;
+
+  RefPtr<OutputDrain> mOutputDrain;
+  uint32_t mWidth;
+  uint32_t mHeight;
+  bool isStarted;
+  bool mEnding;
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcAndroidMediaCodec)
+};
+
+// Repack an I420 webrtc frame as NV12 (Y plane followed by interleaved UV)
+// into the buffer starting at dstY. Returns true on success.
+// NOTE(review): the dstUV parameter is never used -- the UV destination is
+// recomputed from dstY and the source luma stride. Callers currently pass a
+// pointer computed the same way, but the redundant parameter is misleading.
+static bool I420toNV12(uint8_t* dstY, uint16_t* dstUV,
+                       const webrtc::VideoFrame& inputImage) {
+  rtc::scoped_refptr<webrtc::I420BufferInterface> inputBuffer =
+      inputImage.video_frame_buffer()->GetI420();
+
+  uint8_t* buffer = dstY;
+  uint8_t* dst_y = buffer;
+  int dst_stride_y = inputBuffer->StrideY();
+  // UV plane starts right after the stride-sized luma plane.
+  uint8_t* dst_uv = buffer + inputBuffer->StrideY() * inputImage.height();
+  // NOTE(review): assumes the packed UV stride is exactly twice the source
+  // chroma stride -- confirm for frames with padded strides.
+  int dst_stride_uv = inputBuffer->StrideU() * 2;
+
+  // Why NV12? Because COLOR_FORMAT_YUV420_SEMIPLANAR. Most hardware is
+  // NV12-friendly.
+  // libyuv returns 0 on success; invert into a bool.
+  bool converted = !libyuv::I420ToNV12(
+      inputBuffer->DataY(), inputBuffer->StrideY(), inputBuffer->DataU(),
+      inputBuffer->StrideU(), inputBuffer->DataV(), inputBuffer->StrideV(),
+      dst_y, dst_stride_y, dst_uv, dst_stride_uv, inputImage.width(),
+      inputImage.height());
+  return converted;
+}
+
+// Encoder.
+WebrtcMediaCodecVP8VideoEncoder::WebrtcMediaCodecVP8VideoEncoder()
+    : mCallback(nullptr), mMediaCodecEncoder(nullptr) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+  // Zero the EncodedImage so _buffer/_size start out empty; storage is
+  // allocated on demand by VerifyAndAllocate().
+  memset(&mEncodedImage, 0, sizeof(mEncodedImage));
+}
+
+// Refresh the cached Java-side input buffer array; false when the codec
+// could not supply one.
+bool WebrtcMediaCodecVP8VideoEncoder::ResetInputBuffers() {
+  mInputBuffers = mMediaCodecEncoder->GetInputBuffers();
+  return mInputBuffers != nullptr;
+}
+
+// Refresh the cached Java-side output buffer array; false when the codec
+// could not supply one.
+bool WebrtcMediaCodecVP8VideoEncoder::ResetOutputBuffers() {
+  mOutputBuffers = mMediaCodecEncoder->GetOutputBuffers();
+  return mOutputBuffers != nullptr;
+}
+
+// Grow mEncodedImage's buffer to at least minimumSize bytes, preserving
+// existing contents. Returns 0 on success, -1 on allocation failure.
+int32_t WebrtcMediaCodecVP8VideoEncoder::VerifyAndAllocate(
+    const uint32_t minimumSize) {
+  if (minimumSize > mEncodedImage._size) {
+    // create buffer of sufficient size
+    uint8_t* newBuffer = new uint8_t[minimumSize];
+    // NOTE(review): plain new does not return null (it throws / is
+    // infallible), so this check is dead code; new (std::nothrow) would be
+    // needed for the -1 path to be reachable.
+    if (newBuffer == nullptr) {
+      return -1;
+    }
+    if (mEncodedImage._buffer) {
+      // copy old data
+      memcpy(newBuffer, mEncodedImage._buffer, mEncodedImage._size);
+      delete[] mEncodedImage._buffer;
+    }
+    mEncodedImage._buffer = newBuffer;
+    mEncodedImage._size = minimumSize;
+  }
+  return 0;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::InitEncode(
+    const webrtc::VideoCodec* codecSettings, int32_t numberOfCores,
+    size_t maxPayloadSize) {
+  CSFLogDebug(LOGTAG, "%s, w = %d, h = %d", __FUNCTION__, codecSettings->width,
+              codecSettings->height);
+  // The codec itself is configured lazily on the first Encode() call; only
+  // the payload limit is recorded here.
+  mMaxPayloadSize = maxPayloadSize;
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Encode one frame. On first use this lazily creates, configures and starts
+// the underlying MediaCodec, then performs a single synchronous
+// queue-input / dequeue-output pass, delivering any produced frame to
+// mCallback (which must already be registered -- it is dereferenced without
+// a null check below).
+int32_t WebrtcMediaCodecVP8VideoEncoder::Encode(
+    const webrtc::VideoFrame& inputImage,
+    const webrtc::CodecSpecificInfo* codecSpecificInfo,
+    const std::vector<webrtc::FrameType>* frame_types) {
+  CSFLogDebug(LOGTAG, "%s, w = %d, h = %d", __FUNCTION__, inputImage.width(),
+              inputImage.height());
+
+  if (!mMediaCodecEncoder) {
+    mMediaCodecEncoder = new WebrtcAndroidMediaCodec();
+  }
+
+  // Lazy one-time configuration using the first frame's dimensions.
+  if (!mMediaCodecEncoder->isStarted) {
+    if (inputImage.width() == 0 || inputImage.height() == 0) {
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    } else {
+      mFrameWidth = inputImage.width();
+      mFrameHeight = inputImage.height();
+    }
+
+    mMediaCodecEncoder->SetEncoderCallback(mCallback);
+    nsresult res = mMediaCodecEncoder->Configure(
+        mFrameWidth, mFrameHeight, nullptr,
+        java::sdk::MediaCodec::CONFIGURE_FLAG_ENCODE, MEDIACODEC_VIDEO_MIME_VP8,
+        true /* encoder */);
+
+    if (res != NS_OK) {
+      CSFLogDebug(LOGTAG, "%s, encoder configure return err = %d", __FUNCTION__,
+                  (int)res);
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+
+    res = mMediaCodecEncoder->Start();
+
+    if (NS_FAILED(res)) {
+      mMediaCodecEncoder->isStarted = false;
+      CSFLogDebug(LOGTAG, "%s start encoder. err = %d", __FUNCTION__, (int)res);
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+
+    bool retBool = ResetInputBuffers();
+    if (!retBool) {
+      CSFLogDebug(LOGTAG, "%s ResetInputBuffers failed.", __FUNCTION__);
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+    retBool = ResetOutputBuffers();
+    if (!retBool) {
+      CSFLogDebug(LOGTAG, "%s ResetOutputBuffers failed.", __FUNCTION__);
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+
+    mMediaCodecEncoder->isStarted = true;
+  }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+  uint32_t time = PR_IntervalNow();
+#endif
+
+  // NV12 layout: full-stride luma plane followed by an interleaved UV plane
+  // of half height.
+  rtc::scoped_refptr<webrtc::I420BufferInterface> inputBuffer =
+      inputImage.video_frame_buffer()->GetI420();
+  size_t sizeY = inputImage.height() * inputBuffer->StrideY();
+  size_t sizeUV = ((inputImage.height() + 1) / 2) * inputBuffer->StrideU();
+  size_t size = sizeY + 2 * sizeUV;
+
+  int inputIndex = mMediaCodecEncoder->DequeueInputBuffer(DECODER_TIMEOUT);
+  if (inputIndex == -1) {
+    CSFLogError(LOGTAG, "%s dequeue input buffer failed", __FUNCTION__);
+    // NOTE(review): returns -1 here, which is not a WEBRTC_VIDEO_CODEC_*
+    // status code -- confirm callers treat any negative value as an error.
+    return inputIndex;
+  }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+  CSFLogDebug(LOGTAG,
+              "%s WebrtcMediaCodecVP8VideoEncoder::Encode() dequeue OMX input "
+              "buffer took %u ms",
+              __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow() - time));
+#endif
+
+  if (inputIndex >= 0) {
+    JNIEnv* const env = jni::GetEnvForThread();
+    jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex);
+    void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+    uint8_t* dstY = static_cast<uint8_t*>(directBuffer);
+    uint16_t* dstUV = reinterpret_cast<uint16_t*>(dstY + sizeY);
+
+    bool converted = I420toNV12(dstY, dstUV, inputImage);
+    if (!converted) {
+      CSFLogError(LOGTAG,
+                  "%s WebrtcMediaCodecVP8VideoEncoder::Encode() convert input "
+                  "buffer to NV12 error.",
+                  __FUNCTION__);
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+
+    env->DeleteLocalRef(buffer);
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    time = PR_IntervalNow();
+    CSFLogError(LOGTAG, "%s queue input buffer inputIndex = %d", __FUNCTION__,
+                inputIndex);
+#endif
+
+    mMediaCodecEncoder->QueueInputBuffer(
+        inputIndex, 0, size,
+        inputImage.render_time_ms() * PR_USEC_PER_MSEC /* ms to us */, 0);
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    CSFLogDebug(LOGTAG,
+                "%s WebrtcMediaCodecVP8VideoEncoder::Encode() queue input "
+                "buffer took %u ms",
+                __FUNCTION__,
+                PR_IntervalToMilliseconds(PR_IntervalNow() - time));
+#endif
+    mEncodedImage._encodedWidth = inputImage.width();
+    mEncodedImage._encodedHeight = inputImage.height();
+    mEncodedImage._timeStamp = inputImage.timestamp();
+    mEncodedImage.capture_time_ms_ = inputImage.timestamp();
+
+    nsresult res;
+    java::sdk::BufferInfo::LocalRef bufferInfo;
+    res = java::sdk::BufferInfo::New(&bufferInfo);
+    if (NS_FAILED(res)) {
+      CSFLogDebug(LOGTAG,
+                  "WebrtcMediaCodecVP8VideoEncoder::%s, BufferInfo::New return "
+                  "err = %d",
+                  __FUNCTION__, (int)res);
+      return -1;
+    }
+
+    int32_t outputIndex = mMediaCodecEncoder->DequeueOutputBuffer(bufferInfo);
+
+    if (outputIndex == java::sdk::MediaCodec::INFO_TRY_AGAIN_LATER) {
+      // Not an error: output not available yet. Try later.
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer try again:%d", __FUNCTION__,
+                  outputIndex);
+    } else if (outputIndex ==
+               java::sdk::MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
+      // handle format change
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer format changed:%d",
+                  __FUNCTION__, outputIndex);
+    } else if (outputIndex ==
+               java::sdk::MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer changed:%d", __FUNCTION__,
+                  outputIndex);
+      mMediaCodecEncoder->GetOutputBuffers();
+    } else if (outputIndex < 0) {
+      CSFLogDebug(LOGTAG, "%s dequeue output buffer unknow error:%d",
+                  __FUNCTION__, outputIndex);
+    } else {
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+      CSFLogDebug(LOGTAG,
+                  "%s dequeue output buffer return status is %d took %u ms",
+                  __FUNCTION__, outputIndex,
+                  PR_IntervalToMilliseconds(PR_IntervalNow() - time));
+#endif
+
+      JNIEnv* const env = jni::GetEnvForThread();
+      jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex);
+      if (buffer) {
+        int32_t offset;
+        bufferInfo->Offset(&offset);
+        int32_t flags;
+        bufferInfo->Flags(&flags);
+
+        // The buffer will be null on Android L if we are decoding to a Surface
+        void* directBuffer =
+            reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(buffer)) +
+            offset;
+
+        // NOTE(review): exact equality, not a bitwise test; a sync frame
+        // combined with any other flag bit is treated as a delta frame --
+        // confirm this is intended.
+        if (flags == java::sdk::MediaCodec::BUFFER_FLAG_SYNC_FRAME) {
+          mEncodedImage._frameType = webrtc::kVideoFrameKey;
+        } else {
+          mEncodedImage._frameType = webrtc::kVideoFrameDelta;
+        }
+        mEncodedImage._completeFrame = true;
+
+        // NOTE(review): this int32_t `size` shadows the size_t `size`
+        // (input payload size) computed above; consider renaming.
+        int32_t size;
+        bufferInfo->Size(&size);
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+        CSFLogDebug(LOGTAG,
+                    "%s dequeue output buffer ok, index:%d, buffer size = %d, "
+                    "buffer offset = %d, flags = %d",
+                    __FUNCTION__, outputIndex, size, offset, flags);
+#endif
+
+        if (VerifyAndAllocate(size) == -1) {
+          CSFLogDebug(LOGTAG, "%s VerifyAndAllocate buffers failed",
+                      __FUNCTION__);
+          return WEBRTC_VIDEO_CODEC_ERROR;
+        }
+
+        mEncodedImage._length = size;
+
+        // xxx It's too bad the mediacodec API forces us to memcpy this....
+        // we should find a way that able to 'hold' the buffer or transfer it
+        // from inputImage (ping-pong buffers or select them from a small pool)
+        memcpy(mEncodedImage._buffer, directBuffer, mEncodedImage._length);
+
+        webrtc::CodecSpecificInfo info;
+        info.codecType = webrtc::kVideoCodecVP8;
+        info.codecSpecific.VP8.pictureId = -1;
+        info.codecSpecific.VP8.tl0PicIdx = -1;
+        info.codecSpecific.VP8.keyIdx = -1;
+        info.codecSpecific.VP8.temporalIdx = 1;
+
+        // Generate a header describing a single fragment.
+        webrtc::RTPFragmentationHeader header;
+        memset(&header, 0, sizeof(header));
+        header.VerifyAndAllocateFragmentationHeader(1);
+        header.fragmentationLength[0] = mEncodedImage._length;
+
+        mCallback->OnEncodedImage(mEncodedImage, &info, &header);
+
+        mMediaCodecEncoder->ReleaseOutputBuffer(outputIndex, false);
+        env->DeleteLocalRef(buffer);
+      }
+    }
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Store the sink that encoded frames are delivered to. Encode() dereferences
+// mCallback without a null check, so this must be called first.
+int32_t WebrtcMediaCodecVP8VideoEncoder::RegisterEncodeCompleteCallback(
+    webrtc::EncodedImageCallback* callback) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  mCallback = callback;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Tear down the codec wrapper and free the encoded-image buffer. Idempotent;
+// also invoked from the destructor.
+// NOTE(review): deletes the refcounted WebrtcAndroidMediaCodec directly
+// (legal via friendship) and does not call Stop() on it first -- confirm the
+// drain thread cannot still be running at this point.
+int32_t WebrtcMediaCodecVP8VideoEncoder::Release() {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  delete mMediaCodecEncoder;
+  mMediaCodecEncoder = nullptr;
+
+  delete[] mEncodedImage._buffer;
+  mEncodedImage._buffer = nullptr;
+  mEncodedImage._size = 0;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+WebrtcMediaCodecVP8VideoEncoder::~WebrtcMediaCodecVP8VideoEncoder() {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  // Release() is idempotent, so an earlier explicit Release() is fine.
+  Release();
+}
+
+// Packet-loss/RTT feedback is accepted but not used by this encoder.
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetChannelParameters(
+    uint32_t packetLoss, int64_t rtt) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Bitrate/framerate updates are currently ignored (see the XXX below); only
+// the initialized-state check is implemented.
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetRates(uint32_t newBitRate,
+                                                  uint32_t frameRate) {
+  CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+  if (!mMediaCodecEncoder) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+
+  // XXX
+  // 1. implement MediaCodec's setParameters method
+  // 2.find a way to initiate a Java Bundle instance as parameter for MediaCodec
+  // setParameters method. mMediaCodecEncoder->setParameters
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Delegates all cleanup to Release().
+WebrtcMediaCodecVP8VideoRemoteEncoder::
+    ~WebrtcMediaCodecVP8VideoRemoteEncoder() {
+  CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+  Release();
+}
+
+// No-op: the Java encoder is created lazily on the first Encode() call.
+int32_t WebrtcMediaCodecVP8VideoRemoteEncoder::InitEncode(
+    const webrtc::VideoCodec* codecSettings, int32_t numberOfCores,
+    size_t maxPayloadSize) {
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Forward a bitrate update to the Java codec once it exists.
+int32_t WebrtcMediaCodecVP8VideoRemoteEncoder::SetRates(uint32_t newBitRate,
+                                                        uint32_t frameRate) {
+  CSFLogDebug(LOGTAG, "%s, newBitRate: %d, frameRate: %d", __FUNCTION__,
+              newBitRate, frameRate);
+  if (!mJavaEncoder) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+  // newBitRate appears to be in kbps (hence * 1000 for SetBitrate's bps).
+  // frameRate is ignored.
+  mJavaEncoder->SetBitrate(newBitRate * 1000);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoRemoteEncoder::Encode(
+ const webrtc::VideoFrame& inputImage,
+ const webrtc::CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<webrtc::FrameType>* frame_types) {
+ CSFLogDebug(LOGTAG, "%s, w = %d, h = %d", __FUNCTION__, inputImage.width(),
+ inputImage.height());
+ if (inputImage.width() == 0 || inputImage.height() == 0) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (!mJavaEncoder) {
+ JavaCallbacksSupport::Init();
+ mJavaCallbacks = java::CodecProxy::NativeCallbacks::New();
+
+ JavaCallbacksSupport::AttachNative(
+ mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(mCallback));
+
+ java::sdk::MediaFormat::LocalRef format;
+
+ nsresult res = java::sdk::MediaFormat::CreateVideoFormat(
+ nsCString(MEDIACODEC_VIDEO_MIME_VP8), inputImage.width(),
+ inputImage.height(), &format);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(LOGTAG, "%s, CreateVideoFormat failed err = %d", __FUNCTION__,
+ (int)res);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ res = format->SetInteger(nsCString("bitrate"), 300 * 1000);
+ res = format->SetInteger(nsCString("bitrate-mode"), 2);
+ res = format->SetInteger(nsCString("color-format"), 21);
+ res = format->SetInteger(nsCString("frame-rate"), 30);
+ res = format->SetInteger(nsCString("i-frame-interval"), 100);
+
+ mJavaEncoder =
+ java::CodecProxy::Create(true, format, nullptr, mJavaCallbacks, u""_ns);
+
+ if (mJavaEncoder == nullptr) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+
+ rtc::scoped_refptr<webrtc::I420BufferInterface> inputBuffer =
+ inputImage.video_frame_buffer()->GetI420();
+ size_t sizeY = inputImage.height() * inputBuffer->StrideY();
+ size_t sizeUV = ((inputImage.height() + 1) / 2) * inputBuffer->StrideU();
+ size_t size = sizeY + 2 * sizeUV;
+
+ if (mConvertBuf == nullptr) {
+ mConvertBuf = new uint8_t[size];
+ mConvertBufsize = size;
+ }
+
+ uint8_t* dstY = mConvertBuf;
+ uint16_t* dstUV = reinterpret_cast<uint16_t*>(dstY + sizeY);
+
+ bool converted = I420toNV12(dstY, dstUV, inputImage);
+ if (!converted) {
+ CSFLogError(LOGTAG,
+ "%s WebrtcMediaCodecVP8VideoEncoder::Encode() convert input "
+ "buffer to NV12 error.",
+ __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(mConvertBuf, size);
+
+ java::sdk::BufferInfo::LocalRef bufferInfo;
+ nsresult rv = java::sdk::BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(rv)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if ((*frame_types)[0] == webrtc::kVideoFrameKey) {
+ bufferInfo->Set(0, size, inputImage.render_time_ms() * PR_USEC_PER_MSEC,
+ java::sdk::MediaCodec::BUFFER_FLAG_SYNC_FRAME);
+ } else {
+ bufferInfo->Set(0, size, inputImage.render_time_ms() * PR_USEC_PER_MSEC, 0);
+ }
+
+ mJavaEncoder->Input(bytes, bufferInfo, nullptr);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoRemoteEncoder::RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) {
+ mCallback = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoRemoteEncoder::Release() {
+ CSFLogDebug(LOGTAG, "%s %p", __FUNCTION__, this);
+
+ if (mJavaEncoder) {
+ mJavaEncoder->Release();
+ mJavaEncoder = nullptr;
+ }
+
+ if (mJavaCallbacks) {
+ JavaCallbacksSupport::GetNative(mJavaCallbacks)->Cancel();
+ JavaCallbacksSupport::DisposeNative(mJavaCallbacks);
+ mJavaCallbacks = nullptr;
+ }
+
+ if (mConvertBuf) {
+ delete[] mConvertBuf;
+ mConvertBuf = nullptr;
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decoder.
+WebrtcMediaCodecVP8VideoDecoder::WebrtcMediaCodecVP8VideoDecoder()
+ : mCallback(nullptr),
+ mFrameWidth(0),
+ mFrameHeight(0),
+ mMediaCodecDecoder(nullptr) {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetInputBuffers() {
+ mInputBuffers = mMediaCodecDecoder->GetInputBuffers();
+
+ if (!mInputBuffers) return false;
+
+ return true;
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetOutputBuffers() {
+ mOutputBuffers = mMediaCodecDecoder->GetOutputBuffers();
+
+ if (!mOutputBuffers) return false;
+
+ return true;
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::InitDecode(
+ const webrtc::VideoCodec* codecSettings, int32_t numberOfCores) {
+ if (!mMediaCodecDecoder) {
+ mMediaCodecDecoder = new WebrtcAndroidMediaCodec();
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
// Decodes one encoded VP8 frame. The underlying Android MediaCodec is
// configured and started lazily on the first decodable frame (we need a key
// frame to learn the stream dimensions); subsequent frames are fed straight
// to it. Returns a WEBRTC_VIDEO_CODEC_* status.
int32_t WebrtcMediaCodecVP8VideoDecoder::Decode(
    const webrtc::EncodedImage& inputImage, bool missingFrames,
    const webrtc::RTPFragmentationHeader* fragmentation,
    const webrtc::CodecSpecificInfo* codecSpecificInfo, int64_t renderTimeMs) {
  CSFLogDebug(LOGTAG, "%s, renderTimeMs = %" PRId64, __FUNCTION__,
              renderTimeMs);

  if (inputImage._length == 0 || !inputImage._buffer) {
    CSFLogDebug(LOGTAG, "%s, input Image invalid. length = %" PRIdPTR,
                __FUNCTION__, inputImage._length);
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // Key frames carry the encoded dimensions; remember them for Configure().
  if (inputImage._frameType == webrtc::kVideoFrameKey) {
    CSFLogDebug(LOGTAG, "%s, inputImage is Golden frame", __FUNCTION__);
    mFrameWidth = inputImage._encodedWidth;
    mFrameHeight = inputImage._encodedHeight;
  }

  if (!mMediaCodecDecoder->isStarted) {
    // Can't configure until a key frame has told us the dimensions.
    if (mFrameWidth == 0 || mFrameHeight == 0) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    mMediaCodecDecoder->SetDecoderCallback(mCallback);
    nsresult res = mMediaCodecDecoder->Configure(
        mFrameWidth, mFrameHeight, nullptr, 0, MEDIACODEC_VIDEO_MIME_VP8,
        false /* decoder */);

    if (res != NS_OK) {
      CSFLogDebug(LOGTAG, "%s, decoder configure return err = %d", __FUNCTION__,
                  (int)res);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    res = mMediaCodecDecoder->Start();

    if (NS_FAILED(res)) {
      mMediaCodecDecoder->isStarted = false;
      CSFLogDebug(LOGTAG, "%s start decoder. err = %d", __FUNCTION__, (int)res);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    // Cache the Java-side buffer arrays now that the codec is running.
    bool retBool = ResetInputBuffers();
    if (!retBool) {
      CSFLogDebug(LOGTAG, "%s ResetInputBuffers failed.", __FUNCTION__);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    retBool = ResetOutputBuffers();
    if (!retBool) {
      CSFLogDebug(LOGTAG, "%s ResetOutputBuffers failed.", __FUNCTION__);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    mMediaCodecDecoder->isStarted = true;
  }
#ifdef WEBRTC_MEDIACODEC_DEBUG
  // NOTE(review): `time` is sampled immediately before the log statement, so
  // the reported duration is always ~0 ms. To time the codec start, capture
  // `time` before the Configure()/Start() block above.
  uint32_t time = PR_IntervalNow();
  CSFLogDebug(LOGTAG, "%s start decoder took %u ms", __FUNCTION__,
              PR_IntervalToMilliseconds(PR_IntervalNow() - time));
#endif

  // FeedMediaCodecInput returns -1 when no input buffer is available yet;
  // keep retrying until the frame is accepted or a hard error occurs.
  bool feedFrame = true;
  int32_t ret = WEBRTC_VIDEO_CODEC_ERROR;

  while (feedFrame) {
    ret = mMediaCodecDecoder->FeedMediaCodecInput(inputImage, renderTimeMs);
    feedFrame = (ret == -1);
  }

  CSFLogDebug(LOGTAG, "%s end, ret = %d", __FUNCTION__, ret);

  return ret;
}
+
+void WebrtcMediaCodecVP8VideoDecoder::DecodeFrame(EncodedFrame* frame) {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback* callback) {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+ mCallback = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::Release() {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+ delete mMediaCodecDecoder;
+ mMediaCodecDecoder = nullptr;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+WebrtcMediaCodecVP8VideoDecoder::~WebrtcMediaCodecVP8VideoDecoder() {
+ CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
+
+ Release();
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.h
new file mode 100644
index 0000000000..ffe9388848
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaCodecVP8VideoCodec.h
@@ -0,0 +1,153 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcMediaCodecVP8VideoCodec_h__
+#define WebrtcMediaCodecVP8VideoCodec_h__
+
+#include <jni.h>
+
+#include "mozilla/java/CodecProxyWrappers.h"
+#include "mozilla/Mutex.h"
+#include "nsThreadUtils.h"
+
+#include "MediaConduitInterface.h"
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+
+namespace mozilla {
+
// Bookkeeping for a frame's geometry and timestamps as it moves through the
// codec.
struct EncodedFrame {
  uint32_t width_;
  uint32_t height_;
  uint32_t timeStamp_;
  uint64_t decode_timestamp_;
};

class WebrtcAndroidMediaCodec;

// VP8 encoder backed by an Android MediaCodec instance, driven through the
// WebrtcAndroidMediaCodec helper. Holds the Java-side input/output buffer
// arrays as raw JNI references.
class WebrtcMediaCodecVP8VideoEncoder : public WebrtcVideoEncoder {
 public:
  WebrtcMediaCodecVP8VideoEncoder();

  virtual ~WebrtcMediaCodecVP8VideoEncoder() override;

  // Implement VideoEncoder interface.
  virtual uint64_t PluginID() const override { return 0; }

  virtual int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
                             int32_t numberOfCores,
                             size_t maxPayloadSize) override;

  virtual int32_t Encode(
      const webrtc::VideoFrame& inputImage,
      const webrtc::CodecSpecificInfo* codecSpecificInfo,
      const std::vector<webrtc::FrameType>* frame_types) override;

  virtual int32_t RegisterEncodeCompleteCallback(
      webrtc::EncodedImageCallback* callback) override;

  virtual int32_t Release() override;

  virtual int32_t SetChannelParameters(uint32_t packetLoss,
                                       int64_t rtt) override;

  virtual int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override;

 private:
  int32_t VerifyAndAllocate(const uint32_t minimumSize);
  bool ResetInputBuffers();
  bool ResetOutputBuffers();

  size_t mMaxPayloadSize;
  // Reusable output image; its _buffer is owned here and freed in Release().
  webrtc::EncodedImage mEncodedImage;
  // Borrowed; WebRTC owns the callback and controls its lifetime.
  webrtc::EncodedImageCallback* mCallback;
  uint32_t mFrameWidth;
  uint32_t mFrameHeight;

  // Owned codec wrapper; deleted in Release().
  WebrtcAndroidMediaCodec* mMediaCodecEncoder;

  // Cached Java-side buffer arrays (JNI references).
  jobjectArray mInputBuffers;
  jobjectArray mOutputBuffers;
};
+
+class WebrtcMediaCodecVP8VideoRemoteEncoder : public WebrtcVideoEncoder {
+ public:
+ WebrtcMediaCodecVP8VideoRemoteEncoder()
+ : mConvertBuf(nullptr), mConvertBufsize(0), mCallback(nullptr) {}
+
+ ~WebrtcMediaCodecVP8VideoRemoteEncoder() override;
+
+ // Implement VideoEncoder interface.
+ uint64_t PluginID() const override { return 0; }
+
+ int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
+ int32_t numberOfCores, size_t maxPayloadSize) override;
+
+ int32_t Encode(const webrtc::VideoFrame& inputImage,
+ const webrtc::CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<webrtc::FrameType>* frame_types) override;
+
+ int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) override;
+
+ int32_t Release() override;
+
+ int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override {
+ return 0;
+ }
+
+ int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override;
+
+ private:
+ java::CodecProxy::GlobalRef mJavaEncoder;
+ java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks;
+ uint8_t* mConvertBuf;
+ uint8_t mConvertBufsize;
+ webrtc::EncodedImageCallback* mCallback;
+};
+
// VP8 decoder backed by an Android MediaCodec instance, driven through the
// WebrtcAndroidMediaCodec helper. The codec is configured lazily on the
// first key frame, which supplies the stream dimensions.
class WebrtcMediaCodecVP8VideoDecoder : public WebrtcVideoDecoder {
 public:
  WebrtcMediaCodecVP8VideoDecoder();

  virtual ~WebrtcMediaCodecVP8VideoDecoder() override;

  // Implement VideoDecoder interface.
  virtual uint64_t PluginID() const override { return 0; }

  virtual int32_t InitDecode(const webrtc::VideoCodec* codecSettings,
                             int32_t numberOfCores) override;

  virtual int32_t Decode(
      const webrtc::EncodedImage& inputImage, bool missingFrames,
      const webrtc::RTPFragmentationHeader* fragmentation,
      const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
      int64_t renderTimeMs = -1) override;

  virtual int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override;

  virtual int32_t Release() override;

 private:
  void DecodeFrame(EncodedFrame* frame);
  void RunCallback();
  bool ResetInputBuffers();
  bool ResetOutputBuffers();

  // Borrowed; WebRTC owns the callback and controls its lifetime.
  webrtc::DecodedImageCallback* mCallback;

  // Dimensions learned from the most recent key frame; 0 until one arrives.
  uint32_t mFrameWidth;
  uint32_t mFrameHeight;

  // Owned codec wrapper; deleted in Release().
  WebrtcAndroidMediaCodec* mMediaCodecDecoder;
  // Cached Java-side buffer arrays (JNI references).
  jobjectArray mInputBuffers;
  jobjectArray mOutputBuffers;
};
+
+} // namespace mozilla
+
+#endif // WebrtcMediaCodecVP8VideoCodec_h__
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp
new file mode 100644
index 0000000000..85b626d24e
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp
@@ -0,0 +1,192 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcMediaDataDecoderCodec.h"
+
+#include "ImageContainer.h"
+#include "Layers.h"
+#include "MediaDataDecoderProxy.h"
+#include "PDMFactory.h"
+#include "VideoUtils.h"
+#include "mozilla/layers/ImageBridgeChild.h"
+#include "mozilla/media/MediaUtils.h"
+#include "webrtc/rtc_base/keep_ref_until_done.h"
+
+namespace mozilla {
+
// Builds the decoder shell: thread pool, task queue, image container and PDM
// factory. The actual platform decoder is created later, in InitDecode(),
// once the codec settings are known.
WebrtcMediaDataDecoder::WebrtcMediaDataDecoder(nsACString& aCodecMimeType)
    : mThreadPool(GetMediaThreadPool(MediaThreadType::SUPERVISOR)),
      mTaskQueue(new TaskQueue(do_AddRef(mThreadPool),
                               "WebrtcMediaDataDecoder::mTaskQueue")),
      mImageContainer(layers::LayerManager::CreateImageContainer(
          layers::ImageContainer::ASYNCHRONOUS)),
      mFactory(new PDMFactory()),
      mTrackType(TrackInfo::kUndefinedTrack),
      mCodecType(aCodecMimeType) {}

WebrtcMediaDataDecoder::~WebrtcMediaDataDecoder() {}
+
+int32_t WebrtcMediaDataDecoder::InitDecode(
+ const webrtc::VideoCodec* aCodecSettings, int32_t aNumberOfCores) {
+ nsCString codec;
+ mTrackType = TrackInfo::kVideoTrack;
+ mInfo = VideoInfo(aCodecSettings->width, aCodecSettings->height);
+ mInfo.mMimeType = mCodecType;
+
+ return CreateDecoder();
+}
+
// Synchronously decodes one encoded frame through the platform decoder and
// forwards every resulting image to the registered callback. Returns a
// WEBRTC_VIDEO_CODEC_* status; on a non-cancellation failure the decoder is
// re-created so the next call starts from a clean state.
int32_t WebrtcMediaDataDecoder::Decode(
    const webrtc::EncodedImage& aInputImage, bool aMissingFrames,
    const webrtc::RTPFragmentationHeader* aFragmentation,
    const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
    int64_t aRenderTimeMs) {
  if (!mCallback || !mDecoder) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  if (!aInputImage._buffer || !aInputImage._length) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // Always start with a complete key frame.
  if (mNeedKeyframe) {
    if (aInputImage._frameType != webrtc::FrameType::kVideoFrameKey)
      return WEBRTC_VIDEO_CODEC_ERROR;
    // We have a key frame - is it complete?
    if (aInputImage._completeFrame) {
      mNeedKeyframe = false;
    } else {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // MediaRawData copies the input; a null Data() means the copy failed (OOM).
  RefPtr<MediaRawData> compressedFrame =
      new MediaRawData(aInputImage._buffer, aInputImage._length);
  if (!compressedFrame->Data()) {
    return WEBRTC_VIDEO_CODEC_MEMORY;
  }

  compressedFrame->mTime =
      media::TimeUnit::FromMicroseconds(aInputImage._timeStamp);
  compressedFrame->mTimecode =
      media::TimeUnit::FromMicroseconds(aRenderTimeMs * 1000);
  compressedFrame->mKeyframe =
      aInputImage._frameType == webrtc::FrameType::kVideoFrameKey;
  {
    // Block this (webrtc) thread until the decode resolves or rejects,
    // stashing the outcome in mResults / mError.
    media::Await(
        do_AddRef(mThreadPool), mDecoder->Decode(compressedFrame),
        [&](const MediaDataDecoder::DecodedData& aResults) {
          mResults = aResults.Clone();
          mError = NS_OK;
        },
        [&](const MediaResult& aError) { mError = aError; });

    for (auto& frame : mResults) {
      MOZ_ASSERT(frame->mType == MediaData::Type::VIDEO_DATA);
      RefPtr<VideoData> video = frame->As<VideoData>();
      MOZ_ASSERT(video);
      if (!video->mImage) {
        // Nothing to display.
        continue;
      }
      // Wrap the layers::Image so webrtc can keep it alive by refcount.
      rtc::scoped_refptr<ImageBuffer> image(
          new rtc::RefCountedObject<ImageBuffer>(std::move(video->mImage)));

      webrtc::VideoFrame videoFrame(image, frame->mTime.ToMicroseconds(),
                                    frame->mDuration.ToMicroseconds() * 1000,
                                    aInputImage.rotation_);
      mCallback->Decoded(videoFrame);
    }
    mResults.Clear();
  }

  // A cancelled decode (e.g. during teardown) is not fatal; any other
  // failure rebuilds the decoder before reporting the error.
  if (NS_FAILED(mError) && mError != NS_ERROR_DOM_MEDIA_CANCELED) {
    CreateDecoder();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  return NS_SUCCEEDED(mError) ? WEBRTC_VIDEO_CODEC_OK
                              : WEBRTC_VIDEO_CODEC_ERROR;
}
+
int32_t WebrtcMediaDataDecoder::RegisterDecodeCompleteCallback(
    webrtc::DecodedImageCallback* aCallback) {
  mCallback = aCallback;
  return WEBRTC_VIDEO_CODEC_OK;
}

// Hands the current decoder off to be flushed and shut down asynchronously,
// then resets per-stream state so a future decoder starts from a key frame.
int32_t WebrtcMediaDataDecoder::Release() {
  if (mDecoder) {
    // The lambda's captured RefPtr keeps the decoder alive until Shutdown()
    // has run on the task queue.
    RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
    decoder->Flush()->Then(mTaskQueue, __func__,
                           [decoder]() { decoder->Shutdown(); });
  }

  mNeedKeyframe = true;
  mError = NS_OK;

  return WEBRTC_VIDEO_CODEC_OK;
}

bool WebrtcMediaDataDecoder::OnTaskQueue() const {
  return mTaskQueue->IsOnCurrentThread();
}
+
// Creates (or re-creates) the platform decoder for mInfo and wraps it in a
// MediaDataDecoderProxy bound to a dedicated task queue. Blocks the calling
// thread until creation and Init() have completed. Returns a
// WEBRTC_VIDEO_CODEC_* status.
int32_t WebrtcMediaDataDecoder::CreateDecoder() {
  RefPtr<layers::KnowsCompositor> knowsCompositor =
      layers::ImageBridgeChild::GetSingleton();

  // Dispose of any previous decoder first (asynchronously, via Release()).
  if (mDecoder) {
    Release();
  }

  RefPtr<TaskQueue> tq =
      new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
                    "webrtc decode TaskQueue");
  RefPtr<MediaDataDecoder> decoder;

  // Create the decoder on its task queue and block here until the promise
  // chain settles; on rejection `decoder` stays null.
  media::Await(do_AddRef(mThreadPool), InvokeAsync(tq, __func__, [&] {
                 RefPtr<GenericPromise> p =
                     mFactory
                         ->CreateDecoder(
                             {mInfo,
                              CreateDecoderParams::OptionSet(
                                  CreateDecoderParams::Option::LowLatency,
                                  CreateDecoderParams::Option::FullH264Parsing,
                                  CreateDecoderParams::Option::
                                      ErrorIfNoInitializationData),
                              mTrackType, mImageContainer, knowsCompositor})
                         ->Then(
                             tq, __func__,
                             [&](RefPtr<MediaDataDecoder>&& aDecoder) {
                               decoder = std::move(aDecoder);
                               return GenericPromise::CreateAndResolve(
                                   true, __func__);
                             },
                             [](const MediaResult& aResult) {
                               return GenericPromise::CreateAndReject(
                                   NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
                             });
                 return p;
               }));

  if (!decoder) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // We need to wrap our decoder in a MediaDataDecoderProxy so that it always
  // run on an nsISerialEventTarget (which the webrtc code doesn't do)
  mDecoder = new MediaDataDecoderProxy(decoder.forget(), tq.forget());

  // Initialize synchronously; the outcome lands in mError.
  media::Await(
      do_AddRef(mThreadPool), mDecoder->Init(),
      [&](TrackInfo::TrackType) { mError = NS_OK; },
      [&](const MediaResult& aError) { mError = aError; });

  return NS_SUCCEEDED(mError) ? WEBRTC_VIDEO_CODEC_OK
                              : WEBRTC_VIDEO_CODEC_ERROR;
}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.h
new file mode 100644
index 0000000000..8d609fe9d6
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.h
@@ -0,0 +1,74 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcMediaDataDecoderCodec_h__
+#define WebrtcMediaDataDecoderCodec_h__
+
+#include "MediaConduitInterface.h"
+#include "MediaInfo.h"
+#include "MediaResult.h"
+#include "PlatformDecoderModule.h"
+#include "VideoConduit.h"
+#include "WebrtcImageBuffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+class DecodedImageCallback;
+}
+namespace mozilla {
+namespace layers {
+class Image;
+class ImageContainer;
+} // namespace layers
+
+class PDMFactory;
+class SharedThreadPool;
+class TaskQueue;
+
// Adapts Gecko's platform decoder pipeline (PDMFactory / MediaDataDecoder)
// to WebRTC's VideoDecoder interface. Decoding is performed synchronously
// from WebRTC's point of view by blocking on the async decoder promises.
class WebrtcMediaDataDecoder : public WebrtcVideoDecoder {
 public:
  explicit WebrtcMediaDataDecoder(nsACString& aCodecMimeType);

  // Implement VideoDecoder interface.
  uint64_t PluginID() const override { return 0; }

  int32_t InitDecode(const webrtc::VideoCodec* codecSettings,
                     int32_t numberOfCores) override;

  int32_t Decode(const webrtc::EncodedImage& inputImage, bool missingFrames,
                 const webrtc::RTPFragmentationHeader* fragmentation,
                 const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
                 int64_t renderTimeMs = -1) override;

  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override;

  int32_t Release() override;

 private:
  ~WebrtcMediaDataDecoder();
  void QueueFrame(MediaRawData* aFrame);
  bool OnTaskQueue() const;
  // Builds (or rebuilds) mDecoder for the current mInfo; blocks until done.
  int32_t CreateDecoder();

  const RefPtr<SharedThreadPool> mThreadPool;
  const RefPtr<TaskQueue> mTaskQueue;
  const RefPtr<layers::ImageContainer> mImageContainer;
  const RefPtr<PDMFactory> mFactory;
  RefPtr<MediaDataDecoder> mDecoder;
  // Borrowed; WebRTC owns the callback and controls its lifetime.
  webrtc::DecodedImageCallback* mCallback = nullptr;
  VideoInfo mInfo;
  TrackInfo::TrackType mTrackType;
  // True until a complete key frame has been decoded; see Decode().
  bool mNeedKeyframe = true;
  MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDecodeRequest;

  // Outcome of the most recent blocking decode/init, set from lambdas.
  MediaResult mError = NS_OK;
  MediaDataDecoder::DecodedData mResults;
  const nsCString mCodecType;
};
+
+} // namespace mozilla
+
+#endif // WebrtcMediaDataDecoderCodec_h__
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
new file mode 100644
index 0000000000..3f168dc5b7
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
@@ -0,0 +1,338 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcMediaDataEncoderCodec.h"
+
+#include "AnnexB.h"
+#include "ImageContainer.h"
+#include "MediaData.h"
+#include "PEMFactory.h"
+#include "VideoUtils.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Span.h"
+#include "mozilla/gfx/Point.h"
+#include "mozilla/media/MediaUtils.h"
+#include "webrtc/media/base/mediaconstants.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace mozilla {
+
// Log module shared with the platform encoder code; defined elsewhere.
extern LazyLogModule sPEMLog;

// Debug/verbose logging helpers. Each line is tagged with the encoder
// instance pointer (`this`) so interleaved streams can be told apart.
#undef LOG
#define LOG(msg, ...)                 \
  MOZ_LOG(sPEMLog, LogLevel::Debug,   \
          ("WebrtcMediaDataEncoder=%p, " msg, this, ##__VA_ARGS__))

#undef LOG_V
#define LOG_V(msg, ...)               \
  MOZ_LOG(sPEMLog, LogLevel::Verbose, \
          ("WebrtcMediaDataEncoder=%p, " msg, this, ##__VA_ARGS__))

using namespace media;
using namespace layers;
using MimeTypeResult = Maybe<nsLiteralCString>;
+
+static const char* GetModeName(webrtc::H264PacketizationMode aMode) {
+ if (aMode == webrtc::H264PacketizationMode::SingleNalUnit) {
+ return "SingleNalUnit";
+ }
+ if (aMode == webrtc::H264PacketizationMode::NonInterleaved) {
+ return "NonInterleaved";
+ }
+ return "Unknown";
+}
+
+static MimeTypeResult ConvertWebrtcCodecTypeToMimeType(
+ const webrtc::VideoCodecType& aType) {
+ switch (aType) {
+ case webrtc::VideoCodecType::kVideoCodecVP8:
+ return Some("video/vp8"_ns);
+ case webrtc::VideoCodecType::kVideoCodecVP9:
+ return Some("video/vp9"_ns);
+ case webrtc::VideoCodecType::kVideoCodecH264:
+ return Some("video/avc"_ns);
+ default:
+ break;
+ }
+ return Nothing();
+}
+
+static MediaDataEncoder::H264Specific::ProfileLevel ConvertProfileLevel(
+ webrtc::H264::Profile aProfile) {
+ if (aProfile == webrtc::H264::kProfileConstrainedBaseline ||
+ aProfile == webrtc::H264::kProfileBaseline) {
+ return MediaDataEncoder::H264Specific::ProfileLevel::BaselineAutoLevel;
+ }
+ return MediaDataEncoder::H264Specific::ProfileLevel::MainAutoLevel;
+}
+
+static MediaDataEncoder::H264Specific GetCodecSpecific(
+ const webrtc::VideoCodec* aCodecSettings) {
+ return MediaDataEncoder::H264Specific(
+ aCodecSettings->H264().keyFrameInterval,
+ ConvertProfileLevel(aCodecSettings->H264().profile));
+}
+
// Builds the encoder shell (task queue, encoder-module factory, callback
// mutex, bitrate adjuster). The platform encoder itself is created in
// InitEncode() once codec settings are available.
WebrtcMediaDataEncoder::WebrtcMediaDataEncoder()
    : mTaskQueue(new TaskQueue(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
                               "WebrtcMediaDataEncoder::mTaskQueue")),
      mFactory(new PEMFactory()),
      mCallbackMutex("WebrtcMediaDataEncoderCodec encoded callback mutex"),
      // Use the same lower and upper bound as h264_video_toolbox_encoder which
      // is an encoder from webrtc's upstream codebase.
      // 0.5 is set as a mininum to prevent overcompensating for large temporary
      // overshoots. We don't want to degrade video quality too badly.
      // 0.95 is set to prevent oscillations. When a lower bitrate is set on the
      // encoder than previously set, its output seems to have a brief period of
      // drastically reduced bitrate, so we want to avoid that. In steady state
      // conditions, 0.95 seems to give us better overall bitrate over long
      // periods of time.
      mBitrateAdjuster(webrtc::Clock::GetRealTimeClock(), 0.5, 0.95) {}
+
// Creates and initializes the platform encoder for the given codec settings
// (H264 only). Blocks until the encoder's Init() promise settles; the new
// encoder is only installed on success.
int32_t WebrtcMediaDataEncoder::InitEncode(
    const webrtc::VideoCodec* aCodecSettings, int32_t aNumberOfCores,
    size_t aMaxPayloadSize) {
  MOZ_ASSERT(
      aCodecSettings->codecType == webrtc::VideoCodecType::kVideoCodecH264,
      "Only support h264 for now.");

  if (mEncoder) {
    // Clean existing encoder.
    Shutdown();
  }

  RefPtr<MediaDataEncoder> encoder = CreateEncoder(aCodecSettings);
  if (!encoder) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // mInfo and mMode were populated by SetupConfig() inside CreateEncoder().
  LOG("Init encode, mimeType %s, mode %s", mInfo.mMimeType.get(),
      GetModeName(mMode));
  if (!media::Await(do_AddRef(mTaskQueue), encoder->Init()).IsResolve()) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  mEncoder = std::move(encoder);
  return WEBRTC_VIDEO_CODEC_OK;
}
+
// Captures codec settings (MIME type, packetization mode, frame rate and
// bitrate bounds) into member state. Returns false for unsupported codec
// types.
bool WebrtcMediaDataEncoder::SetupConfig(
    const webrtc::VideoCodec* aCodecSettings) {
  MimeTypeResult mimeType =
      ConvertWebrtcCodecTypeToMimeType(aCodecSettings->codecType);
  if (!mimeType) {
    LOG("Get incorrect mime type");
    return false;
  }
  mInfo = VideoInfo(aCodecSettings->width, aCodecSettings->height);
  mInfo.mMimeType = mimeType.extract();
  mMode = aCodecSettings->H264().packetizationMode == 1
              ? webrtc::H264PacketizationMode::NonInterleaved
              : webrtc::H264PacketizationMode::SingleNalUnit;
  mMaxFrameRate = aCodecSettings->maxFramerate;
  // Those bitrates in codec setting are all kbps, so we have to covert them to
  // bps.
  mMaxBitrateBps = aCodecSettings->maxBitrate * 1000;
  mMinBitrateBps = aCodecSettings->minBitrate * 1000;
  mBitrateAdjuster.SetTargetBitrateBps(aCodecSettings->startBitrate * 1000);
  return true;
}

// Requests a realtime platform encoder from the PEM factory using the
// settings captured by SetupConfig(). Returns null if configuration failed
// or no suitable encoder exists.
already_AddRefed<MediaDataEncoder> WebrtcMediaDataEncoder::CreateEncoder(
    const webrtc::VideoCodec* aCodecSettings) {
  if (!SetupConfig(aCodecSettings)) {
    return nullptr;
  }
  LOG("Request platform encoder for %s, bitRate=%u bps, frameRate=%u",
      mInfo.mMimeType.get(), mBitrateAdjuster.GetTargetBitrateBps(),
      aCodecSettings->maxFramerate);
  return mFactory->CreateEncoder(CreateEncoderParams(
      mInfo, MediaDataEncoder::Usage::Realtime,
      MakeRefPtr<TaskQueue>(
          GetMediaThreadPool(MediaThreadType::PLATFORM_ENCODER),
          "WebrtcMediaDataEncoder::mEncoder"),
      MediaDataEncoder::PixelFormat::YUV420P, aCodecSettings->maxFramerate,
      mBitrateAdjuster.GetTargetBitrateBps(),
      GetCodecSpecific(aCodecSettings)));
}
+
// Stores the sink for encoded output. Guarded by mCallbackMutex because the
// encode-completion lambda reads mCallback on the task queue.
int32_t WebrtcMediaDataEncoder::RegisterEncodeCompleteCallback(
    webrtc::EncodedImageCallback* aCallback) {
  MutexAutoLock lock(mCallbackMutex);
  mCallback = aCallback;
  return WEBRTC_VIDEO_CODEC_OK;
}

// Drops the callback, clears any sticky error, and synchronously shuts down
// the platform encoder.
int32_t WebrtcMediaDataEncoder::Shutdown() {
  LOG("Release encoder");
  {
    MutexAutoLock lock(mCallbackMutex);
    mCallback = nullptr;
    mError = NS_OK;
  }
  if (mEncoder) {
    // Block until the encoder has fully shut down on the task queue.
    media::Await(do_AddRef(mTaskQueue), mEncoder->Shutdown());
    mEncoder = nullptr;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
+
// Copies an I420 webrtc::VideoFrame into a Gecko VideoData backed by a
// freshly allocated PlanarYCbCrImage, carrying over timestamps and the
// key-frame flag.
static already_AddRefed<VideoData> CreateVideoDataFromWebrtcVideoFrame(
    const webrtc::VideoFrame& aFrame, const bool aIsKeyFrame,
    const TimeUnit aDuration) {
  MOZ_ASSERT(aFrame.video_frame_buffer()->type() ==
                 webrtc::VideoFrameBuffer::Type::kI420,
             "Only support YUV420!");
  rtc::scoped_refptr<webrtc::I420BufferInterface> i420 =
      aFrame.video_frame_buffer()->GetI420();

  // Point the YCbCr descriptor at the webrtc planes; CopyData() below makes
  // the actual copy, so the const_casts never lead to mutation.
  PlanarYCbCrData yCbCrData;
  yCbCrData.mYChannel = const_cast<uint8_t*>(i420->DataY());
  yCbCrData.mYSize = gfx::IntSize(i420->width(), i420->height());
  yCbCrData.mYStride = i420->StrideY();
  yCbCrData.mCbChannel = const_cast<uint8_t*>(i420->DataU());
  yCbCrData.mCrChannel = const_cast<uint8_t*>(i420->DataV());
  yCbCrData.mCbCrSize = gfx::IntSize(i420->ChromaWidth(), i420->ChromaHeight());
  MOZ_ASSERT(i420->StrideU() == i420->StrideV());
  yCbCrData.mCbCrStride = i420->StrideU();
  yCbCrData.mPicSize = gfx::IntSize(i420->width(), i420->height());

  RefPtr<PlanarYCbCrImage> image =
      new RecyclingPlanarYCbCrImage(new BufferRecycleBin());
  image->CopyData(yCbCrData);

  return VideoData::CreateFromImage(
      image->GetSize(), 0, TimeUnit::FromMicroseconds(aFrame.timestamp_us()),
      aDuration, image, aIsKeyFrame,
      TimeUnit::FromMicroseconds(aFrame.timestamp()));
}
+
// Submits one frame to the platform encoder. Encoding is asynchronous: the
// success lambda (run on mTaskQueue) converts each encoded frame back into a
// webrtc::EncodedImage, builds the NAL fragmentation header, and delivers it
// through mCallback. Failures are latched into mError and surfaced by the
// next Encode()/SetRates() call.
int32_t WebrtcMediaDataEncoder::Encode(
    const webrtc::VideoFrame& aInputFrame,
    const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
    const std::vector<webrtc::FrameType>* aFrameTypes) {
  if (!aInputFrame.size() || !aInputFrame.video_frame_buffer() ||
      aFrameTypes->empty()) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  if (!mEncoder) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  {
    MutexAutoLock lock(mCallbackMutex);
    if (!mCallback) {
      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    // Report an error latched by a previous asynchronous encode.
    if (NS_FAILED(mError)) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  LOG_V("Encode frame, type %d size %u", (*aFrameTypes)[0], aInputFrame.size());
  MOZ_ASSERT(aInputFrame.video_frame_buffer()->type() ==
             webrtc::VideoFrameBuffer::Type::kI420);
  RefPtr<VideoData> data = CreateVideoDataFromWebrtcVideoFrame(
      aInputFrame, (*aFrameTypes)[0] == webrtc::FrameType::kVideoFrameKey,
      TimeUnit::FromSeconds(1.0 / mMaxFrameRate));
  const gfx::IntSize displaySize = data->mDisplay;

  mEncoder->Encode(data)->Then(
      mTaskQueue, __func__,
      // `self` keeps this object alive for the duration of the callback.
      [self = RefPtr<WebrtcMediaDataEncoder>(this), this,
       displaySize](MediaDataEncoder::EncodedData aFrames) {
        LOG_V("Received encoded frame, nums %zu width %d height %d",
              aFrames.Length(), displaySize.width, displaySize.height);
        for (auto& frame : aFrames) {
          MutexAutoLock lock(mCallbackMutex);
          if (!mCallback) {
            break;
          }
          // EncodedImage does not take ownership; `frame` outlives this use.
          webrtc::EncodedImage image(const_cast<uint8_t*>(frame->Data()),
                                     frame->Size(), frame->Size());
          image._encodedWidth = displaySize.width;
          image._encodedHeight = displaySize.height;
          // Convert media time to the 90 kHz RTP video clock.
          CheckedInt64 time =
              TimeUnitToFrames(frame->mTime, cricket::kVideoCodecClockrate);
          if (!time.isValid()) {
            self->mError = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                       "invalid timestamp from encoder");
            break;
          }
          image._timeStamp = time.value();
          image._frameType = frame->mKeyframe
                                 ? webrtc::FrameType::kVideoFrameKey
                                 : webrtc::FrameType::kVideoFrameDelta;
          image._completeFrame = true;

          // Describe each NAL unit so the RTP packetizer can fragment the
          // bitstream.
          nsTArray<AnnexB::NALEntry> entries;
          AnnexB::ParseNALEntries(
              Span<const uint8_t>(frame->Data(), frame->Size()), entries);
          const size_t nalNums = entries.Length();
          LOG_V("NAL nums %zu", nalNums);
          MOZ_ASSERT(nalNums, "Should have at least 1 NALU in encoded frame!");

          webrtc::RTPFragmentationHeader header;
          header.VerifyAndAllocateFragmentationHeader(nalNums);
          for (size_t idx = 0; idx < nalNums; idx++) {
            header.fragmentationOffset[idx] = entries[idx].mOffset;
            header.fragmentationLength[idx] = entries[idx].mSize;
            LOG_V("NAL offset %" PRId64 " size %" PRId64, entries[idx].mOffset,
                  entries[idx].mSize);
          }

          webrtc::CodecSpecificInfo codecSpecific;
          codecSpecific.codecType = webrtc::kVideoCodecH264;
          codecSpecific.codecSpecific.H264.packetization_mode = mMode;

          LOG_V("Send encoded image");
          self->mCallback->OnEncodedImage(image, &codecSpecific, &header);
          // Feed the actual output size back into the bitrate adjuster.
          self->mBitrateAdjuster.Update(image._size);
        }
      },
      [self = RefPtr<WebrtcMediaDataEncoder>(this)](const MediaResult aError) {
        self->mError = aError;
      });
  return WEBRTC_VIDEO_CODEC_OK;
}
+
// Channel feedback (packet loss / RTT) is not used by this encoder.
int32_t WebrtcMediaDataEncoder::SetChannelParameters(uint32_t aPacketLoss,
                                                     int64_t aRtt) {
  return WEBRTC_VIDEO_CODEC_OK;
}

// Applies a new target bitrate. Input is kbps (WebRTC convention); it is
// validated against the min/max captured in SetupConfig() and then pushed
// synchronously to the platform encoder.
int32_t WebrtcMediaDataEncoder::SetRates(uint32_t aNewBitrateKbps,
                                         uint32_t aFrameRate) {
  if (!aFrameRate) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  const uint32_t newBitrateBps = aNewBitrateKbps * 1000;
  if (newBitrateBps < mMinBitrateBps || newBitrateBps > mMaxBitrateBps) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // We have already been in this bitrate.
  if (mBitrateAdjuster.GetAdjustedBitrateBps() == newBitrateBps) {
    return WEBRTC_VIDEO_CODEC_OK;
  }

  if (!mEncoder) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  {
    // Surface any error latched by an earlier asynchronous encode.
    MutexAutoLock lock(mCallbackMutex);
    if (NS_FAILED(mError)) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
  mBitrateAdjuster.SetTargetBitrateBps(newBitrateBps);
  LOG("Set bitrate %u bps, minBitrate %u bps, maxBitrate %u bps", newBitrateBps,
      mMinBitrateBps, mMaxBitrateBps);
  // Block until the encoder has accepted (or rejected) the new bitrate.
  auto rv =
      media::Await(do_AddRef(mTaskQueue), mEncoder->SetBitrate(newBitrateBps));
  return rv.IsResolve() ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_ERROR;
}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
new file mode 100644
index 0000000000..97c239cf9a
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
@@ -0,0 +1,71 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcMediaDataEncoderCodec_h__
+#define WebrtcMediaDataEncoderCodec_h__
+
+#include "MediaConduitInterface.h"
+#include "MediaInfo.h"
+#include "MediaResult.h"
+#include "PlatformEncoderModule.h"
+#include "WebrtcGmpVideoCodec.h"
+#include "common_video/include/bitrate_adjuster.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+
+namespace mozilla {
+
+class MediaData;
+class PEMFactory;
+class SharedThreadPool;
+class TaskQueue;
+
+// Video encoder that bridges the WebRTC encoder interface
+// (RefCountedWebrtcVideoEncoder) to Gecko's platform MediaDataEncoder,
+// created through PEMFactory and driven on a dedicated TaskQueue.
+class WebrtcMediaDataEncoder : public RefCountedWebrtcVideoEncoder {
+ public:
+  WebrtcMediaDataEncoder();
+
+  // Not backed by a GMP plugin, so there is no real plugin id.
+  uint64_t PluginID() const override { return 0; }
+
+  int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
+                     int32_t aNumberOfCores, size_t aMaxPayloadSize) override;
+
+  int32_t RegisterEncodeCompleteCallback(
+      webrtc::EncodedImageCallback* aCallback) override;
+
+  int32_t Shutdown() override;
+
+  int32_t Encode(const webrtc::VideoFrame& aFrame,
+                 const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                 const std::vector<webrtc::FrameType>* aFrameTypes) override;
+
+  int32_t SetChannelParameters(uint32_t aPacketLoss, int64_t aRtt) override;
+
+  int32_t SetRates(uint32_t aNewBitrateKbps, uint32_t aFrameRate) override;
+
+ private:
+  virtual ~WebrtcMediaDataEncoder() = default;
+
+  // Helpers used by InitEncode (defined in the .cpp): translate the WebRTC
+  // codec settings into mInfo/bitrate limits, build the platform encoder,
+  // and initialize it.
+  bool SetupConfig(const webrtc::VideoCodec* aCodecSettings);
+  already_AddRefed<MediaDataEncoder> CreateEncoder(
+      const webrtc::VideoCodec* aCodecSettings);
+  bool InitEncoder();
+
+  const RefPtr<TaskQueue> mTaskQueue;
+  const RefPtr<PEMFactory> mFactory;
+  RefPtr<MediaDataEncoder> mEncoder;
+
+  Mutex mCallbackMutex;  // Protects mCallback and mError.
+  webrtc::EncodedImageCallback* mCallback = nullptr;
+  MediaResult mError = NS_OK;
+
+  // Configuration derived from the webrtc::VideoCodec settings at init time.
+  VideoInfo mInfo;
+  // H.264 packetization mode forwarded in CodecSpecificInfo on each frame.
+  webrtc::H264PacketizationMode mMode;
+  // Tracks the target vs. actually-produced bitrate; see SetRates/Encode.
+  webrtc::BitrateAdjuster mBitrateAdjuster;
+  uint32_t mMaxFrameRate;
+  uint32_t mMinBitrateBps;
+  uint32_t mMaxBitrateBps;
+};
+
+} // namespace mozilla
+
+#endif // WebrtcMediaDataEncoderCodec_h__
diff --git a/dom/media/webrtc/libwebrtcglue/moz.build b/dom/media/webrtc/libwebrtcglue/moz.build
new file mode 100644
index 0000000000..9ba8c5d40e
--- /dev/null
+++ b/dom/media/webrtc/libwebrtcglue/moz.build
@@ -0,0 +1,37 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+# Build definition for the libwebrtc glue layer: conduits and codec
+# adapters between Gecko media and the bundled libwebrtc.
+include("/dom/media/webrtc/third_party_build/webrtc.mozbuild")
+
+LOCAL_INCLUDES += [
+    "!/ipc/ipdl/_ipdlheaders",
+    "/dom/media/gmp",  # for GMPLog.h
+    "/dom/media/webrtc",
+    "/ipc/chromium/src",
+    "/media/libyuv/libyuv/include",
+    "/media/webrtc",
+    "/third_party/libwebrtc",
+    "/third_party/libwebrtc/webrtc",
+]
+
+UNIFIED_SOURCES += [
+    "AudioConduit.cpp",
+    "GmpVideoCodec.cpp",
+    "MediaDataCodec.cpp",
+    "RtpSourceObserver.cpp",
+    "VideoConduit.cpp",
+    "VideoStreamFactory.cpp",
+    "WebrtcGmpVideoCodec.cpp",
+    "WebrtcMediaDataDecoderCodec.cpp",
+    "WebrtcMediaDataEncoderCodec.cpp",
+]
+
+# The MediaCodec-backed video codecs wrap the Android platform codec API
+# and only build for Android targets.
+if CONFIG["OS_TARGET"] == "Android":
+    UNIFIED_SOURCES += [
+        "MediaCodecVideoCodec.cpp",
+        "WebrtcMediaCodecVP8VideoCodec.cpp",
+    ]
+
+FINAL_LIBRARY = "xul"