path: root/dom/media/systemservices
Diffstat (limited to '')
-rw-r--r--  dom/media/systemservices/CamerasChild.cpp  535
-rw-r--r--  dom/media/systemservices/CamerasChild.h  262
-rw-r--r--  dom/media/systemservices/CamerasParent.cpp  1222
-rw-r--r--  dom/media/systemservices/CamerasParent.h  183
-rw-r--r--  dom/media/systemservices/CamerasTypes.cpp  26
-rw-r--r--  dom/media/systemservices/CamerasTypes.h  38
-rw-r--r--  dom/media/systemservices/MediaChild.cpp  95
-rw-r--r--  dom/media/systemservices/MediaChild.h  60
-rw-r--r--  dom/media/systemservices/MediaParent.cpp  536
-rw-r--r--  dom/media/systemservices/MediaParent.h  91
-rw-r--r--  dom/media/systemservices/MediaSystemResourceClient.cpp  67
-rw-r--r--  dom/media/systemservices/MediaSystemResourceClient.h  91
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManager.cpp  358
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManager.h  81
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManagerChild.cpp  42
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManagerChild.h  65
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManagerParent.cpp  75
-rw-r--r--  dom/media/systemservices/MediaSystemResourceManagerParent.h  59
-rw-r--r--  dom/media/systemservices/MediaSystemResourceMessageUtils.h  24
-rw-r--r--  dom/media/systemservices/MediaSystemResourceService.cpp  222
-rw-r--r--  dom/media/systemservices/MediaSystemResourceService.h  83
-rw-r--r--  dom/media/systemservices/MediaSystemResourceTypes.h  23
-rw-r--r--  dom/media/systemservices/MediaTaskUtils.h  52
-rw-r--r--  dom/media/systemservices/MediaUtils.cpp  119
-rw-r--r--  dom/media/systemservices/MediaUtils.h  326
-rw-r--r--  dom/media/systemservices/OSXRunLoopSingleton.cpp  41
-rw-r--r--  dom/media/systemservices/OSXRunLoopSingleton.h  24
-rw-r--r--  dom/media/systemservices/PCameras.ipdl  93
-rw-r--r--  dom/media/systemservices/PMedia.ipdl  55
-rw-r--r--  dom/media/systemservices/PMediaSystemResourceManager.ipdl  38
-rw-r--r--  dom/media/systemservices/ShmemPool.cpp  99
-rw-r--r--  dom/media/systemservices/ShmemPool.h  181
-rw-r--r--  dom/media/systemservices/VideoEngine.cpp  260
-rw-r--r--  dom/media/systemservices/VideoEngine.h  118
-rw-r--r--  dom/media/systemservices/VideoFrameUtils.cpp  90
-rw-r--r--  dom/media/systemservices/VideoFrameUtils.h  48
-rw-r--r--  dom/media/systemservices/android_video_capture/device_info_android.cc  316
-rw-r--r--  dom/media/systemservices/android_video_capture/device_info_android.h  73
-rw-r--r--  dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java  25
-rw-r--r--  dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java  216
-rw-r--r--  dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java  121
-rw-r--r--  dom/media/systemservices/android_video_capture/video_capture_android.cc  270
-rw-r--r--  dom/media/systemservices/android_video_capture/video_capture_android.h  47
-rw-r--r--  dom/media/systemservices/moz.build  114
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info.h  56
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info.mm  170
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info_avfoundation.h  71
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info_avfoundation.mm  213
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info_objc.h  38
-rw-r--r--  dom/media/systemservices/objc_video_capture/device_info_objc.mm  166
-rw-r--r--  dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.h  39
-rw-r--r--  dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.mm  355
-rw-r--r--  dom/media/systemservices/objc_video_capture/video_capture.h  41
-rw-r--r--  dom/media/systemservices/objc_video_capture/video_capture.mm  102
-rw-r--r--  dom/media/systemservices/objc_video_capture/video_capture_avfoundation.h  76
-rw-r--r--  dom/media/systemservices/objc_video_capture/video_capture_avfoundation.mm  286
-rw-r--r--  dom/media/systemservices/video_engine/browser_capture_impl.h  78
-rw-r--r--  dom/media/systemservices/video_engine/desktop_capture_impl.cc  760
-rw-r--r--  dom/media/systemservices/video_engine/desktop_capture_impl.h  262
-rw-r--r--  dom/media/systemservices/video_engine/desktop_device_info.cc  347
-rw-r--r--  dom/media/systemservices/video_engine/desktop_device_info.h  120
-rw-r--r--  dom/media/systemservices/video_engine/platform_uithread.cc  198
-rw-r--r--  dom/media/systemservices/video_engine/platform_uithread.h  96
-rw-r--r--  dom/media/systemservices/video_engine/tab_capturer.cc  336
-rw-r--r--  dom/media/systemservices/video_engine/tab_capturer.h  73
65 files changed, 10847 insertions, 0 deletions
diff --git a/dom/media/systemservices/CamerasChild.cpp b/dom/media/systemservices/CamerasChild.cpp
new file mode 100644
index 0000000000..3b9ce64e46
--- /dev/null
+++ b/dom/media/systemservices/CamerasChild.cpp
@@ -0,0 +1,535 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CamerasChild.h"
+
+#undef FF
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ipc/BackgroundChild.h"
+#include "mozilla/ipc/PBackgroundChild.h"
+#include "mozilla/Logging.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/Unused.h"
+#include "MediaUtils.h"
+#include "nsThreadUtils.h"
+
+#undef LOG
+#undef LOG_ENABLED
+mozilla::LazyLogModule gCamerasChildLog("CamerasChild");
+#define LOG(args) MOZ_LOG(gCamerasChildLog, mozilla::LogLevel::Debug, args)
+#define LOG_ENABLED() MOZ_LOG_TEST(gCamerasChildLog, mozilla::LogLevel::Debug)
+
+namespace mozilla::camera {
+
+CamerasSingleton::CamerasSingleton()
+ : mCamerasMutex("CamerasSingleton::mCamerasMutex"),
+ mCameras(nullptr),
+ mCamerasChildThread(nullptr) {
+ LOG(("CamerasSingleton: %p", this));
+}
+
+CamerasSingleton::~CamerasSingleton() { LOG(("~CamerasSingleton: %p", this)); }
+
+class InitializeIPCThread : public Runnable {
+ public:
+ InitializeIPCThread()
+ : Runnable("camera::InitializeIPCThread"), mCamerasChild(nullptr) {}
+
+ NS_IMETHOD Run() override {
+ // Try to get the PBackground handle
+ ipc::PBackgroundChild* existingBackgroundChild =
+ ipc::BackgroundChild::GetForCurrentThread();
+ // If it's not spun up yet, block until it is, and retry
+ if (!existingBackgroundChild) {
+ LOG(("No existingBackgroundChild"));
+ existingBackgroundChild =
+ ipc::BackgroundChild::GetOrCreateForCurrentThread();
+ LOG(("BackgroundChild: %p", existingBackgroundChild));
+ if (!existingBackgroundChild) {
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ // Create CamerasChild
+ // We will be returning the resulting pointer (synchronously) to our caller.
+ mCamerasChild = static_cast<mozilla::camera::CamerasChild*>(
+ existingBackgroundChild->SendPCamerasConstructor());
+
+ return NS_OK;
+ }
+
+ CamerasChild* GetCamerasChild() { return mCamerasChild; }
+
+ private:
+ CamerasChild* mCamerasChild;
+};
+
+CamerasChild* GetCamerasChild() {
+ CamerasSingleton::Mutex().AssertCurrentThreadOwns();
+ if (!CamerasSingleton::Child()) {
+ MOZ_ASSERT(!NS_IsMainThread(), "Should not be on the main Thread");
+ MOZ_ASSERT(!CamerasSingleton::Thread());
+ LOG(("No sCameras, setting up IPC Thread"));
+ nsresult rv = NS_NewNamedThread("Cameras IPC",
+ getter_AddRefs(CamerasSingleton::Thread()));
+ if (NS_FAILED(rv)) {
+ LOG(("Error launching IPC Thread"));
+ return nullptr;
+ }
+
+ // At this point we are in the MediaManager thread, and the thread we are
+ // dispatching to is the specific Cameras IPC thread that was just made
+ // above, so now we will fire off a runnable to run
+ // BackgroundChild::GetOrCreateForCurrentThread there, while we
+ // block in this thread.
+ // We block until the following happens in the Cameras IPC thread:
+ // 1) Creation of PBackground finishes
+ // 2) Creation of PCameras finishes by sending a message to the parent
+ RefPtr<InitializeIPCThread> runnable = new InitializeIPCThread();
+ RefPtr<SyncRunnable> sr = new SyncRunnable(runnable);
+ sr->DispatchToThread(CamerasSingleton::Thread());
+ CamerasSingleton::Child() = runnable->GetCamerasChild();
+ }
+ if (!CamerasSingleton::Child()) {
+ LOG(("Failed to set up CamerasChild, are we in shutdown?"));
+ }
+ return CamerasSingleton::Child();
+}
+
+CamerasChild* GetCamerasChildIfExists() {
+ OffTheBooksMutexAutoLock lock(CamerasSingleton::Mutex());
+ return CamerasSingleton::Child();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyFailure(void) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = false;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplySuccess(void) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyNumberOfCapabilities(
+ const int& capabilityCount) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ mReplyInteger = capabilityCount;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+// Helper function to dispatch calls to the IPC Thread and
+// CamerasChild object. Takes the needed locks and dispatches.
+// Takes a "failed" value and a reference to the output variable
+// as parameters, will return the right one depending on whether
+// dispatching succeeded.
+//
+// The LockAndDispatch object in the caller must stay alive until after any
+// reply data has been retrieved (mReplyInteger, etc.) so that the data is
+// protected by the ReplyMonitor/RequestMutex.
+template <class T = int>
+class LockAndDispatch {
+ public:
+ LockAndDispatch(CamerasChild* aCamerasChild, const char* aRequestingFunc,
+ nsIRunnable* aRunnable, T aFailureValue,
+ const T& aSuccessValue)
+ : mCamerasChild(aCamerasChild),
+ mRequestingFunc(aRequestingFunc),
+ mRunnable(aRunnable),
+ mReplyLock(aCamerasChild->mReplyMonitor),
+ mRequestLock(aCamerasChild->mRequestMutex),
+ mSuccess(true),
+ mFailureValue(aFailureValue),
+ mSuccessValue(aSuccessValue) {
+ Dispatch();
+ }
+
+ T ReturnValue() const {
+ if (mSuccess) {
+ return mSuccessValue;
+ } else {
+ return mFailureValue;
+ }
+ }
+
+ const bool& Success() const { return mSuccess; }
+
+ private:
+ void Dispatch() {
+ if (!mCamerasChild->DispatchToParent(mRunnable, mReplyLock)) {
+ LOG(("Cameras dispatch for IPC failed in %s", mRequestingFunc));
+ mSuccess = false;
+ }
+ }
+
+ CamerasChild* mCamerasChild;
+ const char* mRequestingFunc;
+ nsIRunnable* mRunnable;
+ // Prevent concurrent use of the reply variables by holding
+ // the mReplyMonitor. Note that this is unlocked while waiting for
+  // the reply to be filled in, necessitating the additional mRequestLock/Mutex.
+ MonitorAutoLock mReplyLock;
+ MutexAutoLock mRequestLock;
+ bool mSuccess;
+ const T mFailureValue;
+ const T& mSuccessValue;
+};
+
+bool CamerasChild::DispatchToParent(nsIRunnable* aRunnable,
+ MonitorAutoLock& aMonitor) {
+ CamerasSingleton::Mutex().AssertCurrentThreadOwns();
+ mReplyMonitor.AssertCurrentThreadOwns();
+ CamerasSingleton::Thread()->Dispatch(aRunnable, NS_DISPATCH_NORMAL);
+ // Guard against spurious wakeups.
+ mReceivedReply = false;
+ // Wait for a reply
+ do {
+ // If the parent has been shut down, then we won't receive a reply.
+ if (!mIPCIsAlive) {
+ return false;
+ }
+ aMonitor.Wait();
+ } while (!mReceivedReply);
+ return mReplySuccess;
+}
+
+int CamerasChild::NumberOfCapabilities(CaptureEngine aCapEngine,
+ const char* deviceUniqueIdUTF8) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ LOG(("NumberOfCapabilities for %s", deviceUniqueIdUTF8));
+ nsCString unique_id(deviceUniqueIdUTF8);
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, nsCString>(
+ "camera::PCamerasChild::SendNumberOfCapabilities", this,
+ &CamerasChild::SendNumberOfCapabilities, aCapEngine, unique_id);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, 0, mReplyInteger);
+ LOG(("Capture capability count: %d", dispatcher.ReturnValue()));
+ return dispatcher.ReturnValue();
+}
+
+int CamerasChild::NumberOfCaptureDevices(CaptureEngine aCapEngine) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable = mozilla::NewRunnableMethod<CaptureEngine>(
+ "camera::PCamerasChild::SendNumberOfCaptureDevices", this,
+ &CamerasChild::SendNumberOfCaptureDevices, aCapEngine);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, 0, mReplyInteger);
+ LOG(("Capture Devices: %d", dispatcher.ReturnValue()));
+ return dispatcher.ReturnValue();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyNumberOfCaptureDevices(
+ const int& aDeviceCount) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ mReplyInteger = aDeviceCount;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+int CamerasChild::EnsureInitialized(CaptureEngine aCapEngine) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable = mozilla::NewRunnableMethod<CaptureEngine>(
+ "camera::PCamerasChild::SendEnsureInitialized", this,
+ &CamerasChild::SendEnsureInitialized, aCapEngine);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, 0, mReplyInteger);
+ LOG(("Capture Devices: %d", dispatcher.ReturnValue()));
+ return dispatcher.ReturnValue();
+}
+
+int CamerasChild::GetCaptureCapability(
+ CaptureEngine aCapEngine, const char* unique_idUTF8,
+ const unsigned int capability_number,
+ webrtc::VideoCaptureCapability* capability) {
+ LOG(("GetCaptureCapability: %s %d", unique_idUTF8, capability_number));
+ MOZ_ASSERT(capability);
+ nsCString unique_id(unique_idUTF8);
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, nsCString, unsigned int>(
+ "camera::PCamerasChild::SendGetCaptureCapability", this,
+ &CamerasChild::SendGetCaptureCapability, aCapEngine, unique_id,
+ capability_number);
+ mReplyCapability = capability;
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ mReplyCapability = nullptr;
+ return dispatcher.ReturnValue();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyGetCaptureCapability(
+ const VideoCaptureCapability& ipcCapability) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ mReplyCapability->width = ipcCapability.width();
+ mReplyCapability->height = ipcCapability.height();
+ mReplyCapability->maxFPS = ipcCapability.maxFPS();
+ mReplyCapability->videoType =
+ static_cast<webrtc::VideoType>(ipcCapability.videoType());
+ mReplyCapability->interlaced = ipcCapability.interlaced();
+ monitor.Notify();
+ return IPC_OK();
+}
+
+int CamerasChild::GetCaptureDevice(
+ CaptureEngine aCapEngine, unsigned int list_number, char* device_nameUTF8,
+ const unsigned int device_nameUTF8Length, char* unique_idUTF8,
+ const unsigned int unique_idUTF8Length, bool* scary) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, unsigned int>(
+ "camera::PCamerasChild::SendGetCaptureDevice", this,
+ &CamerasChild::SendGetCaptureDevice, aCapEngine, list_number);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ if (dispatcher.Success()) {
+ base::strlcpy(device_nameUTF8, mReplyDeviceName.get(),
+ device_nameUTF8Length);
+ base::strlcpy(unique_idUTF8, mReplyDeviceID.get(), unique_idUTF8Length);
+ if (scary) {
+ *scary = mReplyScary;
+ }
+ LOG(("Got %s name %s id", device_nameUTF8, unique_idUTF8));
+ }
+ return dispatcher.ReturnValue();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyGetCaptureDevice(
+ const nsACString& device_name, const nsACString& device_id,
+ const bool& scary) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ mReplyDeviceName = device_name;
+ mReplyDeviceID = device_id;
+ mReplyScary = scary;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+int CamerasChild::AllocateCapture(CaptureEngine aCapEngine,
+ const char* unique_idUTF8,
+ uint64_t aWindowID) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCString unique_id(unique_idUTF8);
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, nsCString, const uint64_t&>(
+ "camera::PCamerasChild::SendAllocateCapture", this,
+ &CamerasChild::SendAllocateCapture, aCapEngine, unique_id, aWindowID);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mReplyInteger);
+ if (dispatcher.Success()) {
+ LOG(("Capture Device allocated: %d", mReplyInteger));
+ }
+ return dispatcher.ReturnValue();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvReplyAllocateCapture(
+ const int& aCaptureId) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mReceivedReply = true;
+ mReplySuccess = true;
+ mReplyInteger = aCaptureId;
+ monitor.Notify();
+ return IPC_OK();
+}
+
+int CamerasChild::ReleaseCapture(CaptureEngine aCapEngine,
+ const int capture_id) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, int>(
+ "camera::PCamerasChild::SendReleaseCapture", this,
+ &CamerasChild::SendReleaseCapture, aCapEngine, capture_id);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ return dispatcher.ReturnValue();
+}
+
+void CamerasChild::AddCallback(const CaptureEngine aCapEngine,
+ const int capture_id, FrameRelay* render) {
+ MutexAutoLock lock(mCallbackMutex);
+ CapturerElement ce;
+ ce.engine = aCapEngine;
+ ce.id = capture_id;
+ ce.callback = render;
+ mCallbacks.AppendElement(ce);
+}
+
+void CamerasChild::RemoveCallback(const CaptureEngine aCapEngine,
+ const int capture_id) {
+ MutexAutoLock lock(mCallbackMutex);
+ for (unsigned int i = 0; i < mCallbacks.Length(); i++) {
+ CapturerElement ce = mCallbacks[i];
+ if (ce.engine == aCapEngine && ce.id == capture_id) {
+ mCallbacks.RemoveElementAt(i);
+ break;
+ }
+ }
+}
+
+int CamerasChild::StartCapture(CaptureEngine aCapEngine, const int capture_id,
+ const webrtc::VideoCaptureCapability& webrtcCaps,
+ FrameRelay* cb) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ AddCallback(aCapEngine, capture_id, cb);
+ VideoCaptureCapability capCap(
+ webrtcCaps.width, webrtcCaps.height, webrtcCaps.maxFPS,
+ static_cast<int>(webrtcCaps.videoType), webrtcCaps.interlaced);
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, int, VideoCaptureCapability>(
+ "camera::PCamerasChild::SendStartCapture", this,
+ &CamerasChild::SendStartCapture, aCapEngine, capture_id, capCap);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ return dispatcher.ReturnValue();
+}
+
+int CamerasChild::FocusOnSelectedSource(CaptureEngine aCapEngine,
+ const int aCaptureId) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, int>(
+ "camera::PCamerasChild::SendFocusOnSelectedSource", this,
+ &CamerasChild::SendFocusOnSelectedSource, aCapEngine, aCaptureId);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ return dispatcher.ReturnValue();
+}
+
+int CamerasChild::StopCapture(CaptureEngine aCapEngine, const int capture_id) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ nsCOMPtr<nsIRunnable> runnable =
+ mozilla::NewRunnableMethod<CaptureEngine, int>(
+ "camera::PCamerasChild::SendStopCapture", this,
+ &CamerasChild::SendStopCapture, aCapEngine, capture_id);
+ LockAndDispatch<> dispatcher(this, __func__, runnable, -1, mZero);
+ if (dispatcher.Success()) {
+ RemoveCallback(aCapEngine, capture_id);
+ }
+ return dispatcher.ReturnValue();
+}
+
+class ShutdownRunnable : public Runnable {
+ public:
+ explicit ShutdownRunnable(already_AddRefed<Runnable>&& aReplyEvent)
+ : Runnable("camera::ShutdownRunnable"), mReplyEvent(aReplyEvent){};
+
+ NS_IMETHOD Run() override {
+ LOG(("Closing BackgroundChild"));
+ // This will also destroy the CamerasChild.
+ ipc::BackgroundChild::CloseForCurrentThread();
+
+ NS_DispatchToMainThread(mReplyEvent.forget());
+
+ return NS_OK;
+ }
+
+ private:
+ RefPtr<Runnable> mReplyEvent;
+};
+
+void Shutdown(void) {
+ // Called from both MediaEngineWebRTC::Shutdown() on the MediaManager thread
+ // and DeallocPCamerasChild() on the dedicated IPC thread.
+ OffTheBooksMutexAutoLock lock(CamerasSingleton::Mutex());
+
+ CamerasChild* child = CamerasSingleton::Child();
+ if (!child) {
+ // We don't want to cause everything to get fired up if we're
+ // really already shut down.
+ LOG(("Shutdown when already shut down"));
+ return;
+ }
+ if (CamerasSingleton::Thread()) {
+ LOG(("PBackground thread exists, dispatching close"));
+ // The IPC thread is shut down on the main thread after the
+ // BackgroundChild is closed.
+ RefPtr<ShutdownRunnable> runnable = new ShutdownRunnable(
+ NewRunnableMethod("nsIThread::Shutdown", CamerasSingleton::Thread(),
+ &nsIThread::Shutdown));
+ CamerasSingleton::Thread()->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
+ } else {
+ LOG(("Shutdown called without PBackground thread"));
+ }
+ LOG(("Erasing sCameras & thread refs (original thread)"));
+ CamerasSingleton::Child() = nullptr;
+ CamerasSingleton::Thread() = nullptr;
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvDeliverFrame(
+ const CaptureEngine& capEngine, const int& capId,
+ mozilla::ipc::Shmem&& shmem, const VideoFrameProperties& prop) {
+ MutexAutoLock lock(mCallbackMutex);
+ if (Callback(capEngine, capId)) {
+ unsigned char* image = shmem.get<unsigned char>();
+ Callback(capEngine, capId)->DeliverFrame(image, prop);
+ } else {
+ LOG(("DeliverFrame called with dead callback"));
+ }
+ SendReleaseFrame(std::move(shmem));
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasChild::RecvDeviceChange() {
+ mDeviceListChangeEvent.Notify();
+ return IPC_OK();
+}
+
+void CamerasChild::ActorDestroy(ActorDestroyReason aWhy) {
+ LOG(("ActorDestroy"));
+ MonitorAutoLock monitor(mReplyMonitor);
+ mIPCIsAlive = false;
+ // Hopefully prevent us from getting stuck
+ // on replies that'll never come.
+ monitor.NotifyAll();
+}
+
+CamerasChild::CamerasChild()
+ : mCallbackMutex("mozilla::cameras::CamerasChild::mCallbackMutex"),
+ mIPCIsAlive(true),
+ mRequestMutex("mozilla::cameras::CamerasChild::mRequestMutex"),
+ mReplyMonitor("mozilla::cameras::CamerasChild::mReplyMonitor"),
+ mReceivedReply(false),
+ mReplySuccess(false),
+ mZero(0),
+ mReplyInteger(0),
+ mReplyScary(false) {
+ LOG(("CamerasChild: %p", this));
+
+ MOZ_COUNT_CTOR(CamerasChild);
+}
+
+CamerasChild::~CamerasChild() {
+ LOG(("~CamerasChild: %p", this));
+ CamerasSingleton::AssertNoChild();
+ MOZ_COUNT_DTOR(CamerasChild);
+}
+
+FrameRelay* CamerasChild::Callback(CaptureEngine aCapEngine, int capture_id) {
+ for (unsigned int i = 0; i < mCallbacks.Length(); i++) {
+ CapturerElement ce = mCallbacks[i];
+ if (ce.engine == aCapEngine && ce.id == capture_id) {
+ return ce.callback;
+ }
+ }
+
+ return nullptr;
+}
+
+} // namespace mozilla::camera
diff --git a/dom/media/systemservices/CamerasChild.h b/dom/media/systemservices/CamerasChild.h
new file mode 100644
index 0000000000..18bdeec251
--- /dev/null
+++ b/dom/media/systemservices/CamerasChild.h
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_CamerasChild_h
+#define mozilla_CamerasChild_h
+
+#include <utility>
+
+#include "MediaEventSource.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/camera/PCamerasChild.h"
+#include "mozilla/camera/PCamerasParent.h"
+#include "nsCOMPtr.h"
+
+// conflicts with #include of scoped_ptr.h
+#undef FF
+#include "modules/video_capture/video_capture_defines.h"
+
+namespace mozilla {
+
+namespace ipc {
+class BackgroundChildImpl;
+} // namespace ipc
+
+namespace camera {
+
+class FrameRelay {
+ public:
+ virtual int DeliverFrame(
+ uint8_t* buffer, const mozilla::camera::VideoFrameProperties& props) = 0;
+};
+
+struct CapturerElement {
+ CaptureEngine engine;
+ int id;
+ FrameRelay* callback;
+};
+
+// Forward declaration so we can work with pointers to it.
+class CamerasChild;
+// Helper class in impl that we friend.
+template <class T>
+class LockAndDispatch;
+
+// We emulate the sync webrtc.org API with the help of the singleton
+// CamerasSingleton, which manages a pointer to an IPC object, a thread
+// on which IPC operations should run, and a mutex.
+// The static function GetCamerasChild() will use that singleton to set up,
+// if needed, both the thread and the associated IPC objects and return
+// a pointer to the IPC object. Users can then do IPC calls on that object
+// after dispatching them to the aforementioned thread.
+
+// Two threads are involved in this code:
+// - the MediaManager thread, which will call the (static, sync API) functions
+// through MediaEngineRemoteVideoSource
+// - the Cameras IPC thread, which will be doing our IPC to the parent process
+// via PBackground
+
+// Our main complication is that we emulate a sync API on top of async
+// messaging. We dispatch the messages to another thread to send them
+// asynchronously, and hold a Monitor to wait for the reply to come back.
+// The requirement for async messaging originates on the parent side:
+// it's not reasonable to block all PBackground IPC there while waiting for
+// something like device enumeration to complete.
+
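+// Roughly, the request/reply pattern implemented by LockAndDispatch and
+// DispatchToParent in CamerasChild.cpp looks like this (illustrative
+// pseudo-code, simplified from the real implementation):
+//
+//   // hold mRequestMutex (one outstanding request) and mReplyMonitor
+//   mReceivedReply = false;
+//   CamerasSingleton::Thread()->Dispatch(sendRunnable);  // Send* on the IPC thread
+//   while (!mReceivedReply && mIPCIsAlive) {
+//     mReplyMonitor.Wait();   // a Recv*() handler fills mReply* and notifies
+//   }
+//   return mReplySuccess ? successValue : failureValue;
+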
+class CamerasSingleton {
+ public:
+ static OffTheBooksMutex& Mutex() { return singleton().mCamerasMutex; }
+
+ static CamerasChild*& Child() {
+ Mutex().AssertCurrentThreadOwns();
+ return singleton().mCameras;
+ }
+
+ static nsCOMPtr<nsIThread>& Thread() {
+ Mutex().AssertCurrentThreadOwns();
+ return singleton().mCamerasChildThread;
+ }
+ // The mutex is not held because mCameras is known not to be modified
+ // concurrently when this is asserted.
+ static void AssertNoChild() { MOZ_ASSERT(!singleton().mCameras); }
+
+ private:
+ CamerasSingleton();
+ ~CamerasSingleton();
+
+ static CamerasSingleton& singleton() {
+ static CamerasSingleton camera;
+ return camera;
+ }
+
+ // Reinitializing CamerasChild will change the pointers below.
+ // We don't want this to happen in the middle of preparing IPC.
+ // We will be alive on destruction, so this needs to be off the books.
+ mozilla::OffTheBooksMutex mCamerasMutex;
+
+ // This is owned by the IPC code, and the same code controls the lifetime.
+ // It will set and clear this pointer as appropriate in setup/teardown.
+ // We'd normally make this a WeakPtr but unfortunately the IPC code already
+ // uses the WeakPtr mixin in a protected base class of CamerasChild, and in
+ // any case the object becomes unusable as soon as IPC is tearing down, which
+ // will be before actual destruction.
+ CamerasChild* mCameras;
+ nsCOMPtr<nsIThread> mCamerasChildThread;
+};
+
+// Get a pointer to a CamerasChild object we can use to do IPC with.
+// This does everything needed to set up, including starting the IPC
+// channel with PBackground, blocking until that's done, and starting the
+// thread to do IPC on. This will fail if we're in shutdown. On success
+// it will set up the CamerasSingleton.
+CamerasChild* GetCamerasChild();
+
+CamerasChild* GetCamerasChildIfExists();
+
+// Shut down the IPC channel and everything associated, like WebRTC.
+// This is a static call because the CamerasChild object may not even
+// be alive when we're called.
+void Shutdown(void);
+
+// Obtain the CamerasChild object (if possible, i.e. not shutting down),
+// and maintain a grip on the object for the duration of the call.
+template <class MEM_FUN, class... ARGS>
+int GetChildAndCall(MEM_FUN&& f, ARGS&&... args) {
+ OffTheBooksMutexAutoLock lock(CamerasSingleton::Mutex());
+ CamerasChild* child = GetCamerasChild();
+ if (child) {
+ return (child->*f)(std::forward<ARGS>(args)...);
+ } else {
+ return -1;
+ }
+}
+
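+// A minimal usage sketch (assuming a caller on the MediaManager thread, such
+// as MediaEngineRemoteVideoSource; exact call sites may differ):
+//
+//   int deviceCount =
+//       camera::GetChildAndCall(&CamerasChild::NumberOfCaptureDevices,
+//                               CameraEngine);
+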
+class CamerasChild final : public PCamerasChild {
+ friend class mozilla::ipc::BackgroundChildImpl;
+ template <class T>
+ friend class mozilla::camera::LockAndDispatch;
+
+ public:
+ // We are owned by the PBackground thread only. CamerasSingleton
+ // takes a non-owning reference.
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CamerasChild)
+
+  // IPC messages received on the PBackground thread.
+  // These are the actual callbacks with data.
+ mozilla::ipc::IPCResult RecvDeliverFrame(
+ const CaptureEngine&, const int&, mozilla::ipc::Shmem&&,
+ const VideoFrameProperties& prop) override;
+
+ mozilla::ipc::IPCResult RecvDeviceChange() override;
+
+ // these are response messages to our outgoing requests
+ mozilla::ipc::IPCResult RecvReplyNumberOfCaptureDevices(const int&) override;
+ mozilla::ipc::IPCResult RecvReplyNumberOfCapabilities(const int&) override;
+ mozilla::ipc::IPCResult RecvReplyAllocateCapture(const int&) override;
+ mozilla::ipc::IPCResult RecvReplyGetCaptureCapability(
+ const VideoCaptureCapability& capability) override;
+ mozilla::ipc::IPCResult RecvReplyGetCaptureDevice(
+ const nsACString& device_name, const nsACString& device_id,
+ const bool& scary) override;
+ mozilla::ipc::IPCResult RecvReplyFailure(void) override;
+ mozilla::ipc::IPCResult RecvReplySuccess(void) override;
+ void ActorDestroy(ActorDestroyReason aWhy) override;
+
+ // the webrtc.org ViECapture calls are mirrored here, but with access
+ // to a specific PCameras instance to communicate over. These also
+ // run on the MediaManager thread
+ int NumberOfCaptureDevices(CaptureEngine aCapEngine);
+ int NumberOfCapabilities(CaptureEngine aCapEngine,
+ const char* deviceUniqueIdUTF8);
+ int ReleaseCapture(CaptureEngine aCapEngine, const int capture_id);
+ int StartCapture(CaptureEngine aCapEngine, const int capture_id,
+ const webrtc::VideoCaptureCapability& capability,
+ FrameRelay* func);
+ int FocusOnSelectedSource(CaptureEngine aCapEngine, const int capture_id);
+ int StopCapture(CaptureEngine aCapEngine, const int capture_id);
+ // Returns a non-negative capture identifier or -1 on failure.
+ int AllocateCapture(CaptureEngine aCapEngine, const char* unique_idUTF8,
+ uint64_t aWindowID);
+ int GetCaptureCapability(CaptureEngine aCapEngine, const char* unique_idUTF8,
+ const unsigned int capability_number,
+ webrtc::VideoCaptureCapability* capability);
+ int GetCaptureDevice(CaptureEngine aCapEngine, unsigned int list_number,
+ char* device_nameUTF8,
+ const unsigned int device_nameUTF8Length,
+ char* unique_idUTF8,
+ const unsigned int unique_idUTF8Length,
+ bool* scary = nullptr);
+ int EnsureInitialized(CaptureEngine aCapEngine);
+
+ template <typename This>
+ int ConnectDeviceListChangeListener(MediaEventListener* aListener,
+ AbstractThread* aTarget, This* aThis,
+ void (This::*aMethod)()) {
+ // According to the spec, if the script sets
+ // navigator.mediaDevices.ondevicechange and the permission state is
+ // "always granted", the User Agent MUST fires a devicechange event when
+ // a new media input device is made available, even the script never
+ // call getusermedia or enumerateDevices.
+
+ // In order to detect the event, we need to init the camera engine.
+    // Currently EnsureInitialized(aCapEngine) is only called when one of the
+    // CamerasParent APIs, e.g. RecvNumberOfCaptureDevices(), is called.
+
+    // So here we set up the camera engine via EnsureInitialized(aCapEngine).
+
+ EnsureInitialized(CameraEngine);
+ *aListener = mDeviceListChangeEvent.Connect(aTarget, aThis, aMethod);
+ return IPC_OK();
+ }
+
+ FrameRelay* Callback(CaptureEngine aCapEngine, int capture_id);
+
+ private:
+ CamerasChild();
+ ~CamerasChild();
+  // Dispatch a Runnable to the PCamerasParent by executing it on the
+  // dedicated Cameras IPC/PBackground thread.
+ bool DispatchToParent(nsIRunnable* aRunnable, MonitorAutoLock& aMonitor);
+ void AddCallback(const CaptureEngine aCapEngine, const int capture_id,
+ FrameRelay* render);
+ void RemoveCallback(const CaptureEngine aCapEngine, const int capture_id);
+
+ nsTArray<CapturerElement> mCallbacks;
+ // Protects the callback arrays
+ Mutex mCallbackMutex MOZ_UNANNOTATED;
+
+ bool mIPCIsAlive;
+
+ // Hold to prevent multiple outstanding requests. We don't use
+  // request IDs, so we only support one at a time. We don't want to try
+  // to use the webrtc.org API from multiple threads simultaneously.
+ // The monitor below isn't sufficient for this, as it will drop
+ // the lock when Wait-ing for a response, allowing us to send a new
+ // request. The Notify on receiving the response will then unblock
+ // both waiters and one will be guaranteed to get the wrong result.
+ // Take this one before taking mReplyMonitor.
+ Mutex mRequestMutex MOZ_UNANNOTATED;
+ // Hold to wait for an async response to our calls *and* until the
+ // user of LockAndDispatch<> has read the data out. This is done by
+ // keeping the LockAndDispatch object alive.
+ Monitor mReplyMonitor MOZ_UNANNOTATED;
+ // Async response valid?
+ bool mReceivedReply;
+  // Async response data contents.
+ bool mReplySuccess;
+ const int mZero;
+ int mReplyInteger;
+ webrtc::VideoCaptureCapability* mReplyCapability = nullptr;
+ nsCString mReplyDeviceName;
+ nsCString mReplyDeviceID;
+ bool mReplyScary;
+ MediaEventProducer<void> mDeviceListChangeEvent;
+};
+
+} // namespace camera
+} // namespace mozilla
+
+#endif // mozilla_CamerasChild_h
diff --git a/dom/media/systemservices/CamerasParent.cpp b/dom/media/systemservices/CamerasParent.cpp
new file mode 100644
index 0000000000..91185f44e1
--- /dev/null
+++ b/dom/media/systemservices/CamerasParent.cpp
@@ -0,0 +1,1222 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CamerasParent.h"
+
+#include <atomic>
+#include "MediaEngineSource.h"
+#include "MediaUtils.h"
+#include "PerformanceRecorder.h"
+#include "VideoFrameUtils.h"
+
+#include "mozilla/AppShutdown.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/BasePrincipal.h"
+#include "mozilla/ProfilerMarkers.h"
+#include "mozilla/Unused.h"
+#include "mozilla/Services.h"
+#include "mozilla/Logging.h"
+#include "mozilla/ipc/BackgroundParent.h"
+#include "mozilla/ipc/PBackgroundParent.h"
+#include "mozilla/dom/CanonicalBrowsingContext.h"
+#include "mozilla/dom/WindowGlobalParent.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/StaticPrefs_permissions.h"
+#include "nsIPermissionManager.h"
+#include "nsThreadUtils.h"
+#include "nsNetUtil.h"
+
+#include "api/video/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+
+#if defined(_WIN32)
+# include <process.h>
+# define getpid() _getpid()
+#endif
+
+#undef LOG
+#undef LOG_VERBOSE
+#undef LOG_ENABLED
+mozilla::LazyLogModule gCamerasParentLog("CamerasParent");
+#define LOG(...) \
+ MOZ_LOG(gCamerasParentLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+#define LOG_VERBOSE(...) \
+ MOZ_LOG(gCamerasParentLog, mozilla::LogLevel::Verbose, (__VA_ARGS__))
+#define LOG_ENABLED() MOZ_LOG_TEST(gCamerasParentLog, mozilla::LogLevel::Debug)
+
+namespace mozilla {
+using media::MustGetShutdownBarrier;
+using media::NewRunnableFrom;
+namespace camera {
+
+std::map<uint32_t, const char*> sDeviceUniqueIDs;
+std::map<uint32_t, webrtc::VideoCaptureCapability> sAllRequestedCapabilities;
+
+uint32_t ResolutionFeasibilityDistance(int32_t candidate, int32_t requested) {
+ // The purpose of this function is to find a smallest resolution
+ // which is larger than all requested capabilities.
+ // Then we can use down-scaling to fulfill each request.
+
+ MOZ_DIAGNOSTIC_ASSERT(candidate >= 0, "Candidate unexpectedly negative");
+ MOZ_DIAGNOSTIC_ASSERT(requested >= 0, "Requested unexpectedly negative");
+
+ if (candidate == 0) {
+ // Treat width|height capability of 0 as "can do any".
+ // This allows for orthogonal capabilities that are not in discrete steps.
+ return 0;
+ }
+
+ uint32_t distance =
+ std::abs(candidate - requested) * 1000 / std::max(candidate, requested);
+ if (candidate >= requested) {
+ // This is a good case, the candidate covers the requested resolution.
+ return distance;
+ }
+
+ // This is a bad case, the candidate is lower than the requested resolution.
+ // This is penalized with an added weight of 10000.
+ return 10000 + distance;
+}
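+
+// For example, with a requested width of 1280: a candidate of 1920 scores
+// |1920 - 1280| * 1000 / 1920 = 333, while a candidate of 640 scores
+// 10000 + |640 - 1280| * 1000 / 1280 = 10500, so the candidate that can be
+// down-scaled (1920) is strongly preferred (lower distance is better).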
+
+uint32_t FeasibilityDistance(int32_t candidate, int32_t requested) {
+ MOZ_DIAGNOSTIC_ASSERT(candidate >= 0, "Candidate unexpectedly negative");
+ MOZ_DIAGNOSTIC_ASSERT(requested >= 0, "Requested unexpectedly negative");
+
+ if (candidate == 0) {
+ // Treat maxFPS capability of 0 as "can do any".
+ // This allows for orthogonal capabilities that are not in discrete steps.
+ return 0;
+ }
+
+ return std::abs(candidate - requested) * 1000 /
+ std::max(candidate, requested);
+}
+
+StaticRefPtr<VideoEngine> CamerasParent::sEngines[CaptureEngine::MaxEngine];
+int32_t CamerasParent::sNumOfOpenCamerasParentEngines = 0;
+int32_t CamerasParent::sNumOfCamerasParents = 0;
+base::Thread* CamerasParent::sVideoCaptureThread = nullptr;
+Monitor* CamerasParent::sThreadMonitor = nullptr;
+StaticMutex CamerasParent::sMutex;
+
+// Three threads are involved in this code:
+// - the main thread for some setups, and occasionally for video capture setup
+// calls that don't work correctly elsewhere.
+// - the IPC thread on which PBackground is running and which receives and
+// sends messages
+// - a thread which will execute the actual (possibly slow) camera access
+// called "VideoCapture". On Windows this is a thread with an event loop
+// suitable for UI access.
+
+// InputObserver is owned by CamerasParent, and it has a ref to CamerasParent
+void InputObserver::OnDeviceChange() {
+ LOG("%s", __PRETTY_FUNCTION__);
+ MOZ_ASSERT(mParent);
+
+ RefPtr<InputObserver> self(this);
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self]() {
+ if (self->mParent->IsShuttingDown()) {
+ LOG("OnDeviceChanged failure: parent shutting down.");
+ return NS_ERROR_FAILURE;
+ }
+ Unused << self->mParent->SendDeviceChange();
+ return NS_OK;
+ });
+
+ nsIEventTarget* target = mParent->GetBackgroundEventTarget();
+ MOZ_ASSERT(target != nullptr);
+ target->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+};
+
+class DeliverFrameRunnable : public mozilla::Runnable {
+ public:
+ DeliverFrameRunnable(CamerasParent* aParent, CaptureEngine aEngine,
+ uint32_t aStreamId, const TrackingId& aTrackingId,
+ const webrtc::VideoFrame& aFrame,
+ const VideoFrameProperties& aProperties)
+ : Runnable("camera::DeliverFrameRunnable"),
+ mParent(aParent),
+ mCapEngine(aEngine),
+ mStreamId(aStreamId),
+ mTrackingId(aTrackingId),
+ mProperties(aProperties),
+ mResult(0) {
+ // No ShmemBuffer (of the right size) was available, so make an
+ // extra buffer here. We have no idea when we are going to run and
+ // it will be potentially long after the webrtc frame callback has
+ // returned, so the copy needs to be no later than here.
+ // We will need to copy this back into a Shmem later on so we prefer
+ // using ShmemBuffers to avoid the extra copy.
+ PerformanceRecorder<CopyVideoStage> rec(
+ "CamerasParent::VideoFrameToAltBuffer"_ns, aTrackingId, aFrame.width(),
+ aFrame.height());
+ mAlternateBuffer.reset(new unsigned char[aProperties.bufferSize()]);
+ VideoFrameUtils::CopyVideoFrameBuffers(mAlternateBuffer.get(),
+ aProperties.bufferSize(), aFrame);
+ rec.Record();
+ }
+
+ DeliverFrameRunnable(CamerasParent* aParent, CaptureEngine aEngine,
+ uint32_t aStreamId, const TrackingId& aTrackingId,
+ ShmemBuffer aBuffer, VideoFrameProperties& aProperties)
+ : Runnable("camera::DeliverFrameRunnable"),
+ mParent(aParent),
+ mCapEngine(aEngine),
+ mStreamId(aStreamId),
+ mTrackingId(aTrackingId),
+ mBuffer(std::move(aBuffer)),
+ mProperties(aProperties),
+ mResult(0){};
+
+ NS_IMETHOD Run() override {
+ // runs on BackgroundEventTarget
+ MOZ_ASSERT(GetCurrentSerialEventTarget() ==
+ mParent->mPBackgroundEventTarget);
+ if (mParent->IsShuttingDown()) {
+ // Communication channel is being torn down
+ mResult = 0;
+ return NS_OK;
+ }
+ if (!mParent->DeliverFrameOverIPC(mCapEngine, mStreamId, mTrackingId,
+ std::move(mBuffer),
+ mAlternateBuffer.get(), mProperties)) {
+ mResult = -1;
+ } else {
+ mResult = 0;
+ }
+ return NS_OK;
+ }
+
+ int GetResult() { return mResult; }
+
+ private:
+ const RefPtr<CamerasParent> mParent;
+ const CaptureEngine mCapEngine;
+ const uint32_t mStreamId;
+ const TrackingId mTrackingId;
+ ShmemBuffer mBuffer;
+ UniquePtr<unsigned char[]> mAlternateBuffer;
+ const VideoFrameProperties mProperties;
+ int mResult;
+};
+
+NS_IMPL_ISUPPORTS(CamerasParent, nsIAsyncShutdownBlocker)
+
+nsresult CamerasParent::DispatchToVideoCaptureThread(RefPtr<Runnable> event) {
+ // Don't try to dispatch if we're already on the right thread.
+ // There's a potential deadlock because the sThreadMonitor is likely
+ // to be taken already.
+ MonitorAutoLock lock(*sThreadMonitor);
+ if (!sVideoCaptureThread) {
+ LOG("Can't dispatch to video capture thread: thread not present");
+ return NS_ERROR_FAILURE;
+ }
+ MOZ_ASSERT(sVideoCaptureThread->thread_id() != PlatformThread::CurrentId());
+
+ sVideoCaptureThread->message_loop()->PostTask(event.forget());
+ return NS_OK;
+}
+
+void CamerasParent::StopVideoCapture() {
+ LOG("%s", __PRETTY_FUNCTION__);
+ // Called when the actor is destroyed.
+ ipc::AssertIsOnBackgroundThread();
+ // Shut down the WebRTC stack (on the capture thread)
+ RefPtr<CamerasParent> self(this);
+ DebugOnly<nsresult> rv =
+ DispatchToVideoCaptureThread(NewRunnableFrom([self]() {
+ MonitorAutoLock lock(*(self->sThreadMonitor));
+ self->CloseEngines();
+ // After closing the WebRTC stack, clean up the
+ // VideoCapture thread.
+ base::Thread* thread = nullptr;
+ if (sNumOfOpenCamerasParentEngines == 0 && self->sVideoCaptureThread) {
+ thread = self->sVideoCaptureThread;
+ self->sVideoCaptureThread = nullptr;
+ }
+ nsresult rv = NS_DispatchToMainThread(NewRunnableFrom([self, thread]() {
+ if (thread) {
+ thread->Stop();
+ delete thread;
+ }
+ // May fail if already removed after RecvPCamerasConstructor().
+ (void)MustGetShutdownBarrier()->RemoveBlocker(self);
+ return NS_OK;
+ }));
+ MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv),
+ "dispatch for video thread shutdown");
+ return rv;
+ }));
+#ifdef DEBUG
+ // It's ok for the dispatch to fail if the cleanup it has to do
+ // has been done already.
+ MOZ_ASSERT(NS_SUCCEEDED(rv) || !mWebRTCAlive);
+#endif
+}
+
+int CamerasParent::DeliverFrameOverIPC(CaptureEngine capEng, uint32_t aStreamId,
+ const TrackingId& aTrackingId,
+ ShmemBuffer buffer,
+ unsigned char* altbuffer,
+ const VideoFrameProperties& aProps) {
+ // No ShmemBuffers were available, so construct one now of the right size
+ // and copy into it. That is an extra copy, but we expect this to be
+ // the exceptional case, because we just assured the next call *will* have a
+ // buffer of the right size.
+ if (altbuffer != nullptr) {
+ // Get a shared memory buffer from the pool, at least size big
+ ShmemBuffer shMemBuff = mShmemPool.Get(this, aProps.bufferSize());
+
+ if (!shMemBuff.Valid()) {
+ LOG("No usable Video shmem in DeliverFrame (out of buffers?)");
+ // We can skip this frame if we run out of buffers, it's not a real error.
+ return 0;
+ }
+
+ PerformanceRecorder<CopyVideoStage> rec(
+ "CamerasParent::AltBufferToShmem"_ns, aTrackingId, aProps.width(),
+ aProps.height());
+ // get() and Size() check for proper alignment of the segment
+ memcpy(shMemBuff.GetBytes(), altbuffer, aProps.bufferSize());
+ rec.Record();
+
+ if (!SendDeliverFrame(capEng, aStreamId, std::move(shMemBuff.Get()),
+ aProps)) {
+ return -1;
+ }
+ } else {
+ MOZ_ASSERT(buffer.Valid());
+ // ShmemBuffer was available, we're all good. A single copy happened
+ // in the original webrtc callback.
+ if (!SendDeliverFrame(capEng, aStreamId, std::move(buffer.Get()), aProps)) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+ShmemBuffer CamerasParent::GetBuffer(size_t aSize) {
+ return mShmemPool.GetIfAvailable(aSize);
+}
+
+void CallbackHelper::OnFrame(const webrtc::VideoFrame& aVideoFrame) {
+ LOG_VERBOSE("%s", __PRETTY_FUNCTION__);
+ if (profiler_thread_is_being_profiled_for_markers()) {
+ PROFILER_MARKER_UNTYPED(
+ nsPrintfCString("CaptureVideoFrame %dx%d %s %s", aVideoFrame.width(),
+ aVideoFrame.height(),
+ webrtc::VideoFrameBufferTypeToString(
+ aVideoFrame.video_frame_buffer()->type()),
+ mTrackingId.ToString().get()),
+ MEDIA_RT);
+ }
+ RefPtr<DeliverFrameRunnable> runnable = nullptr;
+ // Get frame properties
+ camera::VideoFrameProperties properties;
+ VideoFrameUtils::InitFrameBufferProperties(aVideoFrame, properties);
+ // Get a shared memory buffer to copy the frame data into
+ ShmemBuffer shMemBuffer = mParent->GetBuffer(properties.bufferSize());
+ if (!shMemBuffer.Valid()) {
+ // Either we ran out of buffers or they're not the right size yet
+ LOG("Correctly sized Video shmem not available in DeliverFrame");
+ // We will do the copy into a(n extra) temporary buffer inside
+ // the DeliverFrameRunnable constructor.
+ } else {
+ // Shared memory buffers of the right size are available, do the copy here.
+ PerformanceRecorder<CopyVideoStage> rec(
+ "CamerasParent::VideoFrameToShmem"_ns, mTrackingId, aVideoFrame.width(),
+ aVideoFrame.height());
+ VideoFrameUtils::CopyVideoFrameBuffers(
+ shMemBuffer.GetBytes(), properties.bufferSize(), aVideoFrame);
+ rec.Record();
+ runnable =
+ new DeliverFrameRunnable(mParent, mCapEngine, mStreamId, mTrackingId,
+ std::move(shMemBuffer), properties);
+ }
+ if (!runnable) {
+ runnable = new DeliverFrameRunnable(mParent, mCapEngine, mStreamId,
+ mTrackingId, aVideoFrame, properties);
+ }
+ MOZ_ASSERT(mParent);
+ nsIEventTarget* target = mParent->GetBackgroundEventTarget();
+ MOZ_ASSERT(target != nullptr);
+ target->Dispatch(runnable, NS_DISPATCH_NORMAL);
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvReleaseFrame(
+ mozilla::ipc::Shmem&& s) {
+ mShmemPool.Put(ShmemBuffer(s));
+ return IPC_OK();
+}
+
+bool CamerasParent::SetupEngine(CaptureEngine aCapEngine) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ StaticRefPtr<VideoEngine>& engine = sEngines[aCapEngine];
+
+ if (!engine) {
+ CaptureDeviceType captureDeviceType = CaptureDeviceType::Camera;
+ switch (aCapEngine) {
+ case ScreenEngine:
+ captureDeviceType = CaptureDeviceType::Screen;
+ break;
+ case BrowserEngine:
+ captureDeviceType = CaptureDeviceType::Browser;
+ break;
+ case WinEngine:
+ captureDeviceType = CaptureDeviceType::Window;
+ break;
+ case CameraEngine:
+ captureDeviceType = CaptureDeviceType::Camera;
+ break;
+ default:
+ LOG("Invalid webrtc Video engine");
+ return false;
+ }
+
+ engine = VideoEngine::Create(captureDeviceType);
+
+ if (!engine) {
+ LOG("VideoEngine::Create failed");
+ return false;
+ }
+ }
+
+ if (aCapEngine == CameraEngine && !mCameraObserver) {
+ mCameraObserver = new InputObserver(this);
+ auto device_info = engine->GetOrCreateVideoCaptureDeviceInfo();
+ MOZ_ASSERT(device_info);
+ if (device_info) {
+ device_info->RegisterVideoInputFeedBack(mCameraObserver);
+ }
+ }
+
+ return true;
+}
+
+void CamerasParent::CloseEngines() {
+ sThreadMonitor->AssertCurrentThreadOwns();
+ LOG("%s", __PRETTY_FUNCTION__);
+ if (!mWebRTCAlive) {
+ return;
+ }
+ MOZ_ASSERT(sVideoCaptureThread->thread_id() == PlatformThread::CurrentId());
+
+ // Stop the callers
+ while (mCallbacks.Length()) {
+ auto capEngine = mCallbacks[0]->mCapEngine;
+ auto streamNum = mCallbacks[0]->mStreamId;
+ LOG("Forcing shutdown of engine %d, capturer %d", capEngine, streamNum);
+ StopCapture(capEngine, streamNum);
+ Unused << ReleaseCapture(capEngine, streamNum);
+ }
+
+ StaticRefPtr<VideoEngine>& engine = sEngines[CameraEngine];
+ if (engine && mCameraObserver) {
+ auto device_info = engine->GetOrCreateVideoCaptureDeviceInfo();
+ MOZ_ASSERT(device_info);
+ if (device_info) {
+ device_info->DeRegisterVideoInputFeedBack(mCameraObserver);
+ }
+ mCameraObserver = nullptr;
+ }
+
+ // CloseEngines() is protected by sThreadMonitor
+ sNumOfOpenCamerasParentEngines--;
+ if (sNumOfOpenCamerasParentEngines == 0) {
+ for (StaticRefPtr<VideoEngine>& engine : sEngines) {
+ if (engine) {
+ VideoEngine::Delete(engine);
+ engine = nullptr;
+ }
+ }
+ }
+
+ mWebRTCAlive = false;
+}
+
+VideoEngine* CamerasParent::EnsureInitialized(int aEngine) {
+ LOG_VERBOSE("%s", __PRETTY_FUNCTION__);
+ // We're shutting down, don't try to do new WebRTC ops.
+ if (!mWebRTCAlive) {
+ return nullptr;
+ }
+ CaptureEngine capEngine = static_cast<CaptureEngine>(aEngine);
+ if (!SetupEngine(capEngine)) {
+ LOG("CamerasParent failed to initialize engine");
+ return nullptr;
+ }
+
+ return sEngines[aEngine];
+}
+
+// Dispatch the runnable to do the camera operation on the
+// specific Cameras thread, preventing us from blocking, and
+// chain a runnable to send back the result on the IPC thread.
+// It would be nice to get rid of the code duplication here,
+// perhaps via Promises.
+mozilla::ipc::IPCResult CamerasParent::RecvNumberOfCaptureDevices(
+ const CaptureEngine& aCapEngine) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ LOG("CaptureEngine=%d", aCapEngine);
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom([self, aCapEngine]() {
+ int num = -1;
+ if (auto engine = self->EnsureInitialized(aCapEngine)) {
+ if (auto devInfo = engine->GetOrCreateVideoCaptureDeviceInfo()) {
+ num = devInfo->NumberOfDevices();
+ }
+ }
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self, num]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvNumberOfCaptureDevices failure: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (num < 0) {
+ LOG("RecvNumberOfCaptureDevices couldn't find devices");
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG("RecvNumberOfCaptureDevices: %d", num);
+ Unused << self->SendReplyNumberOfCaptureDevices(num);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvEnsureInitialized(
+ const CaptureEngine& aCapEngine) {
+ LOG("%s", __PRETTY_FUNCTION__);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom([self, aCapEngine]() {
+ bool result = self->EnsureInitialized(aCapEngine);
+
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self, result]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvEnsureInitialized: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (!result) {
+ LOG("RecvEnsureInitialized failed");
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG("RecvEnsureInitialized succeeded");
+ Unused << self->SendReplySuccess();
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvNumberOfCapabilities(
+ const CaptureEngine& aCapEngine, const nsACString& unique_id) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ LOG("Getting caps for %s", PromiseFlatCString(unique_id).get());
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable =
+ NewRunnableFrom([self, unique_id = nsCString(unique_id), aCapEngine]() {
+ int num = -1;
+ if (auto engine = self->EnsureInitialized(aCapEngine)) {
+ if (auto devInfo = engine->GetOrCreateVideoCaptureDeviceInfo()) {
+ num = devInfo->NumberOfCapabilities(unique_id.get());
+ }
+ }
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self, num]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvNumberOfCapabilities: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (num < 0) {
+ LOG("RecvNumberOfCapabilities couldn't find capabilities");
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG("RecvNumberOfCapabilities: %d", num);
+ Unused << self->SendReplyNumberOfCapabilities(num);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable,
+ NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvGetCaptureCapability(
+ const CaptureEngine& aCapEngine, const nsACString& unique_id,
+ const int& num) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ LOG("RecvGetCaptureCapability: %s %d", PromiseFlatCString(unique_id).get(),
+ num);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom(
+ [self, unique_id = nsCString(unique_id), aCapEngine, num]() {
+ webrtc::VideoCaptureCapability webrtcCaps;
+ int error = -1;
+ if (auto engine = self->EnsureInitialized(aCapEngine)) {
+ if (auto devInfo = engine->GetOrCreateVideoCaptureDeviceInfo()) {
+ error = devInfo->GetCapability(unique_id.get(), num, webrtcCaps);
+ }
+
+ if (!error && aCapEngine == CameraEngine) {
+ auto iter = self->mAllCandidateCapabilities.find(unique_id);
+ if (iter == self->mAllCandidateCapabilities.end()) {
+ std::map<uint32_t, webrtc::VideoCaptureCapability>
+ candidateCapabilities;
+ candidateCapabilities.emplace(num, webrtcCaps);
+ self->mAllCandidateCapabilities.emplace(nsCString(unique_id),
+ candidateCapabilities);
+ } else {
+ (iter->second).emplace(num, webrtcCaps);
+ }
+ }
+ }
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self, webrtcCaps,
+ error]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvGetCaptureCapability: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+ VideoCaptureCapability capCap(
+ webrtcCaps.width, webrtcCaps.height, webrtcCaps.maxFPS,
+ static_cast<int>(webrtcCaps.videoType), webrtcCaps.interlaced);
+ LOG("Capability: %u %u %u %d %d", webrtcCaps.width, webrtcCaps.height,
+ webrtcCaps.maxFPS, static_cast<int>(webrtcCaps.videoType),
+ webrtcCaps.interlaced);
+ if (error) {
+ LOG("RecvGetCaptureCapability: reply failure");
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ }
+ Unused << self->SendReplyGetCaptureCapability(capCap);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable,
+ NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvGetCaptureDevice(
+ const CaptureEngine& aCapEngine, const int& aDeviceIndex) {
+ LOG("%s", __PRETTY_FUNCTION__);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom([self, aCapEngine,
+ aDeviceIndex]() {
+ char deviceName[MediaEngineSource::kMaxDeviceNameLength];
+ char deviceUniqueId[MediaEngineSource::kMaxUniqueIdLength];
+ nsCString name;
+ nsCString uniqueId;
+ pid_t devicePid = 0;
+ int error = -1;
+ if (auto engine = self->EnsureInitialized(aCapEngine)) {
+ if (auto devInfo = engine->GetOrCreateVideoCaptureDeviceInfo()) {
+ error = devInfo->GetDeviceName(
+ aDeviceIndex, deviceName, sizeof(deviceName), deviceUniqueId,
+ sizeof(deviceUniqueId), nullptr, 0, &devicePid);
+ }
+ }
+ if (!error) {
+ name.Assign(deviceName);
+ uniqueId.Assign(deviceUniqueId);
+ }
+ RefPtr<nsIRunnable> ipc_runnable =
+ NewRunnableFrom([self, error, name, uniqueId, devicePid]() {
+ if (!self->mChildIsAlive) {
+ return NS_ERROR_FAILURE;
+ }
+ if (error) {
+ LOG("GetCaptureDevice failed: %d", error);
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ }
+ bool scary = (devicePid == getpid());
+
+ LOG("Returning %s name %s id (pid = %d)%s", name.get(),
+ uniqueId.get(), devicePid, (scary ? " (scary)" : ""));
+ Unused << self->SendReplyGetCaptureDevice(name, uniqueId, scary);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+// Find out whether the given window with id has permission to use the
+// camera. If the permission is not persistent, we'll make it a one-shot by
+// removing the (session) permission.
+static bool HasCameraPermission(const uint64_t& aWindowId) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ RefPtr<dom::WindowGlobalParent> window =
+ dom::WindowGlobalParent::GetByInnerWindowId(aWindowId);
+ if (!window) {
+ // Could not find window by id
+ return false;
+ }
+
+ // If we delegate permission from first party, we should use the top level
+ // window
+ if (StaticPrefs::permissions_delegation_enabled()) {
+ RefPtr<dom::BrowsingContext> topBC = window->BrowsingContext()->Top();
+ window = topBC->Canonical()->GetCurrentWindowGlobal();
+ }
+
+ // Return false if the window is not the currently-active window for its
+ // BrowsingContext.
+ if (!window || !window->IsCurrentGlobal()) {
+ return false;
+ }
+
+ nsIPrincipal* principal = window->DocumentPrincipal();
+ if (principal->GetIsNullPrincipal()) {
+ return false;
+ }
+
+ if (principal->IsSystemPrincipal()) {
+ return true;
+ }
+
+ MOZ_ASSERT(principal->GetIsContentPrincipal());
+
+ nsresult rv;
+ // Name used with nsIPermissionManager
+ static const nsLiteralCString cameraPermission = "MediaManagerVideo"_ns;
+ nsCOMPtr<nsIPermissionManager> mgr =
+ do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return false;
+ }
+
+ uint32_t video = nsIPermissionManager::UNKNOWN_ACTION;
+ rv = mgr->TestExactPermissionFromPrincipal(principal, cameraPermission,
+ &video);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return false;
+ }
+
+ bool allowed = (video == nsIPermissionManager::ALLOW_ACTION);
+
+ // Session permissions are removed after one use.
+ if (allowed) {
+ mgr->RemoveFromPrincipal(principal, cameraPermission);
+ }
+
+ return allowed;
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvAllocateCapture(
+ const CaptureEngine& aCapEngine, const nsACString& unique_id,
+ const uint64_t& aWindowID) {
+ LOG("%s: Verifying permissions", __PRETTY_FUNCTION__);
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> mainthread_runnable = NewRunnableFrom(
+ [self, aCapEngine, unique_id = nsCString(unique_id), aWindowID]() {
+ // Verify whether the claimed origin has received permission
+ // to use the camera, either persistently or this session (one shot).
+ bool allowed = HasCameraPermission(aWindowID);
+ if (!allowed) {
+ // Developer preference for turning off permission check.
+ if (Preferences::GetBool("media.navigator.permission.disabled",
+ false)) {
+ allowed = true;
+ LOG("No permission but checks are disabled");
+ } else {
+ LOG("No camera permission for this origin");
+ }
+ }
+ // After retrieving the permission (or not) on the main thread,
+ // bounce to the WebRTC thread to allocate the device (or not),
+ // then bounce back to the IPC thread for the reply to content.
+ RefPtr<Runnable> webrtc_runnable =
+ NewRunnableFrom([self, allowed, aCapEngine, unique_id]() {
+ int captureId = -1;
+ int error = -1;
+ if (allowed && self->EnsureInitialized(aCapEngine)) {
+ StaticRefPtr<VideoEngine>& engine = self->sEngines[aCapEngine];
+ captureId = engine->CreateVideoCapture(unique_id.get());
+ engine->WithEntry(captureId,
+ [&error](VideoEngine::CaptureEntry& cap) {
+ if (cap.VideoCapture()) {
+ error = 0;
+ }
+ });
+ }
+ RefPtr<nsIRunnable> ipc_runnable =
+ NewRunnableFrom([self, captureId, error]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvAllocateCapture: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (error) {
+ Unused << self->SendReplyFailure();
+ LOG("RecvAllocateCapture: WithEntry error");
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG("Allocated device nr %d", captureId);
+ Unused << self->SendReplyAllocateCapture(captureId);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable,
+ NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ self->DispatchToVideoCaptureThread(webrtc_runnable);
+ return NS_OK;
+ });
+ NS_DispatchToMainThread(mainthread_runnable);
+ return IPC_OK();
+}
+
+int CamerasParent::ReleaseCapture(const CaptureEngine& aCapEngine,
+ int aCaptureId) {
+ int error = -1;
+ if (auto engine = EnsureInitialized(aCapEngine)) {
+ error = engine->ReleaseVideoCapture(aCaptureId);
+ }
+ return error;
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvReleaseCapture(
+ const CaptureEngine& aCapEngine, const int& aCaptureId) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ LOG("RecvReleaseCamera device nr %d", aCaptureId);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom([self, aCapEngine,
+ aCaptureId]() {
+ int error = self->ReleaseCapture(aCapEngine, aCaptureId);
+ RefPtr<nsIRunnable> ipc_runnable =
+ NewRunnableFrom([self, error, aCaptureId]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvReleaseCapture: child not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (error) {
+ Unused << self->SendReplyFailure();
+ LOG("RecvReleaseCapture: Failed to free device nr %d", aCaptureId);
+ return NS_ERROR_FAILURE;
+ }
+
+ Unused << self->SendReplySuccess();
+ LOG("Freed device nr %d", aCaptureId);
+ return NS_OK;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvStartCapture(
+ const CaptureEngine& aCapEngine, const int& aCaptureId,
+ const VideoCaptureCapability& ipcCaps) {
+ LOG("%s", __PRETTY_FUNCTION__);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom([self, aCapEngine,
+ aCaptureId, ipcCaps]() {
+ LOG("%s", __PRETTY_FUNCTION__);
+ CallbackHelper** cbh;
+ int error = -1;
+ if (self->EnsureInitialized(aCapEngine)) {
+ cbh = self->mCallbacks.AppendElement(new CallbackHelper(
+ static_cast<CaptureEngine>(aCapEngine), aCaptureId, self));
+
+ self->sEngines[aCapEngine]->WithEntry(
+ aCaptureId, [&aCaptureId, &aCapEngine, &error, &ipcCaps, &cbh,
+ self](VideoEngine::CaptureEntry& cap) {
+ webrtc::VideoCaptureCapability capability;
+ capability.width = ipcCaps.width();
+ capability.height = ipcCaps.height();
+ capability.maxFPS = ipcCaps.maxFPS();
+ capability.videoType =
+ static_cast<webrtc::VideoType>(ipcCaps.videoType());
+ capability.interlaced = ipcCaps.interlaced();
+
+#ifndef FUZZING_SNAPSHOT
+ MOZ_DIAGNOSTIC_ASSERT(sDeviceUniqueIDs.find(aCaptureId) ==
+ sDeviceUniqueIDs.end());
+#endif
+ sDeviceUniqueIDs.emplace(aCaptureId,
+ cap.VideoCapture()->CurrentDeviceName());
+
+#ifndef FUZZING_SNAPSHOT
+ MOZ_DIAGNOSTIC_ASSERT(sAllRequestedCapabilities.find(aCaptureId) ==
+ sAllRequestedCapabilities.end());
+#endif
+ sAllRequestedCapabilities.emplace(aCaptureId, capability);
+
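+            // Several captures may target the same physical camera. Widen the
+            // requested capability to the maximum width/height/FPS of all
+            // outstanding requests so one physical capture can serve them all.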
+ if (aCapEngine == CameraEngine) {
+ for (const auto& it : sDeviceUniqueIDs) {
+ if (strcmp(it.second,
+ cap.VideoCapture()->CurrentDeviceName()) == 0) {
+ capability.width =
+ std::max(capability.width,
+ sAllRequestedCapabilities[it.first].width);
+ capability.height =
+ std::max(capability.height,
+ sAllRequestedCapabilities[it.first].height);
+ capability.maxFPS =
+ std::max(capability.maxFPS,
+ sAllRequestedCapabilities[it.first].maxFPS);
+ }
+ }
+
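+              // Snap the (possibly widened) request to the closest capability
+              // the device actually advertised, using a distance metric that
+              // weights resolution more heavily than frame rate.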
+ auto candidateCapabilities = self->mAllCandidateCapabilities.find(
+ nsCString(cap.VideoCapture()->CurrentDeviceName()));
+ if ((candidateCapabilities !=
+ self->mAllCandidateCapabilities.end()) &&
+ (!candidateCapabilities->second.empty())) {
+ int32_t minIdx = -1;
+ uint64_t minDistance = UINT64_MAX;
+
+ for (auto& candidateCapability :
+ candidateCapabilities->second) {
+ if (candidateCapability.second.videoType !=
+ capability.videoType) {
+ continue;
+ }
+ // The first priority is finding a suitable resolution.
+ // So here we raise the weight of width and height
+ uint64_t distance =
+ uint64_t(ResolutionFeasibilityDistance(
+ candidateCapability.second.width, capability.width)) +
+ uint64_t(ResolutionFeasibilityDistance(
+ candidateCapability.second.height,
+ capability.height)) +
+ uint64_t(
+ FeasibilityDistance(candidateCapability.second.maxFPS,
+ capability.maxFPS));
+ if (distance < minDistance) {
+ minIdx = candidateCapability.first;
+ minDistance = distance;
+ }
+ }
+ MOZ_ASSERT(minIdx != -1);
+ capability = candidateCapabilities->second[minIdx];
+ }
+ } else if (aCapEngine == ScreenEngine ||
+ aCapEngine == BrowserEngine || aCapEngine == WinEngine) {
+ for (const auto& it : sDeviceUniqueIDs) {
+ if (strcmp(it.second,
+ cap.VideoCapture()->CurrentDeviceName()) == 0) {
+ capability.maxFPS =
+ std::max(capability.maxFPS,
+ sAllRequestedCapabilities[it.first].maxFPS);
+ }
+ }
+ }
+
+ cap.VideoCapture()->SetTrackingId(
+ (*cbh)->mTrackingId.mUniqueInProcId);
+ error = cap.VideoCapture()->StartCapture(capability);
+
+ if (!error) {
+ cap.VideoCapture()->RegisterCaptureDataCallback(
+ static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(
+ *cbh));
+ } else {
+ sDeviceUniqueIDs.erase(aCaptureId);
+ sAllRequestedCapabilities.erase(aCaptureId);
+ }
+ });
+ }
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self, error]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvStartCapture failure: child is not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (!error) {
+ Unused << self->SendReplySuccess();
+ return NS_OK;
+ }
+
+ LOG("RecvStartCapture failure: StartCapture failed");
+ Unused << self->SendReplyFailure();
+ return NS_ERROR_FAILURE;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvFocusOnSelectedSource(
+ const CaptureEngine& aCapEngine, const int& aCaptureId) {
+ LOG("%s", __PRETTY_FUNCTION__);
+ RefPtr<Runnable> webrtc_runnable = NewRunnableFrom(
+ [self = RefPtr<CamerasParent>(this), aCapEngine, aCaptureId]() {
+ if (auto engine = self->EnsureInitialized(aCapEngine)) {
+ engine->WithEntry(aCaptureId, [self](VideoEngine::CaptureEntry& cap) {
+ if (cap.VideoCapture()) {
+ bool result = cap.VideoCapture()->FocusOnSelectedSource();
+ RefPtr<nsIRunnable> ipc_runnable = NewRunnableFrom([self,
+ result]() {
+ if (!self->mChildIsAlive) {
+ LOG("RecvFocusOnSelectedSource failure: child is not alive");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (result) {
+ Unused << self->SendReplySuccess();
+ return NS_OK;
+ }
+
+ Unused << self->SendReplyFailure();
+ LOG("RecvFocusOnSelectedSource failure.");
+ return NS_ERROR_FAILURE;
+ });
+ self->mPBackgroundEventTarget->Dispatch(ipc_runnable,
+ NS_DISPATCH_NORMAL);
+ }
+ });
+          return NS_OK;
+        }
+        LOG("RecvFocusOnSelectedSource CamerasParent not initialized");
+        return NS_ERROR_FAILURE;
+ });
+ DispatchToVideoCaptureThread(webrtc_runnable);
+ return IPC_OK();
+}
+
+void CamerasParent::StopCapture(const CaptureEngine& aCapEngine,
+ int aCaptureId) {
+ if (auto engine = EnsureInitialized(aCapEngine)) {
+ // we're removing elements, iterate backwards
+ for (size_t i = mCallbacks.Length(); i > 0; i--) {
+ if (mCallbacks[i - 1]->mCapEngine == aCapEngine &&
+ mCallbacks[i - 1]->mStreamId == (uint32_t)aCaptureId) {
+ CallbackHelper* cbh = mCallbacks[i - 1];
+ engine->WithEntry(aCaptureId, [cbh, &aCaptureId](
+ VideoEngine::CaptureEntry& cap) {
+ if (cap.VideoCapture()) {
+ cap.VideoCapture()->DeRegisterCaptureDataCallback(
+ static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(cbh));
+ cap.VideoCapture()->StopCaptureIfAllClientsClose();
+
+ sDeviceUniqueIDs.erase(aCaptureId);
+ sAllRequestedCapabilities.erase(aCaptureId);
+ }
+ });
+
+ delete mCallbacks[i - 1];
+ mCallbacks.RemoveElementAt(i - 1);
+ break;
+ }
+ }
+ }
+}
+
+mozilla::ipc::IPCResult CamerasParent::RecvStopCapture(
+ const CaptureEngine& aCapEngine, const int& aCaptureId) {
+ LOG("%s", __PRETTY_FUNCTION__);
+
+ RefPtr<CamerasParent> self(this);
+ RefPtr<Runnable> webrtc_runnable =
+ NewRunnableFrom([self, aCapEngine, aCaptureId]() {
+ self->StopCapture(aCapEngine, aCaptureId);
+ return NS_OK;
+ });
+ nsresult rv = DispatchToVideoCaptureThread(webrtc_runnable);
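+  // Unlike the other handlers, reply directly from the IPC thread: stopping is
+  // fire-and-forget on the video capture thread, so the reply only reports
+  // whether the stop task could be queued.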
+ if (!self->mChildIsAlive) {
+ if (NS_FAILED(rv)) {
+ return IPC_FAIL_NO_REASON(this);
+ }
+ } else {
+ if (NS_SUCCEEDED(rv)) {
+ if (!SendReplySuccess()) {
+ return IPC_FAIL_NO_REASON(this);
+ }
+ } else {
+ if (!SendReplyFailure()) {
+ return IPC_FAIL_NO_REASON(this);
+ }
+ }
+ }
+ return IPC_OK();
+}
+
+void CamerasParent::StopIPC() {
+ MOZ_ASSERT(!mDestroyed);
+ // Release shared memory now, it's our last chance
+ mShmemPool.Cleanup(this);
+ // We don't want to receive callbacks or anything if we can't
+ // forward them anymore anyway.
+ mChildIsAlive = false;
+ mDestroyed = true;
+}
+
+void CamerasParent::ActorDestroy(ActorDestroyReason aWhy) {
+ // No more IPC from here
+ LOG("%s", __PRETTY_FUNCTION__);
+ StopIPC();
+ // Shut down WebRTC (if we're not in full shutdown, else this
+ // will already have happened)
+ StopVideoCapture();
+}
+
+nsString CamerasParent::GetNewName() {
+ static std::atomic<uint64_t> counter{0};
+ nsString name(u"CamerasParent "_ns);
+ name.AppendInt(++counter);
+ return name;
+}
+
+NS_IMETHODIMP CamerasParent::BlockShutdown(nsIAsyncShutdownClient*) {
+ mPBackgroundEventTarget->Dispatch(
+ NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() {
+        // Send__delete__() can return failure if AddBlocker() registered this
+        // CamerasParent while RecvPCamerasConstructor() called Send__delete__()
+ // because it noticed that AppShutdown had started.
+ (void)Send__delete__(self);
+ }));
+ return NS_OK;
+}
+
+CamerasParent::CamerasParent()
+ : mName(GetNewName()),
+ mShmemPool(CaptureEngine::MaxEngine),
+ mPBackgroundEventTarget(GetCurrentSerialEventTarget()),
+ mChildIsAlive(true),
+ mDestroyed(false),
+ mWebRTCAlive(false) {
+ MOZ_ASSERT(mPBackgroundEventTarget != nullptr,
+ "GetCurrentThreadEventTarget failed");
+ LOG("CamerasParent: %p", this);
+ StaticMutexAutoLock slock(sMutex);
+
+ if (sNumOfCamerasParents++ == 0) {
+ sThreadMonitor = new Monitor("CamerasParent::sThreadMonitor");
+ }
+}
+
+// RecvPCamerasConstructor() is used because IPC messages such as
+// Send__delete__() cannot be sent from AllocPCamerasParent().
+ipc::IPCResult CamerasParent::RecvPCamerasConstructor() {
+ ipc::AssertIsOnBackgroundThread();
+
+ // A shutdown blocker must be added if sNumOfOpenCamerasParentEngines might
+ // be incremented to indicate ownership of an sVideoCaptureThread.
+ // If this task were queued after checking !IsInOrBeyond(AppShutdown), then
+ // shutdown may have proceeded on the main thread and so the task may run
+ // too late to add the blocker.
+ // Don't dispatch from the constructor a runnable that may toggle the
+ // reference count, because the IPC thread does not get a reference until
+ // after the constructor returns.
+ NS_DispatchToMainThread(
+ NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() {
+ nsresult rv = MustGetShutdownBarrier()->AddBlocker(
+ self, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__, u""_ns);
+ LOG("AddBlocker returned 0x%x", static_cast<unsigned>(rv));
+ // AddBlocker() will fail if called after all
+ // AsyncShutdown.profileBeforeChange conditions have resolved or been
+ // removed.
+ //
+ // The success of this AddBlocker() call is expected when an
+ // sVideoCaptureThread is created based on the assumption that at
+ // least one condition (e.g. nsIAsyncShutdownBlocker) added with
+ // AsyncShutdown.profileBeforeChange.addBlocker() will not resolve or
+ // be removed until it has queued a task and that task has run.
+        // (AsyncShutdown.jsm's Spinner#observe() makes a similar assumption
+ // when it calls processNextEvent(), assuming that there will be some
+ // other event generated, before checking whether its Barrier.wait()
+ // promise has resolved.)
+ //
+ // If AppShutdown::IsInOrBeyond(AppShutdown) returned false,
+ // then this main thread task was queued before AppShutdown's
+ // sCurrentShutdownPhase is set to AppShutdown,
+ // which is before profile-before-change is notified,
+ // which is when AsyncShutdown conditions are run,
+ // which is when one condition would queue a task to resolve the
+ // condition or remove the blocker.
+ // That task runs after this task and before AsyncShutdown prevents
+ // further conditions being added through AddBlocker().
+ MOZ_ASSERT(NS_SUCCEEDED(rv) || !self->mWebRTCAlive);
+ }));
+
+ // AsyncShutdown barriers are available only for ShutdownPhases as late as
+ // XPCOMWillShutdown. The IPC background thread shuts down during
+ // XPCOMShutdownThreads, so actors may be created when AsyncShutdown
+ // barriers are no longer available.
+ // IsInOrBeyond() checks sCurrentShutdownPhase, which is atomic.
+ // ShutdownPhase::AppShutdown corresponds to profileBeforeChange used by
+ // MustGetShutdownBarrier() in the parent process.
+ if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdown)) {
+ // The usual blocker removal path depends on the existence of the
+ // `sVideoCaptureThread`, which is not ensured. Queue removal now.
+ NS_DispatchToMainThread(
+ NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() {
+ // May fail if AddBlocker() failed.
+ (void)MustGetShutdownBarrier()->RemoveBlocker(self);
+ }));
+ return Send__delete__(this) ? IPC_OK() : IPC_FAIL(this, "Failed to send");
+ }
+
+ LOG("Spinning up WebRTC Cameras Thread");
+ MonitorAutoLock lock(*sThreadMonitor);
+ if (sVideoCaptureThread == nullptr) {
+ MOZ_ASSERT(sNumOfOpenCamerasParentEngines == 0);
+ sVideoCaptureThread = new base::Thread("VideoCapture");
+ base::Thread::Options options;
+#if defined(_WIN32)
+ options.message_loop_type = MessageLoop::TYPE_MOZILLA_NONMAINUITHREAD;
+#else
+ options.message_loop_type = MessageLoop::TYPE_MOZILLA_NONMAINTHREAD;
+#endif
+ if (!sVideoCaptureThread->StartWithOptions(options)) {
+ MOZ_CRASH();
+ }
+ }
+ mWebRTCAlive = true;
+ sNumOfOpenCamerasParentEngines++;
+ return IPC_OK();
+}
+
+CamerasParent::~CamerasParent() {
+ LOG("~CamerasParent: %p", this);
+ StaticMutexAutoLock slock(sMutex);
+ if (--sNumOfCamerasParents == 0) {
+ delete sThreadMonitor;
+ sThreadMonitor = nullptr;
+ }
+}
+
+already_AddRefed<CamerasParent> CamerasParent::Create() {
+ mozilla::ipc::AssertIsOnBackgroundThread();
+ return MakeAndAddRef<CamerasParent>();
+}
+
+} // namespace camera
+} // namespace mozilla
diff --git a/dom/media/systemservices/CamerasParent.h b/dom/media/systemservices/CamerasParent.h
new file mode 100644
index 0000000000..0fb32d7e14
--- /dev/null
+++ b/dom/media/systemservices/CamerasParent.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_CamerasParent_h
+#define mozilla_CamerasParent_h
+
+#include "VideoEngine.h"
+#include "mozilla/camera/PCamerasParent.h"
+#include "mozilla/ipc/Shmem.h"
+#include "mozilla/ShmemPool.h"
+#include "mozilla/Atomics.h"
+#include "api/video/video_sink_interface.h"
+#include "common_video/include/incoming_video_stream.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_defines.h"
+
+#include "CamerasChild.h"
+
+#include "base/thread.h"
+
+namespace mozilla::camera {
+
+class CamerasParent;
+
+class CallbackHelper : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+ CallbackHelper(CaptureEngine aCapEng, uint32_t aStreamId,
+ CamerasParent* aParent)
+ : mCapEngine(aCapEng),
+ mStreamId(aStreamId),
+ mTrackingId(CaptureEngineToTrackingSourceStr(aCapEng), aStreamId),
+ mParent(aParent){};
+
+ // These callbacks end up running on the VideoCapture thread.
+ // From VideoCaptureCallback
+ void OnFrame(const webrtc::VideoFrame& videoFrame) override;
+
+ friend CamerasParent;
+
+ private:
+ const CaptureEngine mCapEngine;
+ const uint32_t mStreamId;
+ const TrackingId mTrackingId;
+ CamerasParent* const mParent;
+};
+
+class InputObserver : public webrtc::VideoInputFeedBack {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(InputObserver)
+
+ explicit InputObserver(CamerasParent* aParent) : mParent(aParent){};
+
+ virtual void OnDeviceChange() override;
+
+ friend CamerasParent;
+
+ private:
+ ~InputObserver() = default;
+
+ const RefPtr<CamerasParent> mParent;
+};
+
+class DeliverFrameRunnable;
+
+class CamerasParent final : public PCamerasParent,
+ public nsIAsyncShutdownBlocker {
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ public:
+ friend DeliverFrameRunnable;
+
+ static already_AddRefed<CamerasParent> Create();
+
+ // Messages received from the child. These run on the IPC/PBackground thread.
+ mozilla::ipc::IPCResult RecvPCamerasConstructor();
+ mozilla::ipc::IPCResult RecvAllocateCapture(
+ const CaptureEngine& aEngine, const nsACString& aUnique_idUTF8,
+ const uint64_t& aWindowID) override;
+ mozilla::ipc::IPCResult RecvReleaseCapture(const CaptureEngine&,
+ const int&) override;
+ mozilla::ipc::IPCResult RecvNumberOfCaptureDevices(
+ const CaptureEngine&) override;
+ mozilla::ipc::IPCResult RecvNumberOfCapabilities(const CaptureEngine&,
+ const nsACString&) override;
+ mozilla::ipc::IPCResult RecvGetCaptureCapability(const CaptureEngine&,
+ const nsACString&,
+ const int&) override;
+ mozilla::ipc::IPCResult RecvGetCaptureDevice(const CaptureEngine&,
+ const int&) override;
+ mozilla::ipc::IPCResult RecvStartCapture(
+ const CaptureEngine&, const int&, const VideoCaptureCapability&) override;
+ mozilla::ipc::IPCResult RecvFocusOnSelectedSource(const CaptureEngine&,
+ const int&) override;
+ mozilla::ipc::IPCResult RecvStopCapture(const CaptureEngine&,
+ const int&) override;
+ mozilla::ipc::IPCResult RecvReleaseFrame(mozilla::ipc::Shmem&&) override;
+ void ActorDestroy(ActorDestroyReason aWhy) override;
+ mozilla::ipc::IPCResult RecvEnsureInitialized(const CaptureEngine&) override;
+
+ nsIEventTarget* GetBackgroundEventTarget() {
+ return mPBackgroundEventTarget;
+ };
+ bool IsShuttingDown() {
+ // the first 2 are pBackground only, the last is atomic
+ MOZ_ASSERT(GetCurrentSerialEventTarget() == mPBackgroundEventTarget);
+ return !mChildIsAlive || mDestroyed || !mWebRTCAlive;
+ };
+ ShmemBuffer GetBuffer(size_t aSize);
+
+ // helper to forward to the PBackground thread
+ int DeliverFrameOverIPC(CaptureEngine capEng, uint32_t aStreamId,
+ const TrackingId& aTrackingId, ShmemBuffer buffer,
+ unsigned char* altbuffer,
+ const VideoFrameProperties& aProps);
+
+ CamerasParent();
+
+ protected:
+ virtual ~CamerasParent();
+
+ // We use these helpers for shutdown and for the respective IPC commands.
+ void StopCapture(const CaptureEngine& aCapEngine, int aCaptureId);
+ int ReleaseCapture(const CaptureEngine& aCapEngine, int aCaptureId);
+
+ bool SetupEngine(CaptureEngine aCapEngine);
+ VideoEngine* EnsureInitialized(int aEngine);
+ void CloseEngines();
+ void StopIPC();
+ void StopVideoCapture();
+ nsresult DispatchToVideoCaptureThread(RefPtr<Runnable> event);
+ NS_IMETHOD BlockShutdown(nsIAsyncShutdownClient*) override;
+ NS_IMETHOD GetName(nsAString& aName) override {
+ aName = mName;
+ return NS_OK;
+ }
+ NS_IMETHOD GetState(nsIPropertyBag**) override { return NS_OK; }
+ static nsString GetNewName();
+
+ // sEngines will be accessed by VideoCapture thread only
+ // sNumOfCamerasParents, sNumOfOpenCamerasParentEngines, and
+ // sVideoCaptureThread will be accessed by main thread / PBackground thread /
+ // VideoCapture thread
+ // sNumOfCamerasParents and sThreadMonitor create & delete are protected by
+ // sMutex
+ // sNumOfOpenCamerasParentEngines and sVideoCaptureThread are protected by
+ // sThreadMonitor
+ static StaticRefPtr<VideoEngine> sEngines[CaptureEngine::MaxEngine];
+ // Number of CamerasParents for which mWebRTCAlive is true.
+ static int32_t sNumOfOpenCamerasParentEngines;
+ static int32_t sNumOfCamerasParents;
+ static StaticMutex sMutex;
+ static Monitor* sThreadMonitor;
+ // video processing thread - where webrtc.org capturer code runs
+ static base::Thread* sVideoCaptureThread;
+
+ nsTArray<CallbackHelper*> mCallbacks;
+ nsString mName;
+
+ // image buffers
+ ShmemPool mShmemPool;
+
+ // PBackgroundParent thread
+ const nsCOMPtr<nsISerialEventTarget> mPBackgroundEventTarget;
+
+ // Shutdown handling
+ bool mChildIsAlive;
+ bool mDestroyed;
+ // Above 2 are PBackground only, but this is potentially
+ // read cross-thread.
+ Atomic<bool> mWebRTCAlive;
+ RefPtr<InputObserver> mCameraObserver;
+ std::map<nsCString, std::map<uint32_t, webrtc::VideoCaptureCapability>>
+ mAllCandidateCapabilities;
+};
+
+PCamerasParent* CreateCamerasParent();
+
+} // namespace mozilla::camera
+
+#endif // mozilla_CamerasParent_h
diff --git a/dom/media/systemservices/CamerasTypes.cpp b/dom/media/systemservices/CamerasTypes.cpp
new file mode 100644
index 0000000000..7eda2f650b
--- /dev/null
+++ b/dom/media/systemservices/CamerasTypes.cpp
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CamerasTypes.h"
+
+namespace mozilla::camera {
+
+TrackingId::Source CaptureEngineToTrackingSourceStr(
+ const CaptureEngine& aEngine) {
+ switch (aEngine) {
+ case ScreenEngine:
+ return TrackingId::Source::Screen;
+ case BrowserEngine:
+ return TrackingId::Source::Tab;
+ case WinEngine:
+ return TrackingId::Source::Window;
+ case CameraEngine:
+ return TrackingId::Source::Camera;
+ default:
+ return TrackingId::Source::Unimplemented;
+ }
+}
+} // namespace mozilla::camera
diff --git a/dom/media/systemservices/CamerasTypes.h b/dom/media/systemservices/CamerasTypes.h
new file mode 100644
index 0000000000..fed3941d9c
--- /dev/null
+++ b/dom/media/systemservices/CamerasTypes.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_CamerasTypes_h
+#define mozilla_CamerasTypes_h
+
+#include "ipc/EnumSerializer.h"
+#include "PerformanceRecorder.h"
+
+namespace mozilla::camera {
+
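+// InvalidEngine and MaxEngine act as bounds: they delimit the range of values
+// accepted over IPC (see the ContiguousEnumSerializer specialization below),
+// and MaxEngine also sizes per-engine arrays such as CamerasParent::sEngines.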
+enum CaptureEngine : int {
+ InvalidEngine = 0,
+ ScreenEngine,
+ BrowserEngine,
+ WinEngine,
+ CameraEngine,
+ MaxEngine
+};
+
+TrackingId::Source CaptureEngineToTrackingSourceStr(
+ const CaptureEngine& aEngine);
+
+} // namespace mozilla::camera
+
+namespace IPC {
+template <>
+struct ParamTraits<mozilla::camera::CaptureEngine>
+ : public ContiguousEnumSerializer<
+ mozilla::camera::CaptureEngine,
+ mozilla::camera::CaptureEngine::InvalidEngine,
+ mozilla::camera::CaptureEngine::MaxEngine> {};
+} // namespace IPC
+
+#endif // mozilla_CamerasTypes_h
diff --git a/dom/media/systemservices/MediaChild.cpp b/dom/media/systemservices/MediaChild.cpp
new file mode 100644
index 0000000000..34780ed74f
--- /dev/null
+++ b/dom/media/systemservices/MediaChild.cpp
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaChild.h"
+#include "MediaParent.h"
+
+#include "nsGlobalWindow.h"
+#include "mozilla/dom/ContentChild.h"
+#include "mozilla/MediaManager.h"
+#include "mozilla/Logging.h"
+#include "nsQueryObject.h"
+
+#undef LOG
+mozilla::LazyLogModule gMediaChildLog("MediaChild");
+#define LOG(args) MOZ_LOG(gMediaChildLog, mozilla::LogLevel::Debug, args)
+
+namespace mozilla::media {
+
+RefPtr<PrincipalKeyPromise> GetPrincipalKey(
+ const ipc::PrincipalInfo& aPrincipalInfo, bool aPersist) {
+ RefPtr<MediaManager> mgr = MediaManager::GetInstance();
+ MOZ_ASSERT(mgr);
+
+ if (XRE_GetProcessType() == GeckoProcessType_Default) {
+ auto p = MakeRefPtr<PrincipalKeyPromise::Private>(__func__);
+
+ mgr->GetNonE10sParent()->RecvGetPrincipalKey(
+ aPrincipalInfo, aPersist,
+ [p](const nsACString& aKey) { p->Resolve(aKey, __func__); });
+ return p;
+ }
+ return Child::Get()
+ ->SendGetPrincipalKey(aPrincipalInfo, aPersist)
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [](const Child::GetPrincipalKeyPromise::ResolveOrRejectValue&
+ aValue) {
+ if (aValue.IsReject() || aValue.ResolveValue().IsEmpty()) {
+ return PrincipalKeyPromise::CreateAndReject(NS_ERROR_FAILURE,
+ __func__);
+ }
+ return PrincipalKeyPromise::CreateAndResolve(
+ aValue.ResolveValue(), __func__);
+ });
+}
+
+void SanitizeOriginKeys(const uint64_t& aSinceWhen, bool aOnlyPrivateBrowsing) {
+ LOG(("SanitizeOriginKeys since %" PRIu64 " %s", aSinceWhen,
+ (aOnlyPrivateBrowsing ? "in Private Browsing." : ".")));
+
+ if (XRE_GetProcessType() == GeckoProcessType_Default) {
+ // Avoid opening MediaManager in this case, since this is called by
+ // sanitize.js when cookies are cleared, which can happen on startup.
+ RefPtr<Parent<NonE10s>> tmpParent = new Parent<NonE10s>();
+ tmpParent->RecvSanitizeOriginKeys(aSinceWhen, aOnlyPrivateBrowsing);
+ } else {
+ Child::Get()->SendSanitizeOriginKeys(aSinceWhen, aOnlyPrivateBrowsing);
+ }
+}
+
+static Child* sChild;
+
+Child* Child::Get() {
+ MOZ_ASSERT(XRE_GetProcessType() == GeckoProcessType_Content);
+ MOZ_ASSERT(NS_IsMainThread());
+ if (!sChild) {
+ sChild = static_cast<Child*>(
+ dom::ContentChild::GetSingleton()->SendPMediaConstructor());
+ }
+ return sChild;
+}
+
+Child::Child() : mActorDestroyed(false) {
+ LOG(("media::Child: %p", this));
+ MOZ_COUNT_CTOR(Child);
+}
+
+Child::~Child() {
+ LOG(("~media::Child: %p", this));
+ sChild = nullptr;
+ MOZ_COUNT_DTOR(Child);
+}
+
+void Child::ActorDestroy(ActorDestroyReason aWhy) { mActorDestroyed = true; }
+
+PMediaChild* AllocPMediaChild() { return new Child(); }
+
+bool DeallocPMediaChild(media::PMediaChild* aActor) {
+ delete static_cast<Child*>(aActor);
+ return true;
+}
+
+} // namespace mozilla::media
diff --git a/dom/media/systemservices/MediaChild.h b/dom/media/systemservices/MediaChild.h
new file mode 100644
index 0000000000..b0ae776cce
--- /dev/null
+++ b/dom/media/systemservices/MediaChild.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MediaChild_h
+#define mozilla_MediaChild_h
+
+#include "mozilla/media/PMediaChild.h"
+#include "mozilla/media/PMediaParent.h"
+#include "MediaUtils.h"
+
+namespace mozilla {
+
+namespace ipc {
+class PrincipalInfo;
+}
+
+namespace media {
+
+typedef MozPromise<nsCString, nsresult, false> PrincipalKeyPromise;
+
+// media::Child implements proxying to the chrome process for some media-related
+// functions, for the moment just:
+//
+// GetPrincipalKey() - get a cookie-like persisted unique key for a given
+// principalInfo.
+//
+// SanitizeOriginKeys() - reset persisted unique keys.
+
+// GetPrincipalKey is asynchronous and returns a MozPromise that later resolves
+// with the key; use promise->Then(...) to access it. SanitizeOriginKeys is
+// asynchronous and fire-and-forget.
+
+RefPtr<PrincipalKeyPromise> GetPrincipalKey(
+ const mozilla::ipc::PrincipalInfo& aPrincipalInfo, bool aPersist);
+
+void SanitizeOriginKeys(const uint64_t& aSinceWhen, bool aOnlyPrivateBrowsing);
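+
+// A minimal caller-side sketch (assuming a main-thread caller that already has
+// an ipc::PrincipalInfo `aInfo`; the names are illustrative only):
+//
+//   media::GetPrincipalKey(aInfo, /* aPersist = */ true)
+//       ->Then(GetMainThreadSerialEventTarget(), __func__,
+//              [](const nsCString& aKey) { /* use the per-origin key */ },
+//              [](nsresult aRv) { /* key lookup failed */ });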
+
+class Child : public PMediaChild {
+ public:
+ static Child* Get();
+
+ Child();
+
+ void ActorDestroy(ActorDestroyReason aWhy) override;
+ virtual ~Child();
+
+ private:
+ bool mActorDestroyed;
+};
+
+PMediaChild* AllocPMediaChild();
+bool DeallocPMediaChild(PMediaChild* aActor);
+
+} // namespace media
+} // namespace mozilla
+
+#endif // mozilla_MediaChild_h
diff --git a/dom/media/systemservices/MediaParent.cpp b/dom/media/systemservices/MediaParent.cpp
new file mode 100644
index 0000000000..d2fb06b8ae
--- /dev/null
+++ b/dom/media/systemservices/MediaParent.cpp
@@ -0,0 +1,536 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaParent.h"
+
+#include "mozilla/Base64.h"
+#include <mozilla/StaticMutex.h>
+
+#include "MediaUtils.h"
+#include "MediaEngine.h"
+#include "VideoUtils.h"
+#include "nsClassHashtable.h"
+#include "nsThreadUtils.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsIInputStream.h"
+#include "nsILineInputStream.h"
+#include "nsIOutputStream.h"
+#include "nsISafeOutputStream.h"
+#include "nsAppDirectoryServiceDefs.h"
+#include "nsIFile.h"
+#include "nsISupportsImpl.h"
+#include "mozilla/Logging.h"
+
+#undef LOG
+mozilla::LazyLogModule gMediaParentLog("MediaParent");
+#define LOG(args) MOZ_LOG(gMediaParentLog, mozilla::LogLevel::Debug, args)
+
+// A file in the profile dir is used to persist mOriginKeys, which anonymize
+// deviceIds by making them unique per origin, so that deviceIds cannot be
+// used as supercookies.
+
+#define ORIGINKEYS_FILE u"enumerate_devices.txt"
+#define ORIGINKEYS_VERSION "1"
+
+namespace mozilla::media {
+
+StaticMutex sOriginKeyStoreStsMutex;
+
+class OriginKeyStore {
+ NS_INLINE_DECL_REFCOUNTING(OriginKeyStore);
+ class OriginKey {
+ public:
+ static const size_t DecodedLength = 18;
+ static const size_t EncodedLength = DecodedLength * 4 / 3;
+
+ explicit OriginKey(const nsACString& aKey,
+                       int64_t aSecondsStamp = 0)  // 0 = not persisted
+ : mKey(aKey), mSecondsStamp(aSecondsStamp) {}
+
+ nsCString mKey; // Base64 encoded.
+ int64_t mSecondsStamp;
+ };
+
+ class OriginKeysTable {
+ public:
+ OriginKeysTable() : mPersistCount(0) {}
+
+ nsresult GetPrincipalKey(const ipc::PrincipalInfo& aPrincipalInfo,
+ nsCString& aResult, bool aPersist = false) {
+ nsAutoCString principalString;
+ PrincipalInfoToString(aPrincipalInfo, principalString);
+
+ OriginKey* key;
+ if (!mKeys.Get(principalString, &key)) {
+ nsCString salt; // Make a new one
+ nsresult rv = GenerateRandomName(salt, OriginKey::EncodedLength);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ key = mKeys.InsertOrUpdate(principalString, MakeUnique<OriginKey>(salt))
+ .get();
+ }
+ if (aPersist && !key->mSecondsStamp) {
+ key->mSecondsStamp = PR_Now() / PR_USEC_PER_SEC;
+ mPersistCount++;
+ }
+ aResult = key->mKey;
+ return NS_OK;
+ }
+
+ void Clear(int64_t aSinceWhen) {
+ // Avoid int64_t* <-> void* casting offset
+ OriginKey since(nsCString(), aSinceWhen / PR_USEC_PER_SEC);
+ for (auto iter = mKeys.Iter(); !iter.Done(); iter.Next()) {
+ auto originKey = iter.UserData();
+ LOG((((originKey->mSecondsStamp >= since.mSecondsStamp)
+ ? "%s: REMOVE %" PRId64 " >= %" PRId64
+ : "%s: KEEP %" PRId64 " < %" PRId64),
+ __FUNCTION__, originKey->mSecondsStamp, since.mSecondsStamp));
+
+ if (originKey->mSecondsStamp >= since.mSecondsStamp) {
+ iter.Remove();
+ }
+ }
+ mPersistCount = 0;
+ }
+
+ private:
+ void PrincipalInfoToString(const ipc::PrincipalInfo& aPrincipalInfo,
+ nsACString& aString) {
+ switch (aPrincipalInfo.type()) {
+ case ipc::PrincipalInfo::TSystemPrincipalInfo:
+ aString.AssignLiteral("[System Principal]");
+ return;
+
+ case ipc::PrincipalInfo::TNullPrincipalInfo: {
+ const ipc::NullPrincipalInfo& info =
+ aPrincipalInfo.get_NullPrincipalInfo();
+ aString.Assign(info.spec());
+ return;
+ }
+
+ case ipc::PrincipalInfo::TContentPrincipalInfo: {
+ const ipc::ContentPrincipalInfo& info =
+ aPrincipalInfo.get_ContentPrincipalInfo();
+ aString.Assign(info.originNoSuffix());
+
+ nsAutoCString suffix;
+ info.attrs().CreateSuffix(suffix);
+ aString.Append(suffix);
+ return;
+ }
+
+ case ipc::PrincipalInfo::TExpandedPrincipalInfo: {
+ const ipc::ExpandedPrincipalInfo& info =
+ aPrincipalInfo.get_ExpandedPrincipalInfo();
+
+ aString.AssignLiteral("[Expanded Principal [");
+
+ for (uint32_t i = 0; i < info.allowlist().Length(); i++) {
+ nsAutoCString str;
+ PrincipalInfoToString(info.allowlist()[i], str);
+
+ if (i != 0) {
+ aString.AppendLiteral(", ");
+ }
+
+ aString.Append(str);
+ }
+
+ aString.AppendLiteral("]]");
+ return;
+ }
+
+ default:
+ MOZ_CRASH("Unknown PrincipalInfo type!");
+ }
+ }
+
+ protected:
+ nsClassHashtable<nsCStringHashKey, OriginKey> mKeys;
+ size_t mPersistCount;
+ };
+
+ class OriginKeysLoader : public OriginKeysTable {
+ public:
+ OriginKeysLoader() = default;
+
+ nsresult GetPrincipalKey(const ipc::PrincipalInfo& aPrincipalInfo,
+ nsCString& aResult, bool aPersist = false) {
+ auto before = mPersistCount;
+ nsresult rv =
+ OriginKeysTable::GetPrincipalKey(aPrincipalInfo, aResult, aPersist);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ if (mPersistCount != before) {
+ Save();
+ }
+ return NS_OK;
+ }
+
+ already_AddRefed<nsIFile> GetFile() {
+ MOZ_ASSERT(mProfileDir);
+ nsCOMPtr<nsIFile> file;
+ nsresult rv = mProfileDir->Clone(getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return nullptr;
+ }
+ file->Append(nsLiteralString(ORIGINKEYS_FILE));
+ return file.forget();
+ }
+
+ // Format of file is key secondsstamp origin (first line is version #):
+ //
+ // 1
+ // rOMAAbFujNwKyIpj4RJ3Wt5Q 1424733961 http://fiddle.jshell.net
+ // rOMAAbFujNwKyIpj4RJ3Wt5Q 1424734841 http://mozilla.github.io
+ // etc.
+
+ nsresult Read() {
+ nsCOMPtr<nsIFile> file = GetFile();
+ if (NS_WARN_IF(!file)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ bool exists;
+ nsresult rv = file->Exists(&exists);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ if (!exists) {
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIInputStream> stream;
+ rv = NS_NewLocalFileInputStream(getter_AddRefs(stream), file);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ nsCOMPtr<nsILineInputStream> i = do_QueryInterface(stream);
+ MOZ_ASSERT(i);
+ MOZ_ASSERT(!mPersistCount);
+
+ nsCString line;
+ bool hasMoreLines;
+ rv = i->ReadLine(line, &hasMoreLines);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ if (!line.EqualsLiteral(ORIGINKEYS_VERSION)) {
+ // If version on disk is newer than we can understand then ignore it.
+ return NS_OK;
+ }
+
+ while (hasMoreLines) {
+ rv = i->ReadLine(line, &hasMoreLines);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ // Read key secondsstamp origin.
+ // Ignore any lines that don't fit format in the comment above exactly.
+ int32_t f = line.FindChar(' ');
+ if (f < 0) {
+ continue;
+ }
+ const nsACString& key = Substring(line, 0, f);
+ const nsACString& s = Substring(line, f + 1);
+ f = s.FindChar(' ');
+ if (f < 0) {
+ continue;
+ }
+ int64_t secondsstamp = Substring(s, 0, f).ToInteger64(&rv);
+ if (NS_FAILED(rv)) {
+ continue;
+ }
+ const nsACString& origin = Substring(s, f + 1);
+
+ // Validate key
+ if (key.Length() != OriginKey::EncodedLength) {
+ continue;
+ }
+ nsCString dummy;
+ rv = Base64Decode(key, dummy);
+ if (NS_FAILED(rv)) {
+ continue;
+ }
+ mKeys.InsertOrUpdate(origin, MakeUnique<OriginKey>(key, secondsstamp));
+ }
+ mPersistCount = mKeys.Count();
+ return NS_OK;
+ }
+
+ nsresult Write() {
+ nsCOMPtr<nsIFile> file = GetFile();
+ if (NS_WARN_IF(!file)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsCOMPtr<nsIOutputStream> stream;
+ nsresult rv =
+ NS_NewSafeLocalFileOutputStream(getter_AddRefs(stream), file);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString versionBuffer;
+ versionBuffer.AppendLiteral(ORIGINKEYS_VERSION);
+ versionBuffer.Append('\n');
+
+ uint32_t count;
+ rv = stream->Write(versionBuffer.Data(), versionBuffer.Length(), &count);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ if (count != versionBuffer.Length()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ for (const auto& entry : mKeys) {
+ const nsACString& origin = entry.GetKey();
+ OriginKey* originKey = entry.GetWeak();
+
+ if (!originKey->mSecondsStamp) {
+          continue;  // don't write keys that were never persisted
+ }
+
+ nsCString originBuffer;
+ originBuffer.Append(originKey->mKey);
+ originBuffer.Append(' ');
+ originBuffer.AppendInt(originKey->mSecondsStamp);
+ originBuffer.Append(' ');
+ originBuffer.Append(origin);
+ originBuffer.Append('\n');
+
+ rv = stream->Write(originBuffer.Data(), originBuffer.Length(), &count);
+ if (NS_WARN_IF(NS_FAILED(rv)) || count != originBuffer.Length()) {
+ break;
+ }
+ }
+
+ nsCOMPtr<nsISafeOutputStream> safeStream = do_QueryInterface(stream);
+ MOZ_ASSERT(safeStream);
+
+ rv = safeStream->Finish();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ return NS_OK;
+ }
+
+ nsresult Load() {
+ nsresult rv = Read();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ Delete();
+ }
+ return rv;
+ }
+
+ nsresult Save() {
+ nsresult rv = Write();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ NS_WARNING("Failed to write data for EnumerateDevices id-persistence.");
+ Delete();
+ }
+ return rv;
+ }
+
+ void Clear(int64_t aSinceWhen) {
+ OriginKeysTable::Clear(aSinceWhen);
+ Delete();
+ Save();
+ }
+
+ nsresult Delete() {
+ nsCOMPtr<nsIFile> file = GetFile();
+ if (NS_WARN_IF(!file)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ nsresult rv = file->Remove(false);
+ if (rv == NS_ERROR_FILE_NOT_FOUND) {
+ return NS_OK;
+ }
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ return NS_OK;
+ }
+
+ void SetProfileDir(nsIFile* aProfileDir) {
+ MOZ_ASSERT(!NS_IsMainThread());
+ bool first = !mProfileDir;
+ mProfileDir = aProfileDir;
+ // Load from disk when we first get a profileDir, but not subsequently.
+ if (first) {
+ Load();
+ }
+ }
+
+ private:
+ nsCOMPtr<nsIFile> mProfileDir;
+ };
+
+ private:
+ static OriginKeyStore* sOriginKeyStore;
+
+ virtual ~OriginKeyStore() {
+ MOZ_ASSERT(NS_IsMainThread());
+ sOriginKeyStore = nullptr;
+ LOG(("%s", __FUNCTION__));
+ }
+
+ public:
+ static RefPtr<OriginKeyStore> Get() {
+ MOZ_ASSERT(NS_IsMainThread());
+ if (!sOriginKeyStore) {
+ sOriginKeyStore = new OriginKeyStore();
+ }
+ return RefPtr(sOriginKeyStore);
+ }
+
+  // Only accessed on stream-transport service (STS) threads
+ OriginKeysLoader mOriginKeys MOZ_GUARDED_BY(sOriginKeyStoreStsMutex);
+ OriginKeysTable mPrivateBrowsingOriginKeys
+ MOZ_GUARDED_BY(sOriginKeyStoreStsMutex);
+};
+OriginKeyStore* OriginKeyStore::sOriginKeyStore = nullptr;
+
+template <class Super>
+mozilla::ipc::IPCResult Parent<Super>::RecvGetPrincipalKey(
+ const ipc::PrincipalInfo& aPrincipalInfo, const bool& aPersist,
+ PMediaParent::GetPrincipalKeyResolver&& aResolve) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ // First, get profile dir.
+
+ nsCOMPtr<nsIFile> profileDir;
+ nsresult rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
+ getter_AddRefs(profileDir));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return IPCResult(this, false);
+ }
+
+  // The resolver has to be called on the main thread, but the key is looked
+  // up on a different thread. We wrap the work in a MozPromise, hand it to
+  // the task queue, and invoke the resolver back on the main thread once the
+  // promise settles.
+
+  // Bounce to the stream-transport thread (a thread pool) to do the actual
+  // file I/O.
+
+ nsCOMPtr<nsIEventTarget> sts =
+ do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID);
+ MOZ_ASSERT(sts);
+ auto taskQueue = TaskQueue::Create(sts.forget(), "RecvGetPrincipalKey");
+ RefPtr<Parent<Super>> that(this);
+
+ InvokeAsync(
+ taskQueue, __func__,
+ [this, that, profileDir, aPrincipalInfo, aPersist]() {
+ MOZ_ASSERT(!NS_IsMainThread());
+
+ StaticMutexAutoLock lock(sOriginKeyStoreStsMutex);
+ mOriginKeyStore->mOriginKeys.SetProfileDir(profileDir);
+
+ nsresult rv;
+ nsAutoCString result;
+ if (IsPrincipalInfoPrivate(aPrincipalInfo)) {
+ rv = mOriginKeyStore->mPrivateBrowsingOriginKeys.GetPrincipalKey(
+ aPrincipalInfo, result);
+ } else {
+ rv = mOriginKeyStore->mOriginKeys.GetPrincipalKey(aPrincipalInfo,
+ result, aPersist);
+ }
+
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return PrincipalKeyPromise::CreateAndReject(rv, __func__);
+ }
+ return PrincipalKeyPromise::CreateAndResolve(result, __func__);
+ })
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [aResolve](const PrincipalKeyPromise::ResolveOrRejectValue& aValue) {
+ if (aValue.IsReject()) {
+ aResolve(""_ns);
+ } else {
+ aResolve(aValue.ResolveValue());
+ }
+ });
+
+ return IPC_OK();
+}
+
+template <class Super>
+mozilla::ipc::IPCResult Parent<Super>::RecvSanitizeOriginKeys(
+ const uint64_t& aSinceWhen, const bool& aOnlyPrivateBrowsing) {
+ MOZ_ASSERT(NS_IsMainThread());
+ nsCOMPtr<nsIFile> profileDir;
+ nsresult rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
+ getter_AddRefs(profileDir));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return IPCResult(this, false);
+ }
+ // Over to stream-transport thread (a thread pool) to do the file io.
+
+ nsCOMPtr<nsIEventTarget> sts =
+ do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID);
+ MOZ_ASSERT(sts);
+ RefPtr<Parent<Super>> that(this);
+
+ rv = sts->Dispatch(
+ NewRunnableFrom(
+ [this, that, profileDir, aSinceWhen, aOnlyPrivateBrowsing]() {
+ MOZ_ASSERT(!NS_IsMainThread());
+ StaticMutexAutoLock lock(sOriginKeyStoreStsMutex);
+ mOriginKeyStore->mPrivateBrowsingOriginKeys.Clear(aSinceWhen);
+ if (!aOnlyPrivateBrowsing) {
+ mOriginKeyStore->mOriginKeys.SetProfileDir(profileDir);
+ mOriginKeyStore->mOriginKeys.Clear(aSinceWhen);
+ }
+ return NS_OK;
+ }),
+ NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return IPCResult(this, false);
+ }
+ return IPC_OK();
+}
+
+template <class Super>
+void Parent<Super>::ActorDestroy(ActorDestroyReason aWhy) {
+ // No more IPC from here
+ mDestroyed = true;
+ LOG(("%s", __FUNCTION__));
+}
+
+template <class Super>
+Parent<Super>::Parent()
+ : mOriginKeyStore(OriginKeyStore::Get()), mDestroyed(false) {
+ LOG(("media::Parent: %p", this));
+}
+
+template <class Super>
+Parent<Super>::~Parent() {
+ NS_ReleaseOnMainThread("Parent<Super>::mOriginKeyStore",
+ mOriginKeyStore.forget());
+ LOG(("~media::Parent: %p", this));
+}
+
+PMediaParent* AllocPMediaParent() {
+ Parent<PMediaParent>* obj = new Parent<PMediaParent>();
+ obj->AddRef();
+ return obj;
+}
+
+bool DeallocPMediaParent(media::PMediaParent* aActor) {
+ static_cast<Parent<PMediaParent>*>(aActor)->Release();
+ return true;
+}
+
+} // namespace mozilla::media
+
+// Instantiate templates to satisfy linker
+template class mozilla::media::Parent<mozilla::media::NonE10s>;
diff --git a/dom/media/systemservices/MediaParent.h b/dom/media/systemservices/MediaParent.h
new file mode 100644
index 0000000000..77cba312f3
--- /dev/null
+++ b/dom/media/systemservices/MediaParent.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MediaParent_h
+#define mozilla_MediaParent_h
+
+#include "MediaChild.h"
+
+#include "mozilla/media/PMediaParent.h"
+
+namespace mozilla::media {
+
+// media::Parent implements the chrome-process side of IPC for the media::Child
+// APIs. A same-process version may also be created to service non-e10s calls.
+
+class OriginKeyStore;
+
+class NonE10s {
+ typedef mozilla::ipc::IProtocol::ActorDestroyReason ActorDestroyReason;
+
+ public:
+ virtual ~NonE10s() = default;
+
+ protected:
+ virtual mozilla::ipc::IPCResult RecvGetPrincipalKey(
+ const mozilla::ipc::PrincipalInfo& aPrincipalInfo, const bool& aPersist,
+ PMediaParent::GetPrincipalKeyResolver&& aResolve) = 0;
+ virtual mozilla::ipc::IPCResult RecvSanitizeOriginKeys(
+ const uint64_t& aSinceWhen, const bool& aOnlyPrivateBrowsing) = 0;
+ virtual void ActorDestroy(ActorDestroyReason aWhy) = 0;
+};
+
+/**
+ * Dummy class to avoid a templated class being passed to the refcounting macro
+ * (see Bug 1334421 for what happens then)
+ */
+class RefCountedParent {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedParent)
+
+ protected:
+ virtual ~RefCountedParent() = default;
+};
+
+// Super = PMediaParent or NonE10s
+
+template <class Super>
+class Parent : public RefCountedParent, public Super {
+ typedef mozilla::ipc::IProtocol::ActorDestroyReason ActorDestroyReason;
+
+ public:
+ virtual mozilla::ipc::IPCResult RecvGetPrincipalKey(
+ const mozilla::ipc::PrincipalInfo& aPrincipalInfo, const bool& aPersist,
+ PMediaParent::GetPrincipalKeyResolver&& aResolve) override;
+ virtual mozilla::ipc::IPCResult RecvSanitizeOriginKeys(
+ const uint64_t& aSinceWhen, const bool& aOnlyPrivateBrowsing) override;
+ virtual void ActorDestroy(ActorDestroyReason aWhy) override;
+
+ Parent();
+
+ private:
+ virtual ~Parent();
+
+ RefPtr<OriginKeyStore> mOriginKeyStore;
+ bool mDestroyed;
+};
+
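+// IPCResult(aSelf, aSuccess) maps failure to IPC_FAIL_NO_REASON() for the real
+// IPC parent, but always to IPC_OK() for the in-process NonE10s parent, which
+// has no IPC channel to tear down.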
+template <class Parent>
+mozilla::ipc::IPCResult IPCResult(Parent* aSelf, bool aSuccess);
+
+template <>
+inline mozilla::ipc::IPCResult IPCResult(Parent<PMediaParent>* aSelf,
+ bool aSuccess) {
+ return aSuccess ? IPC_OK() : IPC_FAIL_NO_REASON(aSelf);
+}
+
+template <>
+inline mozilla::ipc::IPCResult IPCResult(Parent<NonE10s>* aSelf,
+ bool aSuccess) {
+ return IPC_OK();
+}
+
+PMediaParent* AllocPMediaParent();
+bool DeallocPMediaParent(PMediaParent* aActor);
+
+} // namespace mozilla::media
+
+#endif // mozilla_MediaParent_h
diff --git a/dom/media/systemservices/MediaSystemResourceClient.cpp b/dom/media/systemservices/MediaSystemResourceClient.cpp
new file mode 100644
index 0000000000..50695fc76c
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceClient.cpp
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Monitor.h"
+#include "mozilla/ReentrantMonitor.h"
+
+#include "MediaSystemResourceClient.h"
+
+namespace mozilla {
+
+Atomic<uint32_t> MediaSystemResourceClient::sSerialCounter(0);
+
+MediaSystemResourceClient::MediaSystemResourceClient(
+    MediaSystemResourceType aResourceType)
+    : mResourceType(aResourceType),
+ mId(++sSerialCounter),
+ mListener(nullptr),
+ mResourceState(RESOURCE_STATE_START),
+ mIsSync(false),
+ mAcquireSyncWaitMonitor(nullptr),
+ mAcquireSyncWaitDone(nullptr) {
+ mManager = MediaSystemResourceManager::Get();
+ if (mManager) {
+ mManager->Register(this);
+ }
+}
+
+MediaSystemResourceClient::~MediaSystemResourceClient() {
+ ReleaseResource();
+ if (mManager) {
+ mManager->Unregister(this);
+ }
+}
+
+bool MediaSystemResourceClient::SetListener(
+ MediaSystemResourceReservationListener* aListener) {
+ if (!mManager) {
+ return false;
+ }
+ return mManager->SetListener(this, aListener);
+}
+
+void MediaSystemResourceClient::Acquire() {
+ if (!mManager) {
+ return;
+ }
+ mManager->Acquire(this);
+}
+
+bool MediaSystemResourceClient::AcquireSyncNoWait() {
+ if (!mManager) {
+ return false;
+ }
+ return mManager->AcquireSyncNoWait(this);
+}
+
+void MediaSystemResourceClient::ReleaseResource() {
+ if (!mManager) {
+ return;
+ }
+ mManager->ReleaseResource(this);
+}
+
+} // namespace mozilla
diff --git a/dom/media/systemservices/MediaSystemResourceClient.h b/dom/media/systemservices/MediaSystemResourceClient.h
new file mode 100644
index 0000000000..52cf5107e9
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceClient.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceClient_h_)
+# define MediaSystemResourceClient_h_
+
+# include "MediaSystemResourceManager.h"
+# include "MediaSystemResourceTypes.h"
+# include "mozilla/Atomics.h"
+# include "mozilla/media/MediaSystemResourceTypes.h"
+# include "mozilla/Monitor.h"
+# include "mozilla/RefPtr.h"
+
+namespace mozilla {
+
+class MediaSystemResourceManager;
+
+/**
+ * This is a base class for listener callbacks.
+ * This callback is invoked when the media system resource reservation state
+ * is changed.
+ */
+class MediaSystemResourceReservationListener {
+ public:
+ virtual void ResourceReserved() = 0;
+ virtual void ResourceReserveFailed() = 0;
+};
+
+/**
+ * MediaSystemResourceClient is used to reserve a media system resource such
+ * as a hardware decoder. When the system limits how many instances of a
+ * resource may be in use at once, this class mediates the right to use it.
+ */
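+//
+// A minimal usage sketch (assuming a resource type such as
+// MediaSystemResourceType::VIDEO_DECODER exists and `listener` implements
+// MediaSystemResourceReservationListener; illustrative only):
+//
+//   RefPtr<MediaSystemResourceClient> client =
+//       new MediaSystemResourceClient(MediaSystemResourceType::VIDEO_DECODER);
+//   client->SetListener(listener);
+//   client->Acquire();  // async; listener->ResourceReserved() or
+//                       // ResourceReserveFailed() is called when decided
+//   ...
+//   client->ReleaseResource();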
+class MediaSystemResourceClient {
+ public:
+ // Enumeration for the valid decoding states
+ enum ResourceState {
+ RESOURCE_STATE_START,
+ RESOURCE_STATE_WAITING,
+ RESOURCE_STATE_ACQUIRED,
+ RESOURCE_STATE_NOT_ACQUIRED,
+ RESOURCE_STATE_END
+ };
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSystemResourceClient)
+
+  explicit MediaSystemResourceClient(MediaSystemResourceType aResourceType);
+
+ bool SetListener(MediaSystemResourceReservationListener* aListener);
+
+ // Try to acquire media resource asynchronously.
+ // If the resource is used by others, wait until acquired.
+ void Acquire();
+
+  // Try to acquire the media resource synchronously. If the resource is not
+  // immediately available, the acquisition fails. Returns true if the
+  // resource was acquired, false otherwise.
+ //
+ // This function should not be called on ImageBridge thread.
+ // It should be used only for compatibility with legacy code.
+ bool AcquireSyncNoWait();
+
+ void ReleaseResource();
+
+ private:
+ ~MediaSystemResourceClient();
+
+ RefPtr<MediaSystemResourceManager> mManager;
+ const MediaSystemResourceType mResourceType;
+ const uint32_t mId;
+
+ // Modified only by MediaSystemResourceManager.
+ // Accessed and modified with MediaSystemResourceManager::mReentrantMonitor
+ // held.
+ MediaSystemResourceReservationListener* mListener;
+ ResourceState mResourceState;
+ bool mIsSync;
+ ReentrantMonitor* mAcquireSyncWaitMonitor;
+ bool* mAcquireSyncWaitDone;
+
+ static mozilla::Atomic<uint32_t> sSerialCounter;
+
+ friend class MediaSystemResourceManager;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/systemservices/MediaSystemResourceManager.cpp b/dom/media/systemservices/MediaSystemResourceManager.cpp
new file mode 100644
index 0000000000..414ef8e81d
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManager.cpp
@@ -0,0 +1,358 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "MediaSystemResourceManagerChild.h"
+#include "MediaSystemResourceClient.h"
+
+#include "mozilla/layers/ImageBridgeChild.h"
+
+#include "MediaSystemResourceManager.h"
+
+namespace mozilla {
+
+using namespace mozilla::ipc;
+using namespace mozilla::layers;
+
+/* static */
+StaticRefPtr<MediaSystemResourceManager> MediaSystemResourceManager::sSingleton;
+
+/* static */
+MediaSystemResourceManager* MediaSystemResourceManager::Get() {
+ if (sSingleton) {
+ return sSingleton;
+ }
+ MediaSystemResourceManager::Init();
+ return sSingleton;
+}
+
+/* static */
+void MediaSystemResourceManager::Shutdown() {
+ MOZ_ASSERT(InImageBridgeChildThread());
+ if (sSingleton) {
+ sSingleton->CloseIPC();
+ sSingleton = nullptr;
+ }
+}
+
+/* static */
+void MediaSystemResourceManager::Init() {
+ RefPtr<ImageBridgeChild> imageBridge = ImageBridgeChild::GetSingleton();
+ if (!imageBridge) {
+ NS_WARNING("ImageBridge does not exist");
+ return;
+ }
+
+ if (InImageBridgeChildThread()) {
+ if (!sSingleton) {
+#ifdef DEBUG
+ static int timesCreated = 0;
+ timesCreated++;
+ MOZ_ASSERT(timesCreated == 1);
+#endif
+ sSingleton = new MediaSystemResourceManager();
+ }
+ return;
+ }
+
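+  // Not on the ImageBridge thread: ask that thread to create the singleton
+  // and block on a monitor until it has done so.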
+ ReentrantMonitor barrier MOZ_UNANNOTATED("MediaSystemResourceManager::Init");
+ ReentrantMonitorAutoEnter mainThreadAutoMon(barrier);
+ bool done = false;
+
+ RefPtr<Runnable> runnable =
+ NS_NewRunnableFunction("MediaSystemResourceManager::Init", [&]() {
+ if (!sSingleton) {
+ sSingleton = new MediaSystemResourceManager();
+ }
+ ReentrantMonitorAutoEnter childThreadAutoMon(barrier);
+ done = true;
+ barrier.NotifyAll();
+ });
+
+ imageBridge->GetThread()->Dispatch(runnable.forget());
+
+  // Block the calling thread until the singleton has been created on the
+  // ImageBridge thread.
+ while (!done) {
+ barrier.Wait();
+ }
+}
+
+MediaSystemResourceManager::MediaSystemResourceManager()
+ : mReentrantMonitor("MediaSystemResourceManager.mReentrantMonitor"),
+ mShutDown(false),
+ mChild(nullptr) {
+ MOZ_ASSERT(InImageBridgeChildThread());
+ OpenIPC();
+}
+
+MediaSystemResourceManager::~MediaSystemResourceManager() {
+ MOZ_ASSERT(IsIpcClosed());
+}
+
+void MediaSystemResourceManager::OpenIPC() {
+ MOZ_ASSERT(InImageBridgeChildThread());
+ MOZ_ASSERT(!mChild);
+
+ media::PMediaSystemResourceManagerChild* child =
+ ImageBridgeChild::GetSingleton()
+ ->SendPMediaSystemResourceManagerConstructor();
+ mChild = static_cast<media::MediaSystemResourceManagerChild*>(child);
+ mChild->SetManager(this);
+}
+
+void MediaSystemResourceManager::CloseIPC() {
+ MOZ_ASSERT(InImageBridgeChildThread());
+
+ if (!mChild) {
+ return;
+ }
+ mChild->Destroy();
+ mChild = nullptr;
+ mShutDown = true;
+}
+
+void MediaSystemResourceManager::OnIpcClosed() { mChild = nullptr; }
+
+bool MediaSystemResourceManager::IsIpcClosed() { return !mChild; }
+
+void MediaSystemResourceManager::Register(MediaSystemResourceClient* aClient) {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MOZ_ASSERT(aClient);
+ MOZ_ASSERT(!mResourceClients.Contains(aClient->mId));
+
+ mResourceClients.InsertOrUpdate(aClient->mId, aClient);
+}
+
+void MediaSystemResourceManager::Unregister(
+ MediaSystemResourceClient* aClient) {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MOZ_ASSERT(aClient);
+ MOZ_ASSERT(mResourceClients.Contains(aClient->mId));
+ MOZ_ASSERT(mResourceClients.Get(aClient->mId) == aClient);
+
+ mResourceClients.Remove(aClient->mId);
+}
+
+bool MediaSystemResourceManager::SetListener(
+ MediaSystemResourceClient* aClient,
+ MediaSystemResourceReservationListener* aListener) {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MOZ_ASSERT(aClient);
+
+ MediaSystemResourceClient* client = mResourceClients.Get(aClient->mId);
+ MOZ_ASSERT(client);
+
+ if (!client) {
+ return false;
+ }
+ // State Check
+ if (aClient->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_START) {
+ return false;
+ }
+ aClient->mListener = aListener;
+ return true;
+}
+
+void MediaSystemResourceManager::Acquire(MediaSystemResourceClient* aClient) {
+ MOZ_ASSERT(aClient);
+ MOZ_ASSERT(!InImageBridgeChildThread());
+
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MediaSystemResourceClient* client = mResourceClients.Get(aClient->mId);
+ MOZ_ASSERT(client);
+ MOZ_ASSERT(client == aClient);
+
+ aClient->mIsSync = false; // async request
+
+ if (!client) {
+ HandleAcquireResult(aClient->mId, false);
+ return;
+ }
+ // State Check
+ if (aClient->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_START) {
+ HandleAcquireResult(aClient->mId, false);
+ return;
+ }
+ aClient->mResourceState = MediaSystemResourceClient::RESOURCE_STATE_WAITING;
+ ImageBridgeChild::GetSingleton()->GetThread()->Dispatch(
+ NewRunnableMethod<uint32_t>("MediaSystemResourceManager::DoAcquire", this,
+ &MediaSystemResourceManager::DoAcquire,
+ aClient->mId));
+}
+
+bool MediaSystemResourceManager::AcquireSyncNoWait(
+ MediaSystemResourceClient* aClient) {
+ MOZ_ASSERT(aClient);
+ MOZ_ASSERT(!InImageBridgeChildThread());
+
+ ReentrantMonitor barrier MOZ_UNANNOTATED(
+ "MediaSystemResourceManager::AcquireSyncNoWait");
+ ReentrantMonitorAutoEnter autoMon(barrier);
+ bool done = false;
+ {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MediaSystemResourceClient* client = mResourceClients.Get(aClient->mId);
+ MOZ_ASSERT(client);
+ MOZ_ASSERT(client == aClient);
+
+ aClient->mIsSync = true; // sync request
+
+ if (InImageBridgeChildThread()) {
+ HandleAcquireResult(aClient->mId, false);
+ return false;
+ }
+ if (!client || client != aClient) {
+ HandleAcquireResult(aClient->mId, false);
+ return false;
+ }
+ // State Check
+ if (aClient->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_START) {
+ HandleAcquireResult(aClient->mId, false);
+ return false;
+ }
+ // Hold barrier Monitor until acquire task end.
+ aClient->mAcquireSyncWaitMonitor = &barrier;
+ aClient->mAcquireSyncWaitDone = &done;
+ aClient->mResourceState = MediaSystemResourceClient::RESOURCE_STATE_WAITING;
+ }
+
+ ImageBridgeChild::GetSingleton()->GetThread()->Dispatch(
+ NewRunnableMethod<uint32_t>("MediaSystemResourceManager::DoAcquire", this,
+ &MediaSystemResourceManager::DoAcquire,
+ aClient->mId));
+
+ // Block this thread until the dispatched runnable has signaled completion.
+ while (!done) {
+ barrier.Wait();
+ }
+
+ {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ if (aClient->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_ACQUIRED) {
+ return false;
+ }
+ return true;
+ }
+}
+
+void MediaSystemResourceManager::DoAcquire(uint32_t aId) {
+ MOZ_ASSERT(InImageBridgeChildThread());
+ if (mShutDown || !mChild) {
+ HandleAcquireResult(aId, false);
+ return;
+ }
+ {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MediaSystemResourceClient* client = mResourceClients.Get(aId);
+ MOZ_ASSERT(client);
+
+ if (!client || client->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_WAITING) {
+ HandleAcquireResult(aId, false);
+ return;
+ }
+ MOZ_ASSERT(aId == client->mId);
+ bool willWait = !client->mAcquireSyncWaitMonitor;
+ mChild->SendAcquire(client->mId, client->mResourceType, willWait);
+ }
+}
+
+void MediaSystemResourceManager::ReleaseResource(
+ MediaSystemResourceClient* aClient) {
+ MOZ_ASSERT(aClient);
+ {
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MediaSystemResourceClient* client = mResourceClients.Get(aClient->mId);
+ MOZ_ASSERT(client);
+ MOZ_ASSERT(client == aClient);
+
+ if (!client || client != aClient ||
+ aClient->mResourceState ==
+ MediaSystemResourceClient::RESOURCE_STATE_START ||
+ aClient->mResourceState ==
+ MediaSystemResourceClient::RESOURCE_STATE_END) {
+ aClient->mResourceState = MediaSystemResourceClient::RESOURCE_STATE_END;
+ return;
+ }
+
+ aClient->mResourceState = MediaSystemResourceClient::RESOURCE_STATE_END;
+
+ ImageBridgeChild::GetSingleton()->GetThread()->Dispatch(
+ NewRunnableMethod<uint32_t>(
+ "MediaSystemResourceManager::DoRelease", this,
+ &MediaSystemResourceManager::DoRelease, aClient->mId));
+ }
+}
+
+void MediaSystemResourceManager::DoRelease(uint32_t aId) {
+ MOZ_ASSERT(InImageBridgeChildThread());
+ if (mShutDown || !mChild) {
+ return;
+ }
+ mChild->SendRelease(aId);
+}
+
+void MediaSystemResourceManager::RecvResponse(uint32_t aId, bool aSuccess) {
+ HandleAcquireResult(aId, aSuccess);
+}
+
+void MediaSystemResourceManager::HandleAcquireResult(uint32_t aId,
+ bool aSuccess) {
+ if (!InImageBridgeChildThread()) {
+ ImageBridgeChild::GetSingleton()->GetThread()->Dispatch(
+ NewRunnableMethod<uint32_t, bool>(
+ "MediaSystemResourceManager::HandleAcquireResult", this,
+ &MediaSystemResourceManager::HandleAcquireResult, aId, aSuccess));
+ return;
+ }
+
+ ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+ MediaSystemResourceClient* client = mResourceClients.Get(aId);
+ if (!client) {
+ // Client was already unregistered.
+ return;
+ }
+ if (client->mResourceState !=
+ MediaSystemResourceClient::RESOURCE_STATE_WAITING) {
+ return;
+ }
+
+ // Update state
+ if (aSuccess) {
+ client->mResourceState = MediaSystemResourceClient::RESOURCE_STATE_ACQUIRED;
+ } else {
+ client->mResourceState =
+ MediaSystemResourceClient::RESOURCE_STATE_NOT_ACQUIRED;
+ }
+
+ if (client->mIsSync) {
+ if (client->mAcquireSyncWaitMonitor) {
+ // Notify AcquireSync() complete
+ MOZ_ASSERT(client->mAcquireSyncWaitDone);
+ ReentrantMonitorAutoEnter autoMon(*client->mAcquireSyncWaitMonitor);
+ *client->mAcquireSyncWaitDone = true;
+ client->mAcquireSyncWaitMonitor->NotifyAll();
+ client->mAcquireSyncWaitMonitor = nullptr;
+ client->mAcquireSyncWaitDone = nullptr;
+ }
+ } else {
+ // Notify Acquire() result
+ if (client->mListener) {
+ if (aSuccess) {
+ client->mListener->ResourceReserved();
+ } else {
+ client->mListener->ResourceReserveFailed();
+ }
+ }
+ }
+}
+
+} // namespace mozilla
diff --git a/dom/media/systemservices/MediaSystemResourceManager.h b/dom/media/systemservices/MediaSystemResourceManager.h
new file mode 100644
index 0000000000..293595ece0
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManager.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceManager_h_)
+# define MediaSystemResourceManager_h_
+
+# include <queue>
+
+# include "MediaSystemResourceTypes.h"
+# include "mozilla/ReentrantMonitor.h"
+# include "mozilla/StaticPtr.h"
+# include "nsTHashMap.h"
+# include "nsISupportsImpl.h"
+
+namespace mozilla {
+
+namespace media {
+class MediaSystemResourceManagerChild;
+} // namespace media
+
+class MediaSystemResourceClient;
+class MediaSystemResourceReservationListener;
+class ReentrantMonitor;
+class TaskQueue;
+
+/**
+ * Manage media system resource allocation requests within a process.
+ */
+class MediaSystemResourceManager {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSystemResourceManager)
+
+ static MediaSystemResourceManager* Get();
+ static void Init();
+ static void Shutdown();
+
+ void OnIpcClosed();
+
+ void Register(MediaSystemResourceClient* aClient);
+ void Unregister(MediaSystemResourceClient* aClient);
+
+ bool SetListener(MediaSystemResourceClient* aClient,
+ MediaSystemResourceReservationListener* aListener);
+
+ void Acquire(MediaSystemResourceClient* aClient);
+ bool AcquireSyncNoWait(MediaSystemResourceClient* aClient);
+ void ReleaseResource(MediaSystemResourceClient* aClient);
+
+ void RecvResponse(uint32_t aId, bool aSuccess);
+
+ private:
+ MediaSystemResourceManager();
+ virtual ~MediaSystemResourceManager();
+
+ void OpenIPC();
+ void CloseIPC();
+ bool IsIpcClosed();
+
+ void DoAcquire(uint32_t aId);
+
+ void DoRelease(uint32_t aId);
+
+ void HandleAcquireResult(uint32_t aId, bool aSuccess);
+
+ ReentrantMonitor mReentrantMonitor MOZ_UNANNOTATED;
+
+ bool mShutDown;
+
+ media::MediaSystemResourceManagerChild* mChild;
+
+ nsTHashMap<nsUint32HashKey, MediaSystemResourceClient*> mResourceClients;
+
+ static StaticRefPtr<MediaSystemResourceManager> sSingleton;
+};
+
+} // namespace mozilla
+
+#endif
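
To make the acquire/release flow above concrete, here is a minimal consumer sketch. It is illustrative only: it assumes the MediaSystemResourceClient wrapper declared in MediaSystemResourceClient.h is refcounted, is constructed with a MediaSystemResourceType, and forwards SetListener()/Acquire()/ReleaseResource() to the manager, and that the listener callbacks match the ones invoked by HandleAcquireResult() in the .cpp above. The consumer class name is made up.

```cpp
#include "MediaSystemResourceClient.h"
#include "MediaSystemResourceTypes.h"
#include "mozilla/RefPtr.h"

// Illustrative consumer; not real tree code. Acquire() must be called off the
// ImageBridge thread (see MediaSystemResourceManager::Acquire above).
class DecoderResourceUser final
    : public mozilla::MediaSystemResourceReservationListener {
 public:
  DecoderResourceUser()
      : mClient(new mozilla::MediaSystemResourceClient(
            mozilla::MediaSystemResourceType::VIDEO_DECODER)) {
    mClient->SetListener(this);  // assumed client-side wrapper for SetListener()
  }

  void Reserve() { mClient->Acquire(); }  // async; the result arrives via the listener

  void ResourceReserved() override {
    // A decoder slot is available; start decoding.
  }
  void ResourceReserveFailed() override {
    // No slot; fall back to software decoding or retry later.
  }

  ~DecoderResourceUser() { mClient->ReleaseResource(); }

 private:
  RefPtr<mozilla::MediaSystemResourceClient> mClient;
};
```

Acquire() is asynchronous: the request is bounced to the ImageBridge thread, forwarded over PMediaSystemResourceManager, and the result is delivered back through the listener on that thread.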
diff --git a/dom/media/systemservices/MediaSystemResourceManagerChild.cpp b/dom/media/systemservices/MediaSystemResourceManagerChild.cpp
new file mode 100644
index 0000000000..ff671fdf8d
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManagerChild.cpp
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaSystemResourceManager.h"
+
+#include "MediaSystemResourceManagerChild.h"
+
+namespace mozilla::media {
+
+MediaSystemResourceManagerChild::MediaSystemResourceManagerChild()
+ : mDestroyed(false), mManager(nullptr) {}
+
+MediaSystemResourceManagerChild::~MediaSystemResourceManagerChild() = default;
+
+mozilla::ipc::IPCResult MediaSystemResourceManagerChild::RecvResponse(
+ const uint32_t& aId, const bool& aSuccess) {
+ if (mManager) {
+ mManager->RecvResponse(aId, aSuccess);
+ }
+ return IPC_OK();
+}
+
+void MediaSystemResourceManagerChild::ActorDestroy(
+ ActorDestroyReason aActorDestroyReason) {
+ MOZ_ASSERT(!mDestroyed);
+ if (mManager) {
+ mManager->OnIpcClosed();
+ }
+ mDestroyed = true;
+}
+
+void MediaSystemResourceManagerChild::Destroy() {
+ if (mDestroyed) {
+ return;
+ }
+ SendRemoveResourceManager();
+ // WARNING: |this| is dead, hands off
+}
+
+} // namespace mozilla::media
diff --git a/dom/media/systemservices/MediaSystemResourceManagerChild.h b/dom/media/systemservices/MediaSystemResourceManagerChild.h
new file mode 100644
index 0000000000..66bf76cdd3
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManagerChild.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceManagerChild_h_)
+# define MediaSystemResourceManagerChild_h_
+
+# include "mozilla/media/PMediaSystemResourceManagerChild.h"
+# include "nsISupportsImpl.h"
+
+namespace mozilla {
+
+class MediaSystemResourceManager;
+
+namespace ipc {
+class BackgroundChildImpl;
+} // namespace ipc
+
+namespace media {
+
+/**
+ * Handle MediaSystemResourceManager's IPC
+ */
+class MediaSystemResourceManagerChild final
+ : public PMediaSystemResourceManagerChild {
+ friend class PMediaSystemResourceManagerChild;
+
+ public:
+ struct ResourceListener {
+ /* The resource is reserved and can be granted.
+ * The client can allocate the requested resource.
+ */
+ virtual void resourceReserved() = 0;
+ /* The resource is not reserved any more.
+ * The client should release the resource as soon as possible if the
+ * resource is still being held.
+ */
+ virtual void resourceCanceled() = 0;
+ };
+
+ MediaSystemResourceManagerChild();
+ virtual ~MediaSystemResourceManagerChild();
+
+ void Destroy();
+
+ void SetManager(MediaSystemResourceManager* aManager) { mManager = aManager; }
+
+ protected:
+ mozilla::ipc::IPCResult RecvResponse(const uint32_t& aId,
+ const bool& aSuccess);
+
+ private:
+ void ActorDestroy(ActorDestroyReason aActorDestroyReason) override;
+
+ bool mDestroyed;
+ MediaSystemResourceManager* mManager;
+
+ friend class mozilla::ipc::BackgroundChildImpl;
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/systemservices/MediaSystemResourceManagerParent.cpp b/dom/media/systemservices/MediaSystemResourceManagerParent.cpp
new file mode 100644
index 0000000000..ec20079abc
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManagerParent.cpp
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Unused.h"
+#include "mozilla/layers/PImageBridgeParent.h"
+
+#include "MediaSystemResourceManagerParent.h"
+
+namespace mozilla::media {
+
+using namespace ipc;
+
+MediaSystemResourceManagerParent::MediaSystemResourceManagerParent()
+ : mDestroyed(false) {
+ mMediaSystemResourceService = MediaSystemResourceService::Get();
+}
+
+MediaSystemResourceManagerParent::~MediaSystemResourceManagerParent() {
+ MOZ_ASSERT(mDestroyed);
+}
+
+mozilla::ipc::IPCResult MediaSystemResourceManagerParent::RecvAcquire(
+ const uint32_t& aId, const MediaSystemResourceType& aResourceType,
+ const bool& aWillWait) {
+ mResourceRequests.WithEntryHandle(aId, [&](auto&& request) {
+ MOZ_ASSERT(!request);
+ if (request) {
+ // Send fail response
+ mozilla::Unused << SendResponse(aId, false /* fail */);
+ return;
+ }
+
+ request.Insert(MakeUnique<MediaSystemResourceRequest>(aId, aResourceType));
+ mMediaSystemResourceService->Acquire(this, aId, aResourceType, aWillWait);
+ });
+
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult MediaSystemResourceManagerParent::RecvRelease(
+ const uint32_t& aId) {
+ MediaSystemResourceRequest* request = mResourceRequests.Get(aId);
+ if (!request) {
+ return IPC_OK();
+ }
+
+ mMediaSystemResourceService->ReleaseResource(this, aId,
+ request->mResourceType);
+ mResourceRequests.Remove(aId);
+ return IPC_OK();
+}
+
+mozilla::ipc::IPCResult
+MediaSystemResourceManagerParent::RecvRemoveResourceManager() {
+ IProtocol* mgr = Manager();
+ if (!PMediaSystemResourceManagerParent::Send__delete__(this)) {
+ return IPC_FAIL_NO_REASON(mgr);
+ }
+ return IPC_OK();
+}
+
+void MediaSystemResourceManagerParent::ActorDestroy(
+ ActorDestroyReason aReason) {
+ MOZ_ASSERT(!mDestroyed);
+
+ // Release all resource requests of the MediaSystemResourceManagerParent.
+ // Clears all remaining pointers to this object.
+ mMediaSystemResourceService->ReleaseResource(this);
+
+ mDestroyed = true;
+}
+
+} // namespace mozilla::media
diff --git a/dom/media/systemservices/MediaSystemResourceManagerParent.h b/dom/media/systemservices/MediaSystemResourceManagerParent.h
new file mode 100644
index 0000000000..29ed219f2e
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceManagerParent.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceManagerParent_h_)
+# define MediaSystemResourceManagerParent_h_
+
+# include "MediaSystemResourceManager.h"
+# include "MediaSystemResourceService.h"
+# include "MediaSystemResourceTypes.h"
+# include "mozilla/media/PMediaSystemResourceManagerParent.h"
+
+namespace mozilla::media {
+
+/**
+ * Handle MediaSystemResourceManager's IPC
+ */
+class MediaSystemResourceManagerParent final
+ : public PMediaSystemResourceManagerParent {
+ friend class PMediaSystemResourceManagerParent;
+
+ public:
+ MediaSystemResourceManagerParent();
+ virtual ~MediaSystemResourceManagerParent();
+
+ protected:
+ mozilla::ipc::IPCResult RecvAcquire(
+ const uint32_t& aId, const MediaSystemResourceType& aResourceType,
+ const bool& aWillWait);
+
+ mozilla::ipc::IPCResult RecvRelease(const uint32_t& aId);
+
+ mozilla::ipc::IPCResult RecvRemoveResourceManager();
+
+ private:
+ void ActorDestroy(ActorDestroyReason aActorDestroyReason) override;
+
+ struct MediaSystemResourceRequest {
+ MediaSystemResourceRequest()
+ : mId(-1), mResourceType(MediaSystemResourceType::INVALID_RESOURCE) {}
+ MediaSystemResourceRequest(uint32_t aId,
+ MediaSystemResourceType aResourceType)
+ : mId(aId), mResourceType(aResourceType) {}
+ int32_t mId;
+ MediaSystemResourceType mResourceType;
+ };
+
+ bool mDestroyed;
+
+ RefPtr<MediaSystemResourceService> mMediaSystemResourceService;
+
+ nsClassHashtable<nsUint32HashKey, MediaSystemResourceRequest>
+ mResourceRequests;
+};
+
+} // namespace mozilla::media
+
+#endif
diff --git a/dom/media/systemservices/MediaSystemResourceMessageUtils.h b/dom/media/systemservices/MediaSystemResourceMessageUtils.h
new file mode 100644
index 0000000000..f06da1467d
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceMessageUtils.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceMessageUtils_h_)
+# define MediaSystemResourceMessageUtils_h_
+
+# include "ipc/EnumSerializer.h"
+# include "MediaSystemResourceTypes.h"
+
+namespace IPC {
+
+template <>
+struct ParamTraits<mozilla::MediaSystemResourceType>
+ : public ContiguousEnumSerializer<
+ mozilla::MediaSystemResourceType,
+ mozilla::MediaSystemResourceType::VIDEO_DECODER,
+ mozilla::MediaSystemResourceType::INVALID_RESOURCE> {};
+
+} // namespace IPC
+
+#endif
diff --git a/dom/media/systemservices/MediaSystemResourceService.cpp b/dom/media/systemservices/MediaSystemResourceService.cpp
new file mode 100644
index 0000000000..88c4566e76
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceService.cpp
@@ -0,0 +1,222 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaSystemResourceManagerParent.h"
+#include "mozilla/layers/CompositorThread.h"
+#include "mozilla/Unused.h"
+
+#include "MediaSystemResourceService.h"
+
+using namespace mozilla::layers;
+
+namespace mozilla {
+
+/* static */
+StaticRefPtr<MediaSystemResourceService> MediaSystemResourceService::sSingleton;
+
+/* static */
+MediaSystemResourceService* MediaSystemResourceService::Get() {
+ if (sSingleton) {
+ return sSingleton;
+ }
+ Init();
+ return sSingleton;
+}
+
+/* static */
+void MediaSystemResourceService::Init() {
+ if (!sSingleton) {
+ sSingleton = new MediaSystemResourceService();
+ }
+}
+
+/* static */
+void MediaSystemResourceService::Shutdown() {
+ if (sSingleton) {
+ sSingleton->Destroy();
+ sSingleton = nullptr;
+ }
+}
+
+MediaSystemResourceService::MediaSystemResourceService() : mDestroyed(false) {
+ MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
+}
+
+MediaSystemResourceService::~MediaSystemResourceService() = default;
+
+void MediaSystemResourceService::Destroy() { mDestroyed = true; }
+
+void MediaSystemResourceService::Acquire(
+ media::MediaSystemResourceManagerParent* aParent, uint32_t aId,
+ MediaSystemResourceType aResourceType, bool aWillWait) {
+ MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
+ MOZ_ASSERT(aParent);
+
+ if (mDestroyed) {
+ return;
+ }
+
+ MediaSystemResource* resource =
+ mResources.Get(static_cast<uint32_t>(aResourceType));
+
+ if (!resource || resource->mResourceCount == 0) {
+ // Resource does not exist
+ // Send fail response
+ mozilla::Unused << aParent->SendResponse(aId, false /* fail */);
+ return;
+ }
+
+ // Try to acquire a resource
+ if (resource->mAcquiredRequests.size() < resource->mResourceCount) {
+ // Resource is available
+ resource->mAcquiredRequests.push_back(
+ MediaSystemResourceRequest(aParent, aId));
+ // Send success response
+ mozilla::Unused << aParent->SendResponse(aId, true /* success */);
+ return;
+ }
+
+ if (!aWillWait) {
+ // Resource is not available and do not wait.
+ // Send fail response
+ mozilla::Unused << aParent->SendResponse(aId, false /* fail */);
+ return;
+ }
+ // Wait until acquire.
+ resource->mWaitingRequests.push_back(
+ MediaSystemResourceRequest(aParent, aId));
+}
+
+void MediaSystemResourceService::ReleaseResource(
+ media::MediaSystemResourceManagerParent* aParent, uint32_t aId,
+ MediaSystemResourceType aResourceType) {
+ MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
+ MOZ_ASSERT(aParent);
+
+ if (mDestroyed) {
+ return;
+ }
+
+ MediaSystemResource* resource =
+ mResources.Get(static_cast<uint32_t>(aResourceType));
+
+ if (!resource || resource->mResourceCount == 0) {
+ // Resource does not exist
+ return;
+ }
+ RemoveRequest(aParent, aId, aResourceType);
+ UpdateRequests(aResourceType);
+}
+
+void MediaSystemResourceService::ReleaseResource(
+ media::MediaSystemResourceManagerParent* aParent) {
+ MOZ_ASSERT(aParent);
+
+ if (mDestroyed) {
+ return;
+ }
+
+ for (const uint32_t& key : mResources.Keys()) {
+ RemoveRequests(aParent, static_cast<MediaSystemResourceType>(key));
+ UpdateRequests(static_cast<MediaSystemResourceType>(key));
+ }
+}
+
+void MediaSystemResourceService::RemoveRequest(
+ media::MediaSystemResourceManagerParent* aParent, uint32_t aId,
+ MediaSystemResourceType aResourceType) {
+ MOZ_ASSERT(aParent);
+
+ MediaSystemResource* resource =
+ mResources.Get(static_cast<uint32_t>(aResourceType));
+ if (!resource) {
+ return;
+ }
+
+ std::deque<MediaSystemResourceRequest>::iterator it;
+ std::deque<MediaSystemResourceRequest>& acquiredRequests =
+ resource->mAcquiredRequests;
+ for (it = acquiredRequests.begin(); it != acquiredRequests.end(); it++) {
+ if (((*it).mParent == aParent) && ((*it).mId == aId)) {
+ acquiredRequests.erase(it);
+ return;
+ }
+ }
+
+ std::deque<MediaSystemResourceRequest>& waitingRequests =
+ resource->mWaitingRequests;
+ for (it = waitingRequests.begin(); it != waitingRequests.end(); it++) {
+ if (((*it).mParent == aParent) && ((*it).mId == aId)) {
+ waitingRequests.erase(it);
+ return;
+ }
+ }
+}
+
+void MediaSystemResourceService::RemoveRequests(
+ media::MediaSystemResourceManagerParent* aParent,
+ MediaSystemResourceType aResourceType) {
+ MOZ_ASSERT(aParent);
+
+ MediaSystemResource* resource =
+ mResources.Get(static_cast<uint32_t>(aResourceType));
+
+ if (!resource || resource->mResourceCount == 0) {
+ // Resource does not exist
+ return;
+ }
+
+ std::deque<MediaSystemResourceRequest>::iterator it;
+ std::deque<MediaSystemResourceRequest>& acquiredRequests =
+ resource->mAcquiredRequests;
+ for (it = acquiredRequests.begin(); it != acquiredRequests.end();) {
+ if ((*it).mParent == aParent) {
+ it = acquiredRequests.erase(it);
+ } else {
+ it++;
+ }
+ }
+
+ std::deque<MediaSystemResourceRequest>& waitingRequests =
+ resource->mWaitingRequests;
+ for (it = waitingRequests.begin(); it != waitingRequests.end();) {
+ if ((*it).mParent == aParent) {
+ it = waitingRequests.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+void MediaSystemResourceService::UpdateRequests(
+ MediaSystemResourceType aResourceType) {
+ MediaSystemResource* resource =
+ mResources.Get(static_cast<uint32_t>(aResourceType));
+
+ if (!resource || resource->mResourceCount == 0) {
+ // Resource does not exist
+ return;
+ }
+
+ std::deque<MediaSystemResourceRequest>& acquiredRequests =
+ resource->mAcquiredRequests;
+ std::deque<MediaSystemResourceRequest>& waitingRequests =
+ resource->mWaitingRequests;
+
+ while ((acquiredRequests.size() < resource->mResourceCount) &&
+ (!waitingRequests.empty())) {
+ MediaSystemResourceRequest& request = waitingRequests.front();
+ MOZ_ASSERT(request.mParent);
+ // Send response
+ mozilla::Unused << request.mParent->SendResponse(request.mId,
+ true /* success */);
+ // Move request to mAcquiredRequests
+ acquiredRequests.push_back(waitingRequests.front());
+ waitingRequests.pop_front();
+ }
+}
+
+} // namespace mozilla
diff --git a/dom/media/systemservices/MediaSystemResourceService.h b/dom/media/systemservices/MediaSystemResourceService.h
new file mode 100644
index 0000000000..8a75a6cafd
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceService.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceService_h_)
+# define MediaSystemResourceService_h_
+
+# include <deque>
+
+# include "MediaSystemResourceTypes.h"
+# include "mozilla/StaticPtr.h"
+# include "nsClassHashtable.h"
+
+namespace mozilla {
+
+namespace media {
+class MediaSystemResourceManagerParent;
+} // namespace media
+
+/**
+ * Manage media system resource allocation requests within the system.
+ */
+class MediaSystemResourceService {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSystemResourceService)
+
+ static MediaSystemResourceService* Get();
+ static void Init();
+ static void Shutdown();
+
+ void Acquire(media::MediaSystemResourceManagerParent* aParent, uint32_t aId,
+ MediaSystemResourceType aResourceType, bool aWillWait);
+
+ void ReleaseResource(media::MediaSystemResourceManagerParent* aParent,
+ uint32_t aId, MediaSystemResourceType aResourceType);
+
+ void ReleaseResource(media::MediaSystemResourceManagerParent* aParent);
+
+ private:
+ MediaSystemResourceService();
+ ~MediaSystemResourceService();
+
+ struct MediaSystemResourceRequest {
+ MediaSystemResourceRequest() : mParent(nullptr), mId(-1) {}
+ MediaSystemResourceRequest(media::MediaSystemResourceManagerParent* aParent,
+ uint32_t aId)
+ : mParent(aParent), mId(aId) {}
+ media::MediaSystemResourceManagerParent* mParent;
+ uint32_t mId;
+ };
+
+ struct MediaSystemResource {
+ MediaSystemResource() : mResourceCount(0) {}
+ explicit MediaSystemResource(uint32_t aResourceCount)
+ : mResourceCount(aResourceCount) {}
+
+ std::deque<MediaSystemResourceRequest> mWaitingRequests;
+ std::deque<MediaSystemResourceRequest> mAcquiredRequests;
+ uint32_t mResourceCount;
+ };
+
+ void Destroy();
+
+ void RemoveRequest(media::MediaSystemResourceManagerParent* aParent,
+ uint32_t aId, MediaSystemResourceType aResourceType);
+
+ void RemoveRequests(media::MediaSystemResourceManagerParent* aParent,
+ MediaSystemResourceType aResourceType);
+
+ void UpdateRequests(MediaSystemResourceType aResourceType);
+
+ bool mDestroyed;
+
+ nsClassHashtable<nsUint32HashKey, MediaSystemResource> mResources;
+
+ static StaticRefPtr<MediaSystemResourceService> sSingleton;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/systemservices/MediaSystemResourceTypes.h b/dom/media/systemservices/MediaSystemResourceTypes.h
new file mode 100644
index 0000000000..d294c2b364
--- /dev/null
+++ b/dom/media/systemservices/MediaSystemResourceTypes.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaSystemResourceTypes_h_)
+# define MediaSystemResourceTypes_h_
+
+namespace mozilla {
+
+enum class MediaSystemResourceType : uint32_t {
+ VIDEO_DECODER = 0,
+ AUDIO_DECODER, // Not supported currently.
+ VIDEO_ENCODER,
+ AUDIO_ENCODER, // Not supported currently.
+ CAMERA, // Not supported currently.
+ INVALID_RESOURCE,
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/systemservices/MediaTaskUtils.h b/dom/media/systemservices/MediaTaskUtils.h
new file mode 100644
index 0000000000..cbe464e015
--- /dev/null
+++ b/dom/media/systemservices/MediaTaskUtils.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MediaTaskUtils_h
+#define mozilla_MediaTaskUtils_h
+
+#include "nsThreadUtils.h"
+
+// The main reason this file is separate from MediaUtils.h is this include:
+#include "base/task.h"
+
+namespace mozilla {
+namespace media {
+
+/* media::NewTaskFrom() - Create a Task from a lambda.
+ *
+ * Similar to media::NewRunnableFrom() - Create an nsRunnable from a lambda,
+ * but ignore the return value from the lambda.
+ *
+ * Prefer NS_NewRunnableFunction(), which provides a specific name, unless the
+ * lambda really must have a non-void return value that is to be ignored.
+ */
+
+template <typename OnRunType>
+class LambdaTask : public Runnable {
+ public:
+ explicit LambdaTask(OnRunType&& aOnRun)
+ : Runnable("media::LambdaTask"), mOnRun(std::move(aOnRun)) {}
+
+ private:
+ NS_IMETHOD
+ Run() override {
+ mOnRun();
+ return NS_OK;
+ }
+ OnRunType mOnRun;
+};
+
+template <typename OnRunType>
+already_AddRefed<LambdaTask<OnRunType>> NewTaskFrom(OnRunType&& aOnRun) {
+ typedef LambdaTask<OnRunType> LambdaType;
+ RefPtr<LambdaType> lambda = new LambdaType(std::forward<OnRunType>(aOnRun));
+ return lambda.forget();
+}
+
+} // namespace media
+} // namespace mozilla
+
+#endif // mozilla_MediaTaskUtils_h
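
A minimal usage sketch of NewTaskFrom(), covering the case the comment above reserves for it: a lambda whose non-void return value should simply be dropped. FrameCache and PostEviction are hypothetical names used only for illustration.

```cpp
#include "MediaTaskUtils.h"
#include "mozilla/RefPtr.h"
#include "nsIEventTarget.h"
#include "nsISupportsImpl.h"

// Hypothetical refcounted type, used only for illustration.
struct FrameCache {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FrameCache)
  size_t Evict() { return 0; }  // returns how many entries were dropped
 private:
  ~FrameCache() = default;
};

void PostEviction(nsIEventTarget* aTarget, RefPtr<FrameCache> aCache) {
  // Evict() returns a value we want to ignore; LambdaTask::Run() drops it.
  aTarget->Dispatch(
      mozilla::media::NewTaskFrom(
          [cache = std::move(aCache)]() { return cache->Evict(); }),
      NS_DISPATCH_NORMAL);
}
```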
diff --git a/dom/media/systemservices/MediaUtils.cpp b/dom/media/systemservices/MediaUtils.cpp
new file mode 100644
index 0000000000..fc72b0699a
--- /dev/null
+++ b/dom/media/systemservices/MediaUtils.cpp
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaUtils.h"
+#include "mozilla/Services.h"
+
+namespace mozilla::media {
+
+nsCOMPtr<nsIAsyncShutdownClient> GetShutdownBarrier() {
+ nsCOMPtr<nsIAsyncShutdownService> svc = services::GetAsyncShutdownService();
+ if (!svc) {
+ // We can fail to get the shutdown service if we're already shutting down.
+ return nullptr;
+ }
+
+ nsCOMPtr<nsIAsyncShutdownClient> barrier;
+ nsresult rv = svc->GetProfileBeforeChange(getter_AddRefs(barrier));
+ if (!barrier) {
+ // We are probably in a content process. We need to do cleanup at
+ // XPCOM shutdown in leakchecking builds.
+ rv = svc->GetXpcomWillShutdown(getter_AddRefs(barrier));
+ }
+ MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+ MOZ_RELEASE_ASSERT(barrier);
+ return barrier;
+}
+
+nsCOMPtr<nsIAsyncShutdownClient> MustGetShutdownBarrier() {
+ nsCOMPtr<nsIAsyncShutdownClient> barrier = GetShutdownBarrier();
+ MOZ_RELEASE_ASSERT(barrier);
+ return barrier;
+}
+
+NS_IMPL_ISUPPORTS(ShutdownBlocker, nsIAsyncShutdownBlocker)
+
+namespace {
+class MediaEventBlocker : public ShutdownBlocker {
+ public:
+ explicit MediaEventBlocker(nsString aName)
+ : ShutdownBlocker(std::move(aName)) {}
+
+ NS_IMETHOD
+ BlockShutdown(nsIAsyncShutdownClient* aProfileBeforeChange) override {
+ mShutdownEvent.Notify();
+ return NS_OK;
+ }
+
+ MediaEventSource<void>& ShutdownEvent() { return mShutdownEvent; }
+
+ private:
+ MediaEventProducer<void> mShutdownEvent;
+};
+
+class RefCountedTicket {
+ RefPtr<MediaEventBlocker> mBlocker;
+ MediaEventForwarder<void> mShutdownEventForwarder;
+
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedTicket)
+
+ RefCountedTicket()
+ : mShutdownEventForwarder(GetMainThreadSerialEventTarget()) {}
+
+ void AddBlocker(const nsString& aName, const nsString& aFileName,
+ int32_t aLineNr) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!mBlocker);
+ mBlocker = MakeAndAddRef<MediaEventBlocker>(aName);
+ mShutdownEventForwarder.Forward(mBlocker->ShutdownEvent());
+ GetShutdownBarrier()->AddBlocker(mBlocker.get(), aFileName, aLineNr, aName);
+ }
+
+ MediaEventSource<void>& ShutdownEvent() { return mShutdownEventForwarder; }
+
+ protected:
+ virtual ~RefCountedTicket() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(mBlocker);
+ GetShutdownBarrier()->RemoveBlocker(mBlocker.get());
+ mShutdownEventForwarder.DisconnectAll();
+ }
+};
+
+class ShutdownBlockingTicketImpl : public ShutdownBlockingTicket {
+ private:
+ RefPtr<RefCountedTicket> mTicket;
+
+ public:
+ ShutdownBlockingTicketImpl(nsString aName, nsString aFileName,
+ int32_t aLineNr)
+ : mTicket(MakeAndAddRef<RefCountedTicket>()) {
+ aName.AppendPrintf(" - %p", this);
+ NS_DispatchToMainThread(NS_NewRunnableFunction(
+ __func__, [ticket = mTicket, name = std::move(aName),
+ fileName = std::move(aFileName), lineNr = aLineNr] {
+ ticket->AddBlocker(name, fileName, lineNr);
+ }));
+ }
+
+ ~ShutdownBlockingTicketImpl() {
+ NS_ReleaseOnMainThread(__func__, mTicket.forget(), true);
+ }
+
+ MediaEventSource<void>& ShutdownEvent() override {
+ return mTicket->ShutdownEvent();
+ }
+};
+} // namespace
+
+UniquePtr<ShutdownBlockingTicket> ShutdownBlockingTicket::Create(
+ nsString aName, nsString aFileName, int32_t aLineNr) {
+ return WrapUnique(new ShutdownBlockingTicketImpl(
+ std::move(aName), std::move(aFileName), aLineNr));
+}
+
+} // namespace mozilla::media
diff --git a/dom/media/systemservices/MediaUtils.h b/dom/media/systemservices/MediaUtils.h
new file mode 100644
index 0000000000..e63148483c
--- /dev/null
+++ b/dom/media/systemservices/MediaUtils.h
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MediaUtils_h
+#define mozilla_MediaUtils_h
+
+#include <map>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/UniquePtr.h"
+#include "MediaEventSource.h"
+#include "nsCOMPtr.h"
+#include "nsIAsyncShutdown.h"
+#include "nsISupportsImpl.h"
+#include "nsProxyRelease.h"
+#include "nsThreadUtils.h"
+
+class nsIEventTarget;
+
+namespace mozilla::media {
+
+/* media::NewRunnableFrom() - Create a Runnable from a lambda.
+ *
+ * Passing variables (closures) to an async function is clunky with Runnable:
+ *
+ * void Foo()
+ * {
+ * class FooRunnable : public Runnable
+ * {
+ * public:
+ * FooRunnable(const Bar &aBar) : mBar(aBar) {}
+ * NS_IMETHOD Run() override
+ * {
+ * // Use mBar
+ * }
+ * private:
+ * RefPtr<Bar> mBar;
+ * };
+ *
+ * RefPtr<Bar> bar = new Bar();
+ * NS_DispatchToMainThread(new FooRunnable(bar));
+ * }
+ *
+ * It's worse with more variables. Lambdas have a leg up with variable capture:
+ *
+ * void Foo()
+ * {
+ * RefPtr<Bar> bar = new Bar();
+ * NS_DispatchToMainThread(media::NewRunnableFrom([bar]() mutable {
+ * // use bar
+ * }));
+ * }
+ *
+ * Capture is by-copy by default, so the RefPtr 'bar' is safely copied for
+ * access on the other thread (threadsafe refcounting in bar is assumed).
+ *
+ * The 'mutable' keyword is only needed for non-const access to bar.
+ */
+
+template <typename OnRunType>
+class LambdaRunnable : public Runnable {
+ public:
+ explicit LambdaRunnable(OnRunType&& aOnRun)
+ : Runnable("media::LambdaRunnable"), mOnRun(std::move(aOnRun)) {}
+
+ private:
+ NS_IMETHODIMP
+ Run() override { return mOnRun(); }
+ OnRunType mOnRun;
+};
+
+template <typename OnRunType>
+already_AddRefed<LambdaRunnable<OnRunType>> NewRunnableFrom(
+ OnRunType&& aOnRun) {
+ typedef LambdaRunnable<OnRunType> LambdaType;
+ RefPtr<LambdaType> lambda = new LambdaType(std::forward<OnRunType>(aOnRun));
+ return lambda.forget();
+}
+
+/* media::Refcountable - Add threadsafe ref-counting to something that isn't.
+ *
+ * Often, reference counting is the most practical way to share an object with
+ * another thread without imposing lifetime restrictions, even if there's
+ * otherwise no concurrent access happening on the object. For instance, an
+ * algorithm on another thread may find it more expedient to modify a passed-in
+ * object, rather than pass expensive copies back and forth.
+ *
+ * Lists in particular often aren't ref-countable, yet are expensive to copy,
+ * e.g. nsTArray<RefPtr<Foo>>. Refcountable can be used to make such objects
+ * (or owning smart-pointers to such objects) refcountable.
+ *
+ * Technical limitation: A template specialization is needed for types whose
+ * constructors take arguments. Please add them below (UniquePtr covers a lot
+ * of ground though).
+ */
+
+class RefcountableBase {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefcountableBase)
+ protected:
+ virtual ~RefcountableBase() = default;
+};
+
+template <typename T>
+class Refcountable : public T, public RefcountableBase {
+ public:
+ Refcountable& operator=(T&& aOther) {
+ T::operator=(std::move(aOther));
+ return *this;
+ }
+
+ Refcountable& operator=(T& aOther) {
+ T::operator=(aOther);
+ return *this;
+ }
+};
+
+template <typename T>
+class Refcountable<UniquePtr<T>> : public UniquePtr<T>,
+ public RefcountableBase {
+ public:
+ explicit Refcountable(T* aPtr) : UniquePtr<T>(aPtr) {}
+};
+
+template <>
+class Refcountable<bool> : public RefcountableBase {
+ public:
+ explicit Refcountable(bool aValue) : mValue(aValue) {}
+
+ Refcountable& operator=(bool aOther) {
+ mValue = aOther;
+ return *this;
+ }
+
+ Refcountable& operator=(const Refcountable& aOther) {
+ mValue = aOther.mValue;
+ return *this;
+ }
+
+ explicit operator bool() const { return mValue; }
+
+ private:
+ bool mValue;
+};
+
+/*
+ * Async shutdown helpers
+ */
+
+nsCOMPtr<nsIAsyncShutdownClient> GetShutdownBarrier();
+
+// Like GetShutdownBarrier but will release assert that the result is not null.
+nsCOMPtr<nsIAsyncShutdownClient> MustGetShutdownBarrier();
+
+class ShutdownBlocker : public nsIAsyncShutdownBlocker {
+ public:
+ ShutdownBlocker(nsString aName) : mName(std::move(aName)) {}
+
+ NS_IMETHOD
+ BlockShutdown(nsIAsyncShutdownClient* aProfileBeforeChange) override = 0;
+
+ NS_IMETHOD GetName(nsAString& aName) override {
+ aName = mName;
+ return NS_OK;
+ }
+
+ NS_IMETHOD GetState(nsIPropertyBag**) override { return NS_OK; }
+
+ NS_DECL_ISUPPORTS
+ protected:
+ virtual ~ShutdownBlocker() = default;
+
+ private:
+ const nsString mName;
+};
+
+/**
+ * A convenience class representing a "ticket" that keeps the process from
+ * shutting down until it is destructed. It does this by blocking
+ * xpcom-will-shutdown. Constructed and destroyed on any thread.
+ */
+class ShutdownBlockingTicket {
+ public:
+ /**
+ * Construct with an arbitrary name, __FILE__ and __LINE__.
+ * Note that __FILE__ needs to be made wide, typically through
+ * NS_LITERAL_STRING_FROM_CSTRING(__FILE__).
+ */
+ static UniquePtr<ShutdownBlockingTicket> Create(nsString aName,
+ nsString aFileName,
+ int32_t aLineNr);
+
+ virtual ~ShutdownBlockingTicket() = default;
+
+ /**
+ * MediaEvent that gets notified once upon xpcom-will-shutdown.
+ */
+ virtual MediaEventSource<void>& ShutdownEvent() = 0;
+};
+
+/**
+ * Await convenience methods to block until the promise has been resolved or
+ * rejected. The Resolve/Reject functions, while called on a different thread,
+ * behave as if they were running on the current thread thanks to the memory barrier
+ * provided by the monitor.
+ * For now Await can only be used with an exclusive MozPromise if passed a
+ * Resolve/Reject function.
+ * Await() can *NOT* be called from a task queue/nsISerialEventTarget used for
+ * resolving/rejecting aPromise, otherwise things will deadlock.
+ */
+template <typename ResolveValueType, typename RejectValueType,
+ typename ResolveFunction, typename RejectFunction>
+void Await(already_AddRefed<nsIEventTarget> aPool,
+ RefPtr<MozPromise<ResolveValueType, RejectValueType, true>> aPromise,
+ ResolveFunction&& aResolveFunction,
+ RejectFunction&& aRejectFunction) {
+ RefPtr<TaskQueue> taskQueue =
+ TaskQueue::Create(std::move(aPool), "MozPromiseAwait");
+ Monitor mon MOZ_UNANNOTATED(__func__);
+ bool done = false;
+
+ aPromise->Then(
+ taskQueue, __func__,
+ [&](ResolveValueType&& aResolveValue) {
+ MonitorAutoLock lock(mon);
+ aResolveFunction(std::forward<ResolveValueType>(aResolveValue));
+ done = true;
+ mon.Notify();
+ },
+ [&](RejectValueType&& aRejectValue) {
+ MonitorAutoLock lock(mon);
+ aRejectFunction(std::forward<RejectValueType>(aRejectValue));
+ done = true;
+ mon.Notify();
+ });
+
+ MonitorAutoLock lock(mon);
+ while (!done) {
+ mon.Wait();
+ }
+}
+
+template <typename ResolveValueType, typename RejectValueType, bool Excl>
+typename MozPromise<ResolveValueType, RejectValueType,
+ Excl>::ResolveOrRejectValue
+Await(already_AddRefed<nsIEventTarget> aPool,
+ RefPtr<MozPromise<ResolveValueType, RejectValueType, Excl>> aPromise) {
+ RefPtr<TaskQueue> taskQueue =
+ TaskQueue::Create(std::move(aPool), "MozPromiseAwait");
+ Monitor mon MOZ_UNANNOTATED(__func__);
+ bool done = false;
+
+ typename MozPromise<ResolveValueType, RejectValueType,
+ Excl>::ResolveOrRejectValue val;
+ aPromise->Then(
+ taskQueue, __func__,
+ [&](ResolveValueType aResolveValue) {
+ val.SetResolve(std::move(aResolveValue));
+ MonitorAutoLock lock(mon);
+ done = true;
+ mon.Notify();
+ },
+ [&](RejectValueType aRejectValue) {
+ val.SetReject(std::move(aRejectValue));
+ MonitorAutoLock lock(mon);
+ done = true;
+ mon.Notify();
+ });
+
+ MonitorAutoLock lock(mon);
+ while (!done) {
+ mon.Wait();
+ }
+
+ return val;
+}
+
+/**
+ * Similar to Await, takes an array of promises of the same type.
+ * MozPromise::All is used to handle the resolution/rejection of the promises.
+ */
+template <typename ResolveValueType, typename RejectValueType,
+ typename ResolveFunction, typename RejectFunction>
+void AwaitAll(
+ already_AddRefed<nsIEventTarget> aPool,
+ nsTArray<RefPtr<MozPromise<ResolveValueType, RejectValueType, true>>>&
+ aPromises,
+ ResolveFunction&& aResolveFunction, RejectFunction&& aRejectFunction) {
+ typedef MozPromise<ResolveValueType, RejectValueType, true> Promise;
+ RefPtr<nsIEventTarget> pool = aPool;
+ RefPtr<TaskQueue> taskQueue =
+ TaskQueue::Create(do_AddRef(pool), "MozPromiseAwaitAll");
+ RefPtr<typename Promise::AllPromiseType> p =
+ Promise::All(taskQueue, aPromises);
+ Await(pool.forget(), p, std::move(aResolveFunction),
+ std::move(aRejectFunction));
+}
+
+// Note: this only works with an exclusive MozPromise, as Promise::All would
+// otherwise attempt to copy the nsTArrays, which is disallowed.
+template <typename ResolveValueType, typename RejectValueType>
+typename MozPromise<ResolveValueType, RejectValueType,
+ true>::AllPromiseType::ResolveOrRejectValue
+AwaitAll(already_AddRefed<nsIEventTarget> aPool,
+ nsTArray<RefPtr<MozPromise<ResolveValueType, RejectValueType, true>>>&
+ aPromises) {
+ typedef MozPromise<ResolveValueType, RejectValueType, true> Promise;
+ RefPtr<nsIEventTarget> pool = aPool;
+ RefPtr<TaskQueue> taskQueue =
+ TaskQueue::Create(do_AddRef(pool), "MozPromiseAwaitAll");
+ RefPtr<typename Promise::AllPromiseType> p =
+ Promise::All(taskQueue, aPromises);
+ return Await(pool.forget(), p);
+}
+
+} // namespace mozilla::media
+
+#endif // mozilla_MediaUtils_h
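
To illustrate the two-argument Await() overload declared above, here is a small sketch that blocks a worker thread until a promise settles. The pool name, thread limit and promise alias are arbitrary choices for the example.

```cpp
#include "MediaUtils.h"
#include "mozilla/MozPromise.h"
#include "mozilla/SharedThreadPool.h"
#include "nsString.h"

using InitPromise = mozilla::MozPromise<bool, nsresult, /* IsExclusive = */ true>;

// Blocks the calling thread; per the comment above, never call this from the
// thread (or task queue) that will resolve/reject aPromise.
bool WaitForInit(RefPtr<InitPromise> aPromise) {
  RefPtr<mozilla::SharedThreadPool> pool =
      mozilla::SharedThreadPool::Get("AwaitExample"_ns, 1);
  auto result = mozilla::media::Await(do_AddRef(pool), std::move(aPromise));
  return result.IsResolve() && result.ResolveValue();
}
```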
diff --git a/dom/media/systemservices/OSXRunLoopSingleton.cpp b/dom/media/systemservices/OSXRunLoopSingleton.cpp
new file mode 100644
index 0000000000..6dea084e27
--- /dev/null
+++ b/dom/media/systemservices/OSXRunLoopSingleton.cpp
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OSXRunLoopSingleton.h"
+#include <mozilla/StaticMutex.h>
+
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/AudioHardware.h>
+#include <CoreAudio/HostTime.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+static bool gRunLoopSet = false;
+static mozilla::StaticMutex gMutex MOZ_UNANNOTATED;
+
+void mozilla_set_coreaudio_notification_runloop_if_needed() {
+ mozilla::StaticMutexAutoLock lock(gMutex);
+ if (gRunLoopSet) {
+ return;
+ }
+
+ /* This is needed so that AudioUnit listeners get called on this thread, and
+ * not the main thread. If we don't do that, they are not called, or a crash
+ * occurs, depending on the OSX version. */
+ AudioObjectPropertyAddress runloop_address = {
+ kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+
+ CFRunLoopRef run_loop = nullptr;
+
+ OSStatus r;
+ r = AudioObjectSetPropertyData(kAudioObjectSystemObject, &runloop_address, 0,
+ NULL, sizeof(CFRunLoopRef), &run_loop);
+ if (r != noErr) {
+ NS_WARNING(
+ "Could not make global CoreAudio notifications use their own thread.");
+ }
+
+ gRunLoopSet = true;
+}
diff --git a/dom/media/systemservices/OSXRunLoopSingleton.h b/dom/media/systemservices/OSXRunLoopSingleton.h
new file mode 100644
index 0000000000..10e7b0153f
--- /dev/null
+++ b/dom/media/systemservices/OSXRunLoopSingleton.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef OSXRUNLOOPSINGLETON_H_
+#define OSXRUNLOOPSINGLETON_H_
+
+#include <mozilla/Types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* This function tells CoreAudio to use its own thread for device change
+ * notifications, and can be called from any thread without external
+ * synchronization. */
+void MOZ_EXPORT mozilla_set_coreaudio_notification_runloop_if_needed();
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // OSXRUNLOOPSINGLETON_H_
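
A short sketch of the intended call pattern: install the run loop override once, then register CoreAudio listeners, which will subsequently fire on CoreAudio's own notification thread. The listener and function names are illustrative.

```cpp
#include <CoreAudio/AudioHardware.h>
#include "OSXRunLoopSingleton.h"

// Hypothetical listener: runs on CoreAudio's notification thread once the run
// loop override below has been installed.
static OSStatus OnDevicesChanged(AudioObjectID, UInt32,
                                 const AudioObjectPropertyAddress*, void*) {
  // Re-enumerate capture/playback devices here.
  return noErr;
}

void WatchForDeviceChanges() {
  // Safe to call from any thread; only the first call has an effect.
  mozilla_set_coreaudio_notification_runloop_if_needed();

  const AudioObjectPropertyAddress kDevices = {kAudioHardwarePropertyDevices,
                                               kAudioObjectPropertyScopeGlobal,
                                               kAudioObjectPropertyElementMaster};
  AudioObjectAddPropertyListener(kAudioObjectSystemObject, &kDevices,
                                 &OnDevicesChanged, nullptr);
}
```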
diff --git a/dom/media/systemservices/PCameras.ipdl b/dom/media/systemservices/PCameras.ipdl
new file mode 100644
index 0000000000..535702cd68
--- /dev/null
+++ b/dom/media/systemservices/PCameras.ipdl
@@ -0,0 +1,93 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+include protocol PContent;
+include protocol PBackground;
+
+include PBackgroundSharedTypes;
+
+using mozilla::camera::CaptureEngine from "mozilla/media/CamerasTypes.h";
+
+namespace mozilla {
+namespace camera {
+
+// IPC analog for webrtc::VideoCaptureCapability
+struct VideoCaptureCapability
+{
+ int width;
+ int height;
+ int maxFPS;
+ int videoType;
+ bool interlaced;
+};
+
+
+// IPC analog for webrtc::VideoFrame
+// the described buffer is transported separately in a Shmem
+// See VideoFrameUtils.h
+struct VideoFrameProperties
+{
+ // Size of image data within the ShMem,
+ // the ShMem is at least this large
+ uint32_t bufferSize;
+ // From webrtc::VideoFrame
+ uint32_t timeStamp;
+ int64_t ntpTimeMs;
+ int64_t renderTimeMs;
+ // See webrtc/**/rotation.h
+ int rotation;
+ int yAllocatedSize;
+ int uAllocatedSize;
+ int vAllocatedSize;
+ // From webrtc::VideoFrameBuffer
+ int width;
+ int height;
+ int yStride;
+ int uStride;
+ int vStride;
+};
+
+[ManualDealloc, ChildImpl=virtual, ParentImpl=virtual]
+async protocol PCameras
+{
+ manager PBackground;
+
+child:
+ // transfers ownership of |buffer| from parent to child
+ async DeliverFrame(CaptureEngine capEngine, int streamId,
+ Shmem buffer, VideoFrameProperties props);
+ async DeviceChange();
+ async ReplyNumberOfCaptureDevices(int deviceCount);
+ async ReplyNumberOfCapabilities(int capabilityCount);
+ async ReplyAllocateCapture(int captureId);
+ async ReplyGetCaptureCapability(VideoCaptureCapability cap);
+ async ReplyGetCaptureDevice(nsCString device_name, nsCString device_id, bool scary);
+ async ReplyFailure();
+ async ReplySuccess();
+ async __delete__();
+
+parent:
+ async NumberOfCaptureDevices(CaptureEngine engine);
+ async NumberOfCapabilities(CaptureEngine engine, nsCString deviceUniqueIdUTF8);
+
+ async GetCaptureCapability(CaptureEngine engine, nsCString unique_idUTF8,
+ int capability_number);
+ async GetCaptureDevice(CaptureEngine engine, int deviceIndex);
+
+ async AllocateCapture(CaptureEngine engine, nsCString unique_idUTF8,
+ uint64_t windowID);
+ async ReleaseCapture(CaptureEngine engine, int captureId);
+ async StartCapture(CaptureEngine engine, int captureId,
+ VideoCaptureCapability capability);
+ async FocusOnSelectedSource(CaptureEngine engine, int captureId);
+ async StopCapture(CaptureEngine engine, int captureId);
+ // transfers frame back
+ async ReleaseFrame(Shmem s);
+
+ // setup camera engine
+ async EnsureInitialized(CaptureEngine engine);
+};
+
+} // namespace camera
+} // namespace mozilla
diff --git a/dom/media/systemservices/PMedia.ipdl b/dom/media/systemservices/PMedia.ipdl
new file mode 100644
index 0000000000..c3d8476b5f
--- /dev/null
+++ b/dom/media/systemservices/PMedia.ipdl
@@ -0,0 +1,55 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+include protocol PContent;
+
+include PBackgroundSharedTypes;
+
+include "mozilla/media/MediaChild.h";
+
+namespace mozilla {
+namespace media {
+
+[ManualDealloc, ChildImpl="Child", ParentImpl=virtual]
+protocol PMedia
+{
+ manager PContent;
+
+parent:
+ /**
+ * Requests a potentially persistent unique secret key for each principal.
+ * Has no expiry, but is cleared by age along with cookies.
+ * This is needed by mediaDevices.enumerateDevices() to produce persistent
+ * deviceIds that won't work cross-origin.
+ *
+ * If this OriginAttributes dictionary has the privateBrowsing flag set to
+ * false, a key for this origin is returned from a primary pool of temporary
+ * in-memory keys and persistent keys read from disk. If no key exists, a
+ * temporary one is created.
+ * If aPersist is true and the key is temporary, the key is promoted to
+ * persistent. Once persistent, a key cannot become temporary again.
+ *
+ * If the OriginAttributes dictionary has the privateBrowsing flag set to
+ * true, a different key for this origin is returned from a secondary pool
+ * that is never persisted to disk, and aPersist is ignored.
+ */
+ async GetPrincipalKey(PrincipalInfo aPrincipal, bool aPersist) returns(nsCString aKey);
+
+ /**
+ * Clear the per-origin list of persistent deviceIds stored for enumerateDevices().
+ * Fire and forget.
+ *
+ * aSinceWhen - milliseconds since 1 January 1970 00:00:00 UTC. 0 = clear all
+ *
+ * aOnlyPrivateBrowsing - if true then only purge the separate in-memory
+ * per-origin list used in Private Browsing.
+ */
+ async SanitizeOriginKeys(uint64_t aSinceWhen, bool aOnlyPrivateBrowsing);
+
+child:
+ async __delete__();
+};
+
+} // namespace media
+} // namespace mozilla
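
As a rough sketch of the child-side call pattern for the GetPrincipalKey message above: IPDL's "returns(...)" handling generates a SendGetPrincipalKey that takes resolve/reject callbacks. The exact generated signatures are assumed here, the actor accessor is left to the caller, and the real consumer lives in MediaChild.cpp.

```cpp
#include "mozilla/media/PMediaChild.h"
#include "mozilla/ipc/PBackgroundSharedTypes.h"

// aActor is the content process's PMedia child actor (obtained elsewhere).
void RequestPrincipalKey(mozilla::media::PMediaChild* aActor,
                         const mozilla::ipc::PrincipalInfo& aPrincipal) {
  aActor->SendGetPrincipalKey(
      aPrincipal, /* aPersist = */ true,
      [](const nsCString& aKey) {
        // Use the per-origin key to scope deviceIds for enumerateDevices().
      },
      [](mozilla::ipc::ResponseRejectReason&& aReason) {
        // The channel went away (e.g. during shutdown); treat as "no key".
      });
}
```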
diff --git a/dom/media/systemservices/PMediaSystemResourceManager.ipdl b/dom/media/systemservices/PMediaSystemResourceManager.ipdl
new file mode 100644
index 0000000000..a682de60bf
--- /dev/null
+++ b/dom/media/systemservices/PMediaSystemResourceManager.ipdl
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+include protocol PImageBridge;
+include "mozilla/media/MediaSystemResourceMessageUtils.h";
+
+using mozilla::MediaSystemResourceType from "mozilla/media/MediaSystemResourceTypes.h";
+
+namespace mozilla {
+namespace media {
+
+/*
+ * The PMediaSystemResourceManager is a sub-protocol in PImageBridge
+ */
+[ManualDealloc]
+sync protocol PMediaSystemResourceManager
+{
+ manager PImageBridge;
+
+child:
+ async Response(uint32_t aId, bool aSuccess);
+ async __delete__();
+
+parent:
+ async Acquire(uint32_t aId, MediaSystemResourceType aResourceType, bool aWillWait);
+ async Release(uint32_t aId);
+
+ /**
+ * Asynchronously tell the parent side to remove the PMediaSystemResourceManager.
+ */
+ async RemoveResourceManager();
+};
+
+} // namespace media
+} // namespace mozilla
+
diff --git a/dom/media/systemservices/ShmemPool.cpp b/dom/media/systemservices/ShmemPool.cpp
new file mode 100644
index 0000000000..c39ed5e790
--- /dev/null
+++ b/dom/media/systemservices/ShmemPool.cpp
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ShmemPool.h"
+
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Logging.h"
+
+mozilla::LazyLogModule sShmemPoolLog("ShmemPool");
+
+#define SHMEMPOOL_LOG_VERBOSE(args) \
+ MOZ_LOG(sShmemPoolLog, mozilla::LogLevel::Verbose, args)
+
+namespace mozilla {
+
+ShmemPool::ShmemPool(size_t aPoolSize, PoolType aPoolType)
+ : mPoolType(aPoolType),
+ mMutex("mozilla::ShmemPool"),
+ mPoolFree(aPoolSize),
+ mErrorLogged(false)
+#ifdef DEBUG
+ ,
+ mMaxPoolUse(0)
+#endif
+{
+ mShmemPool.SetLength(aPoolSize);
+}
+
+mozilla::ShmemBuffer ShmemPool::GetIfAvailable(size_t aSize) {
+ MutexAutoLock lock(mMutex);
+
+ // Pool is empty, don't block caller.
+ if (mPoolFree == 0) {
+ if (!mErrorLogged) {
+ // log "out of pool" once as error to avoid log spam
+ mErrorLogged = true;
+ SHMEMPOOL_LOG_ERROR(
+ ("ShmemPool is empty, future occurrences "
+ "will be logged as warnings"));
+ } else {
+ SHMEMPOOL_LOG_WARN(("ShmemPool is empty"));
+ }
+ // The returned buffer is not initialized, so callers will treat it as an error.
+ return ShmemBuffer();
+ }
+
+ ShmemBuffer& res = mShmemPool[mPoolFree - 1];
+
+ if (!res.mInitialized) {
+ SHMEMPOOL_LOG(("No free preallocated Shmem"));
+ return ShmemBuffer();
+ }
+
+ MOZ_ASSERT(res.mShmem.IsWritable(), "Shmem in pool is not writable?");
+
+ if (res.mShmem.Size<uint8_t>() < aSize) {
+ SHMEMPOOL_LOG(("Free Shmem but not of the right size"));
+ return ShmemBuffer();
+ }
+
+ mPoolFree--;
+#ifdef DEBUG
+ size_t poolUse = mShmemPool.Length() - mPoolFree;
+ if (poolUse > mMaxPoolUse) {
+ mMaxPoolUse = poolUse;
+ SHMEMPOOL_LOG(
+ ("Maximum ShmemPool use increased: %zu buffers", mMaxPoolUse));
+ }
+#endif
+ return std::move(res);
+}
+
+void ShmemPool::Put(ShmemBuffer&& aShmem) {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mPoolFree < mShmemPool.Length());
+ mShmemPool[mPoolFree] = std::move(aShmem);
+ mPoolFree++;
+#ifdef DEBUG
+ size_t poolUse = mShmemPool.Length() - mPoolFree;
+ if (poolUse > 0) {
+ SHMEMPOOL_LOG_VERBOSE(("ShmemPool usage reduced to %zu buffers", poolUse));
+ }
+#endif
+}
+
+ShmemPool::~ShmemPool() {
+#ifdef DEBUG
+ for (size_t i = 0; i < mShmemPool.Length(); i++) {
+ MOZ_ASSERT(!mShmemPool[i].Valid());
+ }
+#endif
+}
+
+} // namespace mozilla
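
The typical round-trip through the pool looks roughly like the sketch below: an IPC actor fills a pooled Shmem, ships it to the peer, and returns the Shmem to the pool once the peer hands it back. ExampleActor, SendDeliverFrame and RecvReleaseFrame are hypothetical; only the ShmemPool/ShmemBuffer calls come from the code above and the header that follows.

```cpp
#include <cstring>
#include "mozilla/ShmemPool.h"
#include "mozilla/Unused.h"

void ExampleActor::DeliverFrame(const uint8_t* aData, size_t aSize) {
  // GetIfAvailable() never allocates; it fails fast if no pooled Shmem of a
  // sufficient size is free.
  mozilla::ShmemBuffer buffer = mPool.GetIfAvailable(aSize);
  if (!buffer.Valid()) {
    return;  // drop the frame rather than block
  }
  memcpy(buffer.GetBytes(), aData, aSize);
  mozilla::Unused << SendDeliverFrame(std::move(buffer.Get()));
}

mozilla::ipc::IPCResult ExampleActor::RecvReleaseFrame(
    mozilla::ipc::Shmem&& aShmem) {
  // Hand ownership back to the pool for reuse.
  mPool.Put(mozilla::ShmemBuffer(aShmem));
  return IPC_OK();
}

void ExampleActor::ActorDestroy(ActorDestroyReason) {
  // Shmems must be deallocated through the actor that allocated them.
  mPool.Cleanup(this);
}
```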
diff --git a/dom/media/systemservices/ShmemPool.h b/dom/media/systemservices/ShmemPool.h
new file mode 100644
index 0000000000..e62ccff24a
--- /dev/null
+++ b/dom/media/systemservices/ShmemPool.h
@@ -0,0 +1,181 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ShmemPool_h
+#define mozilla_ShmemPool_h
+
+#include "mozilla/Mutex.h"
+#include "mozilla/ipc/Shmem.h"
+#include "nsTArray.h"
+
+extern mozilla::LazyLogModule sShmemPoolLog;
+#define SHMEMPOOL_LOG(args) \
+ MOZ_LOG(sShmemPoolLog, mozilla::LogLevel::Debug, args)
+#define SHMEMPOOL_LOG_WARN(args) \
+ MOZ_LOG(sShmemPoolLog, mozilla::LogLevel::Warning, args)
+#define SHMEMPOOL_LOG_ERROR(args) \
+ MOZ_LOG(sShmemPoolLog, mozilla::LogLevel::Error, args)
+
+namespace mozilla {
+
+class ShmemPool;
+
+class ShmemBuffer {
+ public:
+ ShmemBuffer() : mInitialized(false) {}
+ explicit ShmemBuffer(mozilla::ipc::Shmem aShmem) {
+ mInitialized = true;
+ mShmem = aShmem;
+ }
+
+ ShmemBuffer(ShmemBuffer&& rhs) {
+ mInitialized = rhs.mInitialized;
+ mShmem = std::move(rhs.mShmem);
+ }
+
+ ShmemBuffer& operator=(ShmemBuffer&& rhs) {
+ MOZ_ASSERT(&rhs != this, "self-moves are prohibited");
+ mInitialized = rhs.mInitialized;
+ mShmem = std::move(rhs.mShmem);
+ return *this;
+ }
+
+ // No copies allowed
+ ShmemBuffer(const ShmemBuffer&) = delete;
+ ShmemBuffer& operator=(const ShmemBuffer&) = delete;
+
+ bool Valid() { return mInitialized; }
+
+ uint8_t* GetBytes() { return mShmem.get<uint8_t>(); }
+
+ mozilla::ipc::Shmem& Get() { return mShmem; }
+
+ private:
+ friend class ShmemPool;
+
+ bool mInitialized;
+ mozilla::ipc::Shmem mShmem;
+};
+
+class ShmemPool final {
+ public:
+ enum class PoolType { StaticPool, DynamicPool };
+ explicit ShmemPool(size_t aPoolSize,
+ PoolType aPoolType = PoolType::StaticPool);
+ ~ShmemPool();
+  // Get/GetIfAvailable differ in which threads they can run on.
+  // GetIfAvailable can run on any thread but won't allocate if a buffer of
+  // the right size isn't available.
+ ShmemBuffer GetIfAvailable(size_t aSize);
+ void Put(ShmemBuffer&& aShmem);
+
+ // We need to use the allocation/deallocation functions
+ // of a specific IPC child/parent instance.
+ template <class T>
+ void Cleanup(T* aInstance) {
+ MutexAutoLock lock(mMutex);
+ for (size_t i = 0; i < mShmemPool.Length(); i++) {
+ if (mShmemPool[i].mInitialized) {
+ aInstance->DeallocShmem(mShmemPool[i].Get());
+ mShmemPool[i].mInitialized = false;
+ }
+ }
+ }
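+
+  // Illustrative usage sketch (names such as `mPool`, `neededSize` and
+  // `srcData` are placeholders; `this` is assumed to be an IPC actor that
+  // exposes AllocShmem/DeallocShmem, e.g. a PCameras parent or child):
+  //
+  //   ShmemBuffer buffer = mPool.Get(this, neededSize);
+  //   if (!buffer.Valid()) {
+  //     return;  // static pool exhausted or allocation failed
+  //   }
+  //   memcpy(buffer.GetBytes(), srcData, neededSize);
+  //   // ... hand buffer.Get() to IPC, then mPool.Put(std::move(buffer))
+  //   // when the Shmem is returned; on actor teardown: mPool.Cleanup(this);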
+
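+  // Default allocates through the actor's AllocShmem(); Unsafe uses
+  // AllocUnsafeShmem() (roughly, a Shmem without the usual single-owner
+  // access enforcement; see mozilla::ipc::Shmem).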
+ enum class AllocationPolicy { Default, Unsafe };
+
+ template <class T>
+ ShmemBuffer Get(T* aInstance, size_t aSize,
+ AllocationPolicy aPolicy = AllocationPolicy::Default) {
+ MutexAutoLock lock(mMutex);
+
+ // Pool is empty, don't block caller.
+ if (mPoolFree == 0 && mPoolType == PoolType::StaticPool) {
+ if (!mErrorLogged) {
+ // log "out of pool" once as error to avoid log spam
+ mErrorLogged = true;
+ SHMEMPOOL_LOG_ERROR(
+ ("ShmemPool is empty, future occurrences "
+ "will be logged as warnings"));
+ } else {
+ SHMEMPOOL_LOG_WARN(("ShmemPool is empty"));
+ }
+ // This isn't initialized, so will be understood as an error.
+ return ShmemBuffer();
+ }
+ if (mPoolFree == 0) {
+ MOZ_ASSERT(mPoolType == PoolType::DynamicPool);
+ SHMEMPOOL_LOG(("Dynamic ShmemPool empty, allocating extra Shmem buffer"));
+ ShmemBuffer newBuffer;
+ mShmemPool.InsertElementAt(0, std::move(newBuffer));
+ mPoolFree++;
+ }
+
+ ShmemBuffer& res = mShmemPool[mPoolFree - 1];
+
+ if (!res.mInitialized) {
+ SHMEMPOOL_LOG(("Initializing new Shmem in pool"));
+ if (!AllocateShmem(aInstance, aSize, res, aPolicy)) {
+ SHMEMPOOL_LOG(("Failure allocating new Shmem buffer"));
+ return ShmemBuffer();
+ }
+ res.mInitialized = true;
+ }
+
+ MOZ_DIAGNOSTIC_ASSERT(res.mShmem.IsWritable(),
+ "Shmem in Pool is not writable?");
+
+  // Prepare the buffer, increasing its size if needed. (We never shrink, as
+  // we don't maintain separately sized pools and we don't want to keep
+  // reallocating.)
+ if (res.mShmem.Size<char>() < aSize) {
+ SHMEMPOOL_LOG(("Size change/increase in Shmem Pool"));
+ aInstance->DeallocShmem(res.mShmem);
+ res.mInitialized = false;
+ // this may fail; always check return value
+ if (!AllocateShmem(aInstance, aSize, res, aPolicy)) {
+ SHMEMPOOL_LOG(("Failure allocating resized Shmem buffer"));
+ return ShmemBuffer();
+ } else {
+ res.mInitialized = true;
+ }
+ }
+
+ MOZ_ASSERT(res.mShmem.IsWritable(),
+ "Shmem in Pool is not writable post resize?");
+
+ mPoolFree--;
+#ifdef DEBUG
+ size_t poolUse = mShmemPool.Length() - mPoolFree;
+ if (poolUse > mMaxPoolUse) {
+ mMaxPoolUse = poolUse;
+ SHMEMPOOL_LOG(
+ ("Maximum ShmemPool use increased: %zu buffers", mMaxPoolUse));
+ }
+#endif
+ return std::move(res);
+ }
+
+ private:
+ template <class T>
+ bool AllocateShmem(T* aInstance, size_t aSize, ShmemBuffer& aRes,
+ AllocationPolicy aPolicy) {
+ return (aPolicy == AllocationPolicy::Default &&
+ aInstance->AllocShmem(aSize, &aRes.mShmem)) ||
+ (aPolicy == AllocationPolicy::Unsafe &&
+ aInstance->AllocUnsafeShmem(aSize, &aRes.mShmem));
+ }
+ const PoolType mPoolType;
+ Mutex mMutex MOZ_UNANNOTATED;
+ size_t mPoolFree;
+ bool mErrorLogged;
+#ifdef DEBUG
+ size_t mMaxPoolUse;
+#endif
+ nsTArray<ShmemBuffer> mShmemPool;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ShmemPool_h
diff --git a/dom/media/systemservices/VideoEngine.cpp b/dom/media/systemservices/VideoEngine.cpp
new file mode 100644
index 0000000000..6a074daec2
--- /dev/null
+++ b/dom/media/systemservices/VideoEngine.cpp
@@ -0,0 +1,260 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VideoEngine.h"
+#include "libwebrtcglue/SystemTime.h"
+#include "video_engine/desktop_capture_impl.h"
+#include "system_wrappers/include/clock.h"
+#ifdef WEBRTC_ANDROID
+# include "modules/video_capture/video_capture.h"
+#endif
+
+#ifdef MOZ_WIDGET_ANDROID
+# include "mozilla/jni/Utils.h"
+#endif
+
+namespace mozilla::camera {
+
+#undef LOG
+#undef LOG_ENABLED
+mozilla::LazyLogModule gVideoEngineLog("VideoEngine");
+#define LOG(args) MOZ_LOG(gVideoEngineLog, mozilla::LogLevel::Debug, args)
+#define LOG_ENABLED() MOZ_LOG_TEST(gVideoEngineLog, mozilla::LogLevel::Debug)
+
+#if defined(ANDROID)
+int VideoEngine::SetAndroidObjects() {
+ LOG(("%s", __PRETTY_FUNCTION__));
+
+ JavaVM* const javaVM = mozilla::jni::GetVM();
+ if (!javaVM || webrtc::SetCaptureAndroidVM(javaVM) != 0) {
+ LOG(("Could not set capture Android VM"));
+ return -1;
+ }
+# ifdef WEBRTC_INCLUDE_INTERNAL_VIDEO_RENDER
+ if (webrtc::SetRenderAndroidVM(javaVM) != 0) {
+ LOG(("Could not set render Android VM"));
+ return -1;
+ }
+# endif
+ return 0;
+}
+#endif
+
+int32_t VideoEngine::CreateVideoCapture(const char* deviceUniqueIdUTF8) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ MOZ_ASSERT(deviceUniqueIdUTF8);
+
+ int32_t id = GenerateId();
+ LOG(("CaptureDeviceInfo.type=%s id=%d", mCaptureDevInfo.TypeName(), id));
+
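+  // If a capture module for this device already exists, reuse it: hand out a
+  // fresh id that maps (via mIdMap) to the existing entry.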
+ for (auto& it : mCaps) {
+ if (it.second.VideoCapture() &&
+ it.second.VideoCapture()->CurrentDeviceName() &&
+ strcmp(it.second.VideoCapture()->CurrentDeviceName(),
+ deviceUniqueIdUTF8) == 0) {
+ mIdMap.emplace(id, it.first);
+ return id;
+ }
+ }
+
+ CaptureEntry entry = {-1, nullptr};
+
+ if (mCaptureDevInfo.type == CaptureDeviceType::Camera) {
+ entry = CaptureEntry(
+ id, webrtc::VideoCaptureFactory::Create(deviceUniqueIdUTF8));
+ if (entry.VideoCapture()) {
+ entry.VideoCapture()->SetApplyRotation(true);
+ }
+ } else {
+#ifndef WEBRTC_ANDROID
+# ifdef MOZ_X11
+ webrtc::VideoCaptureModule* captureModule;
+ auto type = mCaptureDevInfo.type;
+ nsresult result = NS_DispatchToMainThread(
+ media::NewRunnableFrom([&captureModule, id, deviceUniqueIdUTF8,
+ type]() -> nsresult {
+ captureModule =
+ webrtc::DesktopCaptureImpl::Create(id, deviceUniqueIdUTF8, type);
+ return NS_OK;
+ }),
+ nsIEventTarget::DISPATCH_SYNC);
+
+ if (result == NS_OK) {
+ entry = CaptureEntry(
+ id, rtc::scoped_refptr<webrtc::VideoCaptureModule>(captureModule));
+ } else {
+ return -1;
+ }
+# else
+ entry = CaptureEntry(
+ id, rtc::scoped_refptr<webrtc::VideoCaptureModule>(
+ webrtc::DesktopCaptureImpl::Create(id, deviceUniqueIdUTF8,
+ mCaptureDevInfo.type)));
+# endif
+#else
+ MOZ_ASSERT("CreateVideoCapture NO DESKTOP CAPTURE IMPL ON ANDROID" ==
+ nullptr);
+#endif
+ }
+ mCaps.emplace(id, std::move(entry));
+ mIdMap.emplace(id, id);
+ return id;
+}
+
+int VideoEngine::ReleaseVideoCapture(const int32_t id) {
+ bool found = false;
+
+#ifdef DEBUG
+ {
+ auto it = mIdMap.find(id);
+ MOZ_ASSERT(it != mIdMap.end());
+ Unused << it;
+ }
+#endif
+
+ for (auto& it : mIdMap) {
+ if (it.first != id && it.second == mIdMap[id]) {
+ // There are other tracks still using this hardware.
+ found = true;
+ }
+ }
+
+ if (!found) {
+ WithEntry(id, [&found](CaptureEntry& cap) {
+ cap.mVideoCaptureModule = nullptr;
+ found = true;
+ });
+ MOZ_ASSERT(found);
+ if (found) {
+ auto it = mCaps.find(mIdMap[id]);
+ MOZ_ASSERT(it != mCaps.end());
+ mCaps.erase(it);
+ }
+ }
+
+ mIdMap.erase(id);
+ return found ? 0 : (-1);
+}
+
+std::shared_ptr<webrtc::VideoCaptureModule::DeviceInfo>
+VideoEngine::GetOrCreateVideoCaptureDeviceInfo() {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ webrtc::Timestamp currentTime = webrtc::Timestamp::Micros(0);
+
+ const char* capDevTypeName =
+ CaptureDeviceInfo(mCaptureDevInfo.type).TypeName();
+
+ if (mDeviceInfo) {
+ LOG(("Device cache available."));
+ // Camera cache is invalidated by HW change detection elsewhere
+ if (mCaptureDevInfo.type == CaptureDeviceType::Camera) {
+ LOG(("returning cached CaptureDeviceInfo of type %s", capDevTypeName));
+ return mDeviceInfo;
+ }
+ // Screen sharing cache is invalidated after the expiration time
+ currentTime = WebrtcSystemTime();
+ LOG(("Checking expiry, fetched current time of: %" PRId64,
+ currentTime.ms()));
+ LOG(("device cache expiration is %" PRId64, mExpiryTime.ms()));
+ if (currentTime <= mExpiryTime) {
+ LOG(("returning cached CaptureDeviceInfo of type %s", capDevTypeName));
+ return mDeviceInfo;
+ }
+ }
+
+ if (currentTime.IsZero()) {
+ currentTime = WebrtcSystemTime();
+ LOG(("Fetched current time of: %" PRId64, currentTime.ms()));
+ }
+ mExpiryTime = currentTime + webrtc::TimeDelta::Millis(kCacheExpiryPeriodMs);
+ LOG(("new device cache expiration is %" PRId64, mExpiryTime.ms()));
+ LOG(("creating a new VideoCaptureDeviceInfo of type %s", capDevTypeName));
+
+ switch (mCaptureDevInfo.type) {
+ case CaptureDeviceType::Camera: {
+#ifdef MOZ_WIDGET_ANDROID
+ if (SetAndroidObjects()) {
+ LOG(("VideoEngine::SetAndroidObjects Failed"));
+ break;
+ }
+#endif
+ mDeviceInfo.reset(webrtc::VideoCaptureFactory::CreateDeviceInfo());
+ LOG(("CaptureDeviceType::Camera: Finished creating new device."));
+ break;
+ }
+    // Window, Screen, and Browser (tab) types are handled by DesktopCapture
+ case CaptureDeviceType::Browser:
+ case CaptureDeviceType::Window:
+ case CaptureDeviceType::Screen: {
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
+ mDeviceInfo.reset(webrtc::DesktopCaptureImpl::CreateDeviceInfo(
+ mId, mCaptureDevInfo.type));
+ LOG(("screen capture: Finished creating new device."));
+#else
+ MOZ_ASSERT(
+ "GetVideoCaptureDeviceInfo NO DESKTOP CAPTURE IMPL ON ANDROID" ==
+ nullptr);
+ mDeviceInfo.reset();
+#endif
+ break;
+ }
+ }
+ LOG(("EXIT %s", __PRETTY_FUNCTION__));
+ return mDeviceInfo;
+}
+
+already_AddRefed<VideoEngine> VideoEngine::Create(
+ const CaptureDeviceType& aCaptureDeviceType) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ return do_AddRef(new VideoEngine(aCaptureDeviceType));
+}
+
+VideoEngine::CaptureEntry::CaptureEntry(
+ int32_t aCapnum, rtc::scoped_refptr<webrtc::VideoCaptureModule> aCapture)
+ : mCapnum(aCapnum), mVideoCaptureModule(aCapture) {}
+
+rtc::scoped_refptr<webrtc::VideoCaptureModule>
+VideoEngine::CaptureEntry::VideoCapture() {
+ return mVideoCaptureModule;
+}
+
+int32_t VideoEngine::CaptureEntry::Capnum() const { return mCapnum; }
+
+bool VideoEngine::WithEntry(
+ const int32_t entryCapnum,
+ const std::function<void(CaptureEntry& entry)>&& fn) {
+#ifdef DEBUG
+ {
+ auto it = mIdMap.find(entryCapnum);
+ MOZ_ASSERT(it != mIdMap.end());
+ Unused << it;
+ }
+#endif
+
+ auto it = mCaps.find(mIdMap[entryCapnum]);
+ MOZ_ASSERT(it != mCaps.end());
+ if (it == mCaps.end()) {
+ return false;
+ }
+ fn(it->second);
+ return true;
+}
+
+int32_t VideoEngine::GenerateId() {
+ // XXX Something better than this (a map perhaps, or a simple boolean TArray,
+ // given the number in-use is O(1) normally!)
+ static int sId = 0;
+ return mId = sId++;
+}
+
+VideoEngine::VideoEngine(const CaptureDeviceType& aCaptureDeviceType)
+ : mId(0), mCaptureDevInfo(aCaptureDeviceType), mDeviceInfo(nullptr) {
+ LOG(("%s", __PRETTY_FUNCTION__));
+ LOG(("Creating new VideoEngine with CaptureDeviceType %s",
+ mCaptureDevInfo.TypeName()));
+}
+
+} // namespace mozilla::camera
diff --git a/dom/media/systemservices/VideoEngine.h b/dom/media/systemservices/VideoEngine.h
new file mode 100644
index 0000000000..8d02feb878
--- /dev/null
+++ b/dom/media/systemservices/VideoEngine.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_VideoEngine_h
+#define mozilla_VideoEngine_h
+
+#include "MediaEngine.h"
+#include "VideoFrameUtils.h"
+#include "mozilla/media/MediaUtils.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "modules/video_capture/video_capture_factory.h"
+#include <memory>
+#include <functional>
+
+namespace mozilla::camera {
+
+enum class CaptureDeviceType { Camera, Screen, Window, Browser };
+
+struct CaptureDeviceInfo {
+ CaptureDeviceType type;
+
+ CaptureDeviceInfo() : type(CaptureDeviceType::Camera) {}
+ explicit CaptureDeviceInfo(CaptureDeviceType t) : type(t) {}
+
+ const char* TypeName() const {
+ switch (type) {
+ case CaptureDeviceType::Camera: {
+ return "Camera";
+ }
+ case CaptureDeviceType::Screen: {
+ return "Screen";
+ }
+ case CaptureDeviceType::Window: {
+ return "Window";
+ }
+ case CaptureDeviceType::Browser: {
+ return "Browser";
+ }
+ }
+ assert(false);
+ return "UNKOWN-CaptureDeviceType!";
+ }
+};
+
+// Historically the video engine was part of webrtc; it was removed there
+// (and reimplemented in Talk).
+class VideoEngine {
+ private:
+ virtual ~VideoEngine() = default;
+
+  // Base cache expiration period.
+  // Note: because cameras use HW plug-event detection, this only applies to
+  // screen-based modes.
+ static const int64_t kCacheExpiryPeriodMs = 2000;
+
+ public:
+  VideoEngine() : mId(0) {}
+ NS_INLINE_DECL_REFCOUNTING(VideoEngine)
+
+ static already_AddRefed<VideoEngine> Create(
+ const CaptureDeviceType& aCaptureDeviceType);
+#if defined(ANDROID)
+ static int SetAndroidObjects();
+#endif
+ // Returns a non-negative capture identifier or -1 on failure.
+ int32_t CreateVideoCapture(const char* deviceUniqueIdUTF8);
+
+ int ReleaseVideoCapture(const int32_t id);
+
+ // VideoEngine is responsible for any cleanup in its modules
+ static void Delete(VideoEngine* engine) {}
+
+  /** Returns an existing DeviceInfo or creates a new one.
+   * Camera info is cached to prevent repeated lengthy polling for "realness"
+   * of the hardware devices. Other types of capture, e.g. screen-share info,
+   * are cached for kCacheExpiryPeriodMs (currently 2 seconds). This could be
+   * handled in a more elegant way in the future.
+ * @return on failure the shared_ptr will be null, otherwise it will contain
+ * a DeviceInfo.
+ * @see bug 1305212 https://bugzilla.mozilla.org/show_bug.cgi?id=1305212
+ */
+ std::shared_ptr<webrtc::VideoCaptureModule::DeviceInfo>
+ GetOrCreateVideoCaptureDeviceInfo();
+
+ class CaptureEntry {
+ public:
+ CaptureEntry(int32_t aCapnum,
+ rtc::scoped_refptr<webrtc::VideoCaptureModule> aCapture);
+ int32_t Capnum() const;
+ rtc::scoped_refptr<webrtc::VideoCaptureModule> VideoCapture();
+
+ private:
+ int32_t mCapnum;
+ rtc::scoped_refptr<webrtc::VideoCaptureModule> mVideoCaptureModule;
+ friend class VideoEngine;
+ };
+
+ // Returns true iff an entry for capnum exists
+ bool WithEntry(const int32_t entryCapnum,
+ const std::function<void(CaptureEntry& entry)>&& fn);
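+  // Illustrative example of how a caller might reach the underlying module
+  // (the lambda body is a placeholder, not code from this patch):
+  //
+  //   engine->WithEntry(capnum, [&](CaptureEntry& entry) {
+  //     if (entry.VideoCapture()) {
+  //       entry.VideoCapture()->StopCapture();
+  //     }
+  //   });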
+
+ private:
+ explicit VideoEngine(const CaptureDeviceType& aCaptureDeviceType);
+ int32_t mId;
+ CaptureDeviceInfo mCaptureDevInfo;
+ std::shared_ptr<webrtc::VideoCaptureModule::DeviceInfo> mDeviceInfo;
+ std::map<int32_t, CaptureEntry> mCaps;
+ std::map<int32_t, int32_t> mIdMap;
+  // The validity period for non-camera capture device infos.
+ webrtc::Timestamp mExpiryTime = webrtc::Timestamp::Micros(0);
+ int32_t GenerateId();
+};
+} // namespace mozilla::camera
+#endif
diff --git a/dom/media/systemservices/VideoFrameUtils.cpp b/dom/media/systemservices/VideoFrameUtils.cpp
new file mode 100644
index 0000000000..00ead56a7b
--- /dev/null
+++ b/dom/media/systemservices/VideoFrameUtils.cpp
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VideoFrameUtils.h"
+#include "api/video/video_frame.h"
+#include "mozilla/ShmemPool.h"
+
+namespace mozilla {
+
+uint32_t VideoFrameUtils::TotalRequiredBufferSize(
+ const webrtc::VideoFrame& aVideoFrame) {
+ auto i420 = aVideoFrame.video_frame_buffer()->ToI420();
+ auto height = i420->height();
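+  // I420 layout: one full-height Y plane plus half-height U and V planes,
+  // each plane padded to its own stride; hence ((height + 1) / 2) rows for
+  // the chroma planes.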
+ size_t size = height * i420->StrideY() +
+ ((height + 1) / 2) * i420->StrideU() +
+ ((height + 1) / 2) * i420->StrideV();
+ MOZ_RELEASE_ASSERT(size < std::numeric_limits<uint32_t>::max());
+ return static_cast<uint32_t>(size);
+}
+
+void VideoFrameUtils::InitFrameBufferProperties(
+ const webrtc::VideoFrame& aVideoFrame,
+ camera::VideoFrameProperties& aDestProps) {
+  // The size needed to hold the VideoFrameBuffer image data in the
+  // accompanying buffer; the buffer is at least this size or larger.
+ aDestProps.bufferSize() = TotalRequiredBufferSize(aVideoFrame);
+
+ aDestProps.timeStamp() = aVideoFrame.timestamp();
+ aDestProps.ntpTimeMs() = aVideoFrame.ntp_time_ms();
+ aDestProps.renderTimeMs() = aVideoFrame.render_time_ms();
+
+ aDestProps.rotation() = aVideoFrame.rotation();
+
+ auto i420 = aVideoFrame.video_frame_buffer()->ToI420();
+ auto height = i420->height();
+ aDestProps.yAllocatedSize() = height * i420->StrideY();
+ aDestProps.uAllocatedSize() = ((height + 1) / 2) * i420->StrideU();
+ aDestProps.vAllocatedSize() = ((height + 1) / 2) * i420->StrideV();
+
+ aDestProps.width() = i420->width();
+ aDestProps.height() = height;
+
+ aDestProps.yStride() = i420->StrideY();
+ aDestProps.uStride() = i420->StrideU();
+ aDestProps.vStride() = i420->StrideV();
+}
+
+void VideoFrameUtils::CopyVideoFrameBuffers(uint8_t* aDestBuffer,
+ const size_t aDestBufferSize,
+ const webrtc::VideoFrame& aFrame) {
+ size_t aggregateSize = TotalRequiredBufferSize(aFrame);
+
+ MOZ_ASSERT(aDestBufferSize >= aggregateSize);
+ auto i420 = aFrame.video_frame_buffer()->ToI420();
+
+ // If planes are ordered YUV and contiguous then do a single copy
+ if ((i420->DataY() != nullptr) &&
+ // Check that the three planes are ordered
+ (i420->DataY() < i420->DataU()) && (i420->DataU() < i420->DataV()) &&
+ // Check that the last plane ends at firstPlane[totalsize]
+ (&i420->DataY()[aggregateSize] ==
+ &i420->DataV()[((i420->height() + 1) / 2) * i420->StrideV()])) {
+ memcpy(aDestBuffer, i420->DataY(), aggregateSize);
+ return;
+ }
+
+ // Copy each plane
+ size_t offset = 0;
+ size_t size;
+ auto height = i420->height();
+ size = height * i420->StrideY();
+ memcpy(&aDestBuffer[offset], i420->DataY(), size);
+ offset += size;
+ size = ((height + 1) / 2) * i420->StrideU();
+ memcpy(&aDestBuffer[offset], i420->DataU(), size);
+ offset += size;
+ size = ((height + 1) / 2) * i420->StrideV();
+ memcpy(&aDestBuffer[offset], i420->DataV(), size);
+}
+
+void VideoFrameUtils::CopyVideoFrameBuffers(
+ ShmemBuffer& aDestShmem, const webrtc::VideoFrame& aVideoFrame) {
+ CopyVideoFrameBuffers(aDestShmem.Get().get<uint8_t>(),
+ aDestShmem.Get().Size<uint8_t>(), aVideoFrame);
+}
+
+} // namespace mozilla
diff --git a/dom/media/systemservices/VideoFrameUtils.h b/dom/media/systemservices/VideoFrameUtils.h
new file mode 100644
index 0000000000..23ebf4f316
--- /dev/null
+++ b/dom/media/systemservices/VideoFrameUtils.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_VideoFrameUtil_h
+#define mozilla_VideoFrameUtil_h
+
+#include "mozilla/camera/PCameras.h"
+
+namespace webrtc {
+class VideoFrame;
+}
+
+namespace mozilla {
+class ShmemBuffer;
+
+// Util methods for working with webrtc::VideoFrame(s) and
+// the IPC classes that are used to deliver their contents to the
+// MediaEngine.
+
+class VideoFrameUtils {
+ public:
+ // Returns the total number of bytes necessary to copy a VideoFrame's buffer
+ // across all planes.
+ static uint32_t TotalRequiredBufferSize(const webrtc::VideoFrame& frame);
+
+ // Initializes a camera::VideoFrameProperties from a VideoFrameBuffer
+ static void InitFrameBufferProperties(
+ const webrtc::VideoFrame& aVideoFrame,
+ camera::VideoFrameProperties& aDestProperties);
+
+ // Copies the buffers out of a VideoFrameBuffer into a buffer.
+ // Attempts to make as few memcopies as possible.
+ static void CopyVideoFrameBuffers(uint8_t* aDestBuffer,
+ const size_t aDestBufferSize,
+ const webrtc::VideoFrame& aVideoFrame);
+
+  // Copies the buffers in a VideoFrame into a Shmem; the underlying memcpy
+  // does not report errors, so nothing is returned.
+ static void CopyVideoFrameBuffers(ShmemBuffer& aDestShmem,
+ const webrtc::VideoFrame& aVideoFrame);
+};
+
+} /* namespace mozilla */
+
+#endif
diff --git a/dom/media/systemservices/android_video_capture/device_info_android.cc b/dom/media/systemservices/android_video_capture/device_info_android.cc
new file mode 100644
index 0000000000..581040eb94
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/device_info_android.cc
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "device_info_android.h"
+
+#include <algorithm>
+#include <string>
+#include <sstream>
+#include <vector>
+
+#include "rtc_base/logging.h"
+#include "modules/utility/include/helpers_android.h"
+
+#include "mozilla/jni/Utils.h"
+
+namespace webrtc {
+
+namespace videocapturemodule {
+
+// Helper for storing lists of pairs of ints. Used e.g. for resolutions & FPS
+// ranges.
+typedef std::pair<int, int> IntPair;
+typedef std::vector<IntPair> IntPairs;
+
+static std::string IntPairsToString(const IntPairs& pairs, char separator) {
+ std::stringstream stream;
+ for (size_t i = 0; i < pairs.size(); ++i) {
+ if (i > 0) {
+ stream << ", ";
+ }
+ stream << "(" << pairs[i].first << separator << pairs[i].second << ")";
+ }
+ return stream.str();
+}
+
+struct AndroidCameraInfo {
+ std::string name;
+ bool front_facing;
+ int orientation;
+ IntPairs resolutions; // Pairs are: (width,height).
+ // Pairs are (min,max) in units of FPS*1000 ("milli-frame-per-second").
+ IntPairs mfpsRanges;
+
+ std::string ToString() {
+ std::stringstream stream;
+ stream << "Name: [" << name << "], MFPS ranges: ["
+ << IntPairsToString(mfpsRanges, ':')
+ << "], front_facing: " << front_facing
+ << ", orientation: " << orientation << ", resolutions: ["
+ << IntPairsToString(resolutions, 'x') << "]";
+ return stream.str();
+ }
+};
+
+// Camera info; populated during DeviceInfoAndroid::Refresh()
+static std::vector<AndroidCameraInfo>* g_camera_info = NULL;
+
+static JavaVM* g_jvm_dev_info = NULL;
+
+// Set |*index| to the index of |name| in g_camera_info or return false if no
+// match found.
+static bool FindCameraIndexByName(const std::string& name, size_t* index) {
+ for (size_t i = 0; i < g_camera_info->size(); ++i) {
+ if (g_camera_info->at(i).name == name) {
+ *index = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Returns a pointer to the named member of g_camera_info, or NULL if no match
+// is found.
+static AndroidCameraInfo* FindCameraInfoByName(const std::string& name) {
+ size_t index = 0;
+ if (FindCameraIndexByName(name, &index)) {
+ return &g_camera_info->at(index);
+ }
+ return NULL;
+}
+
+// static
+void DeviceInfoAndroid::Initialize(JavaVM* javaVM) {
+ // TODO(henrike): this "if" would make a lot more sense as an assert, but
+ // Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_GetVideoEngine() and
+ // Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_Terminate() conspire to
+  // prevent this. Once that code is made to only call
+ // VideoEngine::SetAndroidObjects() once per process, this can turn into an
+ // assert.
+ if (g_camera_info) {
+ return;
+ }
+
+ g_jvm_dev_info = javaVM;
+ BuildDeviceList();
+}
+
+void DeviceInfoAndroid::BuildDeviceList() {
+ if (!g_jvm_dev_info) {
+ return;
+ }
+
+ AttachThreadScoped ats(g_jvm_dev_info);
+ JNIEnv* jni = ats.env();
+
+ g_camera_info = new std::vector<AndroidCameraInfo>();
+ jclass j_info_class = mozilla::jni::GetClassRef(
+ jni, "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid");
+ jclass j_cap_class = mozilla::jni::GetClassRef(
+ jni, "org/webrtc/videoengine/CaptureCapabilityAndroid");
+ assert(j_info_class);
+ jmethodID j_get_device_info = jni->GetStaticMethodID(
+ j_info_class, "getDeviceInfo",
+ "()[Lorg/webrtc/videoengine/CaptureCapabilityAndroid;");
+ jarray j_camera_caps = static_cast<jarray>(
+ jni->CallStaticObjectMethod(j_info_class, j_get_device_info));
+ if (jni->ExceptionCheck()) {
+ jni->ExceptionClear();
+ RTC_LOG(LS_INFO) << __FUNCTION__ << ": Failed to get camera capabilities.";
+ return;
+ }
+ if (j_camera_caps == nullptr) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << ": Failed to get camera capabilities.";
+ return;
+ }
+
+ const jsize capLength = jni->GetArrayLength(j_camera_caps);
+
+ jfieldID widthField = jni->GetFieldID(j_cap_class, "width", "[I");
+ jfieldID heightField = jni->GetFieldID(j_cap_class, "height", "[I");
+ jfieldID maxFpsField = jni->GetFieldID(j_cap_class, "maxMilliFPS", "I");
+ jfieldID minFpsField = jni->GetFieldID(j_cap_class, "minMilliFPS", "I");
+ jfieldID orientationField = jni->GetFieldID(j_cap_class, "orientation", "I");
+ jfieldID frontFacingField = jni->GetFieldID(j_cap_class, "frontFacing", "Z");
+ jfieldID nameField =
+ jni->GetFieldID(j_cap_class, "name", "Ljava/lang/String;");
+ if (widthField == NULL || heightField == NULL || maxFpsField == NULL ||
+ minFpsField == NULL || orientationField == NULL ||
+ frontFacingField == NULL || nameField == NULL) {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << ": Failed to get field Id.";
+ return;
+ }
+
+ for (jsize i = 0; i < capLength; i++) {
+ jobject capabilityElement =
+ jni->GetObjectArrayElement((jobjectArray)j_camera_caps, i);
+
+ AndroidCameraInfo info;
+ jstring camName =
+ static_cast<jstring>(jni->GetObjectField(capabilityElement, nameField));
+ const char* camChars = jni->GetStringUTFChars(camName, nullptr);
+ info.name = std::string(camChars);
+ jni->ReleaseStringUTFChars(camName, camChars);
+
+ info.orientation = jni->GetIntField(capabilityElement, orientationField);
+ info.front_facing =
+ jni->GetBooleanField(capabilityElement, frontFacingField);
+ jint min_mfps = jni->GetIntField(capabilityElement, minFpsField);
+ jint max_mfps = jni->GetIntField(capabilityElement, maxFpsField);
+
+ jintArray widthResArray = static_cast<jintArray>(
+ jni->GetObjectField(capabilityElement, widthField));
+ jintArray heightResArray = static_cast<jintArray>(
+ jni->GetObjectField(capabilityElement, heightField));
+
+ const jsize numRes = jni->GetArrayLength(widthResArray);
+
+ jint* widths = jni->GetIntArrayElements(widthResArray, nullptr);
+ jint* heights = jni->GetIntArrayElements(heightResArray, nullptr);
+
+ for (jsize j = 0; j < numRes; ++j) {
+ info.resolutions.push_back(std::make_pair(widths[j], heights[j]));
+ }
+
+ info.mfpsRanges.push_back(std::make_pair(min_mfps, max_mfps));
+ g_camera_info->push_back(info);
+
+ jni->ReleaseIntArrayElements(widthResArray, widths, JNI_ABORT);
+ jni->ReleaseIntArrayElements(heightResArray, heights, JNI_ABORT);
+ }
+
+ jni->DeleteLocalRef(j_info_class);
+ jni->DeleteLocalRef(j_cap_class);
+}
+
+void DeviceInfoAndroid::DeInitialize() {
+ if (g_camera_info) {
+ delete g_camera_info;
+ g_camera_info = NULL;
+ }
+}
+
+int32_t DeviceInfoAndroid::Refresh() {
+ if (!g_camera_info || g_camera_info->size() == 0) {
+ DeviceInfoAndroid::BuildDeviceList();
+#ifdef DEBUG
+ int frontFacingIndex = -1;
+ for (uint32_t i = 0; i < g_camera_info->size(); i++) {
+ if (g_camera_info->at(i).front_facing) {
+ frontFacingIndex = i;
+ }
+ }
+ // Either there is a front-facing camera, and it's first in the list, or
+ // there is no front-facing camera.
+ MOZ_ASSERT(frontFacingIndex == 0 || frontFacingIndex == -1);
+#endif
+ }
+ return 0;
+}
+
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+ return new videocapturemodule::DeviceInfoAndroid();
+}
+
+DeviceInfoAndroid::DeviceInfoAndroid() : DeviceInfoImpl() {}
+
+DeviceInfoAndroid::~DeviceInfoAndroid() {}
+
+bool DeviceInfoAndroid::FindCameraIndex(const char* deviceUniqueIdUTF8,
+ size_t* index) {
+ return FindCameraIndexByName(deviceUniqueIdUTF8, index);
+}
+
+int32_t DeviceInfoAndroid::Init() { return 0; }
+
+uint32_t DeviceInfoAndroid::NumberOfDevices() {
+ Refresh();
+ return g_camera_info->size();
+}
+
+int32_t DeviceInfoAndroid::GetDeviceName(
+ uint32_t deviceNumber, char* deviceNameUTF8, uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8, uint32_t deviceUniqueIdUTF8Length,
+ char* /*productUniqueIdUTF8*/, uint32_t /*productUniqueIdUTF8Length*/,
+ pid_t* /*pid*/) {
+ if (deviceNumber >= g_camera_info->size()) {
+ return -1;
+ }
+ const AndroidCameraInfo& info = g_camera_info->at(deviceNumber);
+ if (info.name.length() + 1 > deviceNameLength ||
+ info.name.length() + 1 > deviceUniqueIdUTF8Length) {
+ return -1;
+ }
+ memcpy(deviceNameUTF8, info.name.c_str(), info.name.length() + 1);
+ memcpy(deviceUniqueIdUTF8, info.name.c_str(), info.name.length() + 1);
+ return 0;
+}
+
+int32_t DeviceInfoAndroid::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
+ _captureCapabilities.clear();
+ const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
+ if (info == NULL) {
+ return -1;
+ }
+
+ for (size_t i = 0; i < info->resolutions.size(); ++i) {
+ for (size_t j = 0; j < info->mfpsRanges.size(); ++j) {
+ const IntPair& size = info->resolutions[i];
+ const IntPair& mfpsRange = info->mfpsRanges[j];
+ VideoCaptureCapability cap;
+ cap.width = size.first;
+ cap.height = size.second;
+ cap.maxFPS = mfpsRange.second / 1000;
+ cap.videoType = VideoType::kNV21;
+ _captureCapabilities.push_back(cap);
+ }
+ }
+ return _captureCapabilities.size();
+}
+
+int32_t DeviceInfoAndroid::GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
+ if (info == NULL || VideoCaptureImpl::RotationFromDegrees(
+ info->orientation, &orientation) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+void DeviceInfoAndroid::GetMFpsRange(const char* deviceUniqueIdUTF8,
+ int max_fps_to_match, int* min_mfps,
+ int* max_mfps) {
+ const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
+ if (info == NULL) {
+ return;
+ }
+ int desired_mfps = max_fps_to_match * 1000;
+ int best_diff_mfps = 0;
+ RTC_LOG(LS_INFO) << "Search for best target mfps " << desired_mfps;
+ // Search for best fps range with preference shifted to constant fps modes.
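+  // For example, matching 30 fps (desired_mfps = 30000): a fixed
+  // (30000,30000) range scores 0 + 0 + 0 = 0, while a variable
+  // (15000,30000) range scores 15000 + 0 + 7500 = 22500, so the
+  // constant-rate range wins.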
+ for (size_t i = 0; i < info->mfpsRanges.size(); ++i) {
+ int diff_mfps =
+ abs(info->mfpsRanges[i].first - desired_mfps) +
+ abs(info->mfpsRanges[i].second - desired_mfps) +
+ (info->mfpsRanges[i].second - info->mfpsRanges[i].first) / 2;
+ RTC_LOG(LS_INFO) << "Fps range " << info->mfpsRanges[i].first << ":"
+ << info->mfpsRanges[i].second
+ << ". Distance: " << diff_mfps;
+ if (i == 0 || diff_mfps < best_diff_mfps) {
+ best_diff_mfps = diff_mfps;
+ *min_mfps = info->mfpsRanges[i].first;
+ *max_mfps = info->mfpsRanges[i].second;
+ }
+ }
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/dom/media/systemservices/android_video_capture/device_info_android.h b/dom/media/systemservices/android_video_capture/device_info_android.h
new file mode 100644
index 0000000000..ac88b2b8ba
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/device_info_android.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
+#define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
+
+#include <jni.h>
+
+#include "modules/video_capture/device_info_impl.h"
+#include "modules/video_capture/video_capture_impl.h"
+
+#define AndroidJavaCaptureDeviceInfoClass \
+ "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid"
+#define AndroidJavaCaptureCapabilityClass \
+ "org/webrtc/videoengine/CaptureCapabilityAndroid"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+class DeviceInfoAndroid : public DeviceInfoImpl {
+ public:
+ static void Initialize(JavaVM* javaVM);
+ static void DeInitialize();
+
+ DeviceInfoAndroid();
+ virtual ~DeviceInfoAndroid();
+
+ // Set |*index| to the index of the camera matching |deviceUniqueIdUTF8|, or
+ // return false if no match.
+ bool FindCameraIndex(const char* deviceUniqueIdUTF8, size_t* index);
+
+ virtual int32_t Init();
+ virtual uint32_t NumberOfDevices();
+ virtual int32_t Refresh();
+ virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8 = 0,
+ uint32_t productUniqueIdUTF8Length = 0,
+ pid_t* pid = 0);
+ virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* /*deviceUniqueIdUTF8*/, const char* /*dialogTitleUTF8*/,
+ void* /*parentWindow*/, uint32_t /*positionX*/, uint32_t /*positionY*/) {
+ return -1;
+ }
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation);
+
+ // Populate |min_mfps| and |max_mfps| with the closest supported range of the
+ // device to |max_fps_to_match|.
+ void GetMFpsRange(const char* deviceUniqueIdUTF8, int max_fps_to_match,
+ int* min_mfps, int* max_mfps);
+
+ private:
+ enum { kExpectedCaptureDelay = 190 };
+ static void BuildDeviceList();
+};
+
+} // namespace videocapturemodule
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
diff --git a/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java
new file mode 100644
index 0000000000..305fc74804
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.videoengine;
+
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+@WebRTCJNITarget
+public class CaptureCapabilityAndroid {
+ public String name;
+ public int width[];
+ public int height[];
+ public int minMilliFPS;
+ public int maxMilliFPS;
+ public boolean frontFacing;
+ public boolean infrared;
+ public int orientation;
+}
diff --git a/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
new file mode 100644
index 0000000000..cc54009a7b
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.videoengine;
+
+import java.io.IOException;
+import java.util.List;
+
+import android.content.Context;
+import android.util.Log;
+import android.view.Surface;
+import android.view.WindowManager;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+import org.webrtc.CameraEnumerator;
+import org.webrtc.Camera1Enumerator;
+import org.webrtc.Camera2Enumerator;
+import org.webrtc.CameraVideoCapturer;
+import org.webrtc.CapturerObserver;
+import org.webrtc.EglBase;
+import org.webrtc.SurfaceTextureHelper;
+import org.webrtc.VideoFrame;
+import org.webrtc.VideoFrame.I420Buffer;
+
+public class VideoCaptureAndroid implements CameraVideoCapturer.CameraEventsHandler, CapturerObserver {
+ private final static String TAG = "WEBRTC-JC";
+
+ private final String deviceName;
+ private volatile long native_capturer; // |VideoCaptureAndroid*| in C++.
+ private Context context;
+ private CameraVideoCapturer cameraVideoCapturer;
+ private EglBase eglBase;
+ private SurfaceTextureHelper surfaceTextureHelper;
+
+  // This class is recreated every time we start/stop capture, so we
+ // can safely create the CountDownLatches here.
+ private final CountDownLatch capturerStarted = new CountDownLatch(1);
+ private boolean capturerStartedSucceeded = false;
+ private final CountDownLatch capturerStopped = new CountDownLatch(1);
+
+ @WebRTCJNITarget
+ public VideoCaptureAndroid(String deviceName) {
+ // Remove the camera facing information from the name.
+ String[] parts = deviceName.split("Facing (front|back):");
+ if (parts.length == 2) {
+ this.deviceName = parts[1].replace(" (infrared)", "");
+ } else {
+ Log.e(TAG, "VideoCaptureAndroid: Expected facing mode as part of name: " + deviceName);
+ this.deviceName = deviceName;
+ }
+ this.context = GetContext();
+
+ CameraEnumerator enumerator;
+ if (Camera2Enumerator.isSupported(context)) {
+ enumerator = new Camera2Enumerator(context);
+ } else {
+ enumerator = new Camera1Enumerator();
+ }
+ try {
+ cameraVideoCapturer = enumerator.createCapturer(this.deviceName, this);
+ eglBase = EglBase.create();
+ surfaceTextureHelper = SurfaceTextureHelper.create("VideoCaptureAndroidSurfaceTextureHelper", eglBase.getEglBaseContext());
+ cameraVideoCapturer.initialize(surfaceTextureHelper, context, this);
+ } catch (java.lang.IllegalArgumentException e) {
+ Log.e(TAG, "VideoCaptureAndroid: Exception while creating capturer: " + e);
+ }
+ }
+
+ // Return the global application context.
+ @WebRTCJNITarget
+ private static native Context GetContext();
+
+ // Called by native code. Returns true if capturer is started.
+ //
+ // Note that this actually opens the camera, and Camera callbacks run on the
+ // thread that calls open(), so this is done on the CameraThread. Since ViE
+ // API needs a synchronous success return value we wait for the result.
+ @WebRTCJNITarget
+ private synchronized boolean startCapture(
+ final int width, final int height,
+ final int min_mfps, final int max_mfps,
+ long native_capturer) {
+ Log.d(TAG, "startCapture: " + width + "x" + height + "@" +
+ min_mfps + ":" + max_mfps);
+
+ if (cameraVideoCapturer == null) {
+ return false;
+ }
+
+ cameraVideoCapturer.startCapture(width, height, max_mfps);
+ try {
+ capturerStarted.await();
+ } catch (InterruptedException e) {
+ return false;
+ }
+ if (capturerStartedSucceeded) {
+ this.native_capturer = native_capturer;
+ }
+ return capturerStartedSucceeded;
+ }
+
+ // Called by native code. Returns true when camera is known to be stopped.
+ @WebRTCJNITarget
+ private synchronized boolean stopCapture() {
+ Log.d(TAG, "stopCapture");
+ if (cameraVideoCapturer == null) {
+ return false;
+ }
+
+ native_capturer = 0;
+ try {
+ cameraVideoCapturer.stopCapture();
+ capturerStopped.await();
+ } catch (InterruptedException e) {
+ return false;
+ }
+ Log.d(TAG, "stopCapture done");
+ return true;
+ }
+
+ @WebRTCJNITarget
+ private int getDeviceOrientation() {
+ int orientation = 0;
+ if (context != null) {
+ WindowManager wm = (WindowManager) context.getSystemService(
+ Context.WINDOW_SERVICE);
+ switch(wm.getDefaultDisplay().getRotation()) {
+ case Surface.ROTATION_90:
+ orientation = 90;
+ break;
+ case Surface.ROTATION_180:
+ orientation = 180;
+ break;
+ case Surface.ROTATION_270:
+ orientation = 270;
+ break;
+ case Surface.ROTATION_0:
+ default:
+ orientation = 0;
+ break;
+ }
+ }
+ return orientation;
+ }
+
+ @WebRTCJNITarget
+ private native void ProvideCameraFrame(
+ int width, int height,
+ java.nio.ByteBuffer dataY, int strideY,
+ java.nio.ByteBuffer dataU, int strideU,
+ java.nio.ByteBuffer dataV, int strideV,
+ int rotation, long timeStamp, long captureObject);
+
+ //
+ // CameraVideoCapturer.CameraEventsHandler interface
+ //
+
+ // Camera error handler - invoked when camera can not be opened
+ // or any camera exception happens on camera thread.
+ public void onCameraError(String errorDescription) {}
+
+ // Called when camera is disconnected.
+ public void onCameraDisconnected() {}
+
+ // Invoked when camera stops receiving frames.
+ public void onCameraFreezed(String errorDescription) {}
+
+ // Callback invoked when camera is opening.
+ public void onCameraOpening(String cameraName) {}
+
+ // Callback invoked when first camera frame is available after camera is started.
+ public void onFirstFrameAvailable() {}
+
+ // Callback invoked when camera is closed.
+ public void onCameraClosed() {}
+
+ //
+ // CapturerObserver interface
+ //
+
+  // Notify whether the capturer has been started successfully or not.
+ public void onCapturerStarted(boolean success) {
+ capturerStartedSucceeded = success;
+ capturerStarted.countDown();
+ }
+
+ // Notify that the capturer has been stopped.
+ public void onCapturerStopped() {
+ capturerStopped.countDown();
+ }
+
+ // Delivers a captured frame.
+ public void onFrameCaptured(VideoFrame frame) {
+ if (native_capturer != 0) {
+ I420Buffer i420Buffer = frame.getBuffer().toI420();
+ ProvideCameraFrame(i420Buffer.getWidth(), i420Buffer.getHeight(),
+ i420Buffer.getDataY(), i420Buffer.getStrideY(),
+ i420Buffer.getDataU(), i420Buffer.getStrideU(),
+ i420Buffer.getDataV(), i420Buffer.getStrideV(),
+ frame.getRotation(),
+ frame.getTimestampNs() / 1000000, native_capturer);
+
+ i420Buffer.release();
+ }
+ }
+}
diff --git a/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
new file mode 100644
index 0000000000..8ad8453955
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.videoengine;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import android.Manifest;
+import android.app.Activity;
+import android.content.Context;
+import android.util.Log;
+
+import org.mozilla.gecko.GeckoAppShell;
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+import org.webrtc.CameraEnumerator;
+import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
+import org.webrtc.Camera1Enumerator;
+import org.webrtc.Camera2Enumerator;
+
+public class VideoCaptureDeviceInfoAndroid {
+ private final static String TAG = "WEBRTC-JC";
+
+ // Returns information about all cameras on the device.
+ // Since this reflects static information about the hardware present, there is
+ // no need to call this function more than once in a single process. It is
+ // marked "private" as it is only called by native code.
+ @WebRTCJNITarget
+ private static CaptureCapabilityAndroid[] getDeviceInfo() {
+ final Context context = GeckoAppShell.getApplicationContext();
+
+ if (Camera2Enumerator.isSupported(context)) {
+ return createDeviceList(new Camera2Enumerator(context));
+ } else {
+ return createDeviceList(new Camera1Enumerator());
+ }
+ }
+
+ private static CaptureCapabilityAndroid[] createDeviceList(CameraEnumerator enumerator) {
+
+ ArrayList<CaptureCapabilityAndroid> allDevices = new ArrayList<CaptureCapabilityAndroid>();
+ ArrayList<CaptureCapabilityAndroid> IRDevices = new ArrayList<CaptureCapabilityAndroid>();
+
+ for (String camera: enumerator.getDeviceNames()) {
+ List<CaptureFormat> formats = enumerator.getSupportedFormats(camera);
+ int numFormats = formats.size();
+ if (numFormats <= 0) {
+ continue;
+ }
+
+ CaptureCapabilityAndroid device = new CaptureCapabilityAndroid();
+
+ // The only way to plumb through whether the device is front facing
+ // or not is by the name, but the name we receive depends upon the
+ // camera API in use. For the Camera1 API, this information is
+ // already present, but that is not the case when using Camera2.
+ // Later on, we look up the camera by name, so we have to use a
+ // format this is easy to undo. Ideally, libwebrtc would expose
+ // camera facing in VideoCaptureCapability and none of this would be
+ // necessary.
+ device.name = "Facing " + (enumerator.isFrontFacing(camera) ? "front" : "back") + ":" + camera;
+
+ boolean ir = enumerator.isInfrared(camera);
+ device.infrared = ir;
+ if (ir) {
+ device.name += " (infrared)";
+ }
+
+ // This isn't part of the new API, but we don't call
+ // GetDeviceOrientation() anywhere, so this value is unused.
+ device.orientation = 0;
+
+ device.width = new int[numFormats];
+ device.height = new int[numFormats];
+ device.minMilliFPS = formats.get(0).framerate.min;
+ device.maxMilliFPS = formats.get(0).framerate.max;
+ int i = 0;
+ for (CaptureFormat format: formats) {
+ device.width[i] = format.width;
+ device.height[i] = format.height;
+ if (format.framerate.min < device.minMilliFPS) {
+ device.minMilliFPS = format.framerate.min;
+ }
+ if (format.framerate.max > device.maxMilliFPS) {
+ device.maxMilliFPS = format.framerate.max;
+ }
+ i++;
+ }
+ device.frontFacing = enumerator.isFrontFacing(camera);
+      // Ordering: front-facing non-IR devices first, other non-IR devices in
+      // the middle, and infrared devices last (front-facing IR before
+      // non-front-facing IR).
+ if (!device.infrared) {
+ if (device.frontFacing) {
+ allDevices.add(0, device);
+ } else {
+ allDevices.add(device);
+ }
+ } else {
+ if (device.frontFacing) {
+ IRDevices.add(0, device);
+ } else {
+ IRDevices.add(device);
+ }
+ }
+ }
+
+ allDevices.addAll(IRDevices);
+
+ return allDevices.toArray(new CaptureCapabilityAndroid[0]);
+ }
+}
diff --git a/dom/media/systemservices/android_video_capture/video_capture_android.cc b/dom/media/systemservices/android_video_capture/video_capture_android.cc
new file mode 100644
index 0000000000..7c9cd72ccb
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/video_capture_android.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video_capture_android.h"
+
+#include "device_info_android.h"
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/time_utils.h"
+
+#include "AndroidBridge.h"
+
+static JavaVM* g_jvm_capture = NULL;
+static jclass g_java_capturer_class = NULL; // VideoCaptureAndroid.class.
+static jobject g_context = NULL; // Owned android.content.Context.
+
+namespace webrtc {
+
+jobject JniCommon_allocateNativeByteBuffer(JNIEnv* env, jclass, jint size) {
+ void* new_data = ::operator new(size);
+ jobject byte_buffer = env->NewDirectByteBuffer(new_data, size);
+ return byte_buffer;
+}
+
+void JniCommon_freeNativeByteBuffer(JNIEnv* env, jclass, jobject byte_buffer) {
+ void* data = env->GetDirectBufferAddress(byte_buffer);
+ ::operator delete(data);
+}
+
+// Called by Java to get the global application context.
+jobject JNICALL GetContext(JNIEnv* env, jclass) {
+ assert(g_context);
+ return g_context;
+}
+
+// Called by Java when the camera has a new frame to deliver.
+void JNICALL ProvideCameraFrame(JNIEnv* env, jobject, jint width, jint height,
+ jobject javaDataY, jint strideY,
+ jobject javaDataU, jint strideU,
+ jobject javaDataV, jint strideV, jint rotation,
+ jlong timeStamp, jlong context) {
+ if (!context) {
+ return;
+ }
+
+ webrtc::videocapturemodule::VideoCaptureAndroid* captureModule =
+ reinterpret_cast<webrtc::videocapturemodule::VideoCaptureAndroid*>(
+ context);
+ uint8_t* dataY =
+ reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(javaDataY));
+ uint8_t* dataU =
+ reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(javaDataU));
+ uint8_t* dataV =
+ reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(javaDataV));
+
+ rtc::scoped_refptr<I420Buffer> i420Buffer = I420Buffer::Copy(
+ width, height, dataY, strideY, dataU, strideU, dataV, strideV);
+
+ captureModule->OnIncomingFrame(i420Buffer, rotation, timeStamp);
+}
+
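+// Registers the JNI glue when called with a JavaVM, and tears it down
+// (unregisters natives, drops global refs) when called with nullptr.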
+int32_t SetCaptureAndroidVM(JavaVM* javaVM) {
+ if (g_java_capturer_class) {
+ return 0;
+ }
+
+ if (javaVM) {
+ assert(!g_jvm_capture);
+ g_jvm_capture = javaVM;
+ AttachThreadScoped ats(g_jvm_capture);
+
+ g_context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
+
+ videocapturemodule::DeviceInfoAndroid::Initialize(g_jvm_capture);
+
+ {
+ jclass clsRef = mozilla::jni::GetClassRef(
+ ats.env(), "org/webrtc/videoengine/VideoCaptureAndroid");
+ g_java_capturer_class =
+ static_cast<jclass>(ats.env()->NewGlobalRef(clsRef));
+ ats.env()->DeleteLocalRef(clsRef);
+ assert(g_java_capturer_class);
+
+ JNINativeMethod native_methods[] = {
+ {"GetContext", "()Landroid/content/Context;",
+ reinterpret_cast<void*>(&GetContext)},
+ {"ProvideCameraFrame",
+ "(IILjava/nio/ByteBuffer;ILjava/nio/ByteBuffer;ILjava/nio/"
+ "ByteBuffer;IIJJ)V",
+ reinterpret_cast<void*>(&ProvideCameraFrame)}};
+ if (ats.env()->RegisterNatives(g_java_capturer_class, native_methods,
+ 2) != 0)
+ assert(false);
+ }
+
+ {
+ jclass clsRef =
+ mozilla::jni::GetClassRef(ats.env(), "org/webrtc/JniCommon");
+
+ JNINativeMethod native_methods[] = {
+ {"nativeAllocateByteBuffer", "(I)Ljava/nio/ByteBuffer;",
+ reinterpret_cast<void*>(&JniCommon_allocateNativeByteBuffer)},
+ {"nativeFreeByteBuffer", "(Ljava/nio/ByteBuffer;)V",
+ reinterpret_cast<void*>(&JniCommon_freeNativeByteBuffer)}};
+ if (ats.env()->RegisterNatives(clsRef, native_methods, 2) != 0)
+ assert(false);
+ }
+ } else {
+ if (g_jvm_capture) {
+ AttachThreadScoped ats(g_jvm_capture);
+ ats.env()->UnregisterNatives(g_java_capturer_class);
+ ats.env()->DeleteGlobalRef(g_java_capturer_class);
+ g_java_capturer_class = NULL;
+ g_context = NULL;
+ videocapturemodule::DeviceInfoAndroid::DeInitialize();
+ g_jvm_capture = NULL;
+ }
+ }
+
+ return 0;
+}
+
+namespace videocapturemodule {
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+ const char* deviceUniqueIdUTF8) {
+ rtc::scoped_refptr<VideoCaptureAndroid> implementation(
+ new rtc::RefCountedObject<VideoCaptureAndroid>());
+ if (implementation->Init(deviceUniqueIdUTF8) != 0) {
+ implementation = nullptr;
+ }
+ return implementation;
+}
+
+void VideoCaptureAndroid::OnIncomingFrame(rtc::scoped_refptr<I420Buffer> buffer,
+ int32_t degrees,
+ int64_t captureTime) {
+ MutexLock lock(&api_lock_);
+
+ VideoRotation rotation =
+ (degrees <= 45 || degrees > 315) ? kVideoRotation_0
+ : (degrees > 45 && degrees <= 135) ? kVideoRotation_90
+ : (degrees > 135 && degrees <= 225) ? kVideoRotation_180
+ : (degrees > 225 && degrees <= 315) ? kVideoRotation_270
+ : kVideoRotation_0; // Impossible.
+
+ // Historically, we have ignored captureTime. Why?
+ VideoFrame captureFrame(I420Buffer::Rotate(*buffer, rotation), 0,
+ rtc::TimeMillis(), rotation);
+
+ DeliverCapturedFrame(captureFrame);
+}
+
+VideoCaptureAndroid::VideoCaptureAndroid()
+ : VideoCaptureImpl(),
+ _deviceInfo(),
+ _jCapturer(NULL),
+ _captureStarted(false) {}
+
+int32_t VideoCaptureAndroid::Init(const char* deviceUniqueIdUTF8) {
+ const int nameLength = strlen(deviceUniqueIdUTF8);
+ if (nameLength >= kVideoCaptureUniqueNameLength) return -1;
+
+ // Store the device name
+ RTC_LOG(LS_INFO) << "VideoCaptureAndroid::Init: " << deviceUniqueIdUTF8;
+ _deviceUniqueId = new char[nameLength + 1];
+ memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
+
+ AttachThreadScoped ats(g_jvm_capture);
+ JNIEnv* env = ats.env();
+ jmethodID ctor = env->GetMethodID(g_java_capturer_class, "<init>",
+ "(Ljava/lang/String;)V");
+ assert(ctor);
+ jstring j_deviceName = env->NewStringUTF(_deviceUniqueId);
+ _jCapturer = env->NewGlobalRef(
+ env->NewObject(g_java_capturer_class, ctor, j_deviceName));
+ assert(_jCapturer);
+ return 0;
+}
+
+VideoCaptureAndroid::~VideoCaptureAndroid() {
+ // Ensure Java camera is released even if our caller didn't explicitly Stop.
+ if (_captureStarted) StopCapture();
+ AttachThreadScoped ats(g_jvm_capture);
+ JNIEnv* env = ats.env();
+ env->DeleteGlobalRef(_jCapturer);
+}
+
+int32_t VideoCaptureAndroid::StartCapture(
+ const VideoCaptureCapability& capability) {
+ AttachThreadScoped ats(g_jvm_capture);
+ JNIEnv* env = ats.env();
+ int width = 0;
+ int height = 0;
+ int min_mfps = 0;
+ int max_mfps = 0;
+ {
+ MutexLock lock(&api_lock_);
+
+ if (_deviceInfo.GetBestMatchedCapability(_deviceUniqueId, capability,
+ _captureCapability) < 0) {
+ RTC_LOG(LS_ERROR) << __FUNCTION__
+ << "s: GetBestMatchedCapability failed: "
+ << capability.width << "x" << capability.height;
+ return -1;
+ }
+
+ width = _captureCapability.width;
+ height = _captureCapability.height;
+ _deviceInfo.GetMFpsRange(_deviceUniqueId, _captureCapability.maxFPS,
+ &min_mfps, &max_mfps);
+
+ // Exit critical section to avoid blocking camera thread inside
+ // onIncomingFrame() call.
+ }
+
+ jmethodID j_start =
+ env->GetMethodID(g_java_capturer_class, "startCapture", "(IIIIJ)Z");
+ assert(j_start);
+ jlong j_this = reinterpret_cast<intptr_t>(this);
+ bool started = env->CallBooleanMethod(_jCapturer, j_start, width, height,
+ min_mfps, max_mfps, j_this);
+ if (started) {
+ MutexLock lock(&api_lock_);
+ _requestedCapability = capability;
+ _captureStarted = true;
+ }
+ return started ? 0 : -1;
+}
+
+int32_t VideoCaptureAndroid::StopCapture() {
+ AttachThreadScoped ats(g_jvm_capture);
+ JNIEnv* env = ats.env();
+ {
+ MutexLock lock(&api_lock_);
+
+ memset(&_requestedCapability, 0, sizeof(_requestedCapability));
+ memset(&_captureCapability, 0, sizeof(_captureCapability));
+ _captureStarted = false;
+ // Exit critical section to avoid blocking camera thread inside
+ // onIncomingFrame() call.
+ }
+
+ // try to stop the capturer.
+ jmethodID j_stop =
+ env->GetMethodID(g_java_capturer_class, "stopCapture", "()Z");
+ return env->CallBooleanMethod(_jCapturer, j_stop) ? 0 : -1;
+}
+
+bool VideoCaptureAndroid::CaptureStarted() {
+ MutexLock lock(&api_lock_);
+ return _captureStarted;
+}
+
+int32_t VideoCaptureAndroid::CaptureSettings(VideoCaptureCapability& settings) {
+ MutexLock lock(&api_lock_);
+ settings = _requestedCapability;
+ return 0;
+}
+
+} // namespace videocapturemodule
+} // namespace webrtc
diff --git a/dom/media/systemservices/android_video_capture/video_capture_android.h b/dom/media/systemservices/android_video_capture/video_capture_android.h
new file mode 100644
index 0000000000..720c28e70b
--- /dev/null
+++ b/dom/media/systemservices/android_video_capture/video_capture_android.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
+#define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
+
+#include <jni.h>
+
+#include "device_info_android.h"
+#include "api/video/i420_buffer.h"
+#include "modules/video_capture/video_capture_impl.h"
+
+namespace webrtc {
+namespace videocapturemodule {
+
+class VideoCaptureAndroid : public VideoCaptureImpl {
+ public:
+ VideoCaptureAndroid();
+ virtual int32_t Init(const char* deviceUniqueIdUTF8);
+
+ virtual int32_t StartCapture(const VideoCaptureCapability& capability);
+ virtual int32_t StopCapture();
+ virtual bool CaptureStarted();
+ virtual int32_t CaptureSettings(VideoCaptureCapability& settings);
+
+ void OnIncomingFrame(rtc::scoped_refptr<I420Buffer> buffer, int32_t degrees,
+ int64_t captureTime = 0);
+
+ protected:
+ virtual ~VideoCaptureAndroid();
+
+ DeviceInfoAndroid _deviceInfo;
+ jobject _jCapturer; // Global ref to Java VideoCaptureAndroid object.
+ VideoCaptureCapability _captureCapability;
+ bool _captureStarted;
+};
+
+} // namespace videocapturemodule
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
diff --git a/dom/media/systemservices/moz.build b/dom/media/systemservices/moz.build
new file mode 100644
index 0000000000..0da8306825
--- /dev/null
+++ b/dom/media/systemservices/moz.build
@@ -0,0 +1,114 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+include("/dom/media/webrtc/third_party_build/webrtc.mozbuild")
+
+if CONFIG["MOZ_WEBRTC"]:
+ EXPORTS += [
+ "CamerasChild.h",
+ "CamerasParent.h",
+ "VideoEngine.h",
+ "VideoFrameUtils.h",
+ ]
+ UNIFIED_SOURCES += [
+ "CamerasChild.cpp",
+ "CamerasParent.cpp",
+ "VideoEngine.cpp",
+ "VideoFrameUtils.cpp",
+ ]
+ LOCAL_INCLUDES += [
+ "/dom/media/webrtc",
+ "/media/libyuv/libyuv/include",
+ "/mfbt",
+ "/third_party/libwebrtc",
+ "/third_party/libwebrtc/third_party/abseil-cpp",
+ "/tools/profiler/public",
+ ]
+
+ if CONFIG["OS_TARGET"] == "Android":
+ UNIFIED_SOURCES += [
+ "android_video_capture/device_info_android.cc",
+ "android_video_capture/video_capture_android.cc",
+ ]
+ elif CONFIG["OS_TARGET"] == "Darwin":
+ REQUIRES_UNIFIED_BUILD = True
+ UNIFIED_SOURCES += [
+ "objc_video_capture/device_info.mm",
+ "objc_video_capture/device_info_avfoundation.mm",
+ "objc_video_capture/device_info_objc.mm",
+ "objc_video_capture/rtc_video_capture_objc.mm",
+ "objc_video_capture/video_capture.mm",
+ "objc_video_capture/video_capture_avfoundation.mm",
+ ]
+ LOCAL_INCLUDES += [
+ "/third_party/libwebrtc/sdk/objc",
+ "/third_party/libwebrtc/sdk/objc/base",
+ ]
+ CMMFLAGS += [
+ "-fobjc-arc",
+ ]
+
+ if CONFIG["OS_TARGET"] != "Android":
+ UNIFIED_SOURCES += [
+ "video_engine/desktop_capture_impl.cc",
+ "video_engine/desktop_device_info.cc",
+ "video_engine/platform_uithread.cc",
+ "video_engine/tab_capturer.cc",
+ ]
+
+
+if CONFIG["OS_TARGET"] == "Android":
+ DEFINES["WEBRTC_ANDROID"] = True
+
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "cocoa":
+ UNIFIED_SOURCES += ["OSXRunLoopSingleton.cpp"]
+ EXPORTS += ["OSXRunLoopSingleton.h"]
+
+EXPORTS.mozilla += [
+ "ShmemPool.h",
+]
+
+EXPORTS.mozilla.media += [
+ "CamerasTypes.h",
+ "MediaChild.h",
+ "MediaParent.h",
+ "MediaSystemResourceClient.h",
+ "MediaSystemResourceManager.h",
+ "MediaSystemResourceManagerChild.h",
+ "MediaSystemResourceManagerParent.h",
+ "MediaSystemResourceMessageUtils.h",
+ "MediaSystemResourceService.h",
+ "MediaSystemResourceTypes.h",
+ "MediaTaskUtils.h",
+ "MediaUtils.h",
+]
+UNIFIED_SOURCES += [
+ "CamerasTypes.cpp",
+ "MediaChild.cpp",
+ "MediaParent.cpp",
+ "MediaSystemResourceClient.cpp",
+ "MediaSystemResourceManager.cpp",
+ "MediaSystemResourceManagerChild.cpp",
+ "MediaSystemResourceManagerParent.cpp",
+ "MediaSystemResourceService.cpp",
+ "MediaUtils.cpp",
+ "ShmemPool.cpp",
+]
+IPDL_SOURCES += [
+ "PCameras.ipdl",
+ "PMedia.ipdl",
+ "PMediaSystemResourceManager.ipdl",
+]
+# /dom/base needed for nsGlobalWindow.h in MediaChild.cpp
+LOCAL_INCLUDES += [
+ "/dom/base",
+]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul"
+
+with Files("android_video_capture/**"):
+ SCHEDULES.exclusive = ["android"]
diff --git a/dom/media/systemservices/objc_video_capture/device_info.h b/dom/media/systemservices/objc_video_capture/device_info.h
new file mode 100644
index 0000000000..d146cfcfda
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_H_
+#define MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_H_
+
+#include "modules/video_capture/device_info_impl.h"
+
+#include <map>
+#include <string>
+
+@class DeviceInfoIosObjC;
+
+namespace webrtc::videocapturemodule {
+class DeviceInfoIos : public DeviceInfoImpl {
+ public:
+ DeviceInfoIos();
+ virtual ~DeviceInfoIos();
+
+ // Implementation of DeviceInfoImpl.
+ int32_t Init() override;
+ uint32_t NumberOfDevices() override;
+ int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8, uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8, uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8 = 0, uint32_t productUniqueIdUTF8Length = 0,
+ pid_t* pid = 0) override;
+
+ int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) override;
+
+ int32_t GetCapability(const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) override;
+
+ int32_t DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
+ const char* dialogTitleUTF8, void* parentWindow,
+ uint32_t positionX, uint32_t positionY) override;
+
+ int32_t GetOrientation(const char* deviceUniqueIdUTF8, VideoRotation& orientation) override;
+
+ int32_t CreateCapabilityMap(const char* device_unique_id_utf8) override
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+
+ private:
+ std::map<std::string, VideoCaptureCapabilities> _capabilitiesMap;
+ DeviceInfoIosObjC* _captureInfo;
+};
+
+} // namespace webrtc::videocapturemodule
+
+#endif // MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_H_
diff --git a/dom/media/systemservices/objc_video_capture/device_info.mm b/dom/media/systemservices/objc_video_capture/device_info.mm
new file mode 100644
index 0000000000..d0299a9ec9
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info.mm
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+# error "This file requires ARC support."
+#endif
+
+#include <AVFoundation/AVFoundation.h>
+
+#include <string>
+
+#include "device_info.h"
+#include "device_info_objc.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "objc_video_capture/device_info_avfoundation.h"
+#include "rtc_base/logging.h"
+
+using namespace mozilla;
+using namespace webrtc;
+using namespace videocapturemodule;
+
+static NSArray* camera_presets = @[
+ AVCaptureSessionPreset352x288, AVCaptureSessionPreset640x480, AVCaptureSessionPreset1280x720
+];
+
+#define IOS_UNSUPPORTED() \
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << " is not supported on the iOS platform."; \
+ return -1;
+
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+ if (StaticPrefs::media_getusermedia_camera_macavf_enabled_AtStartup()) {
+ return new DeviceInfoAvFoundation();
+ }
+ return new DeviceInfoIos();
+}
+
+DeviceInfoIos::DeviceInfoIos() { this->Init(); }
+
+DeviceInfoIos::~DeviceInfoIos() { [_captureInfo registerOwner:nil]; }
+
+int32_t DeviceInfoIos::Init() {
+ _captureInfo = [[DeviceInfoIosObjC alloc] init];
+ [_captureInfo registerOwner:this];
+
+ // Fill in all device capabilities.
+ int deviceCount = [DeviceInfoIosObjC captureDeviceCount];
+
+ for (int i = 0; i < deviceCount; i++) {
+ AVCaptureDevice* avDevice = [DeviceInfoIosObjC captureDeviceForIndex:i];
+ VideoCaptureCapabilities capabilityVector;
+
+ for (NSString* preset in camera_presets) {
+ BOOL support = [avDevice supportsAVCaptureSessionPreset:preset];
+ if (support) {
+ VideoCaptureCapability capability = [DeviceInfoIosObjC capabilityForPreset:preset];
+ capabilityVector.push_back(capability);
+ }
+ }
+
+ char deviceNameUTF8[256];
+ char deviceId[256];
+ int error = this->GetDeviceName(i, deviceNameUTF8, 256, deviceId, 256);
+ if (error) {
+ return error;
+ }
+ std::string deviceIdCopy(deviceId);
+ std::pair<std::string, VideoCaptureCapabilities> mapPair =
+ std::pair<std::string, VideoCaptureCapabilities>(deviceIdCopy, capabilityVector);
+ _capabilitiesMap.insert(mapPair);
+ }
+
+ return 0;
+}
+
+uint32_t DeviceInfoIos::NumberOfDevices() { return [DeviceInfoIosObjC captureDeviceCount]; }
+
+int32_t DeviceInfoIos::GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameUTF8Length, char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length, char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length, pid_t* pid) {
+ if (deviceNumber >= NumberOfDevices()) {
+ return -1;
+ }
+
+ NSString* deviceName = [DeviceInfoIosObjC deviceNameForIndex:deviceNumber];
+
+ NSString* deviceUniqueId = [DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
+
+ strncpy(deviceNameUTF8, [deviceName UTF8String], deviceNameUTF8Length);
+ deviceNameUTF8[deviceNameUTF8Length - 1] = '\0';
+
+ strncpy(deviceUniqueIdUTF8, deviceUniqueId.UTF8String, deviceUniqueIdUTF8Length);
+ deviceUniqueIdUTF8[deviceUniqueIdUTF8Length - 1] = '\0';
+
+ if (productUniqueIdUTF8) {
+ productUniqueIdUTF8[0] = '\0';
+ }
+
+ return 0;
+}
+
+int32_t DeviceInfoIos::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
+ int32_t numberOfCapabilities = 0;
+ std::string deviceUniqueId(deviceUniqueIdUTF8);
+ std::map<std::string, VideoCaptureCapabilities>::iterator it =
+ _capabilitiesMap.find(deviceUniqueId);
+
+ if (it != _capabilitiesMap.end()) {
+ numberOfCapabilities = it->second.size();
+ }
+ return numberOfCapabilities;
+}
+
+int32_t DeviceInfoIos::GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ std::string deviceUniqueId(deviceUniqueIdUTF8);
+ std::map<std::string, VideoCaptureCapabilities>::iterator it =
+ _capabilitiesMap.find(deviceUniqueId);
+
+ if (it != _capabilitiesMap.end()) {
+ VideoCaptureCapabilities deviceCapabilities = it->second;
+
+ if (deviceCapabilityNumber < deviceCapabilities.size()) {
+ VideoCaptureCapability cap;
+ cap = deviceCapabilities[deviceCapabilityNumber];
+ capability = cap;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+int32_t DeviceInfoIos::DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
+ const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX,
+ uint32_t positionY) {
+ IOS_UNSUPPORTED();
+}
+
+int32_t DeviceInfoIos::GetOrientation(const char* deviceUniqueIdUTF8, VideoRotation& orientation) {
+ if (strcmp(deviceUniqueIdUTF8, "Front Camera") == 0) {
+ orientation = kVideoRotation_0;
+ } else {
+ orientation = kVideoRotation_90;
+ }
+ return orientation;
+}
+
+int32_t DeviceInfoIos::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
+  std::string deviceName(deviceUniqueIdUTF8);
+  std::map<std::string, VideoCaptureCapabilities>::iterator it =
+      _capabilitiesMap.find(deviceName);
+  if (it != _capabilitiesMap.end()) {
+    _captureCapabilities = it->second;
+    return 0;
+  }
+
+  return -1;
+}
diff --git a/dom/media/systemservices/objc_video_capture/device_info_avfoundation.h b/dom/media/systemservices/objc_video_capture/device_info_avfoundation.h
new file mode 100644
index 0000000000..9a698480fa
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info_avfoundation.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_SYSTEMSERVICES_OBJC_VIDEO_CAPTURE_DEVICE_INFO_AVFOUNDATION_H_
+#define DOM_MEDIA_SYSTEMSERVICES_OBJC_VIDEO_CAPTURE_DEVICE_INFO_AVFOUNDATION_H_
+
+#include <map>
+#include <string>
+
+#include "api/sequence_checker.h"
+#include "device_info_objc.h"
+#include "modules/video_capture/device_info_impl.h"
+
+namespace webrtc::videocapturemodule {
+
+/**
+ * DeviceInfo implementation for the libwebrtc ios/mac sdk camera backend.
+ * Single threaded except for DeviceChange() that happens on a platform callback
+ * thread.
+ */
+class DeviceInfoAvFoundation : public DeviceInfoImpl {
+ public:
+ static int32_t ConvertAVFrameRateToCapabilityFPS(Float64 aRate);
+ static webrtc::VideoType ConvertFourCCToVideoType(FourCharCode aCode);
+
+ DeviceInfoAvFoundation();
+ virtual ~DeviceInfoAvFoundation();
+
+ // Implementation of DeviceInfoImpl.
+ int32_t Init() override { return 0; }
+ void DeviceChange() override;
+ uint32_t NumberOfDevices() override;
+ int32_t GetDeviceName(uint32_t aDeviceNumber, char* aDeviceNameUTF8,
+ uint32_t aDeviceNameLength, char* aDeviceUniqueIdUTF8,
+ uint32_t aDeviceUniqueIdUTF8Length,
+ char* aProductUniqueIdUTF8 = nullptr,
+ uint32_t aProductUniqueIdUTF8Length = 0,
+ pid_t* aPid = nullptr) override;
+ int32_t NumberOfCapabilities(const char* aDeviceUniqueIdUTF8) override;
+ int32_t GetCapability(const char* aDeviceUniqueIdUTF8,
+ const uint32_t aDeviceCapabilityNumber,
+ VideoCaptureCapability& aCapability) override;
+ int32_t DisplayCaptureSettingsDialogBox(const char* aDeviceUniqueIdUTF8,
+ const char* aDialogTitleUTF8,
+ void* aParentWindow,
+ uint32_t aPositionX,
+ uint32_t aPositionY) override {
+ return -1;
+ }
+ int32_t CreateCapabilityMap(const char* aDeviceUniqueIdUTF8) override
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
+
+ private:
+ const std::tuple<std::string, std::string, VideoCaptureCapabilities>*
+ FindDeviceAndCapabilities(const std::string& aDeviceUniqueId) const;
+ void EnsureCapabilitiesMap();
+
+ SequenceChecker mChecker;
+ std::atomic<bool> mInvalidateCapabilities;
+ // [{uniqueId, name, capabilities}]
+ std::vector<std::tuple<std::string, std::string, VideoCaptureCapabilities>>
+ mDevicesAndCapabilities RTC_GUARDED_BY(mChecker);
+ const DeviceInfoIosObjC* mDeviceChangeCaptureInfo RTC_GUARDED_BY(mChecker);
+};
+
+} // namespace webrtc::videocapturemodule
+
+#endif
diff --git a/dom/media/systemservices/objc_video_capture/device_info_avfoundation.mm b/dom/media/systemservices/objc_video_capture/device_info_avfoundation.mm
new file mode 100644
index 0000000000..fae65ff343
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info_avfoundation.mm
@@ -0,0 +1,213 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "device_info_avfoundation.h"
+#include <CoreVideo/CVPixelBuffer.h>
+
+#include <string>
+
+#include "components/capturer/RTCCameraVideoCapturer.h"
+#import "helpers/NSString+StdString.h"
+#include "media/base/video_common.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc::videocapturemodule {
+/* static */
+int32_t DeviceInfoAvFoundation::ConvertAVFrameRateToCapabilityFPS(Float64 aRate) {
+ return static_cast<int32_t>(aRate);
+}
+
+/* static */
+webrtc::VideoType DeviceInfoAvFoundation::ConvertFourCCToVideoType(FourCharCode aCode) {
+ switch (aCode) {
+ case kCVPixelFormatType_420YpCbCr8Planar:
+ case kCVPixelFormatType_420YpCbCr8PlanarFullRange:
+ return webrtc::VideoType::kI420;
+ case kCVPixelFormatType_24BGR:
+ return webrtc::VideoType::kRGB24;
+ case kCVPixelFormatType_32ABGR:
+ return webrtc::VideoType::kABGR;
+ case kCMPixelFormat_32ARGB:
+ return webrtc::VideoType::kBGRA;
+ case kCMPixelFormat_32BGRA:
+ return webrtc::VideoType::kARGB;
+ case kCMPixelFormat_16LE565:
+ return webrtc::VideoType::kRGB565;
+ case kCMPixelFormat_16LE555:
+ case kCMPixelFormat_16LE5551:
+ return webrtc::VideoType::kARGB1555;
+ case kCMPixelFormat_422YpCbCr8_yuvs:
+ return webrtc::VideoType::kYUY2;
+ case kCMPixelFormat_422YpCbCr8:
+ return webrtc::VideoType::kUYVY;
+ case kCMVideoCodecType_JPEG:
+ case kCMVideoCodecType_JPEG_OpenDML:
+ return webrtc::VideoType::kMJPEG;
+ case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
+ case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
+ return webrtc::VideoType::kNV12;
+ default:
+      RTC_LOG(LS_WARNING) << "Unhandled FourCharCode " << aCode;
+ return webrtc::VideoType::kUnknown;
+ }
+}
+
+DeviceInfoAvFoundation::DeviceInfoAvFoundation()
+ : mInvalidateCapabilities(false), mDeviceChangeCaptureInfo([[DeviceInfoIosObjC alloc] init]) {
+ [mDeviceChangeCaptureInfo registerOwner:this];
+}
+
+DeviceInfoAvFoundation::~DeviceInfoAvFoundation() { [mDeviceChangeCaptureInfo registerOwner:nil]; }
+
+void DeviceInfoAvFoundation::DeviceChange() {
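+  // Runs on a platform callback thread; mark the cached device list stale
+  // and let the base class notify observers.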
+ mInvalidateCapabilities = true;
+ DeviceInfo::DeviceChange();
+}
+
+uint32_t DeviceInfoAvFoundation::NumberOfDevices() {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ EnsureCapabilitiesMap();
+ return mDevicesAndCapabilities.size();
+}
+
+int32_t DeviceInfoAvFoundation::GetDeviceName(uint32_t aDeviceNumber, char* aDeviceNameUTF8,
+ uint32_t aDeviceNameLength, char* aDeviceUniqueIdUTF8,
+ uint32_t aDeviceUniqueIdUTF8Length,
+ char* /* aProductUniqueIdUTF8 */,
+ uint32_t /* aProductUniqueIdUTF8Length */,
+ pid_t* /* aPid */) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ // Don't EnsureCapabilitiesMap() here, since:
+ // 1) That might invalidate the capabilities map
+ // 2) This function depends on the device index
+
+ if (aDeviceNumber >= mDevicesAndCapabilities.size()) {
+ return -1;
+ }
+
+ const auto& [uniqueId, name, _] = mDevicesAndCapabilities[aDeviceNumber];
+
+ strncpy(aDeviceUniqueIdUTF8, uniqueId.c_str(), aDeviceUniqueIdUTF8Length);
+ aDeviceUniqueIdUTF8[aDeviceUniqueIdUTF8Length - 1] = '\0';
+
+ strncpy(aDeviceNameUTF8, name.c_str(), aDeviceNameLength);
+ aDeviceNameUTF8[aDeviceNameLength - 1] = '\0';
+
+ return 0;
+}
+
+int32_t DeviceInfoAvFoundation::NumberOfCapabilities(const char* aDeviceUniqueIdUTF8) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+
+ std::string deviceUniqueId(aDeviceUniqueIdUTF8);
+ const auto* tup = FindDeviceAndCapabilities(deviceUniqueId);
+ if (!tup) {
+ return 0;
+ }
+
+ const auto& [_, __, capabilities] = *tup;
+ return static_cast<int32_t>(capabilities.size());
+}
+
+int32_t DeviceInfoAvFoundation::GetCapability(const char* aDeviceUniqueIdUTF8,
+ const uint32_t aDeviceCapabilityNumber,
+ VideoCaptureCapability& aCapability) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+
+ std::string deviceUniqueId(aDeviceUniqueIdUTF8);
+ const auto* tup = FindDeviceAndCapabilities(deviceUniqueId);
+ if (!tup) {
+ return -1;
+ }
+
+ const auto& [_, __, capabilities] = *tup;
+ if (aDeviceCapabilityNumber >= capabilities.size()) {
+ return -1;
+ }
+
+ aCapability = capabilities[aDeviceCapabilityNumber];
+ return 0;
+}
+
+int32_t DeviceInfoAvFoundation::CreateCapabilityMap(const char* aDeviceUniqueIdUTF8) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+
+ const size_t deviceUniqueIdUTF8Length = strlen(aDeviceUniqueIdUTF8);
+ if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
+ RTC_LOG(LS_INFO) << "Device name too long";
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device " << aDeviceUniqueIdUTF8;
+ std::string deviceUniqueId(aDeviceUniqueIdUTF8);
+ const auto* tup = FindDeviceAndCapabilities(deviceUniqueId);
+ if (!tup) {
+ RTC_LOG(LS_INFO) << "no matching device found";
+ return -1;
+ }
+
+ // Store the new used device name
+ _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
+ _lastUsedDeviceName =
+ static_cast<char*>(realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
+ memcpy(_lastUsedDeviceName, aDeviceUniqueIdUTF8, _lastUsedDeviceNameLength + 1);
+
+ const auto& [_, __, capabilities] = *tup;
+ _captureCapabilities = capabilities;
+ return static_cast<int32_t>(_captureCapabilities.size());
+}
+
+auto DeviceInfoAvFoundation::FindDeviceAndCapabilities(const std::string& aDeviceUniqueId) const
+ -> const std::tuple<std::string, std::string, VideoCaptureCapabilities>* {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ for (const auto& tup : mDevicesAndCapabilities) {
+ if (std::get<0>(tup) == aDeviceUniqueId) {
+ return &tup;
+ }
+ }
+ return nullptr;
+}
+
+void DeviceInfoAvFoundation::EnsureCapabilitiesMap() {
+ RTC_DCHECK_RUN_ON(&mChecker);
+
+ if (mInvalidateCapabilities.exchange(false)) {
+ mDevicesAndCapabilities.clear();
+ }
+
+ if (!mDevicesAndCapabilities.empty()) {
+ return;
+ }
+
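+  // Rebuild the cache: one capability per (format, frame rate range) pair,
+  // with a 30 fps fallback when no range is reported.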
+ for (AVCaptureDevice* device in [RTCCameraVideoCapturer captureDevices]) {
+ std::string uniqueId = [NSString stdStringForString:device.uniqueID];
+ std::string name = [NSString stdStringForString:device.localizedName];
+ auto& [_, __, capabilities] =
+ mDevicesAndCapabilities.emplace_back(uniqueId, name, VideoCaptureCapabilities());
+
+ for (AVCaptureDeviceFormat* format in
+ [RTCCameraVideoCapturer supportedFormatsForDevice:device]) {
+ VideoCaptureCapability cap;
+ FourCharCode fourcc = CMFormatDescriptionGetMediaSubType(format.formatDescription);
+ cap.videoType = ConvertFourCCToVideoType(fourcc);
+ CMVideoDimensions dimensions =
+ CMVideoFormatDescriptionGetDimensions(format.formatDescription);
+ cap.width = dimensions.width;
+ cap.height = dimensions.height;
+
+ for (AVFrameRateRange* range in format.videoSupportedFrameRateRanges) {
+ cap.maxFPS = ConvertAVFrameRateToCapabilityFPS(range.maxFrameRate);
+ capabilities.push_back(cap);
+ }
+
+ if (capabilities.empty()) {
+ cap.maxFPS = 30;
+ capabilities.push_back(cap);
+ }
+ }
+ }
+}
+} // namespace webrtc::videocapturemodule
diff --git a/dom/media/systemservices/objc_video_capture/device_info_objc.h b/dom/media/systemservices/objc_video_capture/device_info_objc.h
new file mode 100644
index 0000000000..1ddedb471e
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info_objc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_OBJC_H_
+#define MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_OBJC_H_
+
+#import <AVFoundation/AVFoundation.h>
+
+#include "modules/video_capture/video_capture_defines.h"
+#include "device_info.h"
+
+@interface DeviceInfoIosObjC : NSObject {
+ NSArray* _observers;
+ NSLock* _lock;
+ webrtc::VideoCaptureModule::DeviceInfo* _owner;
+}
+
++ (int)captureDeviceCount;
++ (AVCaptureDevice*)captureDeviceForIndex:(int)index;
++ (AVCaptureDevice*)captureDeviceForUniqueId:(NSString*)uniqueId;
++ (NSString*)deviceNameForIndex:(int)index;
++ (NSString*)deviceUniqueIdForIndex:(int)index;
++ (NSString*)deviceNameForUniqueId:(NSString*)uniqueId;
++ (webrtc::VideoCaptureCapability)capabilityForPreset:(NSString*)preset;
+
+- (void)registerOwner:(webrtc::VideoCaptureModule::DeviceInfo*)owner;
+- (void)configureObservers;
+
+@end
+
+#endif // MODULES_VIDEO_CAPTURE_OBJC_DEVICE_INFO_OBJC_H_
diff --git a/dom/media/systemservices/objc_video_capture/device_info_objc.mm b/dom/media/systemservices/objc_video_capture/device_info_objc.mm
new file mode 100644
index 0000000000..6e9435daff
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/device_info_objc.mm
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+# error "This file requires ARC support."
+#endif
+
+#import <AVFoundation/AVFoundation.h>
+
+#import "device_info_objc.h"
+
+@implementation DeviceInfoIosObjC
+
+- (id)init {
+ self = [super init];
+ if (nil != self) {
+ _lock = [[NSLock alloc] init];
+ }
+ return self;
+}
+
+- (void)dealloc {
+}
+
+- (void)registerOwner:(webrtc::VideoCaptureModule::DeviceInfo*)owner {
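+  // The first registered owner installs the device-change observers;
+  // clearing the owner removes them again.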
+ [_lock lock];
+ if (!_owner && owner) {
+ [self configureObservers];
+ } else if (_owner && !owner) {
+ NSNotificationCenter* notificationCenter = [NSNotificationCenter defaultCenter];
+ for (id observer in _observers) {
+ [notificationCenter removeObserver:observer];
+ }
+ _observers = nil;
+ }
+ _owner = owner;
+ [_lock unlock];
+}
+
++ (int)captureDeviceCount {
+ int cnt = 0;
+ @try {
+ for (AVCaptureDevice* device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
+ if ([device isSuspended]) {
+ continue;
+ }
+ cnt++;
+ }
+ } @catch (NSException* exception) {
+ cnt = 0;
+ }
+ return cnt;
+}
+
++ (AVCaptureDevice*)captureDeviceForIndex:(int)index {
+ int cnt = 0;
+ @try {
+ for (AVCaptureDevice* device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
+ if ([device isSuspended]) {
+ continue;
+ }
+ if (cnt == index) {
+ return device;
+ }
+ cnt++;
+ }
+ } @catch (NSException* exception) {
+ cnt = 0;
+ }
+
+ return nil;
+}
+
++ (AVCaptureDevice*)captureDeviceForUniqueId:(NSString*)uniqueId {
+ for (AVCaptureDevice* device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
+ if ([device isSuspended]) {
+ continue;
+ }
+ if ([uniqueId isEqual:device.uniqueID]) {
+ return device;
+ }
+ }
+
+ return nil;
+}
+
++ (NSString*)deviceNameForIndex:(int)index {
+ return [DeviceInfoIosObjC captureDeviceForIndex:index].localizedName;
+}
+
++ (NSString*)deviceUniqueIdForIndex:(int)index {
+ return [DeviceInfoIosObjC captureDeviceForIndex:index].uniqueID;
+}
+
++ (NSString*)deviceNameForUniqueId:(NSString*)uniqueId {
+ return [[AVCaptureDevice deviceWithUniqueID:uniqueId] localizedName];
+}
+
++ (webrtc::VideoCaptureCapability)capabilityForPreset:(NSString*)preset {
+ webrtc::VideoCaptureCapability capability;
+
+ // TODO(tkchin): Maybe query AVCaptureDevice for supported formats, and
+ // then get the dimensions / frame rate from each supported format
+ if ([preset isEqualToString:AVCaptureSessionPreset352x288]) {
+ capability.width = 352;
+ capability.height = 288;
+ capability.maxFPS = 30;
+ capability.videoType = webrtc::VideoType::kNV12;
+ capability.interlaced = false;
+ } else if ([preset isEqualToString:AVCaptureSessionPreset640x480]) {
+ capability.width = 640;
+ capability.height = 480;
+ capability.maxFPS = 30;
+ capability.videoType = webrtc::VideoType::kNV12;
+ capability.interlaced = false;
+ } else if ([preset isEqualToString:AVCaptureSessionPreset1280x720]) {
+ capability.width = 1280;
+ capability.height = 720;
+ capability.maxFPS = 30;
+ capability.videoType = webrtc::VideoType::kNV12;
+ capability.interlaced = false;
+ }
+
+ return capability;
+}
+
+- (void)configureObservers {
+ // register device connected / disconnected event
+ NSNotificationCenter* notificationCenter = [NSNotificationCenter defaultCenter];
+
+ id deviceWasConnectedObserver =
+ [notificationCenter addObserverForName:AVCaptureDeviceWasConnectedNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* note) {
+ [_lock lock];
+ AVCaptureDevice* device = [note object];
+ BOOL isVideoDevice = [device hasMediaType:AVMediaTypeVideo];
+ if (isVideoDevice && _owner) _owner->DeviceChange();
+ [_lock unlock];
+ }];
+
+ id deviceWasDisconnectedObserver =
+ [notificationCenter addObserverForName:AVCaptureDeviceWasDisconnectedNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* note) {
+ [_lock lock];
+ AVCaptureDevice* device = [note object];
+ BOOL isVideoDevice = [device hasMediaType:AVMediaTypeVideo];
+ if (isVideoDevice && _owner) _owner->DeviceChange();
+ [_lock unlock];
+ }];
+
+ _observers = [[NSArray alloc]
+ initWithObjects:deviceWasConnectedObserver, deviceWasDisconnectedObserver, nil];
+}
+
+@end
diff --git a/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.h b/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.h
new file mode 100644
index 0000000000..9c6604ffe5
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_OBJC_RTC_VIDEO_CAPTURE_OBJC_H_
+#define MODULES_VIDEO_CAPTURE_OBJC_RTC_VIDEO_CAPTURE_OBJC_H_
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+#ifdef WEBRTC_IOS
+# import <UIKit/UIKit.h>
+#endif
+
+#include "video_capture.h"
+
+// The following class listens to a notification with name:
+// 'StatusBarOrientationDidChange'.
+// This notification must be posted in order for the capturer to reflect the
+// orientation change in video w.r.t. the application orientation.
+@interface RTCVideoCaptureIosObjC : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
+
+@property webrtc::VideoRotation frameRotation;
+
+// custom initializer. Instance of VideoCaptureIos is needed
+// for callback purposes.
+// default init methods have been overridden to return nil.
+- (id)initWithOwner:(webrtc::videocapturemodule::VideoCaptureIos*)owner;
+- (BOOL)setCaptureDeviceByUniqueId:(NSString*)uniqueId;
+- (BOOL)startCaptureWithCapability:(const webrtc::VideoCaptureCapability&)capability;
+- (BOOL)stopCapture;
+
+@end
+#endif // MODULES_VIDEO_CAPTURE_OBJC_RTC_VIDEO_CAPTURE_OBJC_H_
diff --git a/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.mm b/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.mm
new file mode 100644
index 0000000000..0a36768fa8
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.mm
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+# error "This file requires ARC support."
+#endif
+
+#import <AVFoundation/AVFoundation.h>
+#ifdef WEBRTC_IOS
+# import <UIKit/UIKit.h>
+#endif
+
+#import "device_info_objc.h"
+#import "rtc_video_capture_objc.h"
+
+#include "rtc_base/logging.h"
+
+using namespace webrtc;
+using namespace webrtc::videocapturemodule;
+
+@interface RTCVideoCaptureIosObjC (hidden)
+- (int)changeCaptureInputWithName:(NSString*)captureDeviceName;
+@end
+
+@implementation RTCVideoCaptureIosObjC {
+ webrtc::videocapturemodule::VideoCaptureIos* _owner;
+ webrtc::VideoCaptureCapability _capability;
+ AVCaptureSession* _captureSession;
+ BOOL _orientationHasChanged;
+ AVCaptureConnection* _connection;
+ BOOL _captureChanging; // Guarded by _captureChangingCondition.
+ NSCondition* _captureChangingCondition;
+ dispatch_queue_t _frameQueue;
+}
+
+@synthesize frameRotation = _frameRotation;
+
+- (id)initWithOwner:(VideoCaptureIos*)owner {
+  if ((self = [super init])) {
+ _owner = owner;
+ _captureSession = [[AVCaptureSession alloc] init];
+#if defined(WEBRTC_IOS)
+ _captureSession.usesApplicationAudioSession = NO;
+#endif
+ _captureChanging = NO;
+ _captureChangingCondition = [[NSCondition alloc] init];
+
+ if (!_captureSession || !_captureChangingCondition) {
+ return nil;
+ }
+
+ // create and configure a new output (using callbacks)
+ AVCaptureVideoDataOutput* captureOutput = [[AVCaptureVideoDataOutput alloc] init];
+ NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
+
+ NSNumber* val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8];
+ NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
+ captureOutput.videoSettings = videoSettings;
+
+ // add new output
+ if ([_captureSession canAddOutput:captureOutput]) {
+ [_captureSession addOutput:captureOutput];
+ } else {
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << ": Could not add output to AVCaptureSession";
+ }
+
+#ifdef WEBRTC_IOS
+ [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
+
+ NSNotificationCenter* notify = [NSNotificationCenter defaultCenter];
+ [notify addObserver:self
+ selector:@selector(onVideoError:)
+ name:AVCaptureSessionRuntimeErrorNotification
+ object:_captureSession];
+ [notify addObserver:self
+ selector:@selector(deviceOrientationDidChange:)
+ name:UIDeviceOrientationDidChangeNotification
+ object:nil];
+#endif
+ }
+
+ // Create a serial queue on which video capture will run. By setting the target,
+  // blocks should still run on DISPATCH_QUEUE_PRIORITY_DEFAULT rather than creating
+ // a new thread.
+ _frameQueue = dispatch_queue_create("org.webrtc.videocapture", DISPATCH_QUEUE_SERIAL);
+ dispatch_set_target_queue(_frameQueue,
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
+
+ return self;
+}
+
+- (void)directOutputToSelf {
+ [[self currentOutput] setSampleBufferDelegate:self queue:_frameQueue];
+}
+
+- (void)directOutputToNil {
+ [[self currentOutput] setSampleBufferDelegate:nil queue:NULL];
+}
+
+- (void)deviceOrientationDidChange:(NSNotification*)notification {
+ _orientationHasChanged = YES;
+ [self setRelativeVideoOrientation];
+}
+
+- (void)dealloc {
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
+- (BOOL)setCaptureDeviceByUniqueId:(NSString*)uniqueId {
+ [self waitForCaptureChangeToFinish];
+ // check to see if the camera is already set
+ if (_captureSession) {
+ NSArray* currentInputs = [NSArray arrayWithArray:[_captureSession inputs]];
+ if ([currentInputs count] > 0) {
+ AVCaptureDeviceInput* currentInput = [currentInputs objectAtIndex:0];
+ if ([uniqueId isEqualToString:[currentInput.device localizedName]]) {
+ return YES;
+ }
+ }
+ }
+
+ return [self changeCaptureInputByUniqueId:uniqueId];
+}
+
+- (BOOL)startCaptureWithCapability:(const VideoCaptureCapability&)capability {
+ [self waitForCaptureChangeToFinish];
+ if (!_captureSession) {
+ return NO;
+ }
+
+ // check limits of the resolution
+ if (capability.maxFPS < 0 || capability.maxFPS > 60) {
+ return NO;
+ }
+
+ if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset1280x720]) {
+ if (capability.width > 1280 || capability.height > 720) {
+ return NO;
+ }
+ } else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
+ if (capability.width > 640 || capability.height > 480) {
+ return NO;
+ }
+ } else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset352x288]) {
+ if (capability.width > 352 || capability.height > 288) {
+ return NO;
+ }
+ } else if (capability.width < 0 || capability.height < 0) {
+ return NO;
+ }
+
+ _capability = capability;
+
+ AVCaptureVideoDataOutput* currentOutput = [self currentOutput];
+ if (!currentOutput) return NO;
+
+ [self directOutputToSelf];
+
+ _orientationHasChanged = NO;
+ _captureChanging = YES;
+ dispatch_async(_frameQueue, ^{
+ [self startCaptureInBackgroundWithOutput:currentOutput];
+ });
+ return YES;
+}
+
+- (AVCaptureVideoDataOutput*)currentOutput {
+ return [[_captureSession outputs] firstObject];
+}
+
+- (void)startCaptureInBackgroundWithOutput:(AVCaptureVideoDataOutput*)currentOutput {
+ NSString* captureQuality = [NSString stringWithString:AVCaptureSessionPresetLow];
+ if (_capability.width >= 1280 || _capability.height >= 720) {
+ captureQuality = [NSString stringWithString:AVCaptureSessionPreset1280x720];
+ } else if (_capability.width >= 640 || _capability.height >= 480) {
+ captureQuality = [NSString stringWithString:AVCaptureSessionPreset640x480];
+ } else if (_capability.width >= 352 || _capability.height >= 288) {
+ captureQuality = [NSString stringWithString:AVCaptureSessionPreset352x288];
+ }
+
+ // begin configuration for the AVCaptureSession
+ [_captureSession beginConfiguration];
+
+ // picture resolution
+ [_captureSession setSessionPreset:captureQuality];
+
+ _connection = [currentOutput connectionWithMediaType:AVMediaTypeVideo];
+ [self setRelativeVideoOrientation];
+
+ // finished configuring, commit settings to AVCaptureSession.
+ [_captureSession commitConfiguration];
+
+ [_captureSession startRunning];
+ [self signalCaptureChangeEnd];
+}
+
+- (void)setRelativeVideoOrientation {
+ if (!_connection.supportsVideoOrientation) {
+ return;
+ }
+#ifndef WEBRTC_IOS
+ _connection.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
+ return;
+#else
+ switch ([UIDevice currentDevice].orientation) {
+ case UIDeviceOrientationPortrait:
+ _connection.videoOrientation = AVCaptureVideoOrientationPortrait;
+ break;
+ case UIDeviceOrientationPortraitUpsideDown:
+ _connection.videoOrientation = AVCaptureVideoOrientationPortraitUpsideDown;
+ break;
+ case UIDeviceOrientationLandscapeLeft:
+ _connection.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
+ break;
+ case UIDeviceOrientationLandscapeRight:
+ _connection.videoOrientation = AVCaptureVideoOrientationLandscapeLeft;
+ break;
+ case UIDeviceOrientationFaceUp:
+ case UIDeviceOrientationFaceDown:
+ case UIDeviceOrientationUnknown:
+ if (!_orientationHasChanged) {
+ _connection.videoOrientation = AVCaptureVideoOrientationPortrait;
+ }
+ break;
+ }
+#endif
+}
+
+- (void)onVideoError:(NSNotification*)notification {
+ NSLog(@"onVideoError: %@", notification);
+ // TODO(sjlee): make the specific error handling with this notification.
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << ": [AVCaptureSession startRunning] error.";
+}
+
+- (BOOL)stopCapture {
+#ifdef WEBRTC_IOS
+ [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
+#endif
+ _orientationHasChanged = NO;
+ [self waitForCaptureChangeToFinish];
+ [self directOutputToNil];
+
+ if (!_captureSession) {
+ return NO;
+ }
+
+ _captureChanging = YES;
+ [_captureSession stopRunning];
+
+ dispatch_sync(_frameQueue, ^{
+ [self signalCaptureChangeEnd];
+ });
+ return YES;
+}
+
+- (BOOL)changeCaptureInputByUniqueId:(NSString*)uniqueId {
+ [self waitForCaptureChangeToFinish];
+ NSArray* currentInputs = [_captureSession inputs];
+ // remove current input
+ if ([currentInputs count] > 0) {
+ AVCaptureInput* currentInput = (AVCaptureInput*)[currentInputs objectAtIndex:0];
+
+ [_captureSession removeInput:currentInput];
+ }
+
+ // Look for input device with the name requested (as our input param)
+ // get list of available capture devices
+ int captureDeviceCount = [DeviceInfoIosObjC captureDeviceCount];
+ if (captureDeviceCount <= 0) {
+ return NO;
+ }
+
+ AVCaptureDevice* captureDevice = [DeviceInfoIosObjC captureDeviceForUniqueId:uniqueId];
+
+ if (!captureDevice) {
+ return NO;
+ }
+
+ // now create capture session input out of AVCaptureDevice
+ NSError* deviceError = nil;
+ AVCaptureDeviceInput* newCaptureInput = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice
+ error:&deviceError];
+
+ if (!newCaptureInput) {
+ const char* errorMessage = [[deviceError localizedDescription] UTF8String];
+
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << ": deviceInputWithDevice error:" << errorMessage;
+
+ return NO;
+ }
+
+ // try to add our new capture device to the capture session
+ [_captureSession beginConfiguration];
+
+ BOOL addedCaptureInput = NO;
+ if ([_captureSession canAddInput:newCaptureInput]) {
+ [_captureSession addInput:newCaptureInput];
+ addedCaptureInput = YES;
+ } else {
+ addedCaptureInput = NO;
+ }
+
+ [_captureSession commitConfiguration];
+
+ return addedCaptureInput;
+}
+
+- (void)captureOutput:(AVCaptureOutput*)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
+ fromConnection:(AVCaptureConnection*)connection {
+ const int kFlags = 0;
+ CVImageBufferRef videoFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
+
+ if (CVPixelBufferLockBaseAddress(videoFrame, kFlags) != kCVReturnSuccess) {
+ return;
+ }
+
+ uint8_t* baseAddress = (uint8_t*)CVPixelBufferGetBaseAddress(videoFrame);
+ const size_t width = CVPixelBufferGetWidth(videoFrame);
+ const size_t height = CVPixelBufferGetHeight(videoFrame);
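+  // The output is configured for 4:2:2 (UYVY), which packs two bytes per pixel.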
+ const size_t frameSize = width * height * 2;
+
+ VideoCaptureCapability tempCaptureCapability;
+ tempCaptureCapability.width = width;
+ tempCaptureCapability.height = height;
+ tempCaptureCapability.maxFPS = _capability.maxFPS;
+ tempCaptureCapability.videoType = VideoType::kUYVY;
+
+ _owner->IncomingFrame(baseAddress, frameSize, tempCaptureCapability, 0);
+
+ CVPixelBufferUnlockBaseAddress(videoFrame, kFlags);
+}
+
+- (void)signalCaptureChangeEnd {
+ [_captureChangingCondition lock];
+ _captureChanging = NO;
+ [_captureChangingCondition signal];
+ [_captureChangingCondition unlock];
+}
+
+- (void)waitForCaptureChangeToFinish {
+ [_captureChangingCondition lock];
+ while (_captureChanging) {
+ [_captureChangingCondition wait];
+ }
+ [_captureChangingCondition unlock];
+}
+@end
diff --git a/dom/media/systemservices/objc_video_capture/video_capture.h b/dom/media/systemservices/objc_video_capture/video_capture.h
new file mode 100644
index 0000000000..b9f228f679
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/video_capture.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CAPTURE_OBJC_VIDEO_CAPTURE_H_
+#define MODULES_VIDEO_CAPTURE_OBJC_VIDEO_CAPTURE_H_
+
+#include "modules/video_capture/video_capture_impl.h"
+#include "api/scoped_refptr.h"
+
+@class RTCVideoCaptureIosObjC;
+
+namespace webrtc::videocapturemodule {
+class VideoCaptureIos : public VideoCaptureImpl {
+ public:
+ VideoCaptureIos();
+ virtual ~VideoCaptureIos();
+
+ static rtc::scoped_refptr<VideoCaptureModule> Create(const char* device_unique_id_utf8);
+
+ // Implementation of VideoCaptureImpl.
+ int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ int32_t StopCapture() override;
+ bool CaptureStarted() override;
+ int32_t CaptureSettings(VideoCaptureCapability& settings) override;
+
+ private:
+ RTCVideoCaptureIosObjC* capture_device_;
+ bool is_capturing_;
+ VideoCaptureCapability capability_;
+};
+
+} // namespace webrtc::videocapturemodule
+
+#endif // MODULES_VIDEO_CAPTURE_OBJC_VIDEO_CAPTURE_H_
diff --git a/dom/media/systemservices/objc_video_capture/video_capture.mm b/dom/media/systemservices/objc_video_capture/video_capture.mm
new file mode 100644
index 0000000000..63aef3204c
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/video_capture.mm
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+# error "This file requires ARC support."
+#endif
+
+#include "device_info_objc.h"
+#include "rtc_video_capture_objc.h"
+#include "rtc_base/ref_counted_object.h"
+#include "api/scoped_refptr.h"
+#include "video_capture_avfoundation.h"
+#include "mozilla/StaticPrefs_media.h"
+
+using namespace mozilla;
+using namespace webrtc;
+using namespace videocapturemodule;
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(const char* deviceUniqueIdUTF8) {
+ if (StaticPrefs::media_getusermedia_camera_macavf_enabled_AtStartup()) {
+ return VideoCaptureAvFoundation::Create(deviceUniqueIdUTF8);
+ }
+ return VideoCaptureIos::Create(deviceUniqueIdUTF8);
+}
+
+VideoCaptureIos::VideoCaptureIos() : is_capturing_(false) {
+ capability_.width = kDefaultWidth;
+ capability_.height = kDefaultHeight;
+ capability_.maxFPS = kDefaultFrameRate;
+ capture_device_ = nil;
+}
+
+VideoCaptureIos::~VideoCaptureIos() {
+ if (is_capturing_) {
+ [capture_device_ stopCapture];
+ capture_device_ = nil;
+ }
+}
+
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureIos::Create(const char* deviceUniqueIdUTF8) {
+ if (!deviceUniqueIdUTF8[0]) {
+ return NULL;
+ }
+
+ rtc::scoped_refptr<VideoCaptureIos> capture_module(new rtc::RefCountedObject<VideoCaptureIos>());
+
+ const int32_t name_length = strlen(deviceUniqueIdUTF8);
+ if (name_length >= kVideoCaptureUniqueNameLength) return nullptr;
+
+ capture_module->_deviceUniqueId = new char[name_length + 1];
+ strncpy(capture_module->_deviceUniqueId, deviceUniqueIdUTF8, name_length + 1);
+ capture_module->_deviceUniqueId[name_length] = '\0';
+
+ capture_module->capture_device_ =
+ [[RTCVideoCaptureIosObjC alloc] initWithOwner:capture_module.get()];
+ if (!capture_module->capture_device_) {
+ return nullptr;
+ }
+
+ if (![capture_module->capture_device_
+ setCaptureDeviceByUniqueId:[[NSString alloc] initWithCString:deviceUniqueIdUTF8
+ encoding:NSUTF8StringEncoding]]) {
+ return nullptr;
+ }
+ return capture_module;
+}
+
+int32_t VideoCaptureIos::StartCapture(const VideoCaptureCapability& capability) {
+ capability_ = capability;
+
+ if (![capture_device_ startCaptureWithCapability:capability]) {
+ return -1;
+ }
+
+ is_capturing_ = true;
+
+ return 0;
+}
+
+int32_t VideoCaptureIos::StopCapture() {
+ if (![capture_device_ stopCapture]) {
+ return -1;
+ }
+
+ is_capturing_ = false;
+ return 0;
+}
+
+bool VideoCaptureIos::CaptureStarted() { return is_capturing_; }
+
+int32_t VideoCaptureIos::CaptureSettings(VideoCaptureCapability& settings) {
+ settings = capability_;
+ settings.videoType = VideoType::kNV12;
+ return 0;
+}
diff --git a/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.h b/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.h
new file mode 100644
index 0000000000..570d0dd72c
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_SYSTEMSERVICES_OBJC_VIDEO_CAPTURE_VIDEO_CAPTURE2_H_
+#define DOM_MEDIA_SYSTEMSERVICES_OBJC_VIDEO_CAPTURE_VIDEO_CAPTURE2_H_
+
+#import "components/capturer/RTCCameraVideoCapturer.h"
+
+#include "api/sequence_checker.h"
+#include "modules/video_capture/video_capture_impl.h"
+#include "mozilla/Maybe.h"
+#include "PerformanceRecorder.h"
+
+@class VideoCaptureAdapter;
+
+namespace webrtc::videocapturemodule {
+
+/**
+ * VideoCaptureImpl implementation of the libwebrtc ios/mac sdk camera backend.
+ * Single threaded except for OnFrame() that happens on a platform callback thread.
+ */
+class VideoCaptureAvFoundation : public VideoCaptureImpl {
+ public:
+ VideoCaptureAvFoundation(AVCaptureDevice* _Nonnull aDevice);
+ virtual ~VideoCaptureAvFoundation();
+
+ static rtc::scoped_refptr<VideoCaptureModule> Create(const char* _Nullable aDeviceUniqueIdUTF8);
+
+ // Implementation of VideoCaptureImpl. Single threaded.
+
+ // Starts capturing synchronously. Idempotent. If an existing capture is live and another
+ // capability is requested we'll restart the underlying backend with the new capability.
+ int32_t StartCapture(const VideoCaptureCapability& aCapability) MOZ_EXCLUDES(api_lock_) override;
+ // Stops capturing synchronously. Idempotent.
+ int32_t StopCapture() MOZ_EXCLUDES(api_lock_) override;
+ bool CaptureStarted() MOZ_EXCLUDES(api_lock_) override;
+ int32_t CaptureSettings(VideoCaptureCapability& aSettings) override;
+
+ // Callback. This can be called on any thread.
+ int32_t OnFrame(webrtc::VideoFrame& aFrame) MOZ_EXCLUDES(api_lock_);
+
+ void SetTrackingId(uint32_t aTrackingIdProcId) MOZ_EXCLUDES(api_lock_) override;
+
+ // Allows the capturer to start the recording before calling OnFrame, to cover more operations
+ // under the same measurement.
+ void StartFrameRecording(int32_t aWidth, int32_t aHeight) MOZ_EXCLUDES(api_lock_);
+
+ // Registers the current thread with the profiler if not already registered.
+ void MaybeRegisterCallbackThread();
+
+ private:
+ SequenceChecker mChecker;
+ AVCaptureDevice* _Nonnull mDevice RTC_GUARDED_BY(mChecker);
+ VideoCaptureAdapter* _Nonnull mAdapter RTC_GUARDED_BY(mChecker);
+ RTC_OBJC_TYPE(RTCCameraVideoCapturer) * _Nullable mCapturer RTC_GUARDED_BY(mChecker);
+ // If capture has started, this is the capability it was started for. Written on the mChecker
+ // thread only.
+ mozilla::Maybe<VideoCaptureCapability> mCapability MOZ_GUARDED_BY(api_lock_);
+ // Id string uniquely identifying this capture source. Written on the mChecker thread only.
+ mozilla::Maybe<mozilla::TrackingId> mTrackingId MOZ_GUARDED_BY(api_lock_);
+ // Adds frame specific markers to the profiler while mTrackingId is set.
+ mozilla::PerformanceRecorderMulti<mozilla::CaptureStage> mCaptureRecorder;
+ mozilla::PerformanceRecorderMulti<mozilla::CopyVideoStage> mConversionRecorder;
+ std::atomic<ProfilerThreadId> mCallbackThreadId;
+};
+
+} // namespace webrtc::videocapturemodule
+
+@interface VideoCaptureAdapter : NSObject <RTC_OBJC_TYPE (RTCVideoCapturerDelegate)>
+@property(nonatomic) webrtc::videocapturemodule::VideoCaptureAvFoundation* _Nullable capturer;
+@end
+
+#endif
diff --git a/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.mm b/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.mm
new file mode 100644
index 0000000000..bab1acac66
--- /dev/null
+++ b/dom/media/systemservices/objc_video_capture/video_capture_avfoundation.mm
@@ -0,0 +1,286 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "video_capture_avfoundation.h"
+
+#import "api/video_frame_buffer/RTCNativeI420Buffer+Private.h"
+#import "base/RTCI420Buffer.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/capturer/RTCCameraVideoCapturer.h"
+#import "helpers/NSString+StdString.h"
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_rotation.h"
+#include "CallbackThreadRegistry.h"
+#include "device_info_avfoundation.h"
+#include "modules/video_capture/video_capture_defines.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/UniquePtr.h"
+#include "rtc_base/time_utils.h"
+
+using namespace mozilla;
+using namespace webrtc::videocapturemodule;
+
+namespace {
+webrtc::VideoRotation ToNativeRotation(RTCVideoRotation aRotation) {
+ switch (aRotation) {
+ case RTCVideoRotation_0:
+ return webrtc::kVideoRotation_0;
+ case RTCVideoRotation_90:
+ return webrtc::kVideoRotation_90;
+ case RTCVideoRotation_180:
+ return webrtc::kVideoRotation_180;
+ case RTCVideoRotation_270:
+ return webrtc::kVideoRotation_270;
+ default:
+ MOZ_CRASH_UNSAFE_PRINTF("Unexpected rotation %d", static_cast<int>(aRotation));
+ return webrtc::kVideoRotation_0;
+ }
+}
+
+AVCaptureDeviceFormat* _Nullable FindFormat(AVCaptureDevice* _Nonnull aDevice,
+ webrtc::VideoCaptureCapability aCapability) {
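+  // Look for a native format whose dimensions, pixel format and one of its
+  // frame rate ranges match the requested capability.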
+ for (AVCaptureDeviceFormat* format in [aDevice formats]) {
+ CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
+ if (dimensions.width != aCapability.width) {
+ continue;
+ }
+ if (dimensions.height != aCapability.height) {
+ continue;
+ }
+ FourCharCode fourcc = CMFormatDescriptionGetMediaSubType(format.formatDescription);
+ if (aCapability.videoType != DeviceInfoAvFoundation::ConvertFourCCToVideoType(fourcc)) {
+ continue;
+ }
+ if ([format.videoSupportedFrameRateRanges
+ indexOfObjectPassingTest:^BOOL(AVFrameRateRange* _Nonnull obj, NSUInteger idx,
+ BOOL* _Nonnull stop) {
+ return static_cast<BOOL>(DeviceInfoAvFoundation::ConvertAVFrameRateToCapabilityFPS(
+ obj.maxFrameRate) == aCapability.maxFPS);
+ }] == NSNotFound) {
+ continue;
+ }
+
+ return format;
+ }
+ return nullptr;
+}
+} // namespace
+
+@implementation VideoCaptureAdapter
+@synthesize capturer = _capturer;
+
+- (void)capturer:(RTC_OBJC_TYPE(RTCVideoCapturer) * _Nonnull)capturer
+ didCaptureVideoFrame:(RTC_OBJC_TYPE(RTCVideoFrame) * _Nonnull)frame {
+ _capturer->StartFrameRecording(frame.width, frame.height);
+ const int64_t timestamp_us = frame.timeStampNs / rtc::kNumNanosecsPerMicrosec;
+ RTC_OBJC_TYPE(RTCI420Buffer)* buffer = [[frame buffer] toI420];
+ // Accessing the (intended-to-be-private) native buffer directly is hacky but lets us skip two
+ // copies
+ rtc::scoped_refptr<webrtc::I420BufferInterface> nativeBuffer = [buffer nativeI420Buffer];
+ webrtc::VideoFrame nativeFrame = webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(nativeBuffer)
+ .set_rotation(ToNativeRotation(frame.rotation))
+ .set_timestamp_us(timestamp_us)
+ .build();
+ _capturer->OnFrame(nativeFrame);
+}
+
+@end
+
+namespace webrtc::videocapturemodule {
+VideoCaptureAvFoundation::VideoCaptureAvFoundation(AVCaptureDevice* _Nonnull aDevice)
+ : mDevice(aDevice),
+ mAdapter([[VideoCaptureAdapter alloc] init]),
+ mCapturer(nullptr),
+ mCallbackThreadId() {
+ {
+ const char* uniqueId = [[aDevice uniqueID] UTF8String];
+ size_t len = strlen(uniqueId);
+ _deviceUniqueId = new (std::nothrow) char[len + 1];
+ if (_deviceUniqueId) {
+ memcpy(_deviceUniqueId, uniqueId, len + 1);
+ }
+ }
+
+ mAdapter.capturer = this;
+ mCapturer = [[RTC_OBJC_TYPE(RTCCameraVideoCapturer) alloc] initWithDelegate:mAdapter];
+}
+
+VideoCaptureAvFoundation::~VideoCaptureAvFoundation() {
+ // Must block until capture has fully stopped, including async operations.
+ StopCapture();
+}
+
+/* static */
+rtc::scoped_refptr<VideoCaptureModule> VideoCaptureAvFoundation::Create(
+ const char* _Nullable aDeviceUniqueIdUTF8) {
+ std::string uniqueId(aDeviceUniqueIdUTF8);
+ for (AVCaptureDevice* device in [RTCCameraVideoCapturer captureDevices]) {
+ if ([NSString stdStringForString:device.uniqueID] == uniqueId) {
+ rtc::scoped_refptr<VideoCaptureModule> module(
+ new rtc::RefCountedObject<VideoCaptureAvFoundation>(device));
+ return module;
+ }
+ }
+ return nullptr;
+}
+
+int32_t VideoCaptureAvFoundation::StartCapture(const VideoCaptureCapability& aCapability) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ AVCaptureDeviceFormat* format = FindFormat(mDevice, aCapability);
+ if (!format) {
+ return -1;
+ }
+
+ {
+ MutexLock lock(&api_lock_);
+ if (mCapability) {
+ if (mCapability->width == aCapability.width && mCapability->height == aCapability.height &&
+ mCapability->maxFPS == aCapability.maxFPS &&
+ mCapability->videoType == aCapability.videoType) {
+ return 0;
+ }
+
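+      // StopCapture() acquires api_lock_ itself, so release it around the
+      // call to avoid self-deadlock and re-acquire it afterwards.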
+ api_lock_.Unlock();
+ int32_t rv = StopCapture();
+ api_lock_.Lock();
+
+ if (rv != 0) {
+ return rv;
+ }
+ }
+ }
+
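+  // startCaptureWithDevice is asynchronous; use a Monitor to block this
+  // thread until the completion handler has reported a result.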
+ Monitor monitor("VideoCaptureAVFoundation::StartCapture");
+ Monitor* copyableMonitor = &monitor;
+ MonitorAutoLock lock(monitor);
+ __block Maybe<int32_t> rv;
+
+ [mCapturer startCaptureWithDevice:mDevice
+ format:format
+ fps:aCapability.maxFPS
+ completionHandler:^(NSError* error) {
+ MOZ_RELEASE_ASSERT(!rv);
+ rv = Some(error ? -1 : 0);
+ copyableMonitor->Notify();
+ }];
+
+ while (!rv) {
+ monitor.Wait();
+ }
+
+ if (*rv == 0) {
+ MutexLock lock(&api_lock_);
+ mCapability = Some(aCapability);
+ }
+
+ return *rv;
+}
+
+int32_t VideoCaptureAvFoundation::StopCapture() {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ {
+ MutexLock lock(&api_lock_);
+ if (!mCapability) {
+ return 0;
+ }
+ mCapability = Nothing();
+ }
+
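+  // As in StartCapture, block until the asynchronous stop has completed.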
+ Monitor monitor("VideoCaptureAVFoundation::StopCapture");
+ Monitor* copyableMonitor = &monitor;
+ MonitorAutoLock lock(monitor);
+ __block bool done = false;
+
+ [mCapturer stopCaptureWithCompletionHandler:^(void) {
+ MOZ_RELEASE_ASSERT(!done);
+ done = true;
+ copyableMonitor->Notify();
+ }];
+
+ while (!done) {
+ monitor.Wait();
+ }
+ return 0;
+}
+
+bool VideoCaptureAvFoundation::CaptureStarted() {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ MutexLock lock(&api_lock_);
+ return mCapability.isSome();
+}
+
+int32_t VideoCaptureAvFoundation::CaptureSettings(VideoCaptureCapability& aSettings) {
+ MOZ_CRASH("Unexpected call");
+ return -1;
+}
+
+int32_t VideoCaptureAvFoundation::OnFrame(webrtc::VideoFrame& aFrame) {
+ MutexLock lock(&api_lock_);
+ mConversionRecorder.Record(0);
+ int32_t rv = DeliverCapturedFrame(aFrame);
+ mCaptureRecorder.Record(0);
+ return rv;
+}
+
+void VideoCaptureAvFoundation::SetTrackingId(uint32_t aTrackingIdProcId) {
+ RTC_DCHECK_RUN_ON(&mChecker);
+ MutexLock lock(&api_lock_);
+ if (NS_WARN_IF(mTrackingId.isSome())) {
+    // This capture instance must be shared across multiple camera requests.
+    // For now, ignore requests other than the first.
+ return;
+ }
+ mTrackingId.emplace(TrackingId::Source::Camera, aTrackingIdProcId);
+}
+
+void VideoCaptureAvFoundation::StartFrameRecording(int32_t aWidth, int32_t aHeight) {
+ MaybeRegisterCallbackThread();
+ MutexLock lock(&api_lock_);
+ if (MOZ_UNLIKELY(!mTrackingId)) {
+ return;
+ }
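+  // Map the webrtc pixel format to the CaptureStage::ImageType used by the
+  // performance recorder.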
+ auto fromWebrtcVideoType = [](webrtc::VideoType aType) -> CaptureStage::ImageType {
+ switch (aType) {
+ case webrtc::VideoType::kI420:
+ return CaptureStage::ImageType::I420;
+ case webrtc::VideoType::kYUY2:
+ return CaptureStage::ImageType::YUY2;
+ case webrtc::VideoType::kYV12:
+ return CaptureStage::ImageType::YV12;
+ case webrtc::VideoType::kUYVY:
+ return CaptureStage::ImageType::UYVY;
+ case webrtc::VideoType::kNV12:
+ return CaptureStage::ImageType::NV12;
+ case webrtc::VideoType::kNV21:
+ return CaptureStage::ImageType::NV21;
+ case webrtc::VideoType::kMJPEG:
+ return CaptureStage::ImageType::MJPEG;
+ default:
+ return CaptureStage::ImageType::Unknown;
+ }
+ };
+ mCaptureRecorder.Start(
+ 0, "VideoCaptureAVFoundation"_ns, *mTrackingId, aWidth, aHeight,
+ mCapability.map([&](const auto& aCap) { return fromWebrtcVideoType(aCap.videoType); })
+ .valueOr(CaptureStage::ImageType::Unknown));
+ if (mCapability && mCapability->videoType != webrtc::VideoType::kI420) {
+ mConversionRecorder.Start(0, "VideoCaptureAVFoundation"_ns, *mTrackingId, aWidth, aHeight);
+ }
+}
+
+void VideoCaptureAvFoundation::MaybeRegisterCallbackThread() {
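+  // The capture callback may not always arrive on the same thread;
+  // (re-)register the current thread with the CallbackThreadRegistry whenever
+  // its id changes.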
+ ProfilerThreadId id = profiler_current_thread_id();
+ if (MOZ_LIKELY(id == mCallbackThreadId)) {
+ return;
+ }
+ mCallbackThreadId = id;
+ CallbackThreadRegistry::Get()->Register(mCallbackThreadId, "VideoCaptureAVFoundationCallback");
+}
+} // namespace webrtc::videocapturemodule
diff --git a/dom/media/systemservices/video_engine/browser_capture_impl.h b/dom/media/systemservices/video_engine/browser_capture_impl.h
new file mode 100644
index 0000000000..aeaae62202
--- /dev/null
+++ b/dom/media/systemservices/video_engine/browser_capture_impl.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBRTC_MODULES_BROWSER_CAPTURE_MAIN_SOURCE_BROWSER_CAPTURE_IMPL_H_
+#define WEBRTC_MODULES_BROWSER_CAPTURE_MAIN_SOURCE_BROWSER_CAPTURE_IMPL_H_
+
+#include "webrtc/modules/video_capture/video_capture.h"
+
+using namespace webrtc::videocapturemodule;
+
+namespace webrtc {
+
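+// Minimal DeviceInfo stub that reports a single synthetic "browser" device.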
+class BrowserDeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
+ public:
+ virtual uint32_t NumberOfDevices() { return 1; }
+
+ virtual int32_t Refresh() { return 0; }
+
+ virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameSize,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Size,
+ char* productUniqueIdUTF8 = NULL,
+ uint32_t productUniqueIdUTF8Size = 0,
+ pid_t* pid = 0) {
+ deviceNameUTF8 = const_cast<char*>(kDeviceName);
+ deviceUniqueIdUTF8 = const_cast<char*>(kUniqueDeviceName);
+ productUniqueIdUTF8 = const_cast<char*>(kProductUniqueId);
+ return 1;
+ };
+
+ virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
+ return 0;
+ }
+
+ virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ return 0;
+ };
+
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ return 0;
+ }
+
+ virtual int32_t GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) {
+ return 0;
+ }
+
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY) {
+ return 0;
+ }
+
+ BrowserDeviceInfoImpl()
+ : kDeviceName("browser"),
+ kUniqueDeviceName("browser"),
+ kProductUniqueId("browser") {}
+
+ static BrowserDeviceInfoImpl* CreateDeviceInfo() {
+ return new BrowserDeviceInfoImpl();
+ }
+ virtual ~BrowserDeviceInfoImpl() {}
+
+ private:
+ const char* kDeviceName;
+ const char* kUniqueDeviceName;
+ const char* kProductUniqueId;
+};
+} // namespace webrtc
+#endif
diff --git a/dom/media/systemservices/video_engine/desktop_capture_impl.cc b/dom/media/systemservices/video_engine/desktop_capture_impl.cc
new file mode 100644
index 0000000000..fd88433cfa
--- /dev/null
+++ b/dom/media/systemservices/video_engine/desktop_capture_impl.cc
@@ -0,0 +1,760 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_capture/video_capture_impl.h"
+
+#include <stdlib.h>
+#include <memory>
+#include <string>
+
+#include "CamerasTypes.h"
+#include "PerformanceRecorder.h"
+
+#include "api/video/i420_buffer.h"
+#include "base/scoped_nsautorelease_pool.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "libyuv.h" // NOLINT
+#include "modules/include/module_common_types.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "system_wrappers/include/clock.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "video_engine/desktop_capture_impl.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/video_capture/video_capture.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/StaticPrefs_media.h"
+
+#include "PerformanceRecorder.h"
+
+#if defined(_WIN32)
+# include "platform_uithread.h"
+#else
+# include "rtc_base/platform_thread.h"
+#endif
+
+namespace webrtc {
+
+ScreenDeviceInfoImpl::ScreenDeviceInfoImpl(const int32_t id) : _id(id) {}
+
+ScreenDeviceInfoImpl::~ScreenDeviceInfoImpl(void) {}
+
+int32_t ScreenDeviceInfoImpl::Init() {
+ desktop_device_info_ =
+ std::unique_ptr<DesktopDeviceInfo>(DesktopDeviceInfoImpl::Create());
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::Refresh() {
+ desktop_device_info_->Refresh();
+ return 0;
+}
+
+uint32_t ScreenDeviceInfoImpl::NumberOfDevices() {
+ return desktop_device_info_->getDisplayDeviceCount();
+}
+
+int32_t ScreenDeviceInfoImpl::GetDeviceName(
+ uint32_t deviceNumber, char* deviceNameUTF8, uint32_t deviceNameUTF8Size,
+ char* deviceUniqueIdUTF8, uint32_t deviceUniqueIdUTF8Size,
+ char* productUniqueIdUTF8, uint32_t productUniqueIdUTF8Size, pid_t* pid) {
+ DesktopDisplayDevice desktopDisplayDevice;
+
+ // always initialize output
+ if (deviceNameUTF8 && deviceNameUTF8Size > 0) {
+ memset(deviceNameUTF8, 0, deviceNameUTF8Size);
+ }
+
+ if (deviceUniqueIdUTF8 && deviceUniqueIdUTF8Size > 0) {
+ memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Size);
+ }
+ if (productUniqueIdUTF8 && productUniqueIdUTF8Size > 0) {
+ memset(productUniqueIdUTF8, 0, productUniqueIdUTF8Size);
+ }
+
+ if (desktop_device_info_->getDesktopDisplayDeviceInfo(
+ deviceNumber, desktopDisplayDevice) == 0) {
+ size_t len;
+
+ const char* deviceName = desktopDisplayDevice.getDeviceName();
+ len = deviceName ? strlen(deviceName) : 0;
+ if (len && deviceNameUTF8 && len < deviceNameUTF8Size) {
+ memcpy(deviceNameUTF8, deviceName, len);
+ }
+
+ const char* deviceUniqueId = desktopDisplayDevice.getUniqueIdName();
+ len = deviceUniqueId ? strlen(deviceUniqueId) : 0;
+ if (len && deviceUniqueIdUTF8 && len < deviceUniqueIdUTF8Size) {
+ memcpy(deviceUniqueIdUTF8, deviceUniqueId, len);
+ }
+ }
+
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY) {
+ // no device properties to change
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::NumberOfCapabilities(
+ const char* deviceUniqueIdUTF8) {
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::GetCapability(
+ const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) {
+ return 0;
+}
+
+int32_t ScreenDeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ return 0;
+}
+
+VideoCaptureModule* DesktopCaptureImpl::Create(const int32_t id,
+ const char* uniqueId,
+ const CaptureDeviceType type) {
+ return new rtc::RefCountedObject<DesktopCaptureImpl>(id, uniqueId, type);
+}
+
+int32_t WindowDeviceInfoImpl::Init() {
+ desktop_device_info_ =
+ std::unique_ptr<DesktopDeviceInfo>(DesktopDeviceInfoImpl::Create());
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::Refresh() {
+ desktop_device_info_->Refresh();
+ return 0;
+}
+
+uint32_t WindowDeviceInfoImpl::NumberOfDevices() {
+ return desktop_device_info_->getWindowCount();
+}
+
+int32_t WindowDeviceInfoImpl::GetDeviceName(
+ uint32_t deviceNumber, char* deviceNameUTF8, uint32_t deviceNameUTF8Size,
+ char* deviceUniqueIdUTF8, uint32_t deviceUniqueIdUTF8Size,
+ char* productUniqueIdUTF8, uint32_t productUniqueIdUTF8Size, pid_t* pid) {
+ DesktopDisplayDevice desktopDisplayDevice;
+
+ // always initialize output
+ if (deviceNameUTF8 && deviceNameUTF8Size > 0) {
+ memset(deviceNameUTF8, 0, deviceNameUTF8Size);
+ }
+ if (deviceUniqueIdUTF8 && deviceUniqueIdUTF8Size > 0) {
+ memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Size);
+ }
+ if (productUniqueIdUTF8 && productUniqueIdUTF8Size > 0) {
+ memset(productUniqueIdUTF8, 0, productUniqueIdUTF8Size);
+ }
+
+ if (desktop_device_info_->getWindowInfo(deviceNumber, desktopDisplayDevice) ==
+ 0) {
+ size_t len;
+
+ const char* deviceName = desktopDisplayDevice.getDeviceName();
+ len = deviceName ? strlen(deviceName) : 0;
+ if (len && deviceNameUTF8 && len < deviceNameUTF8Size) {
+ memcpy(deviceNameUTF8, deviceName, len);
+ }
+
+ const char* deviceUniqueId = desktopDisplayDevice.getUniqueIdName();
+ len = deviceUniqueId ? strlen(deviceUniqueId) : 0;
+ if (len && deviceUniqueIdUTF8 && len < deviceUniqueIdUTF8Size) {
+ memcpy(deviceUniqueIdUTF8, deviceUniqueId, len);
+ }
+ if (pid) {
+ *pid = desktopDisplayDevice.getPid();
+ }
+ }
+
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY) {
+ // no device properties to change
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::NumberOfCapabilities(
+ const char* deviceUniqueIdUTF8) {
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::GetCapability(
+ const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) {
+ return 0;
+}
+
+int32_t WindowDeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::Init() {
+ desktop_device_info_ =
+ std::unique_ptr<DesktopDeviceInfo>(DesktopDeviceInfoImpl::Create());
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::Refresh() {
+ desktop_device_info_->Refresh();
+ return 0;
+}
+
+uint32_t BrowserDeviceInfoImpl::NumberOfDevices() {
+ return desktop_device_info_->getTabCount();
+}
+
+int32_t BrowserDeviceInfoImpl::GetDeviceName(
+ uint32_t deviceNumber, char* deviceNameUTF8, uint32_t deviceNameUTF8Size,
+ char* deviceUniqueIdUTF8, uint32_t deviceUniqueIdUTF8Size,
+ char* productUniqueIdUTF8, uint32_t productUniqueIdUTF8Size, pid_t* pid) {
+ DesktopTab desktopTab;
+
+ // always initialize output
+ if (deviceNameUTF8 && deviceNameUTF8Size > 0) {
+ memset(deviceNameUTF8, 0, deviceNameUTF8Size);
+ }
+ if (deviceUniqueIdUTF8 && deviceUniqueIdUTF8Size > 0) {
+ memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Size);
+ }
+ if (productUniqueIdUTF8 && productUniqueIdUTF8Size > 0) {
+ memset(productUniqueIdUTF8, 0, productUniqueIdUTF8Size);
+ }
+
+ if (desktop_device_info_->getTabInfo(deviceNumber, desktopTab) == 0) {
+ size_t len;
+
+ const char* deviceName = desktopTab.getTabName();
+ len = deviceName ? strlen(deviceName) : 0;
+ if (len && deviceNameUTF8 && len < deviceNameUTF8Size) {
+ memcpy(deviceNameUTF8, deviceName, len);
+ }
+
+ const char* deviceUniqueId = desktopTab.getUniqueIdName();
+ len = deviceUniqueId ? strlen(deviceUniqueId) : 0;
+ if (len && deviceUniqueIdUTF8 && len < deviceUniqueIdUTF8Size) {
+ memcpy(deviceUniqueIdUTF8, deviceUniqueId, len);
+ }
+ }
+
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY) {
+ // no device properties to change
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::NumberOfCapabilities(
+ const char* deviceUniqueIdUTF8) {
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::GetCapability(
+ const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability) {
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting) {
+ return 0;
+}
+
+int32_t BrowserDeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation) {
+ return 0;
+}
+
+VideoCaptureModule::DeviceInfo* DesktopCaptureImpl::CreateDeviceInfo(
+ const int32_t id, const CaptureDeviceType type) {
+ if (type == CaptureDeviceType::Screen) {
+ ScreenDeviceInfoImpl* pScreenDeviceInfoImpl = new ScreenDeviceInfoImpl(id);
+ if (!pScreenDeviceInfoImpl || pScreenDeviceInfoImpl->Init()) {
+ delete pScreenDeviceInfoImpl;
+ pScreenDeviceInfoImpl = NULL;
+ }
+ return pScreenDeviceInfoImpl;
+ } else if (type == CaptureDeviceType::Window) {
+ WindowDeviceInfoImpl* pWindowDeviceInfoImpl = new WindowDeviceInfoImpl(id);
+ if (!pWindowDeviceInfoImpl || pWindowDeviceInfoImpl->Init()) {
+ delete pWindowDeviceInfoImpl;
+ pWindowDeviceInfoImpl = NULL;
+ }
+ return pWindowDeviceInfoImpl;
+ } else if (type == CaptureDeviceType::Browser) {
+ BrowserDeviceInfoImpl* pBrowserDeviceInfoImpl =
+ new BrowserDeviceInfoImpl(id);
+ if (!pBrowserDeviceInfoImpl || pBrowserDeviceInfoImpl->Init()) {
+ delete pBrowserDeviceInfoImpl;
+ pBrowserDeviceInfoImpl = NULL;
+ }
+ return pBrowserDeviceInfoImpl;
+ }
+ return NULL;
+}
+
+const char* DesktopCaptureImpl::CurrentDeviceName() const {
+ return _deviceUniqueId.c_str();
+}
+
+static DesktopCaptureOptions CreateDesktopCaptureOptions() {
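+  // Build the capturer options, gating platform-specific backends on the
+  // corresponding StaticPrefs.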
+ DesktopCaptureOptions options = DesktopCaptureOptions::CreateDefault();
+ // Leave desktop effects enabled during WebRTC captures.
+ options.set_disable_effects(false);
+
+#if defined(WEBRTC_WIN)
+ if (mozilla::StaticPrefs::media_webrtc_capture_allow_directx()) {
+ options.set_allow_directx_capturer(true);
+ options.set_allow_use_magnification_api(false);
+ } else {
+ options.set_allow_use_magnification_api(true);
+ }
+ options.set_allow_cropping_window_capturer(true);
+# if defined(RTC_ENABLE_WIN_WGC)
+ if (mozilla::StaticPrefs::media_webrtc_capture_allow_wgc()) {
+ options.set_allow_wgc_capturer(true);
+ }
+# endif
+#endif
+
+#if defined(WEBRTC_MAC)
+ if (mozilla::StaticPrefs::media_webrtc_capture_allow_iosurface()) {
+ options.set_allow_iosurface(true);
+ }
+#endif
+
+#if defined(WEBRTC_USE_PIPEWIRE)
+ if (mozilla::StaticPrefs::media_webrtc_capture_allow_pipewire()) {
+ options.set_allow_pipewire(true);
+ }
+#endif
+
+ return options;
+}
+
+int32_t DesktopCaptureImpl::LazyInitDesktopCapturer() {
+ // Already initialized
+ if (desktop_capturer_cursor_composer_) {
+ return 0;
+ }
+
+ DesktopCaptureOptions options = CreateDesktopCaptureOptions();
+
+ if (_deviceType == CaptureDeviceType::Screen) {
+ std::unique_ptr<DesktopCapturer> pScreenCapturer =
+ DesktopCapturer::CreateScreenCapturer(options);
+ if (!pScreenCapturer.get()) {
+ return -1;
+ }
+
+ DesktopCapturer::SourceId sourceId = atoi(_deviceUniqueId.c_str());
+ pScreenCapturer->SelectSource(sourceId);
+
+ desktop_capturer_cursor_composer_ =
+ std::unique_ptr<DesktopAndCursorComposer>(
+ new DesktopAndCursorComposer(std::move(pScreenCapturer), options));
+ } else if (_deviceType == CaptureDeviceType::Window) {
+#if defined(RTC_ENABLE_WIN_WGC)
+ options.set_allow_wgc_capturer_fallback(true);
+#endif
+ std::unique_ptr<DesktopCapturer> pWindowCapturer =
+ DesktopCapturer::CreateWindowCapturer(options);
+ if (!pWindowCapturer.get()) {
+ return -1;
+ }
+
+ DesktopCapturer::SourceId sourceId = atoi(_deviceUniqueId.c_str());
+ pWindowCapturer->SelectSource(sourceId);
+
+ desktop_capturer_cursor_composer_ =
+ std::unique_ptr<DesktopAndCursorComposer>(
+ new DesktopAndCursorComposer(std::move(pWindowCapturer), options));
+ } else if (_deviceType == CaptureDeviceType::Browser) {
+ // XXX We don't capture cursors, so avoid the extra indirection layer. We
+ // could also pass null for the pMouseCursorMonitor.
+ desktop_capturer_cursor_composer_ =
+ DesktopCapturer::CreateTabCapturer(options);
+ if (!desktop_capturer_cursor_composer_) {
+ return -1;
+ }
+
+ DesktopCapturer::SourceId sourceId = atoi(_deviceUniqueId.c_str());
+ desktop_capturer_cursor_composer_->SelectSource(sourceId);
+ }
+ return 0;
+}
+
+DesktopCaptureImpl::DesktopCaptureImpl(const int32_t id, const char* uniqueId,
+ const CaptureDeviceType type)
+ : _id(id),
+ _tracking_id(
+ mozilla::TrackingId(CaptureEngineToTrackingSourceStr([&] {
+ switch (type) {
+ case CaptureDeviceType::Screen:
+ return CaptureEngine::ScreenEngine;
+ case CaptureDeviceType::Window:
+ return CaptureEngine::WinEngine;
+ case CaptureDeviceType::Browser:
+ return CaptureEngine::BrowserEngine;
+ default:
+ return CaptureEngine::InvalidEngine;
+ }
+ }()),
+ id)),
+ _deviceUniqueId(uniqueId),
+ _deviceType(type),
+ _requestedCapability(),
+ _rotateFrame(kVideoRotation_0),
+ last_capture_time_ms_(rtc::TimeMillis()),
+ time_event_(EventWrapper::Create()),
+ capturer_thread_(nullptr),
+ started_(false) {
+ _requestedCapability.width = kDefaultWidth;
+ _requestedCapability.height = kDefaultHeight;
+ _requestedCapability.maxFPS = 30;
+ _requestedCapability.videoType = VideoType::kI420;
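+  // _maxFPSNeeded is the requested frame interval in milliseconds.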
+ _maxFPSNeeded = 1000 / _requestedCapability.maxFPS;
+ memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
+}
+
+DesktopCaptureImpl::~DesktopCaptureImpl() {
+ time_event_->Set();
+ if (capturer_thread_) {
+#if defined(_WIN32)
+ capturer_thread_->Stop();
+#else
+ capturer_thread_->Finalize();
+#endif
+ }
+}
+
+void DesktopCaptureImpl::RegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) {
+ rtc::CritScope lock(&_apiCs);
+ _dataCallBacks.insert(dataCallback);
+}
+
+void DesktopCaptureImpl::DeRegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) {
+ rtc::CritScope lock(&_apiCs);
+ auto it = _dataCallBacks.find(dataCallback);
+ if (it != _dataCallBacks.end()) {
+ _dataCallBacks.erase(it);
+ }
+}
+
+int32_t DesktopCaptureImpl::StopCaptureIfAllClientsClose() {
+ if (_dataCallBacks.empty()) {
+ return StopCapture();
+ } else {
+ return 0;
+ }
+}
+
+int32_t DesktopCaptureImpl::DeliverCapturedFrame(
+ webrtc::VideoFrame& captureFrame) {
+  UpdateFrameCount();  // frame count used for local frame rate callback.
+
+ // Set the capture time
+ captureFrame.set_timestamp_us(rtc::TimeMicros());
+
+ if (captureFrame.render_time_ms() == last_capture_time_ms_) {
+    // We don't allow the same capture time for two frames; drop this one.
+ return -1;
+ }
+ last_capture_time_ms_ = captureFrame.render_time_ms();
+
+ for (auto dataCallBack : _dataCallBacks) {
+ dataCallBack->OnFrame(captureFrame);
+ }
+
+ return 0;
+}
+
+// Originally copied from VideoCaptureImpl::IncomingFrame, but has diverged
+// somewhat. See Bug 1038324 and bug 1738946.
+int32_t DesktopCaptureImpl::IncomingFrame(
+ uint8_t* videoFrame, size_t videoFrameLength, size_t widthWithPadding,
+ const VideoCaptureCapability& frameInfo) {
+ int64_t startProcessTime = rtc::TimeNanos();
+ rtc::CritScope cs(&_apiCs);
+
+ const int32_t width = frameInfo.width;
+ const int32_t height = frameInfo.height;
+
+ // Not encoded, convert to I420.
+ if (frameInfo.videoType != VideoType::kMJPEG &&
+ CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
+ videoFrameLength) {
+ RTC_LOG(LS_ERROR) << "Wrong incoming frame length.";
+ return -1;
+ }
+
+ int stride_y = width;
+ int stride_uv = (width + 1) / 2;
+
+ // Setting absolute height (in case it was negative).
+ // In Windows, the image starts bottom left, instead of top left.
+ // Setting a negative source height, inverts the image (within LibYuv).
+
+ mozilla::PerformanceRecorder<mozilla::CopyVideoStage> rec(
+ "DesktopCaptureImpl::ConvertToI420"_ns, _tracking_id, width, abs(height));
+ // TODO(nisse): Use a pool?
+ rtc::scoped_refptr<I420Buffer> buffer =
+ I420Buffer::Create(width, abs(height), stride_y, stride_uv, stride_uv);
+
+ const int conversionResult = libyuv::ConvertToI420(
+ videoFrame, videoFrameLength, buffer.get()->MutableDataY(),
+ buffer.get()->StrideY(), buffer.get()->MutableDataU(),
+ buffer.get()->StrideU(), buffer.get()->MutableDataV(),
+ buffer.get()->StrideV(), 0, 0, // No Cropping
+ static_cast<int>(widthWithPadding), height, width, height,
+ libyuv::kRotate0, ConvertVideoType(frameInfo.videoType));
+ if (conversionResult != 0) {
+    RTC_LOG(LS_ERROR) << "Failed to convert capture frame from type "
+                      << static_cast<int>(frameInfo.videoType) << " to I420.";
+ return -1;
+ }
+ rec.Record();
+
+ VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(), kVideoRotation_0);
+
+ DeliverCapturedFrame(captureFrame);
+
+ const int64_t processTime =
+ (rtc::TimeNanos() - startProcessTime) / rtc::kNumNanosecsPerMillisec;
+
+ if (processTime > 10) {
+ RTC_LOG(LS_WARNING) << "Too long processing time of incoming frame: "
+ << processTime << " ms";
+ }
+
+ return 0;
+}
+
+int32_t DesktopCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
+ rtc::CritScope lock(&_apiCs);
+ _rotateFrame = rotation;
+ return 0;
+}
+
+bool DesktopCaptureImpl::SetApplyRotation(bool enable) { return true; }
+
+void DesktopCaptureImpl::UpdateFrameCount() {
+ if (_incomingFrameTimesNanos[0] == 0) {
+ // first no shift
+ } else {
+ // shift
+ for (int i = (kFrameRateCountHistorySize - 2); i >= 0; i--) {
+ _incomingFrameTimesNanos[i + 1] = _incomingFrameTimesNanos[i];
+ }
+ }
+ _incomingFrameTimesNanos[0] = rtc::TimeNanos();
+}
+
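+// Estimate the recent frame rate from the timestamps recorded by
+// UpdateFrameCount(), ignoring samples older than kFrameRateHistoryWindowMs.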
+uint32_t DesktopCaptureImpl::CalculateFrameRate(int64_t now_ns) {
+ int32_t num = 0;
+ int32_t nrOfFrames = 0;
+ for (num = 1; num < (kFrameRateCountHistorySize - 1); num++) {
+ if (_incomingFrameTimesNanos[num] <= 0 ||
+ (now_ns - _incomingFrameTimesNanos[num]) /
+ rtc::kNumNanosecsPerMillisec >
+ kFrameRateHistoryWindowMs) // don't use data older than 2sec
+ {
+ break;
+ } else {
+ nrOfFrames++;
+ }
+ }
+ if (num > 1) {
+ int64_t diff = (now_ns - _incomingFrameTimesNanos[num - 1]) /
+ rtc::kNumNanosecsPerMillisec;
+ if (diff > 0) {
+ return uint32_t((nrOfFrames * 1000.0f / diff) + 0.5f);
+ }
+ }
+
+ return nrOfFrames;
+}
+
+void DesktopCaptureImpl::LazyInitCaptureThread() {
+ MOZ_ASSERT(desktop_capturer_cursor_composer_,
+ "DesktopCapturer must be initialized before the capture thread");
+ if (capturer_thread_) {
+ return;
+ }
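+  // On Windows, capture is driven by timer callbacks on a hidden UI thread
+  // (see platform_uithread.cc); elsewhere a plain thread loops in process().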
+#if defined(_WIN32)
+ capturer_thread_ = std::make_unique<rtc::PlatformUIThread>(
+ std::function([self = (void*)this]() { Run(self); }),
+ "ScreenCaptureThread", rtc::ThreadAttributes{});
+ capturer_thread_->RequestCallbackTimer(_maxFPSNeeded);
+#else
+ auto self = rtc::scoped_refptr<DesktopCaptureImpl>(this);
+ capturer_thread_ =
+ std::make_unique<rtc::PlatformThread>(rtc::PlatformThread::SpawnJoinable(
+ [self] { self->process(); }, "ScreenCaptureThread"));
+#endif
+ started_ = true;
+}
+
+int32_t DesktopCaptureImpl::StartCapture(
+ const VideoCaptureCapability& capability) {
+ rtc::CritScope lock(&_apiCs);
+ // See Bug 1780884 for followup on understanding why multiple calls happen.
+ // MOZ_ASSERT(!started_, "Capture must be stopped before Start() can be
+ // called");
+ if (started_) {
+ return 0;
+ }
+
+ if (uint32_t err = LazyInitDesktopCapturer(); err) {
+ return err;
+ }
+ started_ = true;
+ _requestedCapability = capability;
+ _maxFPSNeeded = _requestedCapability.maxFPS > 0
+ ? 1000 / _requestedCapability.maxFPS
+ : 1000;
+ LazyInitCaptureThread();
+
+ return 0;
+}
+
+bool DesktopCaptureImpl::FocusOnSelectedSource() {
+ if (uint32_t err = LazyInitDesktopCapturer(); err) {
+ return false;
+ }
+
+ return desktop_capturer_cursor_composer_->FocusOnSelectedSource();
+}
+
+int32_t DesktopCaptureImpl::StopCapture() {
+ if (started_) {
+ started_ = false;
+ MOZ_ASSERT(capturer_thread_, "Capturer thread should be initialized.");
+
+#if defined(_WIN32)
+ capturer_thread_
+ ->Stop(); // thread is guaranteed stopped before this returns
+#else
+ capturer_thread_
+ ->Finalize(); // thread is guaranteed stopped before this returns
+#endif
+ desktop_capturer_cursor_composer_.reset();
+ cursor_composer_started_ = false;
+ capturer_thread_.reset();
+ return 0;
+ }
+ return -1;
+}
+
+bool DesktopCaptureImpl::CaptureStarted() { return started_; }
+
+int32_t DesktopCaptureImpl::CaptureSettings(VideoCaptureCapability& settings) {
+ return -1;
+}
+
+void DesktopCaptureImpl::OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) {
+ if (frame.get() == nullptr) return;
+ uint8_t* videoFrame = frame->data();
+ VideoCaptureCapability frameInfo;
+ frameInfo.width = frame->size().width();
+ frameInfo.height = frame->size().height();
+ frameInfo.videoType = VideoType::kARGB;
+
+ size_t videoFrameLength =
+ frameInfo.width * frameInfo.height * DesktopFrame::kBytesPerPixel;
+ IncomingFrame(videoFrame, videoFrameLength,
+ frame->stride() / DesktopFrame::kBytesPerPixel, frameInfo);
+}
+
+void DesktopCaptureImpl::process() {
+ // We need to call Start on the same thread we call CaptureFrame on.
+ if (!cursor_composer_started_) {
+ desktop_capturer_cursor_composer_->Start(this);
+ cursor_composer_started_ = true;
+ }
+#if defined(WEBRTC_WIN)
+ ProcessIter();
+#else
+ do {
+ ProcessIter();
+ } while (started_);
+#endif
+}
+
+void DesktopCaptureImpl::ProcessIter() {
+// We should deliver at least one frame before stopping
+#if !defined(_WIN32)
+ int64_t startProcessTime = rtc::TimeNanos();
+#endif
+
+ // Don't leak while we're looping
+ base::ScopedNSAutoreleasePool autoreleasepool;
+
+#if defined(WEBRTC_MAC)
+ // Give cycles to the RunLoop so frame callbacks can happen
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.01, true);
+#endif
+
+ desktop_capturer_cursor_composer_->CaptureFrame();
+
+#if !defined(_WIN32)
+ const uint32_t processTime =
+ ((uint32_t)(rtc::TimeNanos() - startProcessTime)) /
+ rtc::kNumNanosecsPerMillisec;
+ // Use at most x% CPU or limit framerate
+ const float sleepTimeFactor = (100.0f / kMaxDesktopCaptureCpuUsage) - 1.0f;
+ const uint32_t sleepTime = sleepTimeFactor * processTime;
+ time_event_->Wait(std::max<uint32_t>(_maxFPSNeeded, sleepTime));
+#endif
+
+#if defined(WEBRTC_WIN)
+// Let the timer events in PlatformUIThread drive process,
+// don't sleep.
+#elif defined(WEBRTC_MAC)
+ sched_yield();
+#else
+ static const struct timespec ts_null = {0};
+ nanosleep(&ts_null, nullptr);
+#endif
+}
+
+} // namespace webrtc
diff --git a/dom/media/systemservices/video_engine/desktop_capture_impl.h b/dom/media/systemservices/video_engine/desktop_capture_impl.h
new file mode 100644
index 0000000000..d6b024f24b
--- /dev/null
+++ b/dom/media/systemservices/video_engine/desktop_capture_impl.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_MAIN_SOURCE_DESKTOP_CAPTURE_IMPL_H_
+#define WEBRTC_MODULES_DESKTOP_CAPTURE_MAIN_SOURCE_DESKTOP_CAPTURE_IMPL_H_
+
+/*
+ * video_capture_impl.h
+ */
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "api/video/video_frame.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_capture/video_capture.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "modules/video_coding/event_wrapper.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "modules/desktop_capture/desktop_and_cursor_composer.h"
+#include "rtc_base/deprecated/recursive_critical_section.h"
+
+#include "desktop_device_info.h"
+#include "VideoEngine.h"
+
+#if !defined(_WIN32)
+# include "rtc_base/platform_thread.h"
+#endif
+
+using namespace webrtc::videocapturemodule;
+using namespace mozilla::camera; // for mozilla::camera::CaptureDeviceType
+
+namespace rtc {
+#if defined(_WIN32)
+class PlatformUIThread;
+#endif
+} // namespace rtc
+
+namespace webrtc {
+
+class VideoCaptureEncodeInterface;
+
+// Simulates the DeviceInfo interface for the video engine, bridging the
+// screen/application capturers and the real screen/application device info.
+
+class ScreenDeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
+ public:
+ ScreenDeviceInfoImpl(const int32_t id);
+ virtual ~ScreenDeviceInfoImpl(void);
+
+ int32_t Init();
+ int32_t Refresh();
+
+ virtual uint32_t NumberOfDevices();
+ virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length, pid_t* pid);
+
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY);
+ virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8);
+ virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability);
+
+ virtual int32_t GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting);
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation);
+
+ protected:
+ int32_t _id;
+ std::unique_ptr<DesktopDeviceInfo> desktop_device_info_;
+};
+
+class WindowDeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
+ public:
+ WindowDeviceInfoImpl(const int32_t id) : _id(id){};
+ virtual ~WindowDeviceInfoImpl(void){};
+
+ int32_t Init();
+ int32_t Refresh();
+
+ virtual uint32_t NumberOfDevices();
+ virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length, pid_t* pid);
+
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY);
+ virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8);
+ virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability);
+
+ virtual int32_t GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting);
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation);
+
+ protected:
+ int32_t _id;
+ std::unique_ptr<DesktopDeviceInfo> desktop_device_info_;
+};
+
+class BrowserDeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
+ public:
+ BrowserDeviceInfoImpl(const int32_t id) : _id(id){};
+ virtual ~BrowserDeviceInfoImpl(void){};
+
+ int32_t Init();
+ int32_t Refresh();
+
+ virtual uint32_t NumberOfDevices();
+ virtual int32_t GetDeviceName(uint32_t deviceNumber, char* deviceNameUTF8,
+ uint32_t deviceNameLength,
+ char* deviceUniqueIdUTF8,
+ uint32_t deviceUniqueIdUTF8Length,
+ char* productUniqueIdUTF8,
+ uint32_t productUniqueIdUTF8Length, pid_t* pid);
+
+ virtual int32_t DisplayCaptureSettingsDialogBox(
+ const char* deviceUniqueIdUTF8, const char* dialogTitleUTF8,
+ void* parentWindow, uint32_t positionX, uint32_t positionY);
+ virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8);
+ virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
+ const uint32_t deviceCapabilityNumber,
+ VideoCaptureCapability& capability);
+
+ virtual int32_t GetBestMatchedCapability(
+ const char* deviceUniqueIdUTF8, const VideoCaptureCapability& requested,
+ VideoCaptureCapability& resulting);
+ virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
+ VideoRotation& orientation);
+
+ protected:
+ int32_t _id;
+ std::unique_ptr<DesktopDeviceInfo> desktop_device_info_;
+};
+
+// Reuses the video engine pipeline for screen sharing.
+// As with video, DesktopCaptureImpl is a proxy for screen sharing
+// and follows the video pipeline design.
+class DesktopCaptureImpl : public DesktopCapturer::Callback,
+ public VideoCaptureModule {
+ public:
+  /* Create a screen capture module object.
+   */
+ static VideoCaptureModule* Create(const int32_t id, const char* uniqueId,
+ const CaptureDeviceType type);
+ static VideoCaptureModule::DeviceInfo* CreateDeviceInfo(
+ const int32_t id, const CaptureDeviceType type);
+
+ // Call backs
+ void RegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) override;
+ void DeRegisterCaptureDataCallback(
+ rtc::VideoSinkInterface<VideoFrame>* dataCallback) override;
+ int32_t StopCaptureIfAllClientsClose() override;
+
+ int32_t SetCaptureRotation(VideoRotation rotation) override;
+ bool SetApplyRotation(bool enable) override;
+ bool GetApplyRotation() override { return true; }
+
+ const char* CurrentDeviceName() const override;
+
+ int32_t IncomingFrame(uint8_t* videoFrame, size_t videoFrameLength,
+ size_t widthWithPadding,
+ const VideoCaptureCapability& frameInfo);
+
+ // Platform dependent
+ int32_t StartCapture(const VideoCaptureCapability& capability) override;
+ virtual bool FocusOnSelectedSource() override;
+ int32_t StopCapture() override;
+ bool CaptureStarted() override;
+ int32_t CaptureSettings(VideoCaptureCapability& settings) override;
+
+ protected:
+ DesktopCaptureImpl(const int32_t id, const char* uniqueId,
+ const CaptureDeviceType type);
+ virtual ~DesktopCaptureImpl();
+ int32_t DeliverCapturedFrame(webrtc::VideoFrame& captureFrame);
+
+ static const uint32_t kMaxDesktopCaptureCpuUsage =
+ 50; // maximum CPU usage in %
+
+ int32_t _id; // Module ID
+ const mozilla::TrackingId
+ _tracking_id; // Allows tracking of this video frame source
+  std::string _deviceUniqueId;  // Current device unique id.
+ CaptureDeviceType _deviceType;
+
+ VideoCaptureCapability
+ _requestedCapability; // Should be set by platform dependent code in
+ // StartCapture.
+ private:
+ void LazyInitCaptureThread();
+ int32_t LazyInitDesktopCapturer();
+ void UpdateFrameCount();
+ uint32_t CalculateFrameRate(int64_t now_ns);
+
+ rtc::RecursiveCriticalSection _apiCs;
+
+ std::set<rtc::VideoSinkInterface<VideoFrame>*> _dataCallBacks;
+
+ int64_t _incomingFrameTimesNanos
+ [kFrameRateCountHistorySize]; // timestamp for local captured frames
+ VideoRotation _rotateFrame; // Set if the frame should be rotated by the
+ // capture module.
+ std::atomic<uint32_t> _maxFPSNeeded;
+
+ // Used to make sure incoming timestamp is increasing for every frame.
+ int64_t last_capture_time_ms_;
+
+ // DesktopCapturer::Callback interface.
+ void OnCaptureResult(DesktopCapturer::Result result,
+ std::unique_ptr<DesktopFrame> frame) override;
+
+ public:
+ static void Run(void* obj) {
+ static_cast<DesktopCaptureImpl*>(obj)->process();
+ };
+ void process();
+ void ProcessIter();
+
+ private:
+ // This is created on the main thread and accessed on both the main thread
+ // and the capturer thread. It is created prior to the capturer thread
+ // starting and is destroyed after it is stopped.
+ std::unique_ptr<DesktopCapturer> desktop_capturer_cursor_composer_;
+ bool cursor_composer_started_ = false;
+
+ std::unique_ptr<EventWrapper> time_event_;
+#if defined(_WIN32)
+ std::unique_ptr<rtc::PlatformUIThread> capturer_thread_;
+#else
+ std::unique_ptr<rtc::PlatformThread> capturer_thread_;
+#endif
+ std::atomic<bool> started_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_DESKTOP_CAPTURE_MAIN_SOURCE_DESKTOP_CAPTURE_IMPL_H_
diff --git a/dom/media/systemservices/video_engine/desktop_device_info.cc b/dom/media/systemservices/video_engine/desktop_device_info.cc
new file mode 100644
index 0000000000..e46bf01902
--- /dev/null
+++ b/dom/media/systemservices/video_engine/desktop_device_info.cc
@@ -0,0 +1,347 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "desktop_device_info.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/UniquePtr.h"
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdio>
+#include <cstring>
+#include <memory>
+
+namespace webrtc {
+
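+// Replace *member with a heap-allocated copy of value. A null value is a
+// no-op; any previous allocation is freed first.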
+static inline void SetStringMember(char** member, const char* value) {
+ if (!value) {
+ return;
+ }
+
+ if (*member) {
+ delete[] * member;
+ *member = NULL;
+ }
+
+ size_t nBufLen = strlen(value) + 1;
+ char* buffer = new char[nBufLen];
+ memcpy(buffer, value, nBufLen - 1);
+ buffer[nBufLen - 1] = '\0';
+ *member = buffer;
+}
+
+DesktopDisplayDevice::DesktopDisplayDevice() {
+ screenId_ = kInvalidScreenId;
+ deviceUniqueIdUTF8_ = NULL;
+ deviceNameUTF8_ = NULL;
+ pid_ = 0;
+}
+
+DesktopDisplayDevice::~DesktopDisplayDevice() {
+ screenId_ = kInvalidScreenId;
+
+ if (deviceUniqueIdUTF8_) {
+ delete[] deviceUniqueIdUTF8_;
+ }
+
+ if (deviceNameUTF8_) {
+ delete[] deviceNameUTF8_;
+ }
+
+ deviceUniqueIdUTF8_ = NULL;
+ deviceNameUTF8_ = NULL;
+}
+
+void DesktopDisplayDevice::setScreenId(const ScreenId screenId) {
+ screenId_ = screenId;
+}
+
+void DesktopDisplayDevice::setDeviceName(const char* deviceNameUTF8) {
+ SetStringMember(&deviceNameUTF8_, deviceNameUTF8);
+}
+
+void DesktopDisplayDevice::setUniqueIdName(const char* deviceUniqueIdUTF8) {
+ SetStringMember(&deviceUniqueIdUTF8_, deviceUniqueIdUTF8);
+}
+
+void DesktopDisplayDevice::setPid(const int pid) { pid_ = pid; }
+
+ScreenId DesktopDisplayDevice::getScreenId() { return screenId_; }
+
+const char* DesktopDisplayDevice::getDeviceName() { return deviceNameUTF8_; }
+
+const char* DesktopDisplayDevice::getUniqueIdName() {
+ return deviceUniqueIdUTF8_;
+}
+
+pid_t DesktopDisplayDevice::getPid() { return pid_; }
+
+DesktopDisplayDevice& DesktopDisplayDevice::operator=(
+ DesktopDisplayDevice& other) {
+ if (&other == this) {
+ return *this;
+ }
+ screenId_ = other.getScreenId();
+ setUniqueIdName(other.getUniqueIdName());
+ setDeviceName(other.getDeviceName());
+ pid_ = other.getPid();
+
+ return *this;
+}
+
+DesktopTab::DesktopTab() {
+ tabBrowserId_ = 0;
+ tabNameUTF8_ = NULL;
+ tabUniqueIdUTF8_ = NULL;
+ tabCount_ = 0;
+}
+
+DesktopTab::~DesktopTab() {
+ if (tabNameUTF8_) {
+ delete[] tabNameUTF8_;
+ }
+
+ if (tabUniqueIdUTF8_) {
+ delete[] tabUniqueIdUTF8_;
+ }
+
+ tabNameUTF8_ = NULL;
+ tabUniqueIdUTF8_ = NULL;
+}
+
+void DesktopTab::setTabBrowserId(uint64_t tabBrowserId) {
+ tabBrowserId_ = tabBrowserId;
+}
+
+void DesktopTab::setUniqueIdName(const char* tabUniqueIdUTF8) {
+ SetStringMember(&tabUniqueIdUTF8_, tabUniqueIdUTF8);
+}
+
+void DesktopTab::setTabName(const char* tabNameUTF8) {
+ SetStringMember(&tabNameUTF8_, tabNameUTF8);
+}
+
+void DesktopTab::setTabCount(const uint32_t count) { tabCount_ = count; }
+
+uint64_t DesktopTab::getTabBrowserId() { return tabBrowserId_; }
+
+const char* DesktopTab::getUniqueIdName() { return tabUniqueIdUTF8_; }
+
+const char* DesktopTab::getTabName() { return tabNameUTF8_; }
+
+uint32_t DesktopTab::getTabCount() { return tabCount_; }
+
+DesktopTab& DesktopTab::operator=(DesktopTab& other) {
+ tabBrowserId_ = other.getTabBrowserId();
+ setUniqueIdName(other.getUniqueIdName());
+ setTabName(other.getTabName());
+
+ return *this;
+}
+
+DesktopDeviceInfoImpl::DesktopDeviceInfoImpl() {}
+
+DesktopDeviceInfoImpl::~DesktopDeviceInfoImpl() { CleanUp(); }
+
+int32_t DesktopDeviceInfoImpl::getDisplayDeviceCount() {
+ return desktop_display_list_.size();
+}
+
+int32_t DesktopDeviceInfoImpl::getDesktopDisplayDeviceInfo(
+ int32_t nIndex, DesktopDisplayDevice& desktopDisplayDevice) {
+ if (nIndex < 0 || (size_t)nIndex >= desktop_display_list_.size()) {
+ return -1;
+ }
+
+ std::map<intptr_t, DesktopDisplayDevice*>::iterator iter =
+ desktop_display_list_.begin();
+ std::advance(iter, nIndex);
+ DesktopDisplayDevice* pDesktopDisplayDevice = iter->second;
+ if (pDesktopDisplayDevice) {
+ desktopDisplayDevice = (*pDesktopDisplayDevice);
+ }
+
+ return 0;
+}
+
+int32_t DesktopDeviceInfoImpl::getWindowCount() {
+ return desktop_window_list_.size();
+}
+int32_t DesktopDeviceInfoImpl::getWindowInfo(
+ int32_t nIndex, DesktopDisplayDevice& windowDevice) {
+ if (nIndex < 0 || (size_t)nIndex >= desktop_window_list_.size()) {
+ return -1;
+ }
+
+ std::map<intptr_t, DesktopDisplayDevice*>::iterator itr =
+ desktop_window_list_.begin();
+ std::advance(itr, nIndex);
+ DesktopDisplayDevice* pWindow = itr->second;
+ if (!pWindow) {
+ return -1;
+ }
+
+ windowDevice = (*pWindow);
+ return 0;
+}
+
+int32_t DesktopDeviceInfoImpl::getTabCount() {
+ return desktop_tab_list_.size();
+}
+
+int32_t DesktopDeviceInfoImpl::getTabInfo(int32_t nIndex,
+ DesktopTab& desktopTab) {
+ if (nIndex < 0 || (size_t)nIndex >= desktop_tab_list_.size()) {
+ return -1;
+ }
+
+ std::map<intptr_t, DesktopTab*>::iterator iter = desktop_tab_list_.begin();
+ std::advance(iter, nIndex);
+ DesktopTab* pDesktopTab = iter->second;
+ if (pDesktopTab) {
+ desktopTab = (*pDesktopTab);
+ }
+
+ return 0;
+}
+
+void DesktopDeviceInfoImpl::CleanUp() {
+ CleanUpScreenList();
+ CleanUpWindowList();
+ CleanUpTabList();
+}
+int32_t DesktopDeviceInfoImpl::Init() {
+ InitializeScreenList();
+ InitializeWindowList();
+ InitializeTabList();
+
+ return 0;
+}
+int32_t DesktopDeviceInfoImpl::Refresh() {
+ RefreshScreenList();
+ RefreshWindowList();
+ RefreshTabList();
+
+ return 0;
+}
+
+void DesktopDeviceInfoImpl::CleanUpWindowList() {
+ std::map<intptr_t, DesktopDisplayDevice*>::iterator iterWindow;
+ for (iterWindow = desktop_window_list_.begin();
+ iterWindow != desktop_window_list_.end(); iterWindow++) {
+ DesktopDisplayDevice* pWindow = iterWindow->second;
+ delete pWindow;
+ iterWindow->second = NULL;
+ }
+ desktop_window_list_.clear();
+}
+
+void DesktopDeviceInfoImpl::InitializeWindowList() {
+ std::unique_ptr<DesktopCapturer> pWinCap =
+ DesktopCapturer::CreateWindowCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ DesktopCapturer::SourceList list;
+ if (pWinCap && pWinCap->GetSourceList(&list)) {
+ DesktopCapturer::SourceList::iterator itr;
+ for (itr = list.begin(); itr != list.end(); itr++) {
+ DesktopDisplayDevice* pWinDevice = new DesktopDisplayDevice;
+ if (!pWinDevice) {
+ continue;
+ }
+
+ pWinDevice->setScreenId(itr->id);
+ pWinDevice->setDeviceName(itr->title.c_str());
+ pWinDevice->setPid(itr->pid);
+
+ char idStr[BUFSIZ];
+#if WEBRTC_WIN
+ _snprintf_s(idStr, sizeof(idStr), sizeof(idStr) - 1, "%ld",
+ static_cast<long>(pWinDevice->getScreenId()));
+#else
+ SprintfLiteral(idStr, "%ld",
+ static_cast<long>(pWinDevice->getScreenId()));
+#endif
+ pWinDevice->setUniqueIdName(idStr);
+ desktop_window_list_[pWinDevice->getScreenId()] = pWinDevice;
+ }
+ }
+}
+
+void DesktopDeviceInfoImpl::RefreshWindowList() {
+ CleanUpWindowList();
+ InitializeWindowList();
+}
+
+void DesktopDeviceInfoImpl::CleanUpTabList() {
+ for (auto& iterTab : desktop_tab_list_) {
+ DesktopTab* pDesktopTab = iterTab.second;
+ delete pDesktopTab;
+ iterTab.second = NULL;
+ }
+ desktop_tab_list_.clear();
+}
+
+void DesktopDeviceInfoImpl::RefreshTabList() {
+ CleanUpTabList();
+ InitializeTabList();
+}
+
+void DesktopDeviceInfoImpl::CleanUpScreenList() {
+ std::map<intptr_t, DesktopDisplayDevice*>::iterator iterDevice;
+ for (iterDevice = desktop_display_list_.begin();
+ iterDevice != desktop_display_list_.end(); iterDevice++) {
+ DesktopDisplayDevice* pDesktopDisplayDevice = iterDevice->second;
+ delete pDesktopDisplayDevice;
+ iterDevice->second = NULL;
+ }
+ desktop_display_list_.clear();
+}
+
+void DesktopDeviceInfoImpl::InitializeScreenList() {
+ std::unique_ptr<DesktopCapturer> screenCapturer =
+ DesktopCapturer::CreateScreenCapturer(
+ DesktopCaptureOptions::CreateDefault());
+ DesktopCapturer::SourceList list;
+ if (screenCapturer && screenCapturer->GetSourceList(&list)) {
+ DesktopCapturer::SourceList::iterator itr;
+ for (itr = list.begin(); itr != list.end(); itr++) {
+ DesktopDisplayDevice* screenDevice = new DesktopDisplayDevice;
+ screenDevice->setScreenId(itr->id);
+ if (list.size() == 1) {
+ screenDevice->setDeviceName("Primary Monitor");
+ } else {
+ screenDevice->setDeviceName(itr->title.c_str());
+ }
+ screenDevice->setPid(itr->pid);
+
+ char idStr[BUFSIZ];
+#if WEBRTC_WIN
+ _snprintf_s(idStr, sizeof(idStr), sizeof(idStr) - 1, "%ld",
+ static_cast<long>(screenDevice->getScreenId()));
+#else
+ SprintfLiteral(idStr, "%ld",
+ static_cast<long>(screenDevice->getScreenId()));
+#endif
+ screenDevice->setUniqueIdName(idStr);
+ desktop_display_list_[screenDevice->getScreenId()] = screenDevice;
+ }
+ }
+}
+
+void DesktopDeviceInfoImpl::RefreshScreenList() {
+ CleanUpScreenList();
+ InitializeScreenList();
+}
+
+/* static */
+DesktopDeviceInfo* DesktopDeviceInfoImpl::Create() {
+ auto info = mozilla::MakeUnique<DesktopDeviceInfoImpl>();
+ if (info->Init() != 0) {
+ return nullptr;
+ }
+ return info.release();
+}
+} // namespace webrtc
diff --git a/dom/media/systemservices/video_engine/desktop_device_info.h b/dom/media/systemservices/video_engine/desktop_device_info.h
new file mode 100644
index 0000000000..1de2845e20
--- /dev/null
+++ b/dom/media/systemservices/video_engine/desktop_device_info.h
@@ -0,0 +1,120 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_DEVICE_INFO_H_
+#define WEBRTC_MODULES_DESKTOP_CAPTURE_DEVICE_INFO_H_
+
+#include <map>
+#include "modules/desktop_capture/desktop_capture_types.h"
+
+namespace webrtc {
+
+class DesktopDisplayDevice {
+ public:
+ DesktopDisplayDevice();
+ ~DesktopDisplayDevice();
+
+ void setScreenId(const ScreenId screenId);
+ void setDeviceName(const char* deviceNameUTF8);
+ void setUniqueIdName(const char* deviceUniqueIdUTF8);
+ void setPid(pid_t pid);
+
+ ScreenId getScreenId();
+ const char* getDeviceName();
+ const char* getUniqueIdName();
+ pid_t getPid();
+
+ DesktopDisplayDevice& operator=(DesktopDisplayDevice& other);
+
+ protected:
+ ScreenId screenId_;
+ char* deviceNameUTF8_;
+ char* deviceUniqueIdUTF8_;
+ pid_t pid_;
+};
+
+typedef std::map<intptr_t, DesktopDisplayDevice*> DesktopDisplayDeviceList;
+
+class DesktopTab {
+ public:
+ DesktopTab();
+ ~DesktopTab();
+
+ void setTabBrowserId(uint64_t tabBrowserId);
+ void setUniqueIdName(const char* tabUniqueIdUTF8);
+ void setTabName(const char* tabNameUTF8);
+ void setTabCount(const uint32_t count);
+
+ uint64_t getTabBrowserId();
+ const char* getUniqueIdName();
+ const char* getTabName();
+ uint32_t getTabCount();
+
+ DesktopTab& operator=(DesktopTab& other);
+
+ protected:
+ uint64_t tabBrowserId_;
+ char* tabNameUTF8_;
+ char* tabUniqueIdUTF8_;
+ uint32_t tabCount_;
+};
+
+typedef std::map<intptr_t, DesktopTab*> DesktopTabList;
+
+class DesktopDeviceInfo {
+ public:
+ virtual ~DesktopDeviceInfo(){};
+
+ virtual int32_t Init() = 0;
+ virtual int32_t Refresh() = 0;
+ virtual int32_t getDisplayDeviceCount() = 0;
+ virtual int32_t getDesktopDisplayDeviceInfo(
+ int32_t nIndex, DesktopDisplayDevice& desktopDisplayDevice) = 0;
+ virtual int32_t getWindowCount() = 0;
+ virtual int32_t getWindowInfo(int32_t nindex,
+ DesktopDisplayDevice& windowDevice) = 0;
+ virtual int32_t getTabCount() = 0;
+ virtual int32_t getTabInfo(int32_t nIndex, DesktopTab& desktopTab) = 0;
+};
+
+class DesktopDeviceInfoImpl : public DesktopDeviceInfo {
+ public:
+ DesktopDeviceInfoImpl();
+ ~DesktopDeviceInfoImpl();
+
+ int32_t Init() override;
+ int32_t Refresh() override;
+ int32_t getDisplayDeviceCount() override;
+ int32_t getDesktopDisplayDeviceInfo(
+ int32_t nIndex, DesktopDisplayDevice& desktopDisplayDevice) override;
+ int32_t getWindowCount() override;
+ int32_t getWindowInfo(int32_t nindex,
+ DesktopDisplayDevice& windowDevice) override;
+ int32_t getTabCount() override;
+ int32_t getTabInfo(int32_t nIndex, DesktopTab& desktopTab) override;
+ static DesktopDeviceInfo* Create();
+
+ protected:
+ DesktopDisplayDeviceList desktop_display_list_;
+ DesktopDisplayDeviceList desktop_window_list_;
+ DesktopTabList desktop_tab_list_;
+
+ void CleanUp();
+ void CleanUpWindowList();
+ void CleanUpTabList();
+ void CleanUpScreenList();
+
+ void InitializeWindowList();
+ virtual void InitializeTabList();
+ void InitializeScreenList();
+
+ void RefreshWindowList();
+ void RefreshTabList();
+ void RefreshScreenList();
+
+ void DummyTabList(DesktopTabList& list);
+};
+}; // namespace webrtc
+
+#endif
diff --git a/dom/media/systemservices/video_engine/platform_uithread.cc b/dom/media/systemservices/video_engine/platform_uithread.cc
new file mode 100644
index 0000000000..701a989a18
--- /dev/null
+++ b/dom/media/systemservices/video_engine/platform_uithread.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_WIN)
+
+# include "platform_uithread.h"
+
+namespace rtc {
+
+// timer id used in delayed callbacks
+static const UINT_PTR kTimerId = 1;
+static const wchar_t kThisProperty[] = L"ThreadWindowsUIPtr";
+static const wchar_t kThreadWindow[] = L"WebrtcWindowsUIThread";
+
+PlatformUIThread::~PlatformUIThread() {
+ CritScope scoped_lock(&cs_);
+ switch (state_) {
+ case State::STARTED: {
+ MOZ_DIAGNOSTIC_ASSERT(
+ false, "PlatformUIThread must be stopped before destruction");
+ break;
+ }
+ case State::STOPPED:
+ break;
+ case State::UNSTARTED:
+ break;
+ }
+}
+
+bool PlatformUIThread::InternalInit() {
+ // Create an event window for use in generating callbacks to capture
+ // objects.
+ CritScope scoped_lock(&cs_);
+ switch (state_) {
+    // We have already started; there is nothing to do. Should this be an assert?
+ case State::STARTED:
+ break;
+ // Stop() has already been called so there is likewise nothing to do.
+ case State::STOPPED:
+ break;
+    // Stop() has not been called yet; set up the UI thread and set our
+    // state to STARTED.
+ case State::UNSTARTED: {
+ WNDCLASSW wc;
+ HMODULE hModule = GetModuleHandle(NULL);
+ if (!GetClassInfoW(hModule, kThreadWindow, &wc)) {
+ ZeroMemory(&wc, sizeof(WNDCLASSW));
+ wc.hInstance = hModule;
+ wc.lpfnWndProc = EventWindowProc;
+ wc.lpszClassName = kThreadWindow;
+ RegisterClassW(&wc);
+ }
+ hwnd_ = CreateWindowW(kThreadWindow, L"", 0, 0, 0, 0, 0, NULL, NULL,
+ hModule, NULL);
+ // Added in review of bug 1760843, follow up to remove 1767861
+ MOZ_RELEASE_ASSERT(hwnd_);
+      // Expected to always work, but if it doesn't we should still fulfill
+      // the contract of running the process loop at least one iteration.
+      // This could be re-examined in the future.
+ if (hwnd_) {
+ SetPropW(hwnd_, kThisProperty, this);
+ // state_ needs to be STARTED before we request the initial timer
+ state_ = State::STARTED;
+ if (timeout_) {
+ // if someone set the timer before we started
+ RequestCallbackTimer(timeout_);
+ }
+ }
+ break;
+ }
+ };
+ return state_ == State::STARTED;
+}
+
+bool PlatformUIThread::RequestCallbackTimer(unsigned int milliseconds) {
+ CritScope scoped_lock(&cs_);
+
+ switch (state_) {
+ // InternalInit() has yet to run so we do not have a UI thread to use as a
+ // target of the timer. We should just remember what timer interval was
+ // requested and let InternalInit() call this function again when it is
+ // ready.
+ case State::UNSTARTED: {
+ timeout_ = milliseconds;
+ return false;
+ }
+    // We have already stopped; do not schedule a new timer.
+ case State::STOPPED:
+ return false;
+ case State::STARTED: {
+ if (timerid_) {
+ KillTimer(hwnd_, timerid_);
+ }
+ timeout_ = milliseconds;
+ timerid_ = SetTimer(hwnd_, kTimerId, milliseconds, NULL);
+ return !!timerid_;
+ }
+ }
+ // UNREACHABLE
+}
+
+void PlatformUIThread::Stop() {
+ {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ CritScope scoped_lock(&cs_);
+ // Shut down the dispatch loop and let the background thread exit.
+ if (timerid_) {
+ MOZ_ASSERT(hwnd_);
+ KillTimer(hwnd_, timerid_);
+ timerid_ = 0;
+ }
+ switch (state_) {
+      // If we haven't started yet there is nothing to do; we will go into
+ // the STOPPED state at the end of the function and InternalInit()
+ // will not move us to STARTED.
+ case State::UNSTARTED:
+ break;
+      // If we have started, that means InternalInit() has run and the
+      // message wait loop has run or will run. We need to signal it to stop,
+      // which will allow PlatformThread::Stop to join that thread.
+ case State::STARTED: {
+ MOZ_ASSERT(hwnd_);
+ PostMessage(hwnd_, WM_CLOSE, 0, 0);
+ break;
+ }
+ // We have already stopped. There is nothing to do.
+ case State::STOPPED:
+ break;
+ }
+ // Always set our state to STOPPED
+ state_ = State::STOPPED;
+ }
+ monitor_thread_.Finalize();
+}
+
+void PlatformUIThread::Run() {
+  // InternalInit() will return false when the thread is already in shutdown.
+  // Otherwise we must run until we get a Windows WM_QUIT msg.
+ const bool runUntilQuitMsg = InternalInit();
+ // The interface contract of Start/Stop is that for a successful call to
+ // Start, there should be at least one call to the run function.
+ NativeEventCallback();
+ while (runUntilQuitMsg) {
+ // Alertable sleep to receive WM_QUIT (following a WM_CLOSE triggering a
+ // WM_DESTROY)
+ if (MsgWaitForMultipleObjectsEx(0, nullptr, INFINITE, QS_ALLINPUT,
+ MWMO_ALERTABLE | MWMO_INPUTAVAILABLE) ==
+ WAIT_OBJECT_0) {
+ MSG msg;
+ if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
+ if (msg.message == WM_QUIT) {
+ // THE ONLY WAY to exit the thread loop
+ break;
+ }
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+ }
+ }
+}
+
+void PlatformUIThread::NativeEventCallback() { native_event_callback_(); }
+
+/* static */
+LRESULT CALLBACK PlatformUIThread::EventWindowProc(HWND hwnd, UINT uMsg,
+ WPARAM wParam,
+ LPARAM lParam) {
+ if (uMsg == WM_DESTROY) {
+ RemovePropW(hwnd, kThisProperty);
+ PostQuitMessage(0);
+ return 0;
+ }
+
+ PlatformUIThread* twui =
+ static_cast<PlatformUIThread*>(GetPropW(hwnd, kThisProperty));
+ if (!twui) {
+ return DefWindowProc(hwnd, uMsg, wParam, lParam);
+ }
+
+ if (uMsg == WM_TIMER && wParam == kTimerId) {
+ twui->NativeEventCallback();
+ return 0;
+ }
+
+ return DefWindowProc(hwnd, uMsg, wParam, lParam);
+}
+
+} // namespace rtc
+
+#endif
diff --git a/dom/media/systemservices/video_engine/platform_uithread.h b/dom/media/systemservices/video_engine/platform_uithread.h
new file mode 100644
index 0000000000..9c213ca933
--- /dev/null
+++ b/dom/media/systemservices/video_engine/platform_uithread.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PLATFORM_UITHREAD_H_
+#define RTC_BASE_PLATFORM_UITHREAD_H_
+
+#if defined(WEBRTC_WIN)
+# include "Assertions.h"
+# include "rtc_base/deprecated/recursive_critical_section.h"
+# include "rtc_base/platform_thread.h"
+# include "api/sequence_checker.h"
+# include "ThreadSafety.h"
+
+namespace rtc {
+/*
+ * Windows UI thread for screen capture
+ * Launches a thread which enters a message wait loop after calling the
+ * provided ThreadRunFunction once. A repeating timer event might be registered
+ * with a callback through the Win32 API. If so, that timer will cause WM_TIMER
+ * messages to appear in the threads message queue. This will wake the thread
+ * which will then first look to see if it received the WM_QUIT message, then
+ * it will pass any non WM_QUIT messages on to the registered message handlers
+ * (synchronously on the current thread). In the case oF WM_TIMER the
+ * registered handler calls the NativeEventCallback which is simply the
+ * ThreadRunFunction which was passed to the constructor.
+ *
+ * Shutdown of the message wait loop is triggered by sending a WM_CLOSE which
+ * will start tearing down the "window" which hosts the UI thread. This will
+ * cause a WM_DESTROY message to be received. Upon reception a WM_QUIT message
+ * is enqueued. When the message wait loop receives a WM_QUIT message it stops,
+ * thus allowing the thread to be joined.
+ *
+ * Note: the only source of a WM_CLOSE should be PlatformUIThread::Stop.
+ * Note: because PlatformUIThread::Stop is called from a different thread than
+ * PlatformUIThread::Run, it is possible for Stop to race Run.
+ *
+ * After being stopped, PlatformUIThread cannot be started again.
+ *
+ */
+
+class PlatformUIThread {
+ public:
+ PlatformUIThread(std::function<void()> func, const char* name,
+ ThreadAttributes attributes)
+ : name_(name),
+ native_event_callback_(std::move(func)),
+ monitor_thread_(PlatformThread::SpawnJoinable([this]() { Run(); }, name,
+ attributes)) {}
+
+ virtual ~PlatformUIThread();
+
+ void Stop();
+
+ /**
+ * Request a recurring callback.
+ */
+ bool RequestCallbackTimer(unsigned int milliseconds);
+
+ protected:
+ void Run();
+
+ private:
+ static LRESULT CALLBACK EventWindowProc(HWND, UINT, WPARAM, LPARAM);
+ void NativeEventCallback();
+ // Initialize the UI thread that is servicing the timer events
+ bool InternalInit();
+
+  // Needs to be initialized before monitor_thread_ as it takes a string view
+  // of name_.
+ std::string name_;
+ RecursiveCriticalSection cs_;
+ std::function<void()> native_event_callback_;
+ webrtc::SequenceChecker thread_checker_;
+ PlatformThread monitor_thread_;
+ HWND hwnd_ MOZ_GUARDED_BY(cs_) = nullptr;
+ UINT_PTR timerid_ MOZ_GUARDED_BY(cs_) = 0;
+ unsigned int timeout_ MOZ_GUARDED_BY(cs_) = 0;
+ enum class State {
+ UNSTARTED,
+ STARTED,
+ STOPPED,
+ };
+ State state_ MOZ_GUARDED_BY(cs_) = State::UNSTARTED;
+};
+
+} // namespace rtc
+
+#endif
+#endif // RTC_BASE_PLATFORM_UITHREAD_H_
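
The header comment above describes the WM_TIMER / WM_QUIT life cycle; the following is a minimal, hypothetical usage sketch of that contract, not part of the patch. The DoCapture callback, the thread name and the 50 ms interval are illustrative only; rtc::ThreadAttributes comes from rtc_base/platform_thread.h with its defaults.

#if defined(WEBRTC_WIN)
#  include "platform_uithread.h"

void DoCapture();  // hypothetical per-tick work, e.g. grab one frame

void RunCaptureLoop() {
  // Spawns the UI thread; DoCapture() runs at least once even without a timer.
  rtc::PlatformUIThread ui_thread(&DoCapture, "CaptureUIThread",
                                  rtc::ThreadAttributes());
  // Have WM_TIMER invoke DoCapture() roughly every 50 ms.
  ui_thread.RequestCallbackTimer(50);
  // ... capture for a while ...
  // Stop() posts WM_CLOSE; the wait loop sees WM_QUIT and the thread is joined.
  ui_thread.Stop();
}
#endif  // defined(WEBRTC_WIN)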
diff --git a/dom/media/systemservices/video_engine/tab_capturer.cc b/dom/media/systemservices/video_engine/tab_capturer.cc
new file mode 100644
index 0000000000..4050dda6a5
--- /dev/null
+++ b/dom/media/systemservices/video_engine/tab_capturer.cc
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+
+#include "tab_capturer.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "modules/desktop_capture/desktop_frame.h"
+#include "mozilla/Logging.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "nsThreadUtils.h"
+#include "nsIBrowserWindowTracker.h"
+#include "nsIDocShellTreeOwner.h"
+#include "nsImportModule.h"
+#include "mozilla/dom/BrowserHost.h"
+#include "mozilla/dom/BrowsingContext.h"
+#include "mozilla/dom/ImageBitmapBinding.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/PromiseNativeHandler.h"
+#include "mozilla/dom/WindowGlobalParent.h"
+#include "mozilla/gfx/2D.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/SyncRunnable.h"
+#include "desktop_device_info.h"
+
+#include "MediaUtils.h"
+
+mozilla::LazyLogModule gTabShareLog("TabShare");
+
+using namespace mozilla::dom;
+
+namespace mozilla {
+
+class CaptureFrameRequest {
+ using CapturePromise = TabCapturerWebrtc::CapturePromise;
+
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CaptureFrameRequest)
+
+ CaptureFrameRequest() : mCaptureTime(TimeStamp::Now()) {}
+
+ operator MozPromiseRequestHolder<CapturePromise>&() { return mRequest; }
+
+ void Complete() { mRequest.Complete(); }
+ void Disconnect() { mRequest.Disconnect(); }
+ bool Exists() { return mRequest.Exists(); }
+
+ protected:
+ virtual ~CaptureFrameRequest() = default;
+
+ public:
+ const TimeStamp mCaptureTime;
+
+ private:
+ MozPromiseRequestHolder<CapturePromise> mRequest;
+};
+
+TabCapturerWebrtc::TabCapturerWebrtc(
+ const webrtc::DesktopCaptureOptions& options)
+ : mMainThreadWorker(
+ TaskQueue::Create(do_AddRef(GetMainThreadSerialEventTarget()),
+ "TabCapturerWebrtc::mMainThreadWorker")) {}
+
+TabCapturerWebrtc::~TabCapturerWebrtc() {
+ MOZ_ALWAYS_SUCCEEDS(
+ mMainThreadWorker->Dispatch(NS_NewRunnableFunction(__func__, [this] {
+ for (const auto& req : mRequests) {
+ req->Disconnect();
+ }
+ mMainThreadWorker->BeginShutdown();
+ })));
+ // Block until the worker has run all pending tasks, since mCallback must
+ // outlive them, and libwebrtc only guarantees mCallback outlives us.
+ mMainThreadWorker->AwaitShutdownAndIdle();
+}
+
+bool TabCapturerWebrtc::GetSourceList(
+ webrtc::DesktopCapturer::SourceList* sources) {
+ MOZ_LOG(gTabShareLog, LogLevel::Debug,
+ ("TabShare: GetSourceList, result %zu", sources->size()));
+ // XXX UI
+ return true;
+}
+
+bool TabCapturerWebrtc::SelectSource(webrtc::DesktopCapturer::SourceId id) {
+ MOZ_LOG(gTabShareLog, LogLevel::Debug, ("TabShare: source %d", (int)id));
+ mBrowserId = id;
+ return true;
+}
+
+bool TabCapturerWebrtc::FocusOnSelectedSource() { return true; }
+
+void TabCapturerWebrtc::Start(webrtc::DesktopCapturer::Callback* callback) {
+ RTC_DCHECK(!mCallback);
+ RTC_DCHECK(callback);
+
+ MOZ_LOG(gTabShareLog, LogLevel::Debug,
+ ("TabShare: Start, id=%" PRIu64, mBrowserId));
+
+ mCallback = callback;
+ CaptureFrame();
+}
+
+void TabCapturerWebrtc::CaptureFrame() {
+ auto request = MakeRefPtr<CaptureFrameRequest>();
+ InvokeAsync(mMainThreadWorker, __func__,
+ [this, request]() mutable {
+ if (mRequests.GetSize() > 2) {
+ // Allow two async capture requests in flight
+ request->Disconnect();
+ return CapturePromise::CreateAndReject(NS_ERROR_NOT_AVAILABLE,
+ __func__);
+ }
+ mRequests.PushFront(request.forget());
+ return CaptureFrameNow();
+ })
+ ->Then(
+ mMainThreadWorker, __func__,
+ [this, request](const RefPtr<dom::ImageBitmap>& aBitmap) {
+ if (!CompleteRequest(request)) {
+ return;
+ }
+
+ UniquePtr<dom::ImageBitmapCloneData> data = aBitmap->ToCloneData();
+ webrtc::DesktopSize size(data->mPictureRect.Width(),
+ data->mPictureRect.Height());
+ webrtc::DesktopRect rect = webrtc::DesktopRect::MakeSize(size);
+ std::unique_ptr<webrtc::DesktopFrame> frame(
+ new webrtc::BasicDesktopFrame(size));
+
+ gfx::DataSourceSurface::ScopedMap map(data->mSurface,
+ gfx::DataSourceSurface::READ);
+ if (!map.IsMapped()) {
+ mCallback->OnCaptureResult(
+ webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr);
+ return;
+ }
+ frame->CopyPixelsFrom(map.GetData(), map.GetStride(), rect);
+
+ mCallback->OnCaptureResult(webrtc::DesktopCapturer::Result::SUCCESS,
+ std::move(frame));
+ },
+ [this, request](nsresult aRv) {
+ if (!CompleteRequest(request)) {
+ return;
+ }
+
+ mCallback->OnCaptureResult(
+ webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr);
+ })
+ ->Track(*request);
+}
+
+bool TabCapturerWebrtc::IsOccluded(const webrtc::DesktopVector& pos) {
+ return false;
+}
+
+class TabCapturedHandler final : public dom::PromiseNativeHandler {
+ public:
+ NS_DECL_ISUPPORTS
+
+ using CapturePromise = TabCapturerWebrtc::CapturePromise;
+
+ static void Create(dom::Promise* aPromise,
+ MozPromiseHolder<CapturePromise> aHolder) {
+ MOZ_ASSERT(aPromise);
+ MOZ_ASSERT(NS_IsMainThread());
+
+ RefPtr<TabCapturedHandler> handler =
+ new TabCapturedHandler(std::move(aHolder));
+ aPromise->AppendNativeHandler(handler);
+ }
+
+ void ResolvedCallback(JSContext* aCx, JS::Handle<JS::Value> aValue,
+ ErrorResult& aRv) override {
+ MOZ_ASSERT(NS_IsMainThread());
+ if (NS_WARN_IF(!aValue.isObject())) {
+ mHolder.Reject(NS_ERROR_UNEXPECTED, __func__);
+ return;
+ }
+
+ RefPtr<dom::ImageBitmap> bitmap;
+ if (NS_WARN_IF(NS_FAILED(
+ UNWRAP_OBJECT(ImageBitmap, &aValue.toObject(), bitmap)))) {
+ mHolder.Reject(NS_ERROR_UNEXPECTED, __func__);
+ return;
+ }
+
+ mHolder.Resolve(std::move(bitmap), __func__);
+ }
+
+ void RejectedCallback(JSContext* aCx, JS::Handle<JS::Value> aValue,
+ ErrorResult& aRv) override {
+ MOZ_ASSERT(NS_IsMainThread());
+ mHolder.Reject(aRv.StealNSResult(), __func__);
+ }
+
+ private:
+ explicit TabCapturedHandler(MozPromiseHolder<CapturePromise> aHolder)
+ : mHolder(std::move(aHolder)) {}
+
+ ~TabCapturedHandler() = default;
+
+ MozPromiseHolder<CapturePromise> mHolder;
+};
+
+NS_IMPL_ISUPPORTS0(TabCapturedHandler)
+
+bool TabCapturerWebrtc::CompleteRequest(CaptureFrameRequest* aRequest) {
+ MOZ_ASSERT(mMainThreadWorker->IsOnCurrentThread());
+ if (!aRequest->Exists()) {
+ // Request was disconnected or overrun
+ return false;
+ }
+ while (CaptureFrameRequest* req = mRequests.Peek()) {
+ if (req->mCaptureTime > aRequest->mCaptureTime) {
+ break;
+ }
+ // Pop the request before calling the callback, in case it could mutate
+ // mRequests, now or in the future.
+ RefPtr<CaptureFrameRequest> dropMe = mRequests.Pop();
+ req->Complete();
+ if (req->mCaptureTime < aRequest->mCaptureTime) {
+ mCallback->OnCaptureResult(
+ webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr);
+ }
+ }
+ MOZ_DIAGNOSTIC_ASSERT(!aRequest->Exists());
+ return true;
+}
+
+auto TabCapturerWebrtc::CaptureFrameNow() -> RefPtr<CapturePromise> {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_LOG(gTabShareLog, LogLevel::Debug, ("TabShare: CaptureFrameNow"));
+
+ WindowGlobalParent* wgp = nullptr;
+ if (mBrowserId != 0) {
+ RefPtr<BrowsingContext> context =
+ BrowsingContext::GetCurrentTopByBrowserId(mBrowserId);
+ if (context) {
+ wgp = context->Canonical()->GetCurrentWindowGlobal();
+ }
+ // If we can't access the window, we just won't capture anything
+ }
+ if (!wgp) {
+ return CapturePromise::CreateAndReject(NS_ERROR_NOT_AVAILABLE, __func__);
+ }
+
+ // XXX This would be more efficient if we used CrossProcessPaint directly and
+ // returned a surface.
+ RefPtr<dom::Promise> promise =
+ wgp->DrawSnapshot(nullptr, 1.0, "white"_ns, false, IgnoreErrors());
+ if (!promise) {
+ return CapturePromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
+ }
+
+ MozPromiseHolder<CapturePromise> holder;
+ RefPtr<CapturePromise> p = holder.Ensure(__func__);
+ TabCapturedHandler::Create(promise, std::move(holder));
+ return p;
+}
+} // namespace mozilla
+
+namespace webrtc {
+// static
+std::unique_ptr<webrtc::DesktopCapturer>
+webrtc::DesktopCapturer::CreateRawTabCapturer(
+ const webrtc::DesktopCaptureOptions& options) {
+ return std::unique_ptr<webrtc::DesktopCapturer>(
+ new mozilla::TabCapturerWebrtc(options));
+}
+
+void webrtc::DesktopDeviceInfoImpl::InitializeTabList() {
+ if (!mozilla::StaticPrefs::media_getusermedia_browser_enabled()) {
+ return;
+ }
+
+  // This is a sync dispatch to the main thread, which is unfortunate. To
+  // call JavaScript we have to be on the main thread, but the rest of the
+  // DesktopCapturer code very much wants to stay off the main thread. This
+  // might be solvable by calling this method earlier, while we're still on
+  // the main thread, and plumbing the information down to here.
+ nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(__func__, [&] {
+ nsresult rv;
+ nsCOMPtr<nsIBrowserWindowTracker> bwt =
+ do_ImportModule("resource:///modules/BrowserWindowTracker.jsm",
+ "BrowserWindowTracker", &rv);
+ if (NS_FAILED(rv)) {
+ return;
+ }
+
+ nsTArray<RefPtr<nsIVisibleTab>> tabArray;
+ rv = bwt->GetAllVisibleTabs(tabArray);
+ if (NS_FAILED(rv)) {
+ return;
+ }
+
+ for (const auto& browserTab : tabArray) {
+ nsString contentTitle;
+ browserTab->GetContentTitle(contentTitle);
+ int64_t browserId;
+ browserTab->GetBrowserId(&browserId);
+
+ DesktopTab* desktopTab = new DesktopTab;
+ if (desktopTab) {
+ char* contentTitleUTF8 = ToNewUTF8String(contentTitle);
+ desktopTab->setTabBrowserId(browserId);
+ desktopTab->setTabName(contentTitleUTF8);
+ std::ostringstream uniqueId;
+ uniqueId << browserId;
+ desktopTab->setUniqueIdName(uniqueId.str().c_str());
+ desktop_tab_list_[static_cast<intptr_t>(
+ desktopTab->getTabBrowserId())] = desktopTab;
+ free(contentTitleUTF8);
+ }
+ }
+ });
+ mozilla::SyncRunnable::DispatchToThread(
+ mozilla::GetMainThreadSerialEventTarget(), runnable);
+}
+
+} // namespace webrtc
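
For context, here is a consumer-side sketch of how the tab list built by InitializeTabList() above could feed SelectSource(). It is not part of the patch: it assumes DesktopTab is default-constructible, that getTabInfo() returns 0 on success, and that DesktopDeviceInfo::Create() hands back a ready-to-use instance; only the accessors that appear in this patch are certain.

#include <memory>

#include "desktop_device_info.h"
#include "modules/desktop_capture/desktop_capture_options.h"
#include "modules/desktop_capture/desktop_capturer.h"

std::unique_ptr<webrtc::DesktopCapturer> PickFirstTabCapturer() {
  std::unique_ptr<webrtc::DesktopDeviceInfo> info(
      webrtc::DesktopDeviceInfo::Create());
  if (!info || info->getTabCount() < 1) {
    return nullptr;  // Tab sharing disabled or no visible tabs.
  }
  webrtc::DesktopTab tab;
  if (info->getTabInfo(0, tab) != 0) {  // assumed: 0 indicates success
    return nullptr;
  }
  auto capturer = webrtc::DesktopCapturer::CreateRawTabCapturer(
      webrtc::DesktopCaptureOptions::CreateDefault());
  // The tab's browser id doubles as the DesktopCapturer SourceId.
  capturer->SelectSource(tab.getTabBrowserId());
  return capturer;
}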
diff --git a/dom/media/systemservices/video_engine/tab_capturer.h b/dom/media/systemservices/video_engine/tab_capturer.h
new file mode 100644
index 0000000000..ea896310cb
--- /dev/null
+++ b/dom/media/systemservices/video_engine/tab_capturer.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_DESKTOP_CAPTURE_TAB_CAPTURER_H_
+#define MODULES_DESKTOP_CAPTURE_TAB_CAPTURER_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "mozilla/dom/ImageBitmap.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/TaskQueue.h"
+#include "nsDeque.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class CaptureFrameRequest;
+class TabCapturedHandler;
+
+class TabCapturerWebrtc : public webrtc::DesktopCapturer {
+ private:
+ ~TabCapturerWebrtc();
+
+ public:
+ friend class CaptureFrameRequest;
+ friend class TabCapturedHandler;
+
+ explicit TabCapturerWebrtc(const webrtc::DesktopCaptureOptions& options);
+
+ static std::unique_ptr<webrtc::DesktopCapturer> CreateRawWindowCapturer(
+ const webrtc::DesktopCaptureOptions& options);
+
+ TabCapturerWebrtc(const TabCapturerWebrtc&) = delete;
+ TabCapturerWebrtc& operator=(const TabCapturerWebrtc&) = delete;
+
+ // DesktopCapturer interface.
+ void Start(Callback* callback) override;
+ void CaptureFrame() override;
+ bool GetSourceList(SourceList* sources) override;
+ bool SelectSource(SourceId id) override;
+ bool FocusOnSelectedSource() override;
+ bool IsOccluded(const webrtc::DesktopVector& pos) override;
+
+ private:
+ // Capture code
+ using CapturePromise = MozPromise<RefPtr<dom::ImageBitmap>, nsresult, true>;
+ RefPtr<CapturePromise> CaptureFrameNow();
+
+ // Helper that checks for overrun requests. Returns true if aRequest had not
+ // been dropped.
+ bool CompleteRequest(CaptureFrameRequest* aRequest);
+
+ const RefPtr<TaskQueue> mMainThreadWorker;
+ webrtc::DesktopCapturer::Callback* mCallback = nullptr;
+ uint64_t mBrowserId = 0;
+
+ // mMainThreadWorker only
+ nsRefPtrDeque<CaptureFrameRequest> mRequests;
+};
+
+} // namespace mozilla
+
+#endif // MODULES_DESKTOP_CAPTURE_TAB_CAPTURER_H_
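
Finally, a minimal DesktopCapturer::Callback sketch (not part of the patch) illustrating the pull model the class above implements: Start() registers the callback, each CaptureFrame() call eventually yields one OnCaptureResult(), and ERROR_TEMPORARY means no frame was produced this round.

#include <memory>

#include "modules/desktop_capture/desktop_capturer.h"
#include "modules/desktop_capture/desktop_frame.h"

class LoggingCallback final : public webrtc::DesktopCapturer::Callback {
 public:
  void OnCaptureResult(webrtc::DesktopCapturer::Result result,
                       std::unique_ptr<webrtc::DesktopFrame> frame) override {
    if (result != webrtc::DesktopCapturer::Result::SUCCESS) {
      return;  // Temporary failure; the driver may simply try again.
    }
    // frame->data(), frame->stride() and frame->size() hold the pixels copied
    // from the tab snapshot.
  }
};

A driver would keep the callback alive for the capturer's whole lifetime, call Start(&callback) once, then call CaptureFrame() periodically, for example from the PlatformUIThread timer shown earlier.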