// -*- mode: C++ -*-

/* This file is auto-generated by run_glean_parser.py.
   It is only for internal use by types in
   toolkit/components/glean/bindings/private */

#ifndef mozilla_glean_ScalarGifftMap_h
#define mozilla_glean_ScalarGifftMap_h

#include "mozilla/AppShutdown.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/Maybe.h"
#include "mozilla/Telemetry.h"
#include "mozilla/Tuple.h"
#include "mozilla/DataMutex.h"
#include "nsClassHashtable.h"
#include "nsIThread.h"
#include "nsTHashMap.h"
#include "nsThreadUtils.h"

namespace mozilla::glean {

using Telemetry::ScalarID;

typedef nsUint32HashKey SubmetricIdHashKey;
typedef nsTHashMap<SubmetricIdHashKey, Tuple<ScalarID, nsString>>
    SubmetricToLabeledMirrorMapType;
typedef StaticDataMutex<UniquePtr<SubmetricToLabeledMirrorMapType>>
    SubmetricToMirrorMutex;

static inline Maybe<SubmetricToMirrorMutex::AutoLock> GetLabeledMirrorLock() {
  static SubmetricToMirrorMutex sLabeledMirrors("sLabeledMirrors");
  auto lock = sLabeledMirrors.Lock();
  // GIFFT will work up to the end of AppShutdownTelemetry.
  if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMWillShutdown)) {
    return Nothing();
  }
  if (!*lock) {
    *lock = MakeUnique<SubmetricToLabeledMirrorMapType>();
    RefPtr<nsIRunnable> cleanupFn = NS_NewRunnableFunction(__func__, [&] {
      if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMWillShutdown)) {
        auto lock = sLabeledMirrors.Lock();
        *lock = nullptr;  // deletes, see UniquePtr.h
        return;
      }
      RunOnShutdown(
          [&] {
            auto lock = sLabeledMirrors.Lock();
            *lock = nullptr;  // deletes, see UniquePtr.h
          },
          ShutdownPhase::XPCOMWillShutdown);
    });
    // Both getting the main thread and dispatching to it can fail.
    // In that event we leak. Grab a pointer so we have something to
    // NS_RELEASE in that case.
    nsIRunnable* temp = cleanupFn.get();
    nsCOMPtr<nsIThread> mainThread;
    if (NS_FAILED(NS_GetMainThread(getter_AddRefs(mainThread))) ||
        NS_FAILED(mainThread->Dispatch(cleanupFn.forget(),
                                       nsIThread::DISPATCH_NORMAL))) {
      // Failed to dispatch the cleanup routine.
      // First, un-leak the runnable (but only if we actually attempted
      // dispatch).
      if (!cleanupFn) {
        NS_RELEASE(temp);
      }
      // Next, clean up immediately, and allow metrics to try again later.
      *lock = nullptr;
      return Nothing();
    }
  }
  return Some(std::move(lock));
}
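// Illustrative sketch (not part of the generated output): roughly how a
// caller might record a labeled submetric's mirror target under this lock.
// The submetric id, mirror scalar, and label below are hypothetical.
//
//   auto lock = GetLabeledMirrorLock();
//   if (lock) {
//     lock->ref()->InsertOrUpdate(
//         aSubmetricId,
//         MakeTuple(ScalarID::SOME_KEYED_BOOL_SCALAR, nsString(u"a_label")));
//   }
//
// If the lock is Nothing(), shutdown is already at or beyond
// XPCOMWillShutdown and the mirror write is simply skipped.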
namespace {
class ScalarIDHashKey : public PLDHashEntryHdr {
 public:
  typedef const ScalarID& KeyType;
  typedef const ScalarID* KeyTypePointer;

  explicit ScalarIDHashKey(KeyTypePointer aKey) : mValue(*aKey) {}
  ScalarIDHashKey(ScalarIDHashKey&& aOther)
      : PLDHashEntryHdr(std::move(aOther)), mValue(std::move(aOther.mValue)) {}
  ~ScalarIDHashKey() = default;

  KeyType GetKey() const { return mValue; }
  bool KeyEquals(KeyTypePointer aKey) const { return *aKey == mValue; }

  static KeyTypePointer KeyToPointer(KeyType aKey) { return &aKey; }
  static PLDHashNumber HashKey(KeyTypePointer aKey) {
    return static_cast<std::underlying_type<ScalarID>::type>(*aKey);
  }
  enum { ALLOW_MEMMOVE = true };

 private:
  const ScalarID mValue;
};
}  // namespace

typedef StaticDataMutex<UniquePtr<nsTHashMap<ScalarIDHashKey, TimeStamp>>>
    TimesToStartsMutex;

static inline Maybe<TimesToStartsMutex::AutoLock> GetTimesToStartsLock() {
  static TimesToStartsMutex sTimespanStarts("sTimespanStarts");
  auto lock = sTimespanStarts.Lock();
  // GIFFT will work up to the end of AppShutdownTelemetry.
  if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMWillShutdown)) {
    return Nothing();
  }
  if (!*lock) {
    *lock = MakeUnique<nsTHashMap<ScalarIDHashKey, TimeStamp>>();
    RefPtr<nsIRunnable> cleanupFn = NS_NewRunnableFunction(__func__, [&] {
      if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMWillShutdown)) {
        auto lock = sTimespanStarts.Lock();
        *lock = nullptr;  // deletes, see UniquePtr.h
        return;
      }
      RunOnShutdown(
          [&] {
            auto lock = sTimespanStarts.Lock();
            *lock = nullptr;  // deletes, see UniquePtr.h
          },
          ShutdownPhase::XPCOMWillShutdown);
    });
    // Both getting the main thread and dispatching to it can fail.
    // In that event we leak. Grab a pointer so we have something to
    // NS_RELEASE in that case.
    nsIRunnable* temp = cleanupFn.get();
    nsCOMPtr<nsIThread> mainThread;
    if (NS_FAILED(NS_GetMainThread(getter_AddRefs(mainThread))) ||
        NS_FAILED(mainThread->Dispatch(cleanupFn.forget(),
                                       nsIThread::DISPATCH_NORMAL))) {
      // Failed to dispatch the cleanup routine.
      // First, un-leak the runnable (but only if we actually attempted
      // dispatch).
      if (!cleanupFn) {
        NS_RELEASE(temp);
      }
      // Next, clean up immediately, and allow metrics to try again later.
      *lock = nullptr;
      return Nothing();
    }
  }
  return Some(std::move(lock));
}

static inline bool IsSubmetricId(uint32_t aId) {
  // Submetrics have the 2^25 bit set.
  // (ID_BITS - ID_SIGNAL_BITS, keep it in sync with js.py).
  return (aId & (1 << 25)) > 0;
}

static Maybe<ScalarID> ScalarIdForMetric(uint32_t aId) {
  switch (aId) {
    case 1: {  // test.boolean_metric
      return Some(ScalarID::SOME_BOOL_SCALAR);
    }
    case 2: {  // test.labeled_boolean_metric
      return Some(ScalarID::SOME_KEYED_BOOL_SCALAR);
    }
    case 3: {  // test.labeled_boolean_metric_labels
      return Some(ScalarID::SOME_OTHER_KEYED_BOOL_SCALAR);
    }
    case 4: {  // test.counter_metric
      return Some(ScalarID::SOME_UINT_SCALAR);
    }
    case 5: {  // test.labeled_counter_metric
      return Some(ScalarID::SOME_KEYED_UINT_SCALAR);
    }
    case 6: {  // test.labeled_counter_metric_labels
      return Some(ScalarID::SOME_OTHER_KEYED_UINT_SCALAR);
    }
    case 7: {  // test.string_metric
      return Some(ScalarID::SOME_STRING_SCALAR);
    }
    case 10: {  // test.string_list_metric
      return Some(ScalarID::YET_ANOTHER_KEYED_BOOL_SCALAR);
    }
    case 11: {  // test.timespan_metric
      return Some(ScalarID::SOME_OTHER_UINT_SCALAR);
    }
    case 15: {  // test.nested.uuid_metric
      return Some(ScalarID::SOME_OTHER_STRING_SCALAR);
    }
    case 16: {  // test.nested.datetime_metric
      return Some(ScalarID::SOME_STILL_OTHER_STRING_SCALAR);
    }
    case 19: {  // test.nested.quantity_metric
      return Some(ScalarID::TELEMETRY_TEST_MIRROR_FOR_QUANTITY);
    }
    default: {
      return Nothing();
    }
  }
}

}  // namespace mozilla::glean

#endif  // mozilla_glean_ScalarGifftMap_h
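// Illustrative sketch (not part of the generated output): roughly how the
// GIFFT machinery might consult this map before mirroring a value to
// Telemetry. The function name, metric id, and the use of
// Telemetry::ScalarAdd are illustrative assumptions, not code emitted by
// run_glean_parser.py.
//
//   void MirrorCounterAdd(uint32_t aMetricId, uint32_t aAmount) {
//     if (IsSubmetricId(aMetricId)) {
//       return;  // labeled submetrics are mirrored via the labeled map
//     }
//     if (auto scalarId = ScalarIdForMetric(aMetricId)) {
//       Telemetry::ScalarAdd(scalarId.extract(), aAmount);
//     }
//   }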