author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /netwerk/cache
parent     Initial commit. (diff)
download   firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
           firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--netwerk/cache/moz.build50
-rw-r--r--netwerk/cache/nsApplicationCache.h33
-rw-r--r--netwerk/cache/nsApplicationCacheService.cpp192
-rw-r--r--netwerk/cache/nsApplicationCacheService.h27
-rw-r--r--netwerk/cache/nsCache.cpp70
-rw-r--r--netwerk/cache/nsCache.h39
-rw-r--r--netwerk/cache/nsCacheDevice.h63
-rw-r--r--netwerk/cache/nsCacheEntry.cpp415
-rw-r--r--netwerk/cache/nsCacheEntry.h285
-rw-r--r--netwerk/cache/nsCacheEntryDescriptor.cpp1307
-rw-r--r--netwerk/cache/nsCacheEntryDescriptor.h220
-rw-r--r--netwerk/cache/nsCacheMetaData.cpp151
-rw-r--r--netwerk/cache/nsCacheMetaData.h45
-rw-r--r--netwerk/cache/nsCacheRequest.h146
-rw-r--r--netwerk/cache/nsCacheService.cpp1910
-rw-r--r--netwerk/cache/nsCacheService.h331
-rw-r--r--netwerk/cache/nsCacheSession.cpp121
-rw-r--r--netwerk/cache/nsCacheSession.h67
-rw-r--r--netwerk/cache/nsCacheUtils.cpp78
-rw-r--r--netwerk/cache/nsCacheUtils.h44
-rw-r--r--netwerk/cache/nsDeleteDir.cpp362
-rw-r--r--netwerk/cache/nsDeleteDir.h79
-rw-r--r--netwerk/cache/nsDiskCache.h72
-rw-r--r--netwerk/cache/nsDiskCacheDeviceSQL.cpp2608
-rw-r--r--netwerk/cache/nsDiskCacheDeviceSQL.h263
-rw-r--r--netwerk/cache/nsICache.idl142
-rw-r--r--netwerk/cache/nsICacheEntryDescriptor.idl164
-rw-r--r--netwerk/cache/nsICacheListener.idl32
-rw-r--r--netwerk/cache/nsICacheService.idl98
-rw-r--r--netwerk/cache/nsICacheSession.idl88
-rw-r--r--netwerk/cache/nsICacheVisitor.idl123
-rw-r--r--netwerk/cache2/AppCacheStorage.cpp185
-rw-r--r--netwerk/cache2/AppCacheStorage.h35
-rw-r--r--netwerk/cache2/CacheEntry.cpp1913
-rw-r--r--netwerk/cache2/CacheEntry.h573
-rw-r--r--netwerk/cache2/CacheFile.cpp2596
-rw-r--r--netwerk/cache2/CacheFile.h264
-rw-r--r--netwerk/cache2/CacheFileChunk.cpp840
-rw-r--r--netwerk/cache2/CacheFileChunk.h238
-rw-r--r--netwerk/cache2/CacheFileContextEvictor.cpp741
-rw-r--r--netwerk/cache2/CacheFileContextEvictor.h99
-rw-r--r--netwerk/cache2/CacheFileIOManager.cpp4225
-rw-r--r--netwerk/cache2/CacheFileIOManager.h479
-rw-r--r--netwerk/cache2/CacheFileInputStream.cpp719
-rw-r--r--netwerk/cache2/CacheFileInputStream.h80
-rw-r--r--netwerk/cache2/CacheFileMetadata.cpp1049
-rw-r--r--netwerk/cache2/CacheFileMetadata.h237
-rw-r--r--netwerk/cache2/CacheFileOutputStream.cpp466
-rw-r--r--netwerk/cache2/CacheFileOutputStream.h70
-rw-r--r--netwerk/cache2/CacheFileUtils.cpp669
-rw-r--r--netwerk/cache2/CacheFileUtils.h224
-rw-r--r--netwerk/cache2/CacheHashUtils.cpp235
-rw-r--r--netwerk/cache2/CacheHashUtils.h67
-rw-r--r--netwerk/cache2/CacheIOThread.cpp641
-rw-r--r--netwerk/cache2/CacheIOThread.h147
-rw-r--r--netwerk/cache2/CacheIndex.cpp3903
-rw-r--r--netwerk/cache2/CacheIndex.h1277
-rw-r--r--netwerk/cache2/CacheIndexContextIterator.cpp33
-rw-r--r--netwerk/cache2/CacheIndexContextIterator.h31
-rw-r--r--netwerk/cache2/CacheIndexIterator.cpp102
-rw-r--r--netwerk/cache2/CacheIndexIterator.h57
-rw-r--r--netwerk/cache2/CacheLog.cpp22
-rw-r--r--netwerk/cache2/CacheLog.h20
-rw-r--r--netwerk/cache2/CacheObserver.cpp257
-rw-r--r--netwerk/cache2/CacheObserver.h109
-rw-r--r--netwerk/cache2/CacheStorage.cpp253
-rw-r--r--netwerk/cache2/CacheStorage.h72
-rw-r--r--netwerk/cache2/CacheStorageService.cpp2376
-rw-r--r--netwerk/cache2/CacheStorageService.h437
-rw-r--r--netwerk/cache2/OldWrappers.cpp1037
-rw-r--r--netwerk/cache2/OldWrappers.h268
-rw-r--r--netwerk/cache2/moz.build62
-rw-r--r--netwerk/cache2/nsICacheEntry.idl364
-rw-r--r--netwerk/cache2/nsICacheEntryDoomCallback.idl15
-rw-r--r--netwerk/cache2/nsICacheEntryOpenCallback.idl91
-rw-r--r--netwerk/cache2/nsICacheStorage.idl145
-rw-r--r--netwerk/cache2/nsICacheStorageService.idl155
-rw-r--r--netwerk/cache2/nsICacheStorageVisitor.idl35
-rw-r--r--netwerk/cache2/nsICacheTesting.idl20
79 files changed, 37558 insertions, 0 deletions
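
The diff below imports both the legacy cache ("netwerk/cache") and the cache2 backend ("netwerk/cache2") from upstream Firefox 86.0.1. As a rough orientation aid only, and not part of the patch, the sketch that follows shows how a consumer drives the legacy nsICacheEntryDescriptor interface imported here; obtaining the descriptor in the first place goes through nsICacheService/nsICacheSession (see the IDL files listed above) and is left out, and the metadata key and expiration value are purely illustrative.

// Illustrative sketch, assuming an already-opened legacy cache descriptor.
#include "nsICacheEntryDescriptor.h"
#include "nsIOutputStream.h"
#include "nsCOMPtr.h"
#include "nsString.h"

static nsresult StoreBody(nsICacheEntryDescriptor* aDesc,
                          const nsACString& aBody) {
  // Expiration is stored as seconds since the epoch; the value is made up.
  nsresult rv = aDesc->SetExpirationTime(0x7FFFFFFF);
  if (NS_FAILED(rv)) return rv;

  // "x-demo-note" is a hypothetical metadata key, not one used by the patch.
  rv = aDesc->SetMetaDataElement("x-demo-note", "written by this sketch");
  if (NS_FAILED(rv)) return rv;

  // Write the body through the descriptor's output stream wrapper.
  nsCOMPtr<nsIOutputStream> out;
  rv = aDesc->OpenOutputStream(0, getter_AddRefs(out));
  if (NS_FAILED(rv)) return rv;

  uint32_t written = 0;
  rv = out->Write(aBody.BeginReading(), aBody.Length(), &written);
  out->Close();

  // Close() hands the entry back to the service; Doom() would evict it instead.
  aDesc->Close();
  return rv;
}
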
diff --git a/netwerk/cache/moz.build b/netwerk/cache/moz.build
new file mode 100644
index 0000000000..62a0c06ebe
--- /dev/null
+++ b/netwerk/cache/moz.build
@@ -0,0 +1,50 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+    BUG_COMPONENT = ("Core", "Networking: Cache")
+
+XPIDL_SOURCES += [
+ "nsICache.idl",
+ "nsICacheEntryDescriptor.idl",
+ "nsICacheListener.idl",
+ "nsICacheService.idl",
+ "nsICacheSession.idl",
+ "nsICacheVisitor.idl",
+]
+
+XPIDL_MODULE = "necko_cache"
+
+EXPORTS += [
+ "nsApplicationCache.h",
+ "nsApplicationCacheService.h",
+ "nsCacheDevice.h",
+ "nsCacheService.h",
+ "nsDeleteDir.h",
+ "nsDiskCacheDeviceSQL.h",
+]
+
+UNIFIED_SOURCES += [
+ "nsApplicationCacheService.cpp",
+ "nsCache.cpp",
+ "nsCacheEntry.cpp",
+ "nsCacheEntryDescriptor.cpp",
+ "nsCacheMetaData.cpp",
+ "nsCacheService.cpp",
+ "nsCacheSession.cpp",
+ "nsCacheUtils.cpp",
+ "nsDeleteDir.cpp",
+ "nsDiskCacheDeviceSQL.cpp",
+]
+
+FINAL_LIBRARY = "xul"
+
+LOCAL_INCLUDES += [
+ "/netwerk/base",
+]
+
+if CONFIG["CC_TYPE"] in ("clang", "gcc"):
+    CXXFLAGS += ["-Wno-error=shadow"]
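
The moz.build above wires the legacy cache into libxul: the IDL files are grouped under the necko_cache XPIDL module, the listed headers are exported, and the C++ sources are compiled as unified sources. A hypothetical extension of it (file names invented for illustration) would follow the same pattern:

# Hypothetical addition, not part of the patch; real entries are kept sorted.
XPIDL_SOURCES += [
    "nsICacheExample.idl",
]

UNIFIED_SOURCES += [
    "nsCacheExample.cpp",
]
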
diff --git a/netwerk/cache/nsApplicationCache.h b/netwerk/cache/nsApplicationCache.h
new file mode 100644
index 0000000000..37e3bc14e4
--- /dev/null
+++ b/netwerk/cache/nsApplicationCache.h
@@ -0,0 +1,33 @@
+/* vim:set ts=2 sw=2 sts=2 et cin: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIApplicationCache.h"
+#include "nsWeakReference.h"
+#include "mozilla/RefPtr.h"
+#include "nsString.h"
+
+class nsOfflineCacheDevice;
+
+class nsApplicationCache : public nsIApplicationCache,
+ public nsSupportsWeakReference {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIAPPLICATIONCACHE
+
+ nsApplicationCache(nsOfflineCacheDevice* device, const nsACString& group,
+ const nsACString& clientID);
+
+ nsApplicationCache();
+
+ void MarkInvalid();
+
+ private:
+ virtual ~nsApplicationCache();
+
+ RefPtr<nsOfflineCacheDevice> mDevice;
+ nsCString mGroup;
+ nsCString mClientID;
+ bool mValid;
+};
diff --git a/netwerk/cache/nsApplicationCacheService.cpp b/netwerk/cache/nsApplicationCacheService.cpp
new file mode 100644
index 0000000000..af931706a3
--- /dev/null
+++ b/netwerk/cache/nsApplicationCacheService.cpp
@@ -0,0 +1,192 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsDiskCache.h"
+#include "nsDiskCacheDeviceSQL.h"
+#include "nsCacheService.h"
+#include "nsApplicationCacheService.h"
+#include "nsCRT.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsIObserverService.h"
+#include "mozilla/LoadContextInfo.h"
+
+using namespace mozilla;
+
+static NS_DEFINE_CID(kCacheServiceCID, NS_CACHESERVICE_CID);
+
+//-----------------------------------------------------------------------------
+// nsApplicationCacheService
+//-----------------------------------------------------------------------------
+
+NS_IMPL_ISUPPORTS(nsApplicationCacheService, nsIApplicationCacheService)
+
+nsApplicationCacheService::nsApplicationCacheService() {
+ nsCOMPtr<nsICacheService> serv = do_GetService(kCacheServiceCID);
+ mCacheService = nsCacheService::GlobalInstance();
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::BuildGroupIDForInfo(
+ nsIURI* aManifestURL, nsILoadContextInfo* aLoadContextInfo,
+ nsACString& _result) {
+ nsresult rv;
+
+ nsAutoCString originSuffix;
+ if (aLoadContextInfo) {
+ aLoadContextInfo->OriginAttributesPtr()->CreateSuffix(originSuffix);
+ }
+
+ rv = nsOfflineCacheDevice::BuildApplicationCacheGroupID(
+ aManifestURL, originSuffix, _result);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::BuildGroupIDForSuffix(
+ nsIURI* aManifestURL, nsACString const& aOriginSuffix,
+ nsACString& _result) {
+ nsresult rv;
+
+ rv = nsOfflineCacheDevice::BuildApplicationCacheGroupID(
+ aManifestURL, aOriginSuffix, _result);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::CreateApplicationCache(const nsACString& group,
+ nsIApplicationCache** out) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->CreateApplicationCache(group, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::CreateCustomApplicationCache(
+ const nsACString& group, nsIFile* profileDir, int32_t quota,
+ nsIApplicationCache** out) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetCustomOfflineDevice(profileDir, quota,
+ getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->CreateApplicationCache(group, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::GetApplicationCache(const nsACString& clientID,
+ nsIApplicationCache** out) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->GetApplicationCache(clientID, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::GetActiveCache(const nsACString& group,
+ nsIApplicationCache** out) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->GetActiveCache(group, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::DeactivateGroup(const nsACString& group) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->DeactivateGroup(group);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::ChooseApplicationCache(
+ const nsACString& key, nsILoadContextInfo* aLoadContextInfo,
+ nsIApplicationCache** out) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ if (NS_FAILED(rv)) {
+ // Silently fail and provide no appcache to the caller.
+ return NS_OK;
+ }
+
+ return device->ChooseApplicationCache(key, aLoadContextInfo, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::CacheOpportunistically(nsIApplicationCache* cache,
+ const nsACString& key) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->CacheOpportunistically(cache, key);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::Evict(nsILoadContextInfo* aInfo) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->Evict(aInfo);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::EvictMatchingOriginAttributes(
+ nsAString const& aPattern) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mozilla::OriginAttributesPattern pattern;
+ if (!pattern.Init(aPattern)) {
+ NS_ERROR(
+ "Could not parse OriginAttributesPattern JSON in "
+ "clear-origin-attributes-data notification");
+ return NS_ERROR_FAILURE;
+ }
+
+ return device->Evict(pattern);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::GetGroups(nsTArray<nsCString>& keys) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->GetGroups(keys);
+}
+
+NS_IMETHODIMP
+nsApplicationCacheService::GetGroupsTimeOrdered(nsTArray<nsCString>& keys) {
+ if (!mCacheService) return NS_ERROR_UNEXPECTED;
+
+ RefPtr<nsOfflineCacheDevice> device;
+ nsresult rv = mCacheService->GetOfflineDevice(getter_AddRefs(device));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return device->GetGroupsTimeOrdered(keys);
+}
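
Every delegating method above repeats the same three steps: fail with NS_ERROR_UNEXPECTED if the cache service is gone, look up the offline device, then forward the call. Factored into one helper, the pattern looks like the sketch below (an illustration only; the patch keeps the methods spelled out):

// Illustrative refactoring sketch, not part of the patch.
template <typename Func>
static nsresult WithOfflineDevice(nsCacheService* aService, Func aCall) {
  if (!aService) return NS_ERROR_UNEXPECTED;

  RefPtr<nsOfflineCacheDevice> device;
  nsresult rv = aService->GetOfflineDevice(getter_AddRefs(device));
  NS_ENSURE_SUCCESS(rv, rv);

  return aCall(device.get());
}

// Usage, equivalent to DeactivateGroup() above:
//   return WithOfflineDevice(mCacheService, [&](nsOfflineCacheDevice* d) {
//     return d->DeactivateGroup(group);
//   });
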
diff --git a/netwerk/cache/nsApplicationCacheService.h b/netwerk/cache/nsApplicationCacheService.h
new file mode 100644
index 0000000000..d57a913726
--- /dev/null
+++ b/netwerk/cache/nsApplicationCacheService.h
@@ -0,0 +1,27 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsApplicationCacheService_h_
+#define _nsApplicationCacheService_h_
+
+#include "nsIApplicationCacheService.h"
+#include "mozilla/Attributes.h"
+
+class nsCacheService;
+
+class nsApplicationCacheService final : public nsIApplicationCacheService {
+ public:
+ nsApplicationCacheService();
+
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIAPPLICATIONCACHESERVICE
+
+ static void AppClearDataObserverInit();
+
+ private:
+ ~nsApplicationCacheService() = default;
+ RefPtr<nsCacheService> mCacheService;
+};
+
+#endif // _nsApplicationCacheService_h_
diff --git a/netwerk/cache/nsCache.cpp b/netwerk/cache/nsCache.cpp
new file mode 100644
index 0000000000..73b98dee1c
--- /dev/null
+++ b/netwerk/cache/nsCache.cpp
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCache.h"
+#include "nsReadableUtils.h"
+#include "nsDependentSubstring.h"
+#include "nsString.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+/**
+ * Cache Service Utility Functions
+ */
+
+mozilla::LazyLogModule gCacheLog("cache");
+
+void CacheLogPrintPath(mozilla::LogLevel level, const char* format,
+ nsIFile* item) {
+ MOZ_LOG(gCacheLog, level, (format, item->HumanReadablePath().get()));
+}
+
+uint32_t SecondsFromPRTime(PRTime prTime) {
+ int64_t microSecondsPerSecond = PR_USEC_PER_SEC;
+ return uint32_t(prTime / microSecondsPerSecond);
+}
+
+PRTime PRTimeFromSeconds(uint32_t seconds) {
+ int64_t intermediateResult = seconds;
+ PRTime prTime = intermediateResult * PR_USEC_PER_SEC;
+ return prTime;
+}
+
+nsresult ClientIDFromCacheKey(const nsACString& key, nsACString& result) {
+ nsReadingIterator<char> colon;
+ key.BeginReading(colon);
+
+ nsReadingIterator<char> start;
+ key.BeginReading(start);
+
+ nsReadingIterator<char> end;
+ key.EndReading(end);
+
+ if (FindCharInReadable(':', colon, end)) {
+ result.Assign(Substring(start, colon));
+ return NS_OK;
+ }
+
+ NS_ASSERTION(false, "FindCharInReadable failed to find ':'");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult ClientKeyFromCacheKey(const nsCString& key, nsACString& result) {
+ nsReadingIterator<char> start;
+ key.BeginReading(start);
+
+ nsReadingIterator<char> end;
+ key.EndReading(end);
+
+ if (FindCharInReadable(':', start, end)) {
+ ++start; // advance past clientID ':' delimiter
+ result.Assign(Substring(start, end));
+ return NS_OK;
+ }
+
+ NS_ASSERTION(false, "FindCharInReadable failed to find ':'");
+ result.Truncate(0);
+ return NS_ERROR_UNEXPECTED;
+}
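
ClientIDFromCacheKey() and ClientKeyFromCacheKey() above split a full cache key at its first ':' into the session's client ID and the client-visible key, and the two time helpers convert between PRTime microseconds and whole seconds. A short usage sketch (the concrete key string is made up for illustration):

// Illustrative only; "HTTP:http://example.com/" is an invented cache key.
#include "nsCache.h"
#include "nsString.h"

static void DemoCacheKeyHelpers() {
  nsCString key("HTTP:http://example.com/");

  nsAutoCString clientID, clientKey;
  if (NS_SUCCEEDED(ClientIDFromCacheKey(key, clientID)) &&
      NS_SUCCEEDED(ClientKeyFromCacheKey(key, clientKey))) {
    // clientID  == "HTTP"
    // clientKey == "http://example.com/"
  }

  // PRTime counts microseconds since the epoch; the cache stores seconds,
  // so a round trip drops the sub-second part.
  uint32_t nowSeconds = SecondsFromPRTime(PR_Now());
  PRTime rounded = PRTimeFromSeconds(nowSeconds);
  (void)rounded;
}
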
diff --git a/netwerk/cache/nsCache.h b/netwerk/cache/nsCache.h
new file mode 100644
index 0000000000..859dddb8f2
--- /dev/null
+++ b/netwerk/cache/nsCache.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Cache Service Utility Functions
+ */
+
+#ifndef _nsCache_h_
+#define _nsCache_h_
+
+#include "mozilla/Logging.h"
+#include "nsISupports.h"
+#include "nsIFile.h"
+#include "nsAString.h"
+#include "prtime.h"
+#include "nsError.h"
+
+// PR_LOG args = "format string", arg, arg, ...
+extern mozilla::LazyLogModule gCacheLog;
+void CacheLogPrintPath(mozilla::LogLevel level, const char* format,
+ nsIFile* item);
+#define CACHE_LOG_INFO(args) MOZ_LOG(gCacheLog, mozilla::LogLevel::Info, args)
+#define CACHE_LOG_ERROR(args) MOZ_LOG(gCacheLog, mozilla::LogLevel::Error, args)
+#define CACHE_LOG_WARNING(args) \
+ MOZ_LOG(gCacheLog, mozilla::LogLevel::Warning, args)
+#define CACHE_LOG_DEBUG(args) MOZ_LOG(gCacheLog, mozilla::LogLevel::Debug, args)
+#define CACHE_LOG_PATH(level, format, item) \
+ CacheLogPrintPath(level, format, item)
+
+extern uint32_t SecondsFromPRTime(PRTime prTime);
+extern PRTime PRTimeFromSeconds(uint32_t seconds);
+
+extern nsresult ClientIDFromCacheKey(const nsACString& key, nsACString& result);
+extern nsresult ClientKeyFromCacheKey(const nsCString& key, nsACString& result);
+
+#endif // _nsCache_h_
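
Each CACHE_LOG_* macro above takes its printf-style argument list as a single args parameter, so call sites wrap the whole list in an extra pair of parentheses, as the .cpp files later in this patch do. A hypothetical call site:

// Illustrative call site, not from the patch. The double parentheses pass the
// whole format string plus arguments through as one macro argument to MOZ_LOG.
#include "nsCache.h"
#include "nsCacheEntry.h"  // added later in this patch

static void LogEntrySize(nsCacheEntry* aEntry) {
  CACHE_LOG_DEBUG(("entry %p has data size %u\n", aEntry, aEntry->DataSize()));
}
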
diff --git a/netwerk/cache/nsCacheDevice.h b/netwerk/cache/nsCacheDevice.h
new file mode 100644
index 0000000000..fc68bcb8a2
--- /dev/null
+++ b/netwerk/cache/nsCacheDevice.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheDevice_h_
+#define _nsCacheDevice_h_
+
+#include "nspr.h"
+#include "nsError.h"
+#include "nsICache.h"
+#include "nsStringFwd.h"
+
+class nsIFile;
+class nsCacheEntry;
+class nsICacheVisitor;
+class nsIInputStream;
+class nsIOutputStream;
+
+/******************************************************************************
+ * nsCacheDevice
+ *******************************************************************************/
+class nsCacheDevice {
+ public:
+ MOZ_COUNTED_DEFAULT_CTOR(nsCacheDevice)
+ MOZ_COUNTED_DTOR_VIRTUAL(nsCacheDevice)
+
+ virtual nsresult Init() = 0;
+ virtual nsresult Shutdown() = 0;
+
+ virtual const char* GetDeviceID(void) = 0;
+ virtual nsCacheEntry* FindEntry(nsCString* key, bool* collision) = 0;
+
+ virtual nsresult DeactivateEntry(nsCacheEntry* entry) = 0;
+ virtual nsresult BindEntry(nsCacheEntry* entry) = 0;
+ virtual void DoomEntry(nsCacheEntry* entry) = 0;
+
+ virtual nsresult OpenInputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIInputStream** result) = 0;
+
+ virtual nsresult OpenOutputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIOutputStream** result) = 0;
+
+ virtual nsresult GetFileForEntry(nsCacheEntry* entry, nsIFile** result) = 0;
+
+ virtual nsresult OnDataSizeChange(nsCacheEntry* entry, int32_t deltaSize) = 0;
+
+ virtual nsresult Visit(nsICacheVisitor* visitor) = 0;
+
+ /**
+ * Device must evict entries associated with clientID. If clientID ==
+ * nullptr, all entries must be evicted. Active entries must be doomed,
+ * rather than evicted.
+ */
+ virtual nsresult EvictEntries(const char* clientID) = 0;
+};
+
+#endif // _nsCacheDevice_h_
diff --git a/netwerk/cache/nsCacheEntry.cpp b/netwerk/cache/nsCacheEntry.cpp
new file mode 100644
index 0000000000..c91748ef72
--- /dev/null
+++ b/netwerk/cache/nsCacheEntry.cpp
@@ -0,0 +1,415 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCache.h"
+#include "nspr.h"
+#include "nsCacheEntry.h"
+#include "nsCacheEntryDescriptor.h"
+#include "nsCacheMetaData.h"
+#include "nsCacheRequest.h"
+#include "nsThreadUtils.h"
+#include "nsError.h"
+#include "nsCacheService.h"
+#include "nsCacheDevice.h"
+#include "nsHashKeys.h"
+
+using namespace mozilla;
+
+nsCacheEntry::nsCacheEntry(const nsACString& key, bool streamBased,
+ nsCacheStoragePolicy storagePolicy)
+ : mKey(key),
+ mFetchCount(0),
+ mLastFetched(0),
+ mLastModified(0),
+ mLastValidated(0),
+ mExpirationTime(nsICache::NO_EXPIRATION_TIME),
+ mFlags(0),
+ mPredictedDataSize(-1),
+ mDataSize(0),
+ mCacheDevice(nullptr),
+ mCustomDevice(nullptr),
+ mData(nullptr),
+ mRequestQ{},
+ mDescriptorQ{} {
+ MOZ_COUNT_CTOR(nsCacheEntry);
+ PR_INIT_CLIST(this);
+ PR_INIT_CLIST(&mRequestQ);
+ PR_INIT_CLIST(&mDescriptorQ);
+
+ if (streamBased) MarkStreamBased();
+ SetStoragePolicy(storagePolicy);
+
+ MarkPublic();
+}
+
+nsCacheEntry::~nsCacheEntry() {
+ MOZ_COUNT_DTOR(nsCacheEntry);
+
+ if (mData) nsCacheService::ReleaseObject_Locked(mData, mEventTarget);
+}
+
+nsresult nsCacheEntry::Create(const char* key, bool streamBased,
+ nsCacheStoragePolicy storagePolicy,
+ nsCacheDevice* device, nsCacheEntry** result) {
+ nsCacheEntry* entry =
+ new nsCacheEntry(nsCString(key), streamBased, storagePolicy);
+ entry->SetCacheDevice(device);
+ *result = entry;
+ return NS_OK;
+}
+
+void nsCacheEntry::Fetched() {
+ mLastFetched = SecondsFromPRTime(PR_Now());
+ ++mFetchCount;
+ MarkEntryDirty();
+}
+
+const char* nsCacheEntry::GetDeviceID() {
+ if (mCacheDevice) return mCacheDevice->GetDeviceID();
+ return nullptr;
+}
+
+void nsCacheEntry::TouchData() {
+ mLastModified = SecondsFromPRTime(PR_Now());
+ MarkDataDirty();
+}
+
+void nsCacheEntry::SetData(nsISupports* data) {
+ if (mData) {
+ nsCacheService::ReleaseObject_Locked(mData, mEventTarget);
+ mData = nullptr;
+ }
+
+ if (data) {
+ NS_ADDREF(mData = data);
+ mEventTarget = GetCurrentEventTarget();
+ }
+}
+
+void nsCacheEntry::TouchMetaData() {
+ mLastModified = SecondsFromPRTime(PR_Now());
+ MarkMetaDataDirty();
+}
+
+/**
+ * cache entry states
+ * 0 descriptors (new entry)
+ * 0 descriptors (existing, bound entry)
+ * n descriptors (existing, bound entry) valid
+ * n descriptors (existing, bound entry) not valid (wait until valid or
+ * doomed)
+ */
+
+nsresult nsCacheEntry::RequestAccess(nsCacheRequest* request,
+ nsCacheAccessMode* accessGranted) {
+ nsresult rv = NS_OK;
+
+ if (IsDoomed()) return NS_ERROR_CACHE_ENTRY_DOOMED;
+
+ if (!IsInitialized()) {
+ // brand new, unbound entry
+ if (request->IsStreamBased()) MarkStreamBased();
+ MarkInitialized();
+
+ *accessGranted = request->AccessRequested() & nsICache::ACCESS_WRITE;
+ NS_ASSERTION(*accessGranted, "new cache entry for READ-ONLY request");
+ PR_APPEND_LINK(request, &mRequestQ);
+ return rv;
+ }
+
+ if (IsStreamData() != request->IsStreamBased()) {
+ *accessGranted = nsICache::ACCESS_NONE;
+ return request->IsStreamBased() ? NS_ERROR_CACHE_DATA_IS_NOT_STREAM
+ : NS_ERROR_CACHE_DATA_IS_STREAM;
+ }
+
+ if (PR_CLIST_IS_EMPTY(&mDescriptorQ)) {
+ // 1st descriptor for existing bound entry
+ *accessGranted = request->AccessRequested();
+ if (*accessGranted & nsICache::ACCESS_WRITE) {
+ MarkInvalid();
+ } else {
+ MarkValid();
+ }
+ } else {
+ // nth request for existing, bound entry
+ *accessGranted = request->AccessRequested() & ~nsICache::ACCESS_WRITE;
+ if (!IsValid()) rv = NS_ERROR_CACHE_WAIT_FOR_VALIDATION;
+ }
+ PR_APPEND_LINK(request, &mRequestQ);
+
+ return rv;
+}
+
+nsresult nsCacheEntry::CreateDescriptor(nsCacheRequest* request,
+ nsCacheAccessMode accessGranted,
+ nsICacheEntryDescriptor** result) {
+ NS_ENSURE_ARG_POINTER(request && result);
+
+ nsCacheEntryDescriptor* descriptor =
+ new nsCacheEntryDescriptor(this, accessGranted);
+
+ // XXX check request is on q
+ PR_REMOVE_AND_INIT_LINK(request); // remove request regardless of success
+
+ if (descriptor == nullptr) return NS_ERROR_OUT_OF_MEMORY;
+
+ PR_APPEND_LINK(descriptor, &mDescriptorQ);
+
+ CACHE_LOG_DEBUG((" descriptor %p created for request %p on entry %p\n",
+ descriptor, request, this));
+
+ *result = do_AddRef(descriptor).take();
+ return NS_OK;
+}
+
+bool nsCacheEntry::RemoveRequest(nsCacheRequest* request) {
+ // XXX if debug: verify this request belongs to this entry
+ PR_REMOVE_AND_INIT_LINK(request);
+
+ // return true if this entry should stay active
+ return !((PR_CLIST_IS_EMPTY(&mRequestQ)) &&
+ (PR_CLIST_IS_EMPTY(&mDescriptorQ)));
+}
+
+bool nsCacheEntry::RemoveDescriptor(nsCacheEntryDescriptor* descriptor,
+ bool* doomEntry) {
+ NS_ASSERTION(descriptor->CacheEntry() == this, "### Wrong cache entry!!");
+
+ *doomEntry = descriptor->ClearCacheEntry();
+
+ PR_REMOVE_AND_INIT_LINK(descriptor);
+
+ if (!PR_CLIST_IS_EMPTY(&mDescriptorQ))
+ return true; // stay active if we still have open descriptors
+
+ if (PR_CLIST_IS_EMPTY(&mRequestQ))
+ return false; // no descriptors or requests, we can deactivate
+
+ return true; // find next best request to give a descriptor to
+}
+
+void nsCacheEntry::DetachDescriptors() {
+ nsCacheEntryDescriptor* descriptor =
+ (nsCacheEntryDescriptor*)PR_LIST_HEAD(&mDescriptorQ);
+
+ while (descriptor != &mDescriptorQ) {
+ nsCacheEntryDescriptor* nextDescriptor =
+ (nsCacheEntryDescriptor*)PR_NEXT_LINK(descriptor);
+
+ descriptor->ClearCacheEntry();
+ PR_REMOVE_AND_INIT_LINK(descriptor);
+ descriptor = nextDescriptor;
+ }
+}
+
+void nsCacheEntry::GetDescriptors(
+ nsTArray<RefPtr<nsCacheEntryDescriptor> >& outDescriptors) {
+ nsCacheEntryDescriptor* descriptor =
+ (nsCacheEntryDescriptor*)PR_LIST_HEAD(&mDescriptorQ);
+
+ while (descriptor != &mDescriptorQ) {
+ nsCacheEntryDescriptor* nextDescriptor =
+ (nsCacheEntryDescriptor*)PR_NEXT_LINK(descriptor);
+
+ outDescriptors.AppendElement(descriptor);
+ descriptor = nextDescriptor;
+ }
+}
+
+/******************************************************************************
+ * nsCacheEntryInfo - for implementing about:cache
+ *****************************************************************************/
+
+NS_IMPL_ISUPPORTS(nsCacheEntryInfo, nsICacheEntryInfo)
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetClientID(nsACString& aClientID) {
+ if (!mCacheEntry) {
+ aClientID.Truncate();
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return ClientIDFromCacheKey(*mCacheEntry->Key(), aClientID);
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetDeviceID(nsACString& aDeviceID) {
+ if (!mCacheEntry) {
+ aDeviceID.Truncate();
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ aDeviceID.Assign(mCacheEntry->GetDeviceID());
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetKey(nsACString& key) {
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return ClientKeyFromCacheKey(*mCacheEntry->Key(), key);
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetFetchCount(int32_t* fetchCount) {
+ NS_ENSURE_ARG_POINTER(fetchCount);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *fetchCount = mCacheEntry->FetchCount();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetLastFetched(uint32_t* lastFetched) {
+ NS_ENSURE_ARG_POINTER(lastFetched);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *lastFetched = mCacheEntry->LastFetched();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetLastModified(uint32_t* lastModified) {
+ NS_ENSURE_ARG_POINTER(lastModified);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *lastModified = mCacheEntry->LastModified();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetExpirationTime(uint32_t* expirationTime) {
+ NS_ENSURE_ARG_POINTER(expirationTime);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *expirationTime = mCacheEntry->ExpirationTime();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::GetDataSize(uint32_t* dataSize) {
+ NS_ENSURE_ARG_POINTER(dataSize);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *dataSize = mCacheEntry->DataSize();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryInfo::IsStreamBased(bool* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->IsStreamData();
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsCacheEntryHashTable
+ *****************************************************************************/
+
+const PLDHashTableOps nsCacheEntryHashTable::ops = {HashKey, MatchEntry,
+ MoveEntry, ClearEntry};
+
+nsCacheEntryHashTable::nsCacheEntryHashTable()
+ : table(&ops, sizeof(nsCacheEntryHashTableEntry), kInitialTableLength),
+ initialized(false) {
+ MOZ_COUNT_CTOR(nsCacheEntryHashTable);
+}
+
+nsCacheEntryHashTable::~nsCacheEntryHashTable() {
+ MOZ_COUNT_DTOR(nsCacheEntryHashTable);
+ if (initialized) Shutdown();
+}
+
+void nsCacheEntryHashTable::Init() {
+ table.ClearAndPrepareForLength(kInitialTableLength);
+ initialized = true;
+}
+
+void nsCacheEntryHashTable::Shutdown() {
+ if (initialized) {
+ table.ClearAndPrepareForLength(kInitialTableLength);
+ initialized = false;
+ }
+}
+
+nsCacheEntry* nsCacheEntryHashTable::GetEntry(const nsCString* key) const {
+ NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
+ if (!initialized) return nullptr;
+
+ PLDHashEntryHdr* hashEntry = table.Search(key);
+ return hashEntry ? ((nsCacheEntryHashTableEntry*)hashEntry)->cacheEntry
+ : nullptr;
+}
+
+nsresult nsCacheEntryHashTable::AddEntry(nsCacheEntry* cacheEntry) {
+ PLDHashEntryHdr* hashEntry;
+
+ NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
+ if (!initialized) return NS_ERROR_NOT_INITIALIZED;
+ if (!cacheEntry) return NS_ERROR_NULL_POINTER;
+
+ hashEntry = table.Add(&(cacheEntry->mKey), fallible);
+
+ if (!hashEntry) return NS_ERROR_FAILURE;
+ NS_ASSERTION(((nsCacheEntryHashTableEntry*)hashEntry)->cacheEntry == nullptr,
+ "### nsCacheEntryHashTable::AddEntry - entry already used");
+ ((nsCacheEntryHashTableEntry*)hashEntry)->cacheEntry = cacheEntry;
+
+ return NS_OK;
+}
+
+void nsCacheEntryHashTable::RemoveEntry(nsCacheEntry* cacheEntry) {
+ NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
+ NS_ASSERTION(cacheEntry, "### cacheEntry == nullptr");
+
+ if (!initialized) return; // NS_ERROR_NOT_INITIALIZED
+
+#if DEBUG
+ // XXX debug code to make sure we have the entry we're trying to remove
+ nsCacheEntry* check = GetEntry(&(cacheEntry->mKey));
+ NS_ASSERTION(check == cacheEntry,
+ "### Attempting to remove unknown cache entry!!!");
+#endif
+ table.Remove(&(cacheEntry->mKey));
+}
+
+PLDHashTable::Iterator nsCacheEntryHashTable::Iter() {
+ return PLDHashTable::Iterator(&table);
+}
+
+/**
+ * hash table operation callback functions
+ */
+
+PLDHashNumber nsCacheEntryHashTable::HashKey(const void* key) {
+ return HashString(*static_cast<const nsCString*>(key));
+}
+
+bool nsCacheEntryHashTable::MatchEntry(const PLDHashEntryHdr* hashEntry,
+ const void* key) {
+ NS_ASSERTION(key != nullptr,
+ "### nsCacheEntryHashTable::MatchEntry : null key");
+ nsCacheEntry* cacheEntry =
+ ((nsCacheEntryHashTableEntry*)hashEntry)->cacheEntry;
+
+ return cacheEntry->mKey.Equals(*(nsCString*)key);
+}
+
+void nsCacheEntryHashTable::MoveEntry(PLDHashTable* /* table */,
+ const PLDHashEntryHdr* from,
+ PLDHashEntryHdr* to) {
+ new (KnownNotNull, to) nsCacheEntryHashTableEntry(
+ std::move(*((nsCacheEntryHashTableEntry*)from)));
+ // No need to destroy `from`.
+}
+
+void nsCacheEntryHashTable::ClearEntry(PLDHashTable* /* table */,
+ PLDHashEntryHdr* hashEntry) {
+ ((nsCacheEntryHashTableEntry*)hashEntry)->cacheEntry = nullptr;
+}
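
nsCacheEntry::RequestAccess() above gives the first descriptor on an existing, bound entry everything it asked for (and marks the entry invalid if that includes write access), while every later request has ACCESS_WRITE masked off and may be answered with NS_ERROR_CACHE_WAIT_FOR_VALIDATION until the writer validates the entry. The masking itself condenses to the sketch below (an illustration using the nsICache constants, not an excerpt from the patch):

// Condensed illustration of the grant masking in nsCacheEntry::RequestAccess().
#include "nsICache.h"

static nsCacheAccessMode GrantForBoundEntry(bool aFirstDescriptor,
                                            nsCacheAccessMode aRequested) {
  if (aFirstDescriptor) {
    // First descriptor: grant exactly what was requested.
    return aRequested;
  }
  // Later descriptors: strip write access; readers may still proceed.
  return aRequested & ~nsICache::ACCESS_WRITE;
}

// GrantForBoundEntry(true,  nsICache::ACCESS_READ_WRITE) == ACCESS_READ_WRITE
// GrantForBoundEntry(false, nsICache::ACCESS_READ_WRITE) == ACCESS_READ
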
diff --git a/netwerk/cache/nsCacheEntry.h b/netwerk/cache/nsCacheEntry.h
new file mode 100644
index 0000000000..ee2a2af35f
--- /dev/null
+++ b/netwerk/cache/nsCacheEntry.h
@@ -0,0 +1,285 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheEntry_h_
+#define _nsCacheEntry_h_
+
+#include "nsICache.h"
+#include "nsICacheEntryDescriptor.h"
+#include "nsCacheMetaData.h"
+
+#include "nspr.h"
+#include "PLDHashTable.h"
+#include "nscore.h"
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include "nsAString.h"
+
+class nsCacheDevice;
+class nsCacheMetaData;
+class nsCacheRequest;
+class nsCacheEntryDescriptor;
+class nsIEventTarget;
+
+/******************************************************************************
+ * nsCacheEntry
+ *******************************************************************************/
+class nsCacheEntry : public PRCList {
+ public:
+ nsCacheEntry(const nsACString& key, bool streamBased,
+ nsCacheStoragePolicy storagePolicy);
+ ~nsCacheEntry();
+
+ static nsresult Create(const char* key, bool streamBased,
+ nsCacheStoragePolicy storagePolicy,
+ nsCacheDevice* device, nsCacheEntry** result);
+
+ nsCString* Key() { return &mKey; }
+
+ int32_t FetchCount() { return mFetchCount; }
+ void SetFetchCount(int32_t count) { mFetchCount = count; }
+ void Fetched();
+
+ uint32_t LastFetched() { return mLastFetched; }
+ void SetLastFetched(uint32_t lastFetched) { mLastFetched = lastFetched; }
+
+ uint32_t LastModified() { return mLastModified; }
+ void SetLastModified(uint32_t lastModified) { mLastModified = lastModified; }
+
+ uint32_t ExpirationTime() { return mExpirationTime; }
+ void SetExpirationTime(uint32_t expires) { mExpirationTime = expires; }
+
+ uint32_t Size() { return mDataSize + mMetaData.Size() + mKey.Length(); }
+
+ nsCacheDevice* CacheDevice() { return mCacheDevice; }
+ void SetCacheDevice(nsCacheDevice* device) { mCacheDevice = device; }
+ void SetCustomCacheDevice(nsCacheDevice* device) { mCustomDevice = device; }
+ nsCacheDevice* CustomCacheDevice() { return mCustomDevice; }
+ const char* GetDeviceID();
+
+ /**
+ * Data accessors
+ */
+ nsISupports* Data() { return mData; }
+ void SetData(nsISupports* data);
+
+ int64_t PredictedDataSize() { return mPredictedDataSize; }
+ void SetPredictedDataSize(int64_t size) { mPredictedDataSize = size; }
+
+ uint32_t DataSize() { return mDataSize; }
+ void SetDataSize(uint32_t size) { mDataSize = size; }
+
+ void TouchData();
+
+ /**
+ * Meta data accessors
+ */
+ const char* GetMetaDataElement(const char* key) {
+ return mMetaData.GetElement(key);
+ }
+ nsresult SetMetaDataElement(const char* key, const char* value) {
+ return mMetaData.SetElement(key, value);
+ }
+ nsresult VisitMetaDataElements(nsICacheMetaDataVisitor* visitor) {
+ return mMetaData.VisitElements(visitor);
+ }
+ nsresult FlattenMetaData(char* buffer, uint32_t bufSize) {
+ return mMetaData.FlattenMetaData(buffer, bufSize);
+ }
+ nsresult UnflattenMetaData(const char* buffer, uint32_t bufSize) {
+ return mMetaData.UnflattenMetaData(buffer, bufSize);
+ }
+ uint32_t MetaDataSize() { return mMetaData.Size(); }
+
+ void TouchMetaData();
+
+ /**
+ * Security Info accessors
+ */
+ nsISupports* SecurityInfo() { return mSecurityInfo; }
+ void SetSecurityInfo(nsISupports* info) { mSecurityInfo = info; }
+
+ // XXX enumerate MetaData method
+
+ enum CacheEntryFlags {
+ eStoragePolicyMask = 0x000000FF,
+ eDoomedMask = 0x00000100,
+ eEntryDirtyMask = 0x00000200,
+ eDataDirtyMask = 0x00000400,
+ eMetaDataDirtyMask = 0x00000800,
+ eStreamDataMask = 0x00001000,
+ eActiveMask = 0x00002000,
+ eInitializedMask = 0x00004000,
+ eValidMask = 0x00008000,
+ eBindingMask = 0x00010000,
+ ePrivateMask = 0x00020000
+ };
+
+ void MarkBinding() { mFlags |= eBindingMask; }
+ void ClearBinding() { mFlags &= ~eBindingMask; }
+ bool IsBinding() { return (mFlags & eBindingMask) != 0; }
+
+ void MarkEntryDirty() { mFlags |= eEntryDirtyMask; }
+ void MarkEntryClean() { mFlags &= ~eEntryDirtyMask; }
+ void MarkDataDirty() { mFlags |= eDataDirtyMask; }
+ void MarkDataClean() { mFlags &= ~eDataDirtyMask; }
+ void MarkMetaDataDirty() { mFlags |= eMetaDataDirtyMask; }
+ void MarkMetaDataClean() { mFlags &= ~eMetaDataDirtyMask; }
+ void MarkStreamData() { mFlags |= eStreamDataMask; }
+ void MarkValid() { mFlags |= eValidMask; }
+ void MarkInvalid() { mFlags &= ~eValidMask; }
+ void MarkPrivate() { mFlags |= ePrivateMask; }
+ void MarkPublic() { mFlags &= ~ePrivateMask; }
+ // void MarkAllowedInMemory() { mFlags |= eAllowedInMemoryMask; }
+ // void MarkAllowedOnDisk() { mFlags |= eAllowedOnDiskMask; }
+
+ bool IsDoomed() { return (mFlags & eDoomedMask) != 0; }
+ bool IsEntryDirty() { return (mFlags & eEntryDirtyMask) != 0; }
+ bool IsDataDirty() { return (mFlags & eDataDirtyMask) != 0; }
+ bool IsMetaDataDirty() { return (mFlags & eMetaDataDirtyMask) != 0; }
+ bool IsStreamData() { return (mFlags & eStreamDataMask) != 0; }
+ bool IsActive() { return (mFlags & eActiveMask) != 0; }
+ bool IsInitialized() { return (mFlags & eInitializedMask) != 0; }
+ bool IsValid() { return (mFlags & eValidMask) != 0; }
+ bool IsInvalid() { return (mFlags & eValidMask) == 0; }
+ bool IsInUse() {
+ return IsBinding() ||
+ !(PR_CLIST_IS_EMPTY(&mRequestQ) && PR_CLIST_IS_EMPTY(&mDescriptorQ));
+ }
+ bool IsNotInUse() { return !IsInUse(); }
+ bool IsPrivate() { return (mFlags & ePrivateMask) != 0; }
+
+ bool IsAllowedInMemory() {
+ return (StoragePolicy() == nsICache::STORE_ANYWHERE) ||
+ (StoragePolicy() == nsICache::STORE_IN_MEMORY);
+ }
+
+ bool IsAllowedOnDisk() {
+ return !IsPrivate() && ((StoragePolicy() == nsICache::STORE_ANYWHERE) ||
+ (StoragePolicy() == nsICache::STORE_ON_DISK));
+ }
+
+ bool IsAllowedOffline() {
+ return (StoragePolicy() == nsICache::STORE_OFFLINE);
+ }
+
+ nsCacheStoragePolicy StoragePolicy() {
+ return (nsCacheStoragePolicy)(mFlags & eStoragePolicyMask);
+ }
+
+ void SetStoragePolicy(nsCacheStoragePolicy policy) {
+ NS_ASSERTION(policy <= 0xFF, "too many bits in nsCacheStoragePolicy");
+ mFlags &= ~eStoragePolicyMask; // clear storage policy bits
+ mFlags |= policy;
+ }
+
+ // methods for nsCacheService
+ nsresult RequestAccess(nsCacheRequest* request,
+ nsCacheAccessMode* accessGranted);
+ nsresult CreateDescriptor(nsCacheRequest* request,
+ nsCacheAccessMode accessGranted,
+ nsICacheEntryDescriptor** result);
+
+ bool RemoveRequest(nsCacheRequest* request);
+ bool RemoveDescriptor(nsCacheEntryDescriptor* descriptor, bool* doomEntry);
+
+ void GetDescriptors(
+ nsTArray<RefPtr<nsCacheEntryDescriptor> >& outDescriptors);
+
+ private:
+ friend class nsCacheEntryHashTable;
+ friend class nsCacheService;
+
+ void DetachDescriptors();
+
+ // internal methods
+ void MarkDoomed() { mFlags |= eDoomedMask; }
+ void MarkStreamBased() { mFlags |= eStreamDataMask; }
+ void MarkInitialized() { mFlags |= eInitializedMask; }
+ void MarkActive() { mFlags |= eActiveMask; }
+ void MarkInactive() { mFlags &= ~eActiveMask; }
+
+ nsCString mKey;
+ uint32_t mFetchCount; // 4
+ uint32_t mLastFetched; // 4
+ uint32_t mLastModified; // 4
+ uint32_t mLastValidated; // 4
+ uint32_t mExpirationTime; // 4
+ uint32_t mFlags; // 4
+ int64_t mPredictedDataSize; // Size given by ContentLength.
+ uint32_t mDataSize; // 4
+ nsCacheDevice* mCacheDevice; // 4
+ nsCacheDevice* mCustomDevice; // 4
+ nsCOMPtr<nsISupports> mSecurityInfo; //
+ nsISupports* mData; // strong ref
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+ nsCacheMetaData mMetaData; // 4
+ PRCList mRequestQ; // 8
+ PRCList mDescriptorQ; // 8
+};
+
+/******************************************************************************
+ * nsCacheEntryInfo
+ *******************************************************************************/
+class nsCacheEntryInfo : public nsICacheEntryInfo {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSICACHEENTRYINFO
+
+ explicit nsCacheEntryInfo(nsCacheEntry* entry) : mCacheEntry(entry) {}
+
+ void DetachEntry() { mCacheEntry = nullptr; }
+
+ private:
+ nsCacheEntry* mCacheEntry;
+
+ virtual ~nsCacheEntryInfo() = default;
+};
+
+/******************************************************************************
+ * nsCacheEntryHashTable
+ *******************************************************************************/
+
+struct nsCacheEntryHashTableEntry : public PLDHashEntryHdr {
+ nsCacheEntry* cacheEntry;
+};
+
+class nsCacheEntryHashTable {
+ public:
+ nsCacheEntryHashTable();
+ ~nsCacheEntryHashTable();
+
+ void Init();
+ void Shutdown();
+
+ nsCacheEntry* GetEntry(const nsCString* key) const;
+ nsresult AddEntry(nsCacheEntry* entry);
+ void RemoveEntry(nsCacheEntry* entry);
+
+ PLDHashTable::Iterator Iter();
+
+ private:
+ // PLDHashTable operation callbacks
+ static PLDHashNumber HashKey(const void* key);
+
+ static bool MatchEntry(const PLDHashEntryHdr* entry, const void* key);
+
+ static void MoveEntry(PLDHashTable* table, const PLDHashEntryHdr* from,
+ PLDHashEntryHdr* to);
+
+ static void ClearEntry(PLDHashTable* table, PLDHashEntryHdr* entry);
+
+ static void Finalize(PLDHashTable* table);
+
+ // member variables
+ static const PLDHashTableOps ops;
+ PLDHashTable table;
+ bool initialized;
+
+ static const uint32_t kInitialTableLength = 256;
+};
+
+#endif // _nsCacheEntry_h_
diff --git a/netwerk/cache/nsCacheEntryDescriptor.cpp b/netwerk/cache/nsCacheEntryDescriptor.cpp
new file mode 100644
index 0000000000..c879e8b31b
--- /dev/null
+++ b/netwerk/cache/nsCacheEntryDescriptor.cpp
@@ -0,0 +1,1307 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsICache.h"
+#include "nsCache.h"
+#include "nsCacheService.h"
+#include "nsCacheEntryDescriptor.h"
+#include "nsCacheEntry.h"
+#include "nsReadableUtils.h"
+#include "nsIOutputStream.h"
+#include "nsCRT.h"
+#include "nsThreadUtils.h"
+#include <algorithm>
+#include "mozilla/IntegerPrintfMacros.h"
+
+#define kMinDecompressReadBufLen 1024
+#define kMinCompressWriteBufLen 1024
+
+/******************************************************************************
+ * nsAsyncDoomEvent
+ *****************************************************************************/
+
+class nsAsyncDoomEvent : public mozilla::Runnable {
+ public:
+ nsAsyncDoomEvent(nsCacheEntryDescriptor* descriptor,
+ nsICacheListener* listener)
+ : mozilla::Runnable("nsAsyncDoomEvent") {
+ mDescriptor = descriptor;
+ mListener = listener;
+ mEventTarget = GetCurrentEventTarget();
+ // We addref the listener here and release it in nsNotifyDoomListener
+ // on the callers thread. If posting of nsNotifyDoomListener event fails
+ // we leak the listener which is better than releasing it on a wrong
+ // thread.
+ NS_IF_ADDREF(mListener);
+ }
+
+ NS_IMETHOD Run() override {
+ nsresult status = NS_OK;
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSASYNCDOOMEVENT_RUN));
+
+ if (mDescriptor->mCacheEntry) {
+ status = nsCacheService::gService->DoomEntry_Internal(
+ mDescriptor->mCacheEntry, true);
+ } else if (!mDescriptor->mDoomedOnClose) {
+ status = NS_ERROR_NOT_AVAILABLE;
+ }
+ }
+
+ if (mListener) {
+ mEventTarget->Dispatch(new nsNotifyDoomListener(mListener, status),
+ NS_DISPATCH_NORMAL);
+ // posted event will release the reference on the correct thread
+ mListener = nullptr;
+ }
+
+ return NS_OK;
+ }
+
+ private:
+ RefPtr<nsCacheEntryDescriptor> mDescriptor;
+ nsICacheListener* mListener;
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+};
+
+NS_IMPL_ISUPPORTS(nsCacheEntryDescriptor, nsICacheEntryDescriptor,
+ nsICacheEntryInfo)
+
+nsCacheEntryDescriptor::nsCacheEntryDescriptor(nsCacheEntry* entry,
+ nsCacheAccessMode accessGranted)
+ : mCacheEntry(entry),
+ mAccessGranted(accessGranted),
+ mOutputWrapper(nullptr),
+ mLock("nsCacheEntryDescriptor.mLock"),
+ mAsyncDoomPending(false),
+ mDoomedOnClose(false),
+ mClosingDescriptor(false) {
+ PR_INIT_CLIST(this);
+ // We need to make sure the cache service lives for the entire lifetime
+ // of the descriptor
+ mCacheService = nsCacheService::GlobalInstance();
+}
+
+nsCacheEntryDescriptor::~nsCacheEntryDescriptor() {
+ // No need to close if the cache entry has already been severed. This
+ // helps avoid a shutdown assertion (bug 285519) that is caused when
+ // consumers end up holding onto these objects past xpcom-shutdown. It's
+ // okay for them to do that because the cache service calls our Close
+ // method during xpcom-shutdown, so we don't need to complain about it.
+ if (mCacheEntry) Close();
+
+ NS_ASSERTION(mInputWrappers.IsEmpty(), "We have still some input wrapper!");
+ NS_ASSERTION(!mOutputWrapper, "We have still an output wrapper!");
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetClientID(nsACString& aClientID) {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETCLIENTID));
+ if (!mCacheEntry) {
+ aClientID.Truncate();
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return ClientIDFromCacheKey(*(mCacheEntry->Key()), aClientID);
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetDeviceID(nsACString& aDeviceID) {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETDEVICEID));
+ if (!mCacheEntry) {
+ aDeviceID.Truncate();
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ aDeviceID.Assign(mCacheEntry->GetDeviceID());
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetKey(nsACString& result) {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETKEY));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return ClientKeyFromCacheKey(*(mCacheEntry->Key()), result);
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetFetchCount(int32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETFETCHCOUNT));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->FetchCount();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetLastFetched(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETLASTFETCHED));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->LastFetched();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetLastModified(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETLASTMODIFIED));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->LastModified();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetExpirationTime(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETEXPIRATIONTIME));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->ExpirationTime();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetExpirationTime(uint32_t expirationTime) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETEXPIRATIONTIME));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ mCacheEntry->SetExpirationTime(expirationTime);
+ mCacheEntry->MarkEntryDirty();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::IsStreamBased(bool* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_ISSTREAMBASED));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->IsStreamData();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::GetPredictedDataSize(int64_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETPREDICTEDDATASIZE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->PredictedDataSize();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::SetPredictedDataSize(
+ int64_t predictedSize) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETPREDICTEDDATASIZE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ mCacheEntry->SetPredictedDataSize(predictedSize);
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::GetDataSize(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETDATASIZE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ const char* val = mCacheEntry->GetMetaDataElement("uncompressed-len");
+ if (!val) {
+ *result = mCacheEntry->DataSize();
+ } else {
+ *result = atol(val);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::GetStorageDataSize(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETSTORAGEDATASIZE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->DataSize();
+
+ return NS_OK;
+}
+
+nsresult nsCacheEntryDescriptor::RequestDataSizeChange(int32_t deltaSize) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_REQUESTDATASIZECHANGE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ nsresult rv;
+ rv = nsCacheService::OnDataSizeChange(mCacheEntry, deltaSize);
+ if (NS_SUCCEEDED(rv)) {
+ // XXX review for signed/unsigned math errors
+ uint32_t newDataSize = mCacheEntry->DataSize() + deltaSize;
+ mCacheEntry->SetDataSize(newDataSize);
+ mCacheEntry->TouchData();
+ }
+ return rv;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetDataSize(uint32_t dataSize) {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETDATASIZE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ // XXX review for signed/unsigned math errors
+ int32_t deltaSize = dataSize - mCacheEntry->DataSize();
+
+ nsresult rv;
+ rv = nsCacheService::OnDataSizeChange(mCacheEntry, deltaSize);
+ // this had better be NS_OK, this call instance is advisory for memory cache
+ // objects
+ if (NS_SUCCEEDED(rv)) {
+ // XXX review for signed/unsigned math errors
+ uint32_t newDataSize = mCacheEntry->DataSize() + deltaSize;
+ mCacheEntry->SetDataSize(newDataSize);
+ mCacheEntry->TouchData();
+ } else {
+ NS_WARNING("failed SetDataSize() on memory cache object!");
+ }
+
+ return rv;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::OpenInputStream(uint32_t offset,
+ nsIInputStream** result) {
+ NS_ENSURE_ARG_POINTER(result);
+
+ RefPtr<nsInputStreamWrapper> cacheInput;
+ {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_OPENINPUTSTREAM));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+ if (!mCacheEntry->IsStreamData()) return NS_ERROR_CACHE_DATA_IS_NOT_STREAM;
+
+ // Don't open any new stream when closing descriptor or clearing entries
+ if (mClosingDescriptor || nsCacheService::GetClearingEntries())
+ return NS_ERROR_NOT_AVAILABLE;
+
+ // ensure valid permissions
+ if (!(mAccessGranted & nsICache::ACCESS_READ))
+ return NS_ERROR_CACHE_READ_ACCESS_DENIED;
+
+ const char* val;
+ val = mCacheEntry->GetMetaDataElement("uncompressed-len");
+ if (val) {
+ cacheInput = new nsDecompressInputStreamWrapper(this, offset);
+ } else {
+ cacheInput = new nsInputStreamWrapper(this, offset);
+ }
+ if (!cacheInput) return NS_ERROR_OUT_OF_MEMORY;
+
+ mInputWrappers.AppendElement(cacheInput);
+ }
+
+ cacheInput.forget(result);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::OpenOutputStream(uint32_t offset,
+ nsIOutputStream** result) {
+ NS_ENSURE_ARG_POINTER(result);
+
+ RefPtr<nsOutputStreamWrapper> cacheOutput;
+ {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_OPENOUTPUTSTREAM));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+ if (!mCacheEntry->IsStreamData()) return NS_ERROR_CACHE_DATA_IS_NOT_STREAM;
+
+ // Don't open any new stream when closing descriptor or clearing entries
+ if (mClosingDescriptor || nsCacheService::GetClearingEntries())
+ return NS_ERROR_NOT_AVAILABLE;
+
+ // ensure valid permissions
+ if (!(mAccessGranted & nsICache::ACCESS_WRITE))
+ return NS_ERROR_CACHE_WRITE_ACCESS_DENIED;
+
+ int32_t compressionLevel = nsCacheService::CacheCompressionLevel();
+ const char* val;
+ val = mCacheEntry->GetMetaDataElement("uncompressed-len");
+ if ((compressionLevel > 0) && val) {
+ cacheOutput = new nsCompressOutputStreamWrapper(this, offset);
+ } else {
+ // clear compression flag when compression disabled - see bug 715198
+ if (val) {
+ mCacheEntry->SetMetaDataElement("uncompressed-len", nullptr);
+ }
+ cacheOutput = new nsOutputStreamWrapper(this, offset);
+ }
+ if (!cacheOutput) return NS_ERROR_OUT_OF_MEMORY;
+
+ mOutputWrapper = cacheOutput;
+ }
+
+ cacheOutput.forget(result);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetCacheElement(nsISupports** result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETCACHEELEMENT));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+ if (mCacheEntry->IsStreamData()) return NS_ERROR_CACHE_DATA_IS_STREAM;
+
+ NS_IF_ADDREF(*result = mCacheEntry->Data());
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetCacheElement(nsISupports* cacheElement) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETCACHEELEMENT));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+ if (mCacheEntry->IsStreamData()) return NS_ERROR_CACHE_DATA_IS_STREAM;
+
+ return nsCacheService::SetCacheElement(mCacheEntry, cacheElement);
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetAccessGranted(nsCacheAccessMode* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = mAccessGranted;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetStoragePolicy(nsCacheStoragePolicy* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETSTORAGEPOLICY));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->StoragePolicy();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetStoragePolicy(nsCacheStoragePolicy policy) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETSTORAGEPOLICY));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+ // XXX validate policy against session?
+
+ bool storageEnabled = false;
+ storageEnabled = nsCacheService::IsStorageEnabledForPolicy_Locked(policy);
+ if (!storageEnabled) return NS_ERROR_FAILURE;
+
+ // Don't change the storage policy of entries we can't write
+ if (!(mAccessGranted & nsICache::ACCESS_WRITE)) return NS_ERROR_NOT_AVAILABLE;
+
+ // Don't allow a cache entry to move from memory-only to anything else
+ if (mCacheEntry->StoragePolicy() == nsICache::STORE_IN_MEMORY &&
+ policy != nsICache::STORE_IN_MEMORY)
+ return NS_ERROR_NOT_AVAILABLE;
+
+ mCacheEntry->SetStoragePolicy(policy);
+ mCacheEntry->MarkEntryDirty();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetFile(nsIFile** result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETFILE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return nsCacheService::GetFileForEntry(mCacheEntry, result);
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetSecurityInfo(nsISupports** result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETSECURITYINFO));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = mCacheEntry->SecurityInfo();
+ NS_IF_ADDREF(*result);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetSecurityInfo(nsISupports* securityInfo) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETSECURITYINFO));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ mCacheEntry->SetSecurityInfo(securityInfo);
+ mCacheEntry->MarkEntryDirty();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::Doom() {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_DOOM));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return nsCacheService::DoomEntry(mCacheEntry);
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::DoomAndFailPendingRequests(nsresult status) {
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_DOOMANDFAILPENDINGREQUESTS));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::AsyncDoom(nsICacheListener* listener) {
+ bool asyncDoomPending;
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ asyncDoomPending = mAsyncDoomPending;
+ mAsyncDoomPending = true;
+ }
+
+ if (asyncDoomPending) {
+ // AsyncDoom was already called. Notify listener if it is non-null,
+ // otherwise just return success.
+ if (listener) {
+ nsresult rv = NS_DispatchToCurrentThread(
+ new nsNotifyDoomListener(listener, NS_ERROR_NOT_AVAILABLE));
+ if (NS_SUCCEEDED(rv)) NS_IF_ADDREF(listener);
+ return rv;
+ }
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIRunnable> event = new nsAsyncDoomEvent(this, listener);
+ return nsCacheService::DispatchToCacheIOThread(event);
+}
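+// Note on the logic above: only the first AsyncDoom() call dispatches an
+// nsAsyncDoomEvent to the cache I/O thread; later calls observe
+// mAsyncDoomPending already set and merely notify their listener (if any)
+// with NS_ERROR_NOT_AVAILABLE on the current thread.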
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::MarkValid() {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_MARKVALID));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ nsresult rv = nsCacheService::ValidateEntry(mCacheEntry);
+ return rv;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::Close() {
+ RefPtr<nsOutputStreamWrapper> outputWrapper;
+ nsTArray<RefPtr<nsInputStreamWrapper> > inputWrappers;
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_CLOSE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ // Make sure no other stream can be opened
+ mClosingDescriptor = true;
+ outputWrapper = mOutputWrapper;
+ for (size_t i = 0; i < mInputWrappers.Length(); i++)
+ inputWrappers.AppendElement(mInputWrappers[i]);
+ }
+
+ // Call Close() on the streams outside the lock since it might need to call
+ // methods that grab the cache service lock, e.g. compressed output stream
+ // when it finalizes the entry
+ if (outputWrapper) {
+ if (NS_FAILED(outputWrapper->Close())) {
+ NS_WARNING("Dooming entry because Close() failed!!!");
+ Doom();
+ }
+ outputWrapper = nullptr;
+ }
+
+ for (uint32_t i = 0; i < inputWrappers.Length(); i++)
+ inputWrappers[i]->Close();
+
+ inputWrappers.Clear();
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_CLOSE));
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ // XXX perhaps closing descriptors should clear/sever transports
+
+ // tell nsCacheService we're going away
+ nsCacheService::CloseDescriptor(this);
+ NS_ASSERTION(mCacheEntry == nullptr, "mCacheEntry not null");
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::GetMetaDataElement(const char* key, char** result) {
+ NS_ENSURE_ARG_POINTER(key);
+ *result = nullptr;
+
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_GETMETADATAELEMENT));
+ NS_ENSURE_TRUE(mCacheEntry, NS_ERROR_NOT_AVAILABLE);
+
+  const char* value = mCacheEntry->GetMetaDataElement(key);
+ if (!value) return NS_ERROR_NOT_AVAILABLE;
+
+ *result = NS_xstrdup(value);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::SetMetaDataElement(const char* key, const char* value) {
+ NS_ENSURE_ARG_POINTER(key);
+
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_SETMETADATAELEMENT));
+ NS_ENSURE_TRUE(mCacheEntry, NS_ERROR_NOT_AVAILABLE);
+
+ // XXX allow null value, for clearing key?
+
+ nsresult rv = mCacheEntry->SetMetaDataElement(key, value);
+ if (NS_SUCCEEDED(rv)) mCacheEntry->TouchMetaData();
+ return rv;
+}
+
+NS_IMETHODIMP
+nsCacheEntryDescriptor::VisitMetaData(nsICacheMetaDataVisitor* visitor) {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHEENTRYDESCRIPTOR_VISITMETADATA));
+ // XXX check callers, we're calling out of module
+ NS_ENSURE_ARG_POINTER(visitor);
+ if (!mCacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ return mCacheEntry->VisitMetaDataElements(visitor);
+}
+
+/******************************************************************************
+ * nsInputStreamWrapper - a wrapper for nsIInputStream that keeps the cache
+ *                        entry open while referenced.
+ ******************************************************************************/
+
+NS_IMPL_ADDREF(nsCacheEntryDescriptor::nsInputStreamWrapper)
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsCacheEntryDescriptor::nsInputStreamWrapper::Release() {
+  // Holding a reference to the descriptor ensures that the cache service
+  // won't go away. Do not grab the cache service lock if there is no
+  // descriptor.
+ RefPtr<nsCacheEntryDescriptor> desc;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ desc = mDescriptor;
+ }
+
+ if (desc) nsCacheService::Lock(LOCK_TELEM(NSINPUTSTREAMWRAPPER_RELEASE));
+
+ nsrefcnt count;
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "nsCacheEntryDescriptor::nsInputStreamWrapper");
+
+ if (0 == count) {
+ // don't use desc here since mDescriptor might be already nulled out
+ if (mDescriptor) {
+ NS_ASSERTION(mDescriptor->mInputWrappers.Contains(this),
+ "Wrapper not found in array!");
+ mDescriptor->mInputWrappers.RemoveElement(this);
+ }
+
+ if (desc) nsCacheService::Unlock();
+
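+    // Stabilize the refcount at 1 before deleting so that any AddRef/Release
+    // pair triggered from the destructor cannot re-enter this deletion path
+    // (the same trick NS_IMPL_RELEASE uses).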
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ if (desc) nsCacheService::Unlock();
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(nsCacheEntryDescriptor::nsInputStreamWrapper)
+ NS_INTERFACE_MAP_ENTRY(nsIInputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::LazyInit() {
+  // Check if we still have the descriptor. If not, we can't even grab the
+  // cache service lock, because the cache service may no longer exist.
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSINPUTSTREAMWRAPPER_LAZYINIT));
+
+ nsCacheAccessMode mode;
+ nsresult rv = mDescriptor->GetAccessGranted(&mode);
+ if (NS_FAILED(rv)) return rv;
+
+ NS_ENSURE_TRUE(mode & nsICache::ACCESS_READ, NS_ERROR_UNEXPECTED);
+
+ nsCacheEntry* cacheEntry = mDescriptor->CacheEntry();
+ if (!cacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ rv = nsCacheService::OpenInputStreamForEntry(cacheEntry, mode, mStartOffset,
+ getter_AddRefs(mInput));
+
+ CACHE_LOG_DEBUG(
+ ("nsInputStreamWrapper::LazyInit "
+ "[entry=%p, wrapper=%p, mInput=%p, rv=%d]",
+ mDescriptor, this, mInput.get(), int(rv)));
+
+ if (NS_FAILED(rv)) return rv;
+
+ mInitialized = true;
+ return NS_OK;
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::EnsureInit() {
+ if (mInitialized) {
+ NS_ASSERTION(mDescriptor, "Bad state");
+ return NS_OK;
+ }
+
+ return LazyInit();
+}
+
+void nsCacheEntryDescriptor::nsInputStreamWrapper::CloseInternal() {
+ mLock.AssertCurrentThreadOwns();
+ if (!mDescriptor) {
+ NS_ASSERTION(!mInitialized, "Bad state");
+ NS_ASSERTION(!mInput, "Bad state");
+ return;
+ }
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSINPUTSTREAMWRAPPER_CLOSEINTERNAL));
+
+ if (mDescriptor) {
+ mDescriptor->mInputWrappers.RemoveElement(this);
+ nsCacheService::ReleaseObject_Locked(mDescriptor);
+ mDescriptor = nullptr;
+ }
+ mInitialized = false;
+ mInput = nullptr;
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::Close() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ return Close_Locked();
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::Close_Locked() {
+ nsresult rv = EnsureInit();
+ if (NS_SUCCEEDED(rv)) {
+ rv = mInput->Close();
+ } else {
+ NS_ASSERTION(!mInput, "Shouldn't have mInput when EnsureInit() failed");
+ }
+
+ // Call CloseInternal() even when EnsureInit() failed, e.g. in case we are
+ // closing streams with nsCacheService::CloseAllStream()
+ CloseInternal();
+ return rv;
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::Available(
+ uint64_t* avail) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ nsresult rv = EnsureInit();
+ if (NS_FAILED(rv)) return rv;
+
+ return mInput->Available(avail);
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::Read(
+ char* buf, uint32_t count, uint32_t* countRead) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ return Read_Locked(buf, count, countRead);
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::Read_Locked(
+ char* buf, uint32_t count, uint32_t* countRead) {
+ nsresult rv = EnsureInit();
+ if (NS_SUCCEEDED(rv)) rv = mInput->Read(buf, count, countRead);
+
+ CACHE_LOG_DEBUG(
+ ("nsInputStreamWrapper::Read "
+ "[entry=%p, wrapper=%p, mInput=%p, rv=%" PRId32 "]",
+       mDescriptor, this, mInput.get(), static_cast<int32_t>(rv)));
+
+ return rv;
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::ReadSegments(
+ nsWriteSegmentFun writer, void* closure, uint32_t count,
+ uint32_t* countRead) {
+ // cache stream not buffered
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsCacheEntryDescriptor::nsInputStreamWrapper::IsNonBlocking(
+ bool* result) {
+ // cache streams will never return NS_BASE_STREAM_WOULD_BLOCK
+ *result = false;
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsDecompressInputStreamWrapper - an input stream wrapper that decompresses
+ *                                  data read from the cache entry
+ ******************************************************************************/
+
+NS_IMPL_ADDREF(nsCacheEntryDescriptor::nsDecompressInputStreamWrapper)
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsCacheEntryDescriptor::nsDecompressInputStreamWrapper::Release() {
+  // Holding a reference to the descriptor ensures that the cache service
+  // won't go away. Do not grab the cache service lock if there is no
+  // descriptor.
+ RefPtr<nsCacheEntryDescriptor> desc;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ desc = mDescriptor;
+ }
+
+ if (desc)
+ nsCacheService::Lock(LOCK_TELEM(NSDECOMPRESSINPUTSTREAMWRAPPER_RELEASE));
+
+ nsrefcnt count;
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count,
+ "nsCacheEntryDescriptor::nsDecompressInputStreamWrapper");
+
+ if (0 == count) {
+ // don't use desc here since mDescriptor might be already nulled out
+ if (mDescriptor) {
+ NS_ASSERTION(mDescriptor->mInputWrappers.Contains(this),
+ "Wrapper not found in array!");
+ mDescriptor->mInputWrappers.RemoveElement(this);
+ }
+
+ if (desc) nsCacheService::Unlock();
+
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ if (desc) nsCacheService::Unlock();
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(nsCacheEntryDescriptor::nsDecompressInputStreamWrapper)
+ NS_INTERFACE_MAP_ENTRY(nsIInputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsDecompressInputStreamWrapper::Read(
+ char* buf, uint32_t count, uint32_t* countRead) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ int zerr = Z_OK;
+ nsresult rv = NS_OK;
+
+ if (!mStreamInitialized) {
+ rv = InitZstream();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ mZstream.next_out = (Bytef*)buf;
+ mZstream.avail_out = count;
+
+ if (mReadBufferLen < count) {
+ // Allocate a buffer for reading from the input stream. This will
+ // determine the max number of compressed bytes read from the
+ // input stream at one time. Making the buffer size proportional
+ // to the request size is not necessary, but helps minimize the
+ // number of read requests to the input stream.
+ uint32_t newBufLen = std::max(count, (uint32_t)kMinDecompressReadBufLen);
+    // moz_xrealloc is infallible (it aborts on OOM rather than returning
+    // null), so no failure check is needed here.
+    mReadBuffer = (unsigned char*)moz_xrealloc(mReadBuffer, newBufLen);
+    mReadBufferLen = newBufLen;
+ }
+
+ // read and inflate data until the output buffer is full, or
+ // there is no more data to read
+ while (NS_SUCCEEDED(rv) && zerr == Z_OK && mZstream.avail_out > 0 &&
+ count > 0) {
+ if (mZstream.avail_in == 0) {
+ rv = nsInputStreamWrapper::Read_Locked((char*)mReadBuffer, mReadBufferLen,
+ &mZstream.avail_in);
+ if (NS_FAILED(rv) || !mZstream.avail_in) {
+ break;
+ }
+ mZstream.next_in = mReadBuffer;
+ }
+ zerr = inflate(&mZstream, Z_NO_FLUSH);
+ if (zerr == Z_STREAM_END) {
+ // The compressed data may have been stored in multiple
+ // chunks/streams. To allow for this case, re-initialize
+ // the inflate stream and continue decompressing from
+ // the next byte.
+ Bytef* saveNextIn = mZstream.next_in;
+ unsigned int saveAvailIn = mZstream.avail_in;
+ Bytef* saveNextOut = mZstream.next_out;
+ unsigned int saveAvailOut = mZstream.avail_out;
+ inflateReset(&mZstream);
+ mZstream.next_in = saveNextIn;
+ mZstream.avail_in = saveAvailIn;
+ mZstream.next_out = saveNextOut;
+ mZstream.avail_out = saveAvailOut;
+ zerr = Z_OK;
+ } else if (zerr != Z_OK) {
+ rv = NS_ERROR_INVALID_CONTENT_ENCODING;
+ }
+ }
+ if (NS_SUCCEEDED(rv)) {
+ *countRead = count - mZstream.avail_out;
+ }
+ return rv;
+}
+
+nsresult nsCacheEntryDescriptor::nsDecompressInputStreamWrapper::Close() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ EndZstream();
+ if (mReadBuffer) {
+ free(mReadBuffer);
+ mReadBuffer = nullptr;
+ mReadBufferLen = 0;
+ }
+ return nsInputStreamWrapper::Close_Locked();
+}
+
+nsresult nsCacheEntryDescriptor::nsDecompressInputStreamWrapper::InitZstream() {
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ if (mStreamEnded) return NS_ERROR_FAILURE;
+
+ // Initialize zlib inflate stream
+ mZstream.zalloc = Z_NULL;
+ mZstream.zfree = Z_NULL;
+ mZstream.opaque = Z_NULL;
+ mZstream.next_out = Z_NULL;
+ mZstream.avail_out = 0;
+ mZstream.next_in = Z_NULL;
+ mZstream.avail_in = 0;
+ if (inflateInit(&mZstream) != Z_OK) {
+ return NS_ERROR_FAILURE;
+ }
+ mStreamInitialized = true;
+ return NS_OK;
+}
+
+nsresult nsCacheEntryDescriptor::nsDecompressInputStreamWrapper::EndZstream() {
+ if (mStreamInitialized && !mStreamEnded) {
+ inflateEnd(&mZstream);
+ mStreamInitialized = false;
+ mStreamEnded = true;
+ }
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsOutputStreamWrapper - a wrapper for nsIOutputStream that tracks the
+ *                         amount of data written to a cache entry.
+ *                       - also keeps the cache entry open while referenced.
+ ******************************************************************************/
+
+NS_IMPL_ADDREF(nsCacheEntryDescriptor::nsOutputStreamWrapper)
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsCacheEntryDescriptor::nsOutputStreamWrapper::Release() {
+  // Holding a reference to the descriptor ensures that the cache service
+  // won't go away. Do not grab the cache service lock if there is no
+  // descriptor.
+ RefPtr<nsCacheEntryDescriptor> desc;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ desc = mDescriptor;
+ }
+
+ if (desc) nsCacheService::Lock(LOCK_TELEM(NSOUTPUTSTREAMWRAPPER_RELEASE));
+
+ nsrefcnt count;
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "nsCacheEntryDescriptor::nsOutputStreamWrapper");
+
+ if (0 == count) {
+ // don't use desc here since mDescriptor might be already nulled out
+ if (mDescriptor) mDescriptor->mOutputWrapper = nullptr;
+
+ if (desc) nsCacheService::Unlock();
+
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ if (desc) nsCacheService::Unlock();
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(nsCacheEntryDescriptor::nsOutputStreamWrapper)
+ NS_INTERFACE_MAP_ENTRY(nsIOutputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+nsresult nsCacheEntryDescriptor::nsOutputStreamWrapper::LazyInit() {
+  // Check if we still have the descriptor. If not, we can't even grab the
+  // cache service lock, because the cache service may no longer exist.
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSOUTPUTSTREAMWRAPPER_LAZYINIT));
+
+ nsCacheAccessMode mode;
+ nsresult rv = mDescriptor->GetAccessGranted(&mode);
+ if (NS_FAILED(rv)) return rv;
+
+ NS_ENSURE_TRUE(mode & nsICache::ACCESS_WRITE, NS_ERROR_UNEXPECTED);
+
+ nsCacheEntry* cacheEntry = mDescriptor->CacheEntry();
+ if (!cacheEntry) return NS_ERROR_NOT_AVAILABLE;
+
+ NS_ASSERTION(mOutput == nullptr, "mOutput set in LazyInit");
+
+ nsCOMPtr<nsIOutputStream> stream;
+ rv = nsCacheService::OpenOutputStreamForEntry(cacheEntry, mode, mStartOffset,
+ getter_AddRefs(stream));
+ if (NS_FAILED(rv)) return rv;
+
+ nsCacheDevice* device = cacheEntry->CacheDevice();
+ if (device) {
+ // the entry has been truncated to mStartOffset bytes, inform device
+ int32_t size = cacheEntry->DataSize();
+ rv = device->OnDataSizeChange(cacheEntry, mStartOffset - size);
+ if (NS_SUCCEEDED(rv)) cacheEntry->SetDataSize(mStartOffset);
+ } else {
+ rv = NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // If anything above failed, clean up internal state and get out of here
+ // (see bug #654926)...
+ if (NS_FAILED(rv)) {
+ nsCacheService::ReleaseObject_Locked(stream.forget().take());
+ mDescriptor->mOutputWrapper = nullptr;
+ nsCacheService::ReleaseObject_Locked(mDescriptor);
+ mDescriptor = nullptr;
+ mInitialized = false;
+ return rv;
+ }
+
+ mOutput = stream;
+ mInitialized = true;
+ return NS_OK;
+}
+
+nsresult nsCacheEntryDescriptor::nsOutputStreamWrapper::EnsureInit() {
+ if (mInitialized) {
+ NS_ASSERTION(mDescriptor, "Bad state");
+ return NS_OK;
+ }
+
+ return LazyInit();
+}
+
+nsresult nsCacheEntryDescriptor::nsOutputStreamWrapper::OnWrite(
+ uint32_t count) {
+ if (count > INT32_MAX) return NS_ERROR_UNEXPECTED;
+ return mDescriptor->RequestDataSizeChange((int32_t)count);
+}
+
+void nsCacheEntryDescriptor::nsOutputStreamWrapper::CloseInternal() {
+ mLock.AssertCurrentThreadOwns();
+ if (!mDescriptor) {
+ NS_ASSERTION(!mInitialized, "Bad state");
+ NS_ASSERTION(!mOutput, "Bad state");
+ return;
+ }
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSOUTPUTSTREAMWRAPPER_CLOSEINTERNAL));
+
+ if (mDescriptor) {
+ mDescriptor->mOutputWrapper = nullptr;
+ nsCacheService::ReleaseObject_Locked(mDescriptor);
+ mDescriptor = nullptr;
+ }
+ mInitialized = false;
+ mOutput = nullptr;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::Close() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ return Close_Locked();
+}
+
+nsresult nsCacheEntryDescriptor::nsOutputStreamWrapper::Close_Locked() {
+ nsresult rv = EnsureInit();
+ if (NS_SUCCEEDED(rv)) {
+ rv = mOutput->Close();
+ } else {
+ NS_ASSERTION(!mOutput, "Shouldn't have mOutput when EnsureInit() failed");
+ }
+
+ // Call CloseInternal() even when EnsureInit() failed, e.g. in case we are
+ // closing streams with nsCacheService::CloseAllStream()
+ CloseInternal();
+ return rv;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::Flush() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ nsresult rv = EnsureInit();
+ if (NS_FAILED(rv)) return rv;
+
+ return mOutput->Flush();
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::Write(
+ const char* buf, uint32_t count, uint32_t* result) {
+ mozilla::MutexAutoLock lock(mLock);
+ return Write_Locked(buf, count, result);
+}
+
+nsresult nsCacheEntryDescriptor::nsOutputStreamWrapper::Write_Locked(
+ const char* buf, uint32_t count, uint32_t* result) {
+ nsresult rv = EnsureInit();
+ if (NS_FAILED(rv)) return rv;
+
+ rv = OnWrite(count);
+ if (NS_FAILED(rv)) return rv;
+
+ return mOutput->Write(buf, count, result);
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::WriteFrom(
+ nsIInputStream* inStr, uint32_t count, uint32_t* result) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::WriteSegments(
+ nsReadSegmentFun reader, void* closure, uint32_t count, uint32_t* result) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsOutputStreamWrapper::IsNonBlocking(
+ bool* result) {
+ // cache streams will never return NS_BASE_STREAM_WOULD_BLOCK
+ *result = false;
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsCompressOutputStreamWrapper - an output stream wrapper that compresses
+ *                                 data before it is written to the cache entry
+ ******************************************************************************/
+
+NS_IMPL_ADDREF(nsCacheEntryDescriptor::nsCompressOutputStreamWrapper)
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsCacheEntryDescriptor::nsCompressOutputStreamWrapper::Release() {
+  // Holding a reference to the descriptor ensures that the cache service
+  // won't go away. Do not grab the cache service lock if there is no
+  // descriptor.
+ RefPtr<nsCacheEntryDescriptor> desc;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ desc = mDescriptor;
+ }
+
+ if (desc)
+ nsCacheService::Lock(LOCK_TELEM(NSCOMPRESSOUTPUTSTREAMWRAPPER_RELEASE));
+
+ nsrefcnt count;
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count,
+ "nsCacheEntryDescriptor::nsCompressOutputStreamWrapper");
+
+ if (0 == count) {
+ // don't use desc here since mDescriptor might be already nulled out
+ if (mDescriptor) mDescriptor->mOutputWrapper = nullptr;
+
+ if (desc) nsCacheService::Unlock();
+
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ if (desc) nsCacheService::Unlock();
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(nsCacheEntryDescriptor::nsCompressOutputStreamWrapper)
+ NS_INTERFACE_MAP_ENTRY(nsIOutputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsCompressOutputStreamWrapper::Write(
+ const char* buf, uint32_t count, uint32_t* result) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ int zerr = Z_OK;
+ nsresult rv = NS_OK;
+
+ if (!mStreamInitialized) {
+ rv = InitZstream();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ if (!mWriteBuffer) {
+ // Once allocated, this buffer is referenced by the zlib stream and
+ // cannot be grown. We use 2x(initial write request) to approximate
+ // a stream buffer size proportional to request buffers.
+ mWriteBufferLen = std::max(count * 2, (uint32_t)kMinCompressWriteBufLen);
+ mWriteBuffer = (unsigned char*)moz_xmalloc(mWriteBufferLen);
+ mZstream.next_out = mWriteBuffer;
+ mZstream.avail_out = mWriteBufferLen;
+ }
+
+ // Compress (deflate) the requested buffer. Keep going
+ // until the entire buffer has been deflated.
+ mZstream.avail_in = count;
+ mZstream.next_in = (Bytef*)buf;
+ while (mZstream.avail_in > 0) {
+ zerr = deflate(&mZstream, Z_NO_FLUSH);
+ if (zerr == Z_STREAM_ERROR) {
+ deflateEnd(&mZstream);
+ mStreamEnded = true;
+ mStreamInitialized = false;
+ return NS_ERROR_FAILURE;
+ }
+ // Note: Z_BUF_ERROR is non-fatal and sometimes expected here.
+
+ // If the compression stream output buffer is filled, write
+ // it out to the underlying stream wrapper.
+ if (mZstream.avail_out == 0) {
+ rv = WriteBuffer();
+ if (NS_FAILED(rv)) {
+ deflateEnd(&mZstream);
+ mStreamEnded = true;
+ mStreamInitialized = false;
+ return rv;
+ }
+ }
+ }
+ *result = count;
+ mUncompressedCount += *result;
+ return NS_OK;
+}
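+// Compressed output accumulates in mWriteBuffer and is only handed to the
+// underlying nsOutputStreamWrapper when the deflate output buffer fills
+// (WriteBuffer() above) or when Close() drains the stream with Z_FINISH.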
+
+NS_IMETHODIMP nsCacheEntryDescriptor::nsCompressOutputStreamWrapper::Close() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ nsresult retval = NS_OK;
+ nsresult rv;
+ int zerr = 0;
+
+ if (mStreamInitialized) {
+ // complete compression of any data remaining in the zlib stream
+ do {
+ zerr = deflate(&mZstream, Z_FINISH);
+ rv = WriteBuffer();
+ if (NS_FAILED(rv)) retval = rv;
+ } while (zerr == Z_OK && rv == NS_OK);
+ deflateEnd(&mZstream);
+ mStreamInitialized = false;
+ }
+ // Do not allow to initialize stream after calling Close().
+ mStreamEnded = true;
+
+ if (mDescriptor->CacheEntry()) {
+ nsAutoCString uncompressedLenStr;
+ rv = mDescriptor->GetMetaDataElement("uncompressed-len",
+ getter_Copies(uncompressedLenStr));
+ if (NS_SUCCEEDED(rv)) {
+ int32_t oldCount = uncompressedLenStr.ToInteger(&rv);
+ if (NS_SUCCEEDED(rv)) {
+ mUncompressedCount += oldCount;
+ }
+ }
+ uncompressedLenStr.Adopt(nullptr);
+ uncompressedLenStr.AppendInt(mUncompressedCount);
+ rv = mDescriptor->SetMetaDataElement("uncompressed-len",
+ uncompressedLenStr.get());
+ if (NS_FAILED(rv)) retval = rv;
+ }
+
+ if (mWriteBuffer) {
+ free(mWriteBuffer);
+ mWriteBuffer = nullptr;
+ mWriteBufferLen = 0;
+ }
+
+ rv = nsOutputStreamWrapper::Close_Locked();
+ if (NS_FAILED(rv)) retval = rv;
+
+ return retval;
+}
+
+nsresult nsCacheEntryDescriptor::nsCompressOutputStreamWrapper::InitZstream() {
+ if (!mDescriptor) return NS_ERROR_NOT_AVAILABLE;
+
+ if (mStreamEnded) return NS_ERROR_FAILURE;
+
+ // Determine compression level: Aggressive compression
+ // may impact performance on mobile devices, while a
+ // lower compression level still provides substantial
+ // space savings for many text streams.
+ int32_t compressionLevel = nsCacheService::CacheCompressionLevel();
+
+ // Initialize zlib deflate stream
+ mZstream.zalloc = Z_NULL;
+ mZstream.zfree = Z_NULL;
+ mZstream.opaque = Z_NULL;
+ if (deflateInit2(&mZstream, compressionLevel, Z_DEFLATED, MAX_WBITS, 8,
+ Z_DEFAULT_STRATEGY) != Z_OK) {
+ return NS_ERROR_FAILURE;
+ }
+ mZstream.next_in = Z_NULL;
+ mZstream.avail_in = 0;
+
+ mStreamInitialized = true;
+
+ return NS_OK;
+}
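+// The level used above comes from nsCacheService::CacheCompressionLevel(),
+// which defaults to CACHE_COMPRESSION_LEVEL (1, zlib's fastest setting) in
+// nsCacheService.cpp.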
+
+nsresult nsCacheEntryDescriptor::nsCompressOutputStreamWrapper::WriteBuffer() {
+ uint32_t bytesToWrite = mWriteBufferLen - mZstream.avail_out;
+ uint32_t result = 0;
+ nsresult rv = nsCacheEntryDescriptor::nsOutputStreamWrapper::Write_Locked(
+ (const char*)mWriteBuffer, bytesToWrite, &result);
+ mZstream.next_out = mWriteBuffer;
+ mZstream.avail_out = mWriteBufferLen;
+ return rv;
+}
diff --git a/netwerk/cache/nsCacheEntryDescriptor.h b/netwerk/cache/nsCacheEntryDescriptor.h
new file mode 100644
index 0000000000..2274150cff
--- /dev/null
+++ b/netwerk/cache/nsCacheEntryDescriptor.h
@@ -0,0 +1,220 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheEntryDescriptor_h_
+#define _nsCacheEntryDescriptor_h_
+
+#include "nsICacheEntryDescriptor.h"
+#include "nsCacheEntry.h"
+#include "nsIInputStream.h"
+#include "nsIOutputStream.h"
+#include "nsCacheService.h"
+#include "zlib.h"
+#include "mozilla/Mutex.h"
+
+/******************************************************************************
+ * nsCacheEntryDescriptor
+ *******************************************************************************/
+class nsCacheEntryDescriptor final : public PRCList,
+ public nsICacheEntryDescriptor {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHEENTRYDESCRIPTOR
+ NS_DECL_NSICACHEENTRYINFO
+
+ friend class nsAsyncDoomEvent;
+ friend class nsCacheService;
+
+ nsCacheEntryDescriptor(nsCacheEntry* entry, nsCacheAccessMode mode);
+
+ /**
+ * utility method to attempt changing data size of associated entry
+ */
+ nsresult RequestDataSizeChange(int32_t deltaSize);
+
+ /**
+ * methods callbacks for nsCacheService
+ */
+ nsCacheEntry* CacheEntry(void) { return mCacheEntry; }
+ bool ClearCacheEntry(void) {
+ NS_ASSERTION(mInputWrappers.IsEmpty(), "Bad state");
+ NS_ASSERTION(!mOutputWrapper, "Bad state");
+
+ bool doomEntry = false;
+ bool asyncDoomPending;
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ asyncDoomPending = mAsyncDoomPending;
+ }
+
+ if (asyncDoomPending && mCacheEntry) {
+ doomEntry = true;
+ mDoomedOnClose = true;
+ }
+ mCacheEntry = nullptr;
+
+ return doomEntry;
+ }
+
+ private:
+ virtual ~nsCacheEntryDescriptor();
+
+ /*************************************************************************
+ * input stream wrapper class -
+ *
+ * The input stream wrapper references the descriptor, but the descriptor
+ * doesn't need any references to the stream wrapper.
+ *************************************************************************/
+ class nsInputStreamWrapper : public nsIInputStream {
+ friend class nsCacheEntryDescriptor;
+
+ private:
+ nsCacheEntryDescriptor* mDescriptor;
+ nsCOMPtr<nsIInputStream> mInput;
+ uint32_t mStartOffset;
+ bool mInitialized;
+ mozilla::Mutex mLock;
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIINPUTSTREAM
+
+ nsInputStreamWrapper(nsCacheEntryDescriptor* desc, uint32_t off)
+ : mDescriptor(desc),
+ mStartOffset(off),
+ mInitialized(false),
+ mLock("nsInputStreamWrapper.mLock") {
+ NS_ADDREF(mDescriptor);
+ }
+
+ private:
+ virtual ~nsInputStreamWrapper() { NS_IF_RELEASE(mDescriptor); }
+
+ nsresult LazyInit();
+ nsresult EnsureInit();
+ nsresult Read_Locked(char* buf, uint32_t count, uint32_t* countRead);
+ nsresult Close_Locked();
+ void CloseInternal();
+ };
+
+ class nsDecompressInputStreamWrapper : public nsInputStreamWrapper {
+ private:
+ unsigned char* mReadBuffer;
+ uint32_t mReadBufferLen;
+ z_stream mZstream;
+ bool mStreamInitialized;
+ bool mStreamEnded;
+
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+
+ nsDecompressInputStreamWrapper(nsCacheEntryDescriptor* desc, uint32_t off)
+ : nsInputStreamWrapper(desc, off),
+ mReadBuffer(nullptr),
+ mReadBufferLen(0),
+ mZstream{},
+ mStreamInitialized(false),
+ mStreamEnded(false) {}
+ NS_IMETHOD Read(char* buf, uint32_t count, uint32_t* result) override;
+ NS_IMETHOD Close() override;
+
+ private:
+ virtual ~nsDecompressInputStreamWrapper() { Close(); }
+ nsresult InitZstream();
+ nsresult EndZstream();
+ };
+
+ /*************************************************************************
+ * output stream wrapper class -
+ *
+ * The output stream wrapper references the descriptor, but the descriptor
+ * doesn't need any references to the stream wrapper.
+ *************************************************************************/
+ class nsOutputStreamWrapper : public nsIOutputStream {
+ friend class nsCacheEntryDescriptor;
+
+ protected:
+ nsCacheEntryDescriptor* mDescriptor;
+ nsCOMPtr<nsIOutputStream> mOutput;
+ uint32_t mStartOffset;
+ bool mInitialized;
+ mozilla::Mutex mLock;
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIOUTPUTSTREAM
+
+ nsOutputStreamWrapper(nsCacheEntryDescriptor* desc, uint32_t off)
+ : mDescriptor(desc),
+ mStartOffset(off),
+ mInitialized(false),
+ mLock("nsOutputStreamWrapper.mLock") {
+ NS_ADDREF(mDescriptor); // owning ref
+ }
+
+ private:
+ virtual ~nsOutputStreamWrapper() {
+ Close();
+
+ NS_ASSERTION(!mOutput, "Bad state");
+ NS_ASSERTION(!mDescriptor, "Bad state");
+ }
+
+ nsresult LazyInit();
+ nsresult EnsureInit();
+ nsresult OnWrite(uint32_t count);
+ nsresult Write_Locked(const char* buf, uint32_t count, uint32_t* result);
+ nsresult Close_Locked();
+ void CloseInternal();
+ };
+
+ class nsCompressOutputStreamWrapper : public nsOutputStreamWrapper {
+ private:
+ unsigned char* mWriteBuffer;
+ uint32_t mWriteBufferLen;
+ z_stream mZstream;
+ bool mStreamInitialized;
+ bool mStreamEnded;
+ uint32_t mUncompressedCount;
+
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+
+ nsCompressOutputStreamWrapper(nsCacheEntryDescriptor* desc, uint32_t off)
+ : nsOutputStreamWrapper(desc, off),
+ mWriteBuffer(nullptr),
+ mWriteBufferLen(0),
+ mZstream{},
+ mStreamInitialized(false),
+ mStreamEnded(false),
+ mUncompressedCount(0) {}
+ NS_IMETHOD Write(const char* buf, uint32_t count,
+ uint32_t* result) override;
+ NS_IMETHOD Close() override;
+
+ private:
+ virtual ~nsCompressOutputStreamWrapper() { Close(); }
+ nsresult InitZstream();
+ nsresult WriteBuffer();
+ };
+
+ private:
+ /**
+ * nsCacheEntryDescriptor data members
+ */
+
+ nsCOMPtr<nsICacheServiceInternal> mCacheService;
+ nsCacheEntry* mCacheEntry; // we are a child of the entry
+ nsCacheAccessMode mAccessGranted;
+ nsTArray<nsInputStreamWrapper*> mInputWrappers;
+ nsOutputStreamWrapper* mOutputWrapper;
+ mozilla::Mutex mLock;
+ bool mAsyncDoomPending;
+ bool mDoomedOnClose;
+ bool mClosingDescriptor;
+};
+
+#endif // _nsCacheEntryDescriptor_h_
diff --git a/netwerk/cache/nsCacheMetaData.cpp b/netwerk/cache/nsCacheMetaData.cpp
new file mode 100644
index 0000000000..386b569ea0
--- /dev/null
+++ b/netwerk/cache/nsCacheMetaData.cpp
@@ -0,0 +1,151 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCacheMetaData.h"
+#include "nsICacheEntryDescriptor.h"
+
+const char* nsCacheMetaData::GetElement(const char* key) {
+ const char* data = mBuffer;
+ const char* limit = mBuffer + mMetaSize;
+
+ while (data < limit) {
+ // Point to the value part
+ const char* value = data + strlen(data) + 1;
+ MOZ_ASSERT(value < limit, "Cache Metadata corrupted");
+ if (strcmp(data, key) == 0) return value;
+
+ // Skip value part
+ data = value + strlen(value) + 1;
+ }
+ MOZ_ASSERT(data == limit, "Metadata corrupted");
+ return nullptr;
+}
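+// For illustration (hypothetical keys): the buffer is a flat run of
+// NUL-terminated key/value pairs, e.g.
+//
+//   "request-method\0GET\0response-head\0HTTP/1.1 200 OK\0"
+//
+// so GetElement() alternately skips a key and a value until it reaches
+// mBuffer + mMetaSize.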
+
+nsresult nsCacheMetaData::SetElement(const char* key, const char* value) {
+ const uint32_t keySize = strlen(key) + 1;
+ char* pos = (char*)GetElement(key);
+
+ if (!value) {
+ // No value means remove the key/value pair completely, if existing
+ if (pos) {
+ uint32_t oldValueSize = strlen(pos) + 1;
+ uint32_t offset = pos - mBuffer;
+ uint32_t remainder = mMetaSize - (offset + oldValueSize);
+
+ memmove(pos - keySize, pos + oldValueSize, remainder);
+ mMetaSize -= keySize + oldValueSize;
+ }
+ return NS_OK;
+ }
+
+ const uint32_t valueSize = strlen(value) + 1;
+ uint32_t newSize = mMetaSize + valueSize;
+ if (pos) {
+ const uint32_t oldValueSize = strlen(pos) + 1;
+ const uint32_t offset = pos - mBuffer;
+ const uint32_t remainder = mMetaSize - (offset + oldValueSize);
+
+ // Update the value in place
+ newSize -= oldValueSize;
+ nsresult rv = EnsureBuffer(newSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Move the remainder to the right place
+ pos = mBuffer + offset;
+ memmove(pos + valueSize, pos + oldValueSize, remainder);
+ } else {
+ // allocate new meta data element
+ newSize += keySize;
+ nsresult rv = EnsureBuffer(newSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Add after last element
+ pos = mBuffer + mMetaSize;
+ memcpy(pos, key, keySize);
+ pos += keySize;
+ }
+
+ // Update value
+ memcpy(pos, value, valueSize);
+ mMetaSize = newSize;
+
+ return NS_OK;
+}
+
+nsresult nsCacheMetaData::FlattenMetaData(char* buffer, uint32_t bufSize) {
+ if (mMetaSize > bufSize) {
+ NS_ERROR("buffer size too small for meta data.");
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ if (!mBuffer) {
+ MOZ_ASSERT(mMetaSize == 0);
+ MOZ_ASSERT(bufSize == 0);
+ return NS_OK;
+ }
+
+ memcpy(buffer, mBuffer, mMetaSize);
+ return NS_OK;
+}
+
+nsresult nsCacheMetaData::UnflattenMetaData(const char* data, uint32_t size) {
+ if (data && size) {
+ // Check if the metadata ends with a zero byte.
+ if (data[size - 1] != '\0') {
+ NS_ERROR("Cache MetaData is not null terminated");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+ // Check that there are an even number of zero bytes
+ // to match the pattern { key \0 value \0 }
+ bool odd = false;
+ for (uint32_t i = 0; i < size; i++) {
+ if (data[i] == '\0') odd = !odd;
+ }
+ if (odd) {
+ NS_ERROR("Cache MetaData is malformed");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+
+ nsresult rv = EnsureBuffer(size);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ memcpy(mBuffer, data, size);
+ mMetaSize = size;
+ }
+ return NS_OK;
+}
+
+nsresult nsCacheMetaData::VisitElements(nsICacheMetaDataVisitor* visitor) {
+ const char* data = mBuffer;
+ const char* limit = mBuffer + mMetaSize;
+
+ while (data < limit) {
+ const char* key = data;
+ // Skip key part
+ data += strlen(data) + 1;
+ MOZ_ASSERT(data < limit, "Metadata corrupted");
+ bool keepGoing;
+ nsresult rv = visitor->VisitMetaDataElement(key, data, &keepGoing);
+ if (NS_FAILED(rv) || !keepGoing) return NS_OK;
+
+ // Skip value part
+ data += strlen(data) + 1;
+ }
+ MOZ_ASSERT(data == limit, "Metadata corrupted");
+ return NS_OK;
+}
+
+nsresult nsCacheMetaData::EnsureBuffer(uint32_t bufSize) {
+ if (mBufferSize < bufSize) {
+ char* buf = (char*)realloc(mBuffer, bufSize);
+ if (!buf) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ mBuffer = buf;
+ mBufferSize = bufSize;
+ }
+ return NS_OK;
+}
diff --git a/netwerk/cache/nsCacheMetaData.h b/netwerk/cache/nsCacheMetaData.h
new file mode 100644
index 0000000000..4cf84ad5fa
--- /dev/null
+++ b/netwerk/cache/nsCacheMetaData.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheMetaData_h_
+#define _nsCacheMetaData_h_
+
+#include "nspr.h"
+#include "nscore.h"
+
+class nsICacheMetaDataVisitor;
+
+class nsCacheMetaData {
+ public:
+ nsCacheMetaData() : mBuffer(nullptr), mBufferSize(0), mMetaSize(0) {}
+
+ ~nsCacheMetaData() {
+ mBufferSize = mMetaSize = 0;
+ free(mBuffer);
+ mBuffer = nullptr;
+ }
+
+ const char* GetElement(const char* key);
+
+ nsresult SetElement(const char* key, const char* value);
+
+ uint32_t Size(void) { return mMetaSize; }
+
+ nsresult FlattenMetaData(char* buffer, uint32_t bufSize);
+
+ nsresult UnflattenMetaData(const char* buffer, uint32_t bufSize);
+
+ nsresult VisitElements(nsICacheMetaDataVisitor* visitor);
+
+ private:
+ nsresult EnsureBuffer(uint32_t size);
+
+ char* mBuffer;
+ uint32_t mBufferSize;
+ uint32_t mMetaSize;
+};
+
+#endif  // _nsCacheMetaData_h_
diff --git a/netwerk/cache/nsCacheRequest.h b/netwerk/cache/nsCacheRequest.h
new file mode 100644
index 0000000000..1d5dcd3106
--- /dev/null
+++ b/netwerk/cache/nsCacheRequest.h
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheRequest_h_
+#define _nsCacheRequest_h_
+
+#include "nspr.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/Mutex.h"
+#include "nsCOMPtr.h"
+#include "nsICache.h"
+#include "nsICacheListener.h"
+#include "nsCacheSession.h"
+#include "nsCacheService.h"
+
+class nsCacheRequest : public PRCList {
+ typedef mozilla::CondVar CondVar;
+ typedef mozilla::MutexAutoLock MutexAutoLock;
+ typedef mozilla::Mutex Mutex;
+
+ private:
+ friend class nsCacheService;
+ friend class nsCacheEntry;
+ friend class nsProcessRequestEvent;
+
+ nsCacheRequest(const nsACString& key, nsICacheListener* listener,
+ nsCacheAccessMode accessRequested, bool blockingMode,
+ nsCacheSession* session)
+ : mKey(key),
+ mInfo(0),
+ mListener(listener),
+ mLock("nsCacheRequest.mLock"),
+ mCondVar(mLock, "nsCacheRequest.mCondVar"),
+ mProfileDir(session->ProfileDir()) {
+ MOZ_COUNT_CTOR(nsCacheRequest);
+ PR_INIT_CLIST(this);
+ SetAccessRequested(accessRequested);
+ SetStoragePolicy(session->StoragePolicy());
+ if (session->IsStreamBased()) MarkStreamBased();
+ if (session->WillDoomEntriesIfExpired()) MarkDoomEntriesIfExpired();
+ if (session->IsPrivate()) MarkPrivate();
+ if (blockingMode == nsICache::BLOCKING) MarkBlockingMode();
+ MarkWaitingForValidation();
+ NS_IF_ADDREF(mListener);
+ }
+
+ ~nsCacheRequest() {
+ MOZ_COUNT_DTOR(nsCacheRequest);
+ NS_ASSERTION(PR_CLIST_IS_EMPTY(this), "request still on a list");
+
+ if (mListener)
+ nsCacheService::ReleaseObject_Locked(mListener, mEventTarget);
+ }
+
+ /**
+ * Simple Accessors
+ */
+ enum CacheRequestInfo {
+ eStoragePolicyMask = 0x000000FF,
+ eStreamBasedMask = 0x00000100,
+ ePrivateMask = 0x00000200,
+ eDoomEntriesIfExpiredMask = 0x00001000,
+ eBlockingModeMask = 0x00010000,
+ eWaitingForValidationMask = 0x00100000,
+ eAccessRequestedMask = 0xFF000000
+ };
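+  // Bit layout of mInfo implied by the masks above:
+  //   bits  0-7  : storage policy
+  //   bit   8    : stream based
+  //   bit   9    : private
+  //   bit   12   : doom entries if expired
+  //   bit   16   : blocking mode
+  //   bit   20   : waiting for validation
+  //   bits 24-31 : access mode requested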
+
+ void SetAccessRequested(nsCacheAccessMode mode) {
+ NS_ASSERTION(mode <= 0xFF, "too many bits in nsCacheAccessMode");
+ mInfo &= ~eAccessRequestedMask;
+ mInfo |= mode << 24;
+ }
+
+ nsCacheAccessMode AccessRequested() {
+ return (nsCacheAccessMode)((mInfo >> 24) & 0xFF);
+ }
+
+ void MarkStreamBased() { mInfo |= eStreamBasedMask; }
+ bool IsStreamBased() { return (mInfo & eStreamBasedMask) != 0; }
+
+ void MarkDoomEntriesIfExpired() { mInfo |= eDoomEntriesIfExpiredMask; }
+ bool WillDoomEntriesIfExpired() {
+ return (0 != (mInfo & eDoomEntriesIfExpiredMask));
+ }
+
+ void MarkBlockingMode() { mInfo |= eBlockingModeMask; }
+ bool IsBlocking() { return (0 != (mInfo & eBlockingModeMask)); }
+ bool IsNonBlocking() { return !(mInfo & eBlockingModeMask); }
+
+ void SetStoragePolicy(nsCacheStoragePolicy policy) {
+ NS_ASSERTION(policy <= 0xFF, "too many bits in nsCacheStoragePolicy");
+ mInfo &= ~eStoragePolicyMask; // clear storage policy bits
+ mInfo |= policy; // or in new bits
+ }
+
+ nsCacheStoragePolicy StoragePolicy() {
+ return (nsCacheStoragePolicy)(mInfo & eStoragePolicyMask);
+ }
+
+ void MarkPrivate() { mInfo |= ePrivateMask; }
+ void MarkPublic() { mInfo &= ~ePrivateMask; }
+ bool IsPrivate() { return (mInfo & ePrivateMask) != 0; }
+
+ void MarkWaitingForValidation() { mInfo |= eWaitingForValidationMask; }
+ void DoneWaitingForValidation() { mInfo &= ~eWaitingForValidationMask; }
+ bool WaitingForValidation() {
+ return (mInfo & eWaitingForValidationMask) != 0;
+ }
+
+ nsresult WaitForValidation(void) {
+ if (!WaitingForValidation()) { // flag already cleared
+ MarkWaitingForValidation(); // set up for next time
+ return NS_OK; // early exit;
+ }
+ {
+ MutexAutoLock lock(mLock);
+ while (WaitingForValidation()) {
+ mCondVar.Wait();
+ }
+ MarkWaitingForValidation(); // set up for next time
+ }
+ return NS_OK;
+ }
+
+ void WakeUp(void) {
+ DoneWaitingForValidation();
+ MutexAutoLock lock(mLock);
+ mCondVar.Notify();
+ }
+
+ /**
+ * Data members
+ */
+ nsCString mKey;
+ uint32_t mInfo;
+ nsICacheListener* mListener; // strong ref
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+ Mutex mLock;
+ CondVar mCondVar;
+ nsCOMPtr<nsIFile> mProfileDir;
+};
+
+#endif // _nsCacheRequest_h_
diff --git a/netwerk/cache/nsCacheService.cpp b/netwerk/cache/nsCacheService.cpp
new file mode 100644
index 0000000000..f7cb31d010
--- /dev/null
+++ b/netwerk/cache/nsCacheService.cpp
@@ -0,0 +1,1910 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCacheService.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/FileUtils.h"
+
+#include "nsCache.h"
+#include "nsCacheRequest.h"
+#include "nsCacheEntry.h"
+#include "nsCacheEntryDescriptor.h"
+#include "nsCacheDevice.h"
+#include "nsICacheVisitor.h"
+#include "nsDiskCacheDeviceSQL.h"
+#include "nsCacheUtils.h"
+#include "../cache2/CacheObserver.h"
+#include "nsIObserverService.h"
+#include "nsIPrefBranch.h"
+#include "nsIFile.h"
+#include "nsIOService.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsAppDirectoryServiceDefs.h"
+#include "nsThreadUtils.h"
+#include "nsProxyRelease.h"
+#include "nsDeleteDir.h"
+#include "nsNetCID.h"
+#include <math.h> // for log()
+#include "mozilla/Services.h"
+#include "mozilla/StaticPrefs_browser.h"
+
+#include "mozilla/net/NeckoCommon.h"
+#include <algorithm>
+
+using namespace mozilla;
+using namespace mozilla::net;
+
+/******************************************************************************
+ * nsCacheProfilePrefObserver
+ *****************************************************************************/
+#define OFFLINE_CACHE_DIR_PREF "browser.cache.offline.parent_directory"
+#define OFFLINE_CACHE_CAPACITY_PREF "browser.cache.offline.capacity"
+#define OFFLINE_CACHE_CAPACITY 512000
+
+#define CACHE_COMPRESSION_LEVEL 1
+
+static const char* observerList[] = {
+ "profile-before-change", "profile-do-change",
+ NS_XPCOM_SHUTDOWN_OBSERVER_ID, "last-pb-context-exited",
+ "suspend_process_notification", "resume_process_notification"};
+
+class nsCacheProfilePrefObserver : public nsIObserver {
+ virtual ~nsCacheProfilePrefObserver() = default;
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIOBSERVER
+
+ nsCacheProfilePrefObserver()
+ : mHaveProfile(false),
+ mOfflineCacheEnabled(false),
+ mOfflineStorageCacheEnabled(false),
+ mOfflineCacheCapacity(0),
+ mCacheCompressionLevel(CACHE_COMPRESSION_LEVEL),
+ mSanitizeOnShutdown(false),
+ mClearCacheOnShutdown(false) {}
+
+ nsresult Install();
+ void Remove();
+ nsresult ReadPrefs(nsIPrefBranch* branch);
+
+ nsIFile* DiskCacheParentDirectory() { return mDiskCacheParentDirectory; }
+
+ bool OfflineCacheEnabled();
+ int32_t OfflineCacheCapacity() { return mOfflineCacheCapacity; }
+ nsIFile* OfflineCacheParentDirectory() {
+ return mOfflineCacheParentDirectory;
+ }
+
+ int32_t CacheCompressionLevel();
+
+ bool SanitizeAtShutdown() {
+ return mSanitizeOnShutdown && mClearCacheOnShutdown;
+ }
+
+ private:
+ bool mHaveProfile;
+
+ nsCOMPtr<nsIFile> mDiskCacheParentDirectory;
+
+ bool mOfflineCacheEnabled;
+ bool mOfflineStorageCacheEnabled;
+ int32_t mOfflineCacheCapacity; // in kilobytes
+ nsCOMPtr<nsIFile> mOfflineCacheParentDirectory;
+
+ int32_t mCacheCompressionLevel;
+
+ bool mSanitizeOnShutdown;
+ bool mClearCacheOnShutdown;
+};
+
+NS_IMPL_ISUPPORTS(nsCacheProfilePrefObserver, nsIObserver)
+
+class nsBlockOnCacheThreadEvent : public Runnable {
+ public:
+ nsBlockOnCacheThreadEvent()
+ : mozilla::Runnable("nsBlockOnCacheThreadEvent") {}
+ NS_IMETHOD Run() override {
+ nsCacheServiceAutoLock autoLock(LOCK_TELEM(NSBLOCKONCACHETHREADEVENT_RUN));
+ CACHE_LOG_DEBUG(("nsBlockOnCacheThreadEvent [%p]\n", this));
+ nsCacheService::gService->mNotified = true;
+ nsCacheService::gService->mCondVar.Notify();
+ return NS_OK;
+ }
+};
+
+nsresult nsCacheProfilePrefObserver::Install() {
+ // install profile-change observer
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (!observerService) return NS_ERROR_FAILURE;
+
+ nsresult rv, rv2 = NS_OK;
+ for (auto& observer : observerList) {
+ rv = observerService->AddObserver(this, observer, false);
+ if (NS_FAILED(rv)) rv2 = rv;
+ }
+
+ // install preferences observer
+ nsCOMPtr<nsIPrefBranch> branch = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ if (!branch) return NS_ERROR_FAILURE;
+
+ // Determine if we have a profile already
+ // Install() is called *after* the profile-after-change notification
+ // when there is only a single profile, or it is specified on the
+ // commandline at startup.
+ // In that case, we detect the presence of a profile by the existence
+ // of the NS_APP_USER_PROFILE_50_DIR directory.
+
+ nsCOMPtr<nsIFile> directory;
+ rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
+ getter_AddRefs(directory));
+ if (NS_SUCCEEDED(rv)) mHaveProfile = true;
+
+ rv = ReadPrefs(branch);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return rv2;
+}
+
+void nsCacheProfilePrefObserver::Remove() {
+ // remove Observer Service observers
+ nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+ if (obs) {
+ for (auto& observer : observerList) {
+ obs->RemoveObserver(this, observer);
+ }
+ }
+}
+
+NS_IMETHODIMP
+nsCacheProfilePrefObserver::Observe(nsISupports* subject, const char* topic,
+ const char16_t* data_unicode) {
+ NS_ConvertUTF16toUTF8 data(data_unicode);
+ CACHE_LOG_INFO(("Observe [topic=%s data=%s]\n", topic, data.get()));
+
+ if (!nsCacheService::IsInitialized()) {
+ if (!strcmp("resume_process_notification", topic)) {
+ // A suspended process has a closed cache, so re-open it here.
+ nsCacheService::GlobalInstance()->Init();
+ }
+ return NS_OK;
+ }
+
+ if (!strcmp(NS_XPCOM_SHUTDOWN_OBSERVER_ID, topic)) {
+ // xpcom going away, shutdown cache service
+ nsCacheService::GlobalInstance()->Shutdown();
+ } else if (!strcmp("profile-before-change", topic)) {
+ // profile before change
+ mHaveProfile = false;
+
+ // XXX shutdown devices
+ nsCacheService::OnProfileShutdown();
+ } else if (!strcmp("suspend_process_notification", topic)) {
+    // A suspended process may never resume, so shut down the cache to reduce
+    // the risk of cache corruption.
+ nsCacheService::GlobalInstance()->Shutdown();
+ } else if (!strcmp("profile-do-change", topic)) {
+ // profile after change
+ mHaveProfile = true;
+ nsCOMPtr<nsIPrefBranch> branch = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ if (!branch) {
+ return NS_ERROR_FAILURE;
+ }
+ (void)ReadPrefs(branch);
+ nsCacheService::OnProfileChanged();
+
+ } else if (!strcmp("last-pb-context-exited", topic)) {
+ nsCacheService::LeavePrivateBrowsing();
+ }
+
+ return NS_OK;
+}
+
+static already_AddRefed<nsIFile> GetCacheDirectory(const char* aSubdir,
+ bool aAllowProcDirCache) {
+ nsCOMPtr<nsIFile> directory;
+
+ // try to get the disk cache parent directory
+ Unused << NS_GetSpecialDirectory(NS_APP_CACHE_PARENT_DIR,
+ getter_AddRefs(directory));
+ if (!directory) {
+ // try to get the profile directory (there may not be a profile yet)
+ nsCOMPtr<nsIFile> profDir;
+ NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(profDir));
+ NS_GetSpecialDirectory(NS_APP_USER_PROFILE_LOCAL_50_DIR,
+ getter_AddRefs(directory));
+ if (!directory)
+ directory = profDir;
+ else if (profDir) {
+ nsCacheService::MoveOrRemoveDiskCache(profDir, directory, aSubdir);
+ }
+ }
+ if (!directory && aAllowProcDirCache) {
+ Unused << NS_GetSpecialDirectory(NS_XPCOM_CURRENT_PROCESS_DIR,
+ getter_AddRefs(directory));
+ }
+ return directory.forget();
+}
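+// Fallback order in GetCacheDirectory(): NS_APP_CACHE_PARENT_DIR first, then
+// the local profile directory (migrating or removing any cache left in the
+// main profile directory via MoveOrRemoveDiskCache), then the main profile
+// directory itself, and finally the process directory when
+// aAllowProcDirCache is set.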
+
+nsresult nsCacheProfilePrefObserver::ReadPrefs(nsIPrefBranch* branch) {
+ if (!mDiskCacheParentDirectory) {
+ // use file cache in build tree only if asked, to avoid cache dir litter
+ bool allowProcDirCache = PR_GetEnv("NECKO_DEV_ENABLE_DISK_CACHE");
+ mDiskCacheParentDirectory = GetCacheDirectory("Cache", allowProcDirCache);
+ }
+
+ // read offline cache device prefs
+ mOfflineCacheEnabled = StaticPrefs::browser_cache_offline_enable();
+ mOfflineStorageCacheEnabled =
+ StaticPrefs::browser_cache_offline_storage_enable();
+
+ mOfflineCacheCapacity = OFFLINE_CACHE_CAPACITY;
+ (void)branch->GetIntPref(OFFLINE_CACHE_CAPACITY_PREF, &mOfflineCacheCapacity);
+ mOfflineCacheCapacity = std::max(0, mOfflineCacheCapacity);
+
+ (void)branch->GetComplexValue(OFFLINE_CACHE_DIR_PREF, // ignore error
+ NS_GET_IID(nsIFile),
+ getter_AddRefs(mOfflineCacheParentDirectory));
+
+ if (!mOfflineCacheParentDirectory) {
+#ifdef DEBUG
+ bool allowProcDirCache = true;
+#else
+ bool allowProcDirCache = false;
+#endif
+ mOfflineCacheParentDirectory =
+ GetCacheDirectory("OfflineCache", allowProcDirCache);
+ }
+
+ if (!mDiskCacheParentDirectory || !mOfflineCacheParentDirectory) {
+ return NS_ERROR_FAILURE;
+ }
+
+ if (!mOfflineStorageCacheEnabled) {
+ // Dispatch cleanup task
+ nsCOMPtr<nsIRunnable> runnable =
+ NS_NewRunnableFunction("Delete OfflineCache", []() {
+ nsCOMPtr<nsIFile> dir;
+ nsCacheService::GetAppCacheDirectory(getter_AddRefs(dir));
+ bool exists = false;
+ if (dir && NS_SUCCEEDED(dir->Exists(&exists)) && exists) {
+ // Delay delete by 1 minute to avoid IO thrash on startup.
+ CACHE_LOG_INFO(
+ ("Queuing Delete of AppCacheDirectory in 60 seconds"));
+ nsDeleteDir::DeleteDir(dir, false, 60000);
+ }
+ });
+ Unused << nsCacheService::DispatchToCacheIOThread(runnable);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsCacheService::DispatchToCacheIOThread(nsIRunnable* event) {
+ if (!gService || !gService->mCacheIOThread) return NS_ERROR_NOT_AVAILABLE;
+ return gService->mCacheIOThread->Dispatch(event, NS_DISPATCH_NORMAL);
+}
+
+nsresult nsCacheService::SyncWithCacheIOThread() {
+ if (!gService || !gService->mCacheIOThread) return NS_ERROR_NOT_AVAILABLE;
+ gService->mLock.AssertCurrentThreadOwns();
+
+ nsCOMPtr<nsIRunnable> event = new nsBlockOnCacheThreadEvent();
+
+ // dispatch event - it will notify the monitor when it's done
+ nsresult rv = gService->mCacheIOThread->Dispatch(event, NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Failed dispatching block-event");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // wait until notified, then return
+ gService->mNotified = false;
+ while (!gService->mNotified) {
+ gService->mCondVar.Wait();
+ }
+
+ return NS_OK;
+}
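+// The dispatched nsBlockOnCacheThreadEvent (defined earlier) reacquires the
+// cache service lock on the I/O thread, sets gService->mNotified and signals
+// mCondVar, which releases the wait loop above once all previously queued
+// cache I/O work has run.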
+
+bool nsCacheProfilePrefObserver::OfflineCacheEnabled() {
+ if ((mOfflineCacheCapacity == 0) || (!mOfflineCacheParentDirectory))
+ return false;
+
+ return mOfflineCacheEnabled && mOfflineStorageCacheEnabled;
+}
+
+int32_t nsCacheProfilePrefObserver::CacheCompressionLevel() {
+ return mCacheCompressionLevel;
+}
+
+/******************************************************************************
+ * nsProcessRequestEvent
+ *****************************************************************************/
+
+class nsProcessRequestEvent : public Runnable {
+ public:
+ explicit nsProcessRequestEvent(nsCacheRequest* aRequest)
+ : mozilla::Runnable("nsProcessRequestEvent") {
+ mRequest = aRequest;
+ }
+
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ NS_ASSERTION(mRequest->mListener,
+ "Sync OpenCacheEntry() posted to background thread!");
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSPROCESSREQUESTEVENT_RUN));
+ rv = nsCacheService::gService->ProcessRequest(mRequest, false, nullptr);
+
+ // Don't delete the request if it was queued
+ if (!(mRequest->IsBlocking() && rv == NS_ERROR_CACHE_WAIT_FOR_VALIDATION))
+ delete mRequest;
+
+ return NS_OK;
+ }
+
+ protected:
+ virtual ~nsProcessRequestEvent() = default;
+
+ private:
+ nsCacheRequest* mRequest;
+};
+
+/******************************************************************************
+ * nsDoomEvent
+ *****************************************************************************/
+
+class nsDoomEvent : public Runnable {
+ public:
+ nsDoomEvent(nsCacheSession* session, const nsACString& key,
+ nsICacheListener* listener)
+ : mozilla::Runnable("nsDoomEvent") {
+ mKey = *session->ClientID();
+ mKey.Append(':');
+ mKey.Append(key);
+ mStoragePolicy = session->StoragePolicy();
+ mListener = listener;
+ mEventTarget = GetCurrentEventTarget();
+    // We addref the listener here and release it in nsNotifyDoomListener
+    // on the caller's thread. If posting the nsNotifyDoomListener event
+    // fails, we leak the listener, which is better than releasing it on the
+    // wrong thread.
+ NS_IF_ADDREF(mListener);
+ }
+
+ NS_IMETHOD Run() override {
+ nsCacheServiceAutoLock lock;
+
+ bool foundActive = true;
+ nsresult status = NS_ERROR_NOT_AVAILABLE;
+ nsCacheEntry* entry;
+ entry = nsCacheService::gService->mActiveEntries.GetEntry(&mKey);
+ if (!entry) {
+ bool collision = false;
+ foundActive = false;
+ entry = nsCacheService::gService->SearchCacheDevices(
+ &mKey, mStoragePolicy, &collision);
+ }
+
+ if (entry) {
+ status = NS_OK;
+ nsCacheService::gService->DoomEntry_Internal(entry, foundActive);
+ }
+
+ if (mListener) {
+ mEventTarget->Dispatch(new nsNotifyDoomListener(mListener, status),
+ NS_DISPATCH_NORMAL);
+ // posted event will release the reference on the correct thread
+ mListener = nullptr;
+ }
+
+ return NS_OK;
+ }
+
+ private:
+ nsCString mKey;
+ nsCacheStoragePolicy mStoragePolicy;
+ nsICacheListener* mListener;
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+};
+
+/******************************************************************************
+ * nsCacheService
+ *****************************************************************************/
+nsCacheService* nsCacheService::gService = nullptr;
+
+NS_IMPL_ISUPPORTS(nsCacheService, nsICacheService, nsICacheServiceInternal)
+
+nsCacheService::nsCacheService()
+ : mObserver(nullptr),
+ mLock("nsCacheService.mLock"),
+ mCondVar(mLock, "nsCacheService.mCondVar"),
+ mNotified(false),
+ mTimeStampLock("nsCacheService.mTimeStampLock"),
+ mInitialized(false),
+ mClearingEntries(false),
+ mEnableOfflineDevice(false),
+ mOfflineDevice(nullptr),
+ mDoomedEntries{},
+ mTotalEntries(0),
+ mCacheHits(0),
+ mCacheMisses(0),
+ mMaxKeyLength(0),
+ mMaxDataSize(0),
+ mMaxMetaSize(0),
+ mDeactivateFailures(0),
+ mDeactivatedUnboundEntries(0) {
+ NS_ASSERTION(gService == nullptr, "multiple nsCacheService instances!");
+ gService = this;
+
+ // create list of cache devices
+ PR_INIT_CLIST(&mDoomedEntries);
+}
+
+nsCacheService::~nsCacheService() {
+ if (mInitialized) // Shutdown hasn't been called yet.
+ (void)Shutdown();
+
+ if (mObserver) {
+ mObserver->Remove();
+ NS_RELEASE(mObserver);
+ }
+
+ gService = nullptr;
+}
+
+nsresult nsCacheService::Init() {
+  // This method must be called on the main thread because mCacheIOThread must
+ // only be modified on the main thread.
+ if (!NS_IsMainThread()) {
+ NS_ERROR("nsCacheService::Init called off the main thread");
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ NS_ASSERTION(!mInitialized, "nsCacheService already initialized.");
+ if (mInitialized) return NS_ERROR_ALREADY_INITIALIZED;
+
+ if (mozilla::net::IsNeckoChild()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsresult rv;
+
+ mStorageService = do_GetService("@mozilla.org/storage/service;1", &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = NS_NewNamedThread("Cache I/O", getter_AddRefs(mCacheIOThread));
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Can't create cache IO thread");
+ }
+
+ rv = nsDeleteDir::Init();
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Can't initialize nsDeleteDir");
+ }
+
+ // initialize hashtable for active cache entries
+ mActiveEntries.Init();
+
+ // create profile/preference observer
+ if (!mObserver) {
+ mObserver = new nsCacheProfilePrefObserver();
+ NS_ADDREF(mObserver);
+ mObserver->Install();
+ }
+
+ mEnableOfflineDevice = mObserver->OfflineCacheEnabled();
+
+ mInitialized = true;
+ return NS_OK;
+}
+
+void nsCacheService::Shutdown() {
+ // This method must be called on the main thread because mCacheIOThread must
+ // only be modified on the main thread.
+ if (!NS_IsMainThread()) {
+ MOZ_CRASH("nsCacheService::Shutdown called off the main thread");
+ }
+
+ nsCOMPtr<nsIThread> cacheIOThread;
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_SHUTDOWN> totalTimer;
+
+ bool shouldSanitize = false;
+ nsCOMPtr<nsIFile> parentDir;
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_SHUTDOWN));
+ NS_ASSERTION(
+ mInitialized,
+ "can't shutdown nsCacheService unless it has been initialized.");
+ if (!mInitialized) return;
+
+ mClearingEntries = true;
+ DoomActiveEntries(nullptr);
+ }
+
+ CloseAllStreams();
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_SHUTDOWN));
+ NS_ASSERTION(mInitialized, "Bad state");
+
+ mInitialized = false;
+
+ // Clear entries
+ ClearDoomList();
+
+ // Make sure to wait for any pending cache-operations before
+ // proceeding with destructive actions (bug #620660)
+ (void)SyncWithCacheIOThread();
+ mActiveEntries.Shutdown();
+
+ // obtain the disk cache directory in case we need to sanitize it
+ parentDir = mObserver->DiskCacheParentDirectory();
+ shouldSanitize = mObserver->SanitizeAtShutdown();
+
+ if (mOfflineDevice) mOfflineDevice->Shutdown();
+
+ NS_IF_RELEASE(mOfflineDevice);
+
+ for (auto iter = mCustomOfflineDevices.Iter(); !iter.Done(); iter.Next()) {
+ iter.Data()->Shutdown();
+ iter.Remove();
+ }
+
+ LogCacheStatistics();
+
+ mClearingEntries = false;
+ mCacheIOThread.swap(cacheIOThread);
+ }
+
+ if (cacheIOThread) nsShutdownThread::BlockingShutdown(cacheIOThread);
+
+ if (shouldSanitize) {
+ nsresult rv = parentDir->AppendNative("Cache"_ns);
+ if (NS_SUCCEEDED(rv)) {
+ bool exists;
+ if (NS_SUCCEEDED(parentDir->Exists(&exists)) && exists)
+ nsDeleteDir::DeleteDir(parentDir, false);
+ }
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_SHUTDOWN_CLEAR_PRIVATE>
+ timer;
+ nsDeleteDir::Shutdown(shouldSanitize);
+ } else {
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_DELETEDIR_SHUTDOWN>
+ timer;
+ nsDeleteDir::Shutdown(shouldSanitize);
+ }
+}
+
+nsresult nsCacheService::Create(nsISupports* aOuter, const nsIID& aIID,
+ void** aResult) {
+ nsresult rv;
+
+ if (aOuter != nullptr) return NS_ERROR_NO_AGGREGATION;
+
+ RefPtr<nsCacheService> cacheService = new nsCacheService();
+ rv = cacheService->Init();
+ if (NS_SUCCEEDED(rv)) {
+ rv = cacheService->QueryInterface(aIID, aResult);
+ }
+ return rv;
+}
+
+NS_IMETHODIMP
+nsCacheService::CreateSession(const char* clientID,
+ nsCacheStoragePolicy storagePolicy,
+ bool streamBased, nsICacheSession** result) {
+ *result = nullptr;
+
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsCacheService::CreateSessionInternal(
+ const char* clientID, nsCacheStoragePolicy storagePolicy, bool streamBased,
+ nsICacheSession** result) {
+ RefPtr<nsCacheSession> session =
+ new nsCacheSession(clientID, storagePolicy, streamBased);
+ session.forget(result);
+
+ return NS_OK;
+}
+
+nsresult nsCacheService::EvictEntriesForSession(nsCacheSession* session) {
+ NS_ASSERTION(gService, "nsCacheService::gService is null.");
+ return gService->EvictEntriesForClient(session->ClientID()->get(),
+ session->StoragePolicy());
+}
+
+namespace {
+
+class EvictionNotifierRunnable : public Runnable {
+ public:
+ explicit EvictionNotifierRunnable(nsISupports* aSubject)
+ : mozilla::Runnable("EvictionNotifierRunnable"), mSubject(aSubject) {}
+
+ NS_DECL_NSIRUNNABLE
+
+ private:
+ nsCOMPtr<nsISupports> mSubject;
+};
+
+NS_IMETHODIMP
+EvictionNotifierRunnable::Run() {
+ nsCOMPtr<nsIObserverService> obsSvc = mozilla::services::GetObserverService();
+ if (obsSvc) {
+ obsSvc->NotifyObservers(mSubject, NS_CACHESERVICE_EMPTYCACHE_TOPIC_ID,
+ nullptr);
+ }
+ return NS_OK;
+}
+
+} // namespace
+
+nsresult nsCacheService::EvictEntriesForClient(
+ const char* clientID, nsCacheStoragePolicy storagePolicy) {
+ RefPtr<EvictionNotifierRunnable> r =
+ new EvictionNotifierRunnable(NS_ISUPPORTS_CAST(nsICacheService*, this));
+ NS_DispatchToMainThread(r);
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_EVICTENTRIESFORCLIENT));
+ nsresult res = NS_OK;
+
+ // Only clear the offline cache if it has been specifically asked for.
+ if (storagePolicy == nsICache::STORE_OFFLINE) {
+ if (mEnableOfflineDevice) {
+ nsresult rv = NS_OK;
+ if (!mOfflineDevice) rv = CreateOfflineDevice();
+ if (mOfflineDevice) rv = mOfflineDevice->EvictEntries(clientID);
+ if (NS_FAILED(rv)) res = rv;
+ }
+ }
+
+ return res;
+}
+
+nsresult nsCacheService::IsStorageEnabledForPolicy(
+ nsCacheStoragePolicy storagePolicy, bool* result) {
+ if (gService == nullptr) return NS_ERROR_NOT_AVAILABLE;
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHESERVICE_ISSTORAGEENABLEDFORPOLICY));
+
+ *result = nsCacheService::IsStorageEnabledForPolicy_Locked(storagePolicy);
+ return NS_OK;
+}
+
+nsresult nsCacheService::DoomEntry(nsCacheSession* session,
+ const nsACString& key,
+ nsICacheListener* listener) {
+ CACHE_LOG_DEBUG(("Dooming entry for session %p, key %s\n", session,
+ PromiseFlatCString(key).get()));
+ if (!gService || !gService->mInitialized) return NS_ERROR_NOT_INITIALIZED;
+
+ return DispatchToCacheIOThread(new nsDoomEvent(session, key, listener));
+}
+
+bool nsCacheService::IsStorageEnabledForPolicy_Locked(
+ nsCacheStoragePolicy storagePolicy) {
+ if (gService->mEnableOfflineDevice &&
+ storagePolicy == nsICache::STORE_OFFLINE) {
+ return true;
+ }
+
+ return false;
+}
+
+NS_IMETHODIMP nsCacheService::VisitEntries(nsICacheVisitor* visitor) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsCacheService::VisitEntriesInternal(nsICacheVisitor* visitor) {
+ NS_ENSURE_ARG_POINTER(visitor);
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_VISITENTRIES));
+
+ if (!mEnableOfflineDevice) return NS_ERROR_NOT_AVAILABLE;
+
+ // XXX record the fact that a visitation is in progress,
+ // XXX i.e. keep list of visitors in progress.
+
+ nsresult rv = NS_OK;
+
+ if (mEnableOfflineDevice) {
+ if (!mOfflineDevice) {
+ rv = CreateOfflineDevice();
+ if (NS_FAILED(rv)) return rv;
+ }
+ rv = mOfflineDevice->Visit(visitor);
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ // XXX notify any shutdown process that visitation is complete for THIS
+ // visitor.
+ // XXX keep queue of visitors
+
+ return NS_OK;
+}
+
+void nsCacheService::FireClearNetworkCacheStoredAnywhereNotification() {
+ MOZ_ASSERT(NS_IsMainThread());
+ nsCOMPtr<nsIObserverService> obsvc = mozilla::services::GetObserverService();
+ if (obsvc) {
+ obsvc->NotifyObservers(nullptr, "network-clear-cache-stored-anywhere",
+ nullptr);
+ }
+}
+
+NS_IMETHODIMP nsCacheService::EvictEntries(nsCacheStoragePolicy storagePolicy) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsCacheService::EvictEntriesInternal(
+ nsCacheStoragePolicy storagePolicy) {
+ if (storagePolicy == nsICache::STORE_ANYWHERE) {
+ // if not called on main thread, dispatch the notification to the main
+ // thread to notify observers
+ if (!NS_IsMainThread()) {
+ nsCOMPtr<nsIRunnable> event = NewRunnableMethod(
+ "nsCacheService::FireClearNetworkCacheStoredAnywhereNotification",
+ this,
+ &nsCacheService::FireClearNetworkCacheStoredAnywhereNotification);
+ NS_DispatchToMainThread(event);
+ } else {
+ // else you're already on main thread - notify observers
+ FireClearNetworkCacheStoredAnywhereNotification();
+ }
+ }
+ return EvictEntriesForClient(nullptr, storagePolicy);
+}
+
+NS_IMETHODIMP nsCacheService::GetCacheIOTarget(
+ nsIEventTarget** aCacheIOTarget) {
+ NS_ENSURE_ARG_POINTER(aCacheIOTarget);
+
+ // Because mCacheIOThread can only be changed on the main thread, it can be
+ // read from the main thread without the lock. This is useful to prevent
+ // blocking the main thread on other cache operations.
+ if (!NS_IsMainThread()) {
+ Lock(LOCK_TELEM(NSCACHESERVICE_GETCACHEIOTARGET));
+ }
+
+ nsresult rv;
+ if (mCacheIOThread) {
+ *aCacheIOTarget = do_AddRef(mCacheIOThread).take();
+ rv = NS_OK;
+ } else {
+ *aCacheIOTarget = nullptr;
+ rv = NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (!NS_IsMainThread()) {
+ Unlock();
+ }
+
+ return rv;
+}
+
+NS_IMETHODIMP nsCacheService::GetLockHeldTime(double* aLockHeldTime) {
+ MutexAutoLock lock(mTimeStampLock);
+
+ if (mLockAcquiredTimeStamp.IsNull()) {
+ *aLockHeldTime = 0.0;
+ } else {
+ *aLockHeldTime =
+ (TimeStamp::Now() - mLockAcquiredTimeStamp).ToMilliseconds();
+ }
+
+ return NS_OK;
+}
+
+/**
+ * Internal Methods
+ */
+nsresult nsCacheService::GetOfflineDevice(nsOfflineCacheDevice** aDevice) {
+ if (!mOfflineDevice) {
+ nsresult rv = CreateOfflineDevice();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ *aDevice = do_AddRef(mOfflineDevice).take();
+ return NS_OK;
+}
+
+nsresult nsCacheService::GetCustomOfflineDevice(
+ nsIFile* aProfileDir, int32_t aQuota, nsOfflineCacheDevice** aDevice) {
+ nsresult rv;
+
+ nsAutoString profilePath;
+ rv = aProfileDir->GetPath(profilePath);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!mCustomOfflineDevices.Get(profilePath, aDevice)) {
+ rv = CreateCustomOfflineDevice(aProfileDir, aQuota, aDevice);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ (*aDevice)->SetAutoShutdown();
+ mCustomOfflineDevices.Put(profilePath, RefPtr{*aDevice});
+ }
+
+ return NS_OK;
+}
+
+nsresult nsCacheService::CreateOfflineDevice() {
+ CACHE_LOG_INFO(("Creating default offline device"));
+
+ if (mOfflineDevice) return NS_OK;
+ if (!nsCacheService::IsInitialized()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return CreateCustomOfflineDevice(mObserver->OfflineCacheParentDirectory(),
+ mObserver->OfflineCacheCapacity(),
+ &mOfflineDevice);
+}
+
+nsresult nsCacheService::CreateCustomOfflineDevice(
+ nsIFile* aProfileDir, int32_t aQuota, nsOfflineCacheDevice** aDevice) {
+ NS_ENSURE_ARG(aProfileDir);
+
+ if (MOZ_LOG_TEST(gCacheLog, LogLevel::Info)) {
+ CACHE_LOG_INFO(("Creating custom offline device, %s, %d",
+ aProfileDir->HumanReadablePath().get(), aQuota));
+ }
+
+ if (!mInitialized) {
+ NS_WARNING("nsCacheService not initialized");
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (!mEnableOfflineDevice) return NS_ERROR_NOT_AVAILABLE;
+
+ RefPtr<nsOfflineCacheDevice> device = new nsOfflineCacheDevice();
+
+ // set the preferences
+ device->SetCacheParentDirectory(aProfileDir);
+ device->SetCapacity(aQuota);
+
+ nsresult rv = device->InitWithSqlite(mStorageService);
+ if (NS_FAILED(rv)) {
+ CACHE_LOG_DEBUG(("OfflineDevice->InitWithSqlite() failed (0x%.8" PRIx32
+ ")\n",
+ static_cast<uint32_t>(rv)));
+ CACHE_LOG_DEBUG((" - disabling offline cache for this session.\n"));
+ device = nullptr;
+ }
+
+ device.forget(aDevice);
+ return rv;
+}
+
+nsresult nsCacheService::RemoveCustomOfflineDevice(
+ nsOfflineCacheDevice* aDevice) {
+ nsCOMPtr<nsIFile> profileDir = aDevice->BaseDirectory();
+ if (!profileDir) return NS_ERROR_UNEXPECTED;
+
+ nsAutoString profilePath;
+ nsresult rv = profileDir->GetPath(profilePath);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mCustomOfflineDevices.Remove(profilePath);
+ return NS_OK;
+}
+
+nsresult nsCacheService::CreateRequest(nsCacheSession* session,
+ const nsACString& clientKey,
+ nsCacheAccessMode accessRequested,
+ bool blockingMode,
+ nsICacheListener* listener,
+ nsCacheRequest** request) {
+ NS_ASSERTION(request, "CreateRequest: request is null");
+
+ nsAutoCString key(*session->ClientID());
+ key.Append(':');
+ key.Append(clientKey);
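+  // The composite key has the form "<clientID>:<clientKey>"; for example, a
+  // session with client ID "HTTP" opening "https://example.org/" would use
+  // the key "HTTP:https://example.org/" (client IDs are chosen by callers;
+  // this is illustrative only).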
+
+ if (mMaxKeyLength < key.Length()) mMaxKeyLength = key.Length();
+
+ // create request
+ *request =
+ new nsCacheRequest(key, listener, accessRequested, blockingMode, session);
+
+ if (!listener) return NS_OK; // we're sync, we're done.
+
+ // get the request's thread
+ (*request)->mEventTarget = GetCurrentEventTarget();
+
+ return NS_OK;
+}
+
+class nsCacheListenerEvent : public Runnable {
+ public:
+ nsCacheListenerEvent(nsICacheListener* listener,
+ nsICacheEntryDescriptor* descriptor,
+ nsCacheAccessMode accessGranted, nsresult status)
+      : mozilla::Runnable("nsCacheListenerEvent"),
+        mListener(listener),      // transfers reference
+        mDescriptor(descriptor),  // transfers reference (may be null)
+        mAccessGranted(accessGranted),
+        mStatus(status) {}
+
+ NS_IMETHOD Run() override {
+ mListener->OnCacheEntryAvailable(mDescriptor, mAccessGranted, mStatus);
+
+ NS_RELEASE(mListener);
+ NS_IF_RELEASE(mDescriptor);
+ return NS_OK;
+ }
+
+ private:
+ // We explicitly leak mListener or mDescriptor if Run is not called
+ // because otherwise we cannot guarantee that they are destroyed on
+ // the right thread.
+
+ nsICacheListener* mListener;
+ nsICacheEntryDescriptor* mDescriptor;
+ nsCacheAccessMode mAccessGranted;
+ nsresult mStatus;
+};
+
+nsresult nsCacheService::NotifyListener(nsCacheRequest* request,
+ nsICacheEntryDescriptor* descriptor,
+ nsCacheAccessMode accessGranted,
+ nsresult status) {
+ NS_ASSERTION(request->mEventTarget, "no thread set in async request!");
+
+ // Swap ownership, and release listener on target thread...
+ nsICacheListener* listener = request->mListener;
+ request->mListener = nullptr;
+
+ nsCOMPtr<nsIRunnable> ev =
+ new nsCacheListenerEvent(listener, descriptor, accessGranted, status);
+ if (!ev) {
+ // Better to leak listener and descriptor if we fail because we don't
+ // want to destroy them inside the cache service lock or on potentially
+ // the wrong thread.
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ return request->mEventTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
+}
+
+nsresult nsCacheService::ProcessRequest(nsCacheRequest* request,
+ bool calledFromOpenCacheEntry,
+ nsICacheEntryDescriptor** result) {
+ // !!! must be called with mLock held !!!
+ nsresult rv;
+ nsCacheEntry* entry = nullptr;
+ nsCacheEntry* doomedEntry = nullptr;
+ nsCacheAccessMode accessGranted = nsICache::ACCESS_NONE;
+ if (result) *result = nullptr;
+
+ while (true) { // Activate entry loop
+ rv = ActivateEntry(request, &entry,
+ &doomedEntry); // get the entry for this request
+ if (NS_FAILED(rv)) break;
+
+ while (true) { // Request Access loop
+ NS_ASSERTION(entry, "no entry in Request Access loop!");
+ // entry->RequestAccess queues request on entry
+ rv = entry->RequestAccess(request, &accessGranted);
+ if (rv != NS_ERROR_CACHE_WAIT_FOR_VALIDATION) break;
+
+ if (request->IsBlocking()) {
+ if (request->mListener) {
+ // async exits - validate, doom, or close will resume
+ return rv;
+ }
+
+ // XXX this is probably wrong...
+ Unlock();
+ rv = request->WaitForValidation();
+ Lock(LOCK_TELEM(NSCACHESERVICE_PROCESSREQUEST));
+ }
+
+ PR_REMOVE_AND_INIT_LINK(request);
+ if (NS_FAILED(rv))
+ break; // non-blocking mode returns WAIT_FOR_VALIDATION error
+ // okay, we're ready to process this request, request access again
+ }
+ if (rv != NS_ERROR_CACHE_ENTRY_DOOMED) break;
+
+ if (entry->IsNotInUse()) {
+ // this request was the last one keeping it around, so get rid of it
+ DeactivateEntry(entry);
+ }
+ // loop back around to look for another entry
+ }
+
+ if (NS_SUCCEEDED(rv) && request->mProfileDir) {
+ // Custom cache directory has been demanded. Preset the cache device.
+ if (entry->StoragePolicy() != nsICache::STORE_OFFLINE) {
+ // Failsafe check: this is implemented only for offline cache atm.
+ rv = NS_ERROR_FAILURE;
+ } else {
+ RefPtr<nsOfflineCacheDevice> customCacheDevice;
+ rv = GetCustomOfflineDevice(request->mProfileDir, -1,
+ getter_AddRefs(customCacheDevice));
+ if (NS_SUCCEEDED(rv)) entry->SetCustomCacheDevice(customCacheDevice);
+ }
+ }
+
+ nsICacheEntryDescriptor* descriptor = nullptr;
+
+ if (NS_SUCCEEDED(rv))
+ rv = entry->CreateDescriptor(request, accessGranted, &descriptor);
+
+  // If doomedEntry is set, ActivateEntry() doomed an existing entry and
+  // created a new one for that cache-key. However, any pending requests
+  // on the doomed entry were not processed and we need to do that here.
+  // This must be done after adding the created entry to the list of active
+  // entries (which is done in ActivateEntry()), otherwise the hashkeys crash
+  // (see bug #561313). It is also important to do this after creating a
+  // descriptor for this request, or some other request may end up being
+  // executed first for the newly created entry.
+  // Finally, it is worth emphasizing that if doomedEntry is set,
+  // ActivateEntry() created a new entry for the request, which will be
+  // initialized by RequestAccess() and they both should have returned NS_OK.
+ if (doomedEntry) {
+ (void)ProcessPendingRequests(doomedEntry);
+ if (doomedEntry->IsNotInUse()) DeactivateEntry(doomedEntry);
+ doomedEntry = nullptr;
+ }
+
+ if (request->mListener) { // Asynchronous
+
+ if (NS_FAILED(rv) && calledFromOpenCacheEntry && request->IsBlocking())
+ return rv; // skip notifying listener, just return rv to caller
+
+ // call listener to report error or descriptor
+ nsresult rv2 = NotifyListener(request, descriptor, accessGranted, rv);
+ if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) {
+ rv = rv2; // trigger delete request
+ }
+ } else { // Synchronous
+ *result = descriptor;
+ }
+ return rv;
+}
+
+nsresult nsCacheService::OpenCacheEntry(nsCacheSession* session,
+ const nsACString& key,
+ nsCacheAccessMode accessRequested,
+ bool blockingMode,
+ nsICacheListener* listener,
+ nsICacheEntryDescriptor** result) {
+ CACHE_LOG_DEBUG(
+ ("Opening entry for session %p, key %s, mode %d, blocking %d\n", session,
+ PromiseFlatCString(key).get(), accessRequested, blockingMode));
+ if (result) *result = nullptr;
+
+ if (!gService || !gService->mInitialized) return NS_ERROR_NOT_INITIALIZED;
+
+ nsCacheRequest* request = nullptr;
+
+ nsresult rv = gService->CreateRequest(session, key, accessRequested,
+ blockingMode, listener, &request);
+ if (NS_FAILED(rv)) return rv;
+
+ CACHE_LOG_DEBUG(("Created request %p\n", request));
+
+  // Process the request on the background thread if we are on the main thread
+  // and the request is asynchronous
+ if (NS_IsMainThread() && listener && gService->mCacheIOThread) {
+ nsCOMPtr<nsIRunnable> ev = new nsProcessRequestEvent(request);
+ rv = DispatchToCacheIOThread(ev);
+
+ // delete request if we didn't post the event
+ if (NS_FAILED(rv)) delete request;
+ } else {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_OPENCACHEENTRY));
+ rv = gService->ProcessRequest(request, true, result);
+
+ // delete requests that have completed
+ if (!(listener && blockingMode &&
+ (rv == NS_ERROR_CACHE_WAIT_FOR_VALIDATION)))
+ delete request;
+ }
+
+ return rv;
+}
+
+nsresult nsCacheService::ActivateEntry(nsCacheRequest* request,
+ nsCacheEntry** result,
+ nsCacheEntry** doomedEntry) {
+ CACHE_LOG_DEBUG(("Activate entry for request %p\n", request));
+ if (!mInitialized || mClearingEntries) return NS_ERROR_NOT_AVAILABLE;
+
+ nsresult rv = NS_OK;
+
+ NS_ASSERTION(request != nullptr, "ActivateEntry called with no request");
+ if (result) *result = nullptr;
+ if (doomedEntry) *doomedEntry = nullptr;
+ if ((!request) || (!result) || (!doomedEntry)) return NS_ERROR_NULL_POINTER;
+
+ // check if the request can be satisfied
+ if (!request->IsStreamBased()) return NS_ERROR_FAILURE;
+ if (!IsStorageEnabledForPolicy_Locked(request->StoragePolicy()))
+ return NS_ERROR_FAILURE;
+
+ // search active entries (including those not bound to device)
+ nsCacheEntry* entry = mActiveEntries.GetEntry(&(request->mKey));
+ CACHE_LOG_DEBUG(("Active entry for request %p is %p\n", request, entry));
+
+ if (!entry) {
+ // search cache devices for entry
+ bool collision = false;
+ entry = SearchCacheDevices(&(request->mKey), request->StoragePolicy(),
+ &collision);
+ CACHE_LOG_DEBUG(
+ ("Device search for request %p returned %p\n", request, entry));
+ // When there is a hashkey collision just refuse to cache it...
+ if (collision) return NS_ERROR_CACHE_IN_USE;
+
+ if (entry) entry->MarkInitialized();
+ } else {
+ NS_ASSERTION(entry->IsActive(), "Inactive entry found in mActiveEntries!");
+ }
+
+ if (entry) {
+ ++mCacheHits;
+ entry->Fetched();
+ } else {
+ ++mCacheMisses;
+ }
+
+ if (entry && ((request->AccessRequested() == nsICache::ACCESS_WRITE) ||
+ ((request->StoragePolicy() != nsICache::STORE_OFFLINE) &&
+ (entry->mExpirationTime <= SecondsFromPRTime(PR_Now()) &&
+ request->WillDoomEntriesIfExpired()))))
+
+ {
+    // this is a FORCE-WRITE request or the entry has expired
+ // we doom entry without processing pending requests, but store it in
+ // doomedEntry which causes pending requests to be processed below
+ rv = DoomEntry_Internal(entry, false);
+ *doomedEntry = entry;
+ if (NS_FAILED(rv)) {
+ // XXX what to do? Increment FailedDooms counter?
+ }
+ entry = nullptr;
+ }
+
+ if (!entry) {
+ if (!(request->AccessRequested() & nsICache::ACCESS_WRITE)) {
+ // this is a READ-ONLY request
+ rv = NS_ERROR_CACHE_KEY_NOT_FOUND;
+ goto error;
+ }
+
+ entry = new nsCacheEntry(request->mKey, request->IsStreamBased(),
+ request->StoragePolicy());
+ if (!entry) return NS_ERROR_OUT_OF_MEMORY;
+
+ if (request->IsPrivate()) entry->MarkPrivate();
+
+ entry->Fetched();
+ ++mTotalEntries;
+
+ // XXX we could perform an early bind in some cases based on storage policy
+ }
+
+ if (!entry->IsActive()) {
+ rv = mActiveEntries.AddEntry(entry);
+ if (NS_FAILED(rv)) goto error;
+ CACHE_LOG_DEBUG(("Added entry %p to mActiveEntries\n", entry));
+ entry->MarkActive(); // mark entry active, because it's now in
+ // mActiveEntries
+ }
+ *result = entry;
+ return NS_OK;
+
+error:
+ *result = nullptr;
+ delete entry;
+ return rv;
+}
+
+nsCacheEntry* nsCacheService::SearchCacheDevices(nsCString* key,
+ nsCacheStoragePolicy policy,
+ bool* collision) {
+ Telemetry::AutoTimer<Telemetry::CACHE_DEVICE_SEARCH_2> timer;
+ nsCacheEntry* entry = nullptr;
+
+ *collision = false;
+ if (policy == nsICache::STORE_OFFLINE ||
+ (policy == nsICache::STORE_ANYWHERE && gIOService->IsOffline())) {
+ if (mEnableOfflineDevice) {
+ if (!mOfflineDevice) {
+ nsresult rv = CreateOfflineDevice();
+ if (NS_FAILED(rv)) return nullptr;
+ }
+
+ entry = mOfflineDevice->FindEntry(key, collision);
+ }
+ }
+
+ return entry;
+}
+
+nsCacheDevice* nsCacheService::EnsureEntryHasDevice(nsCacheEntry* entry) {
+ nsCacheDevice* device = entry->CacheDevice();
+  // return the device if found; it may be null if the entry is doomed, i.e.
+  // doomed entries are prevented from binding to a device (see e.g. bugs
+  // #548406 and #596443)
+ if (device || entry->IsDoomed()) return device;
+
+ if (!device && entry->IsStreamData() && entry->IsAllowedOffline() &&
+ mEnableOfflineDevice) {
+ if (!mOfflineDevice) {
+ (void)CreateOfflineDevice(); // ignore the error (check for
+ // mOfflineDevice instead)
+ }
+
+ device = entry->CustomCacheDevice() ? entry->CustomCacheDevice()
+ : mOfflineDevice;
+
+ if (device) {
+ entry->MarkBinding();
+ nsresult rv = device->BindEntry(entry);
+ entry->ClearBinding();
+ if (NS_FAILED(rv)) device = nullptr;
+ }
+ }
+
+ if (device) entry->SetCacheDevice(device);
+ return device;
+}
+
+nsresult nsCacheService::DoomEntry(nsCacheEntry* entry) {
+ return gService->DoomEntry_Internal(entry, true);
+}
+
+nsresult nsCacheService::DoomEntry_Internal(nsCacheEntry* entry,
+ bool doProcessPendingRequests) {
+ if (entry->IsDoomed()) return NS_OK;
+
+ CACHE_LOG_DEBUG(("Dooming entry %p\n", entry));
+ nsresult rv = NS_OK;
+ entry->MarkDoomed();
+
+ NS_ASSERTION(!entry->IsBinding(), "Dooming entry while binding device.");
+ nsCacheDevice* device = entry->CacheDevice();
+ if (device) device->DoomEntry(entry);
+
+ if (entry->IsActive()) {
+ // remove from active entries
+ mActiveEntries.RemoveEntry(entry);
+ CACHE_LOG_DEBUG(("Removed entry %p from mActiveEntries\n", entry));
+ entry->MarkInactive();
+ }
+
+ // put on doom list to wait for descriptors to close
+ NS_ASSERTION(PR_CLIST_IS_EMPTY(entry), "doomed entry still on device list");
+ PR_APPEND_LINK(entry, &mDoomedEntries);
+
+ // handle pending requests only if we're supposed to
+ if (doProcessPendingRequests) {
+ // tell pending requests to get on with their lives...
+ rv = ProcessPendingRequests(entry);
+
+ // All requests have been removed, but there may still be open descriptors
+ if (entry->IsNotInUse()) {
+ DeactivateEntry(entry); // tell device to get rid of it
+ }
+ }
+ return rv;
+}
+
+void nsCacheService::OnProfileShutdown() {
+ if (!gService || !gService->mInitialized) {
+ // The cache service has been shut down, but someone is still holding
+ // a reference to it. Ignore this call.
+ return;
+ }
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_ONPROFILESHUTDOWN));
+ gService->mClearingEntries = true;
+ gService->DoomActiveEntries(nullptr);
+ }
+
+ gService->CloseAllStreams();
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_ONPROFILESHUTDOWN));
+ gService->ClearDoomList();
+
+ // Make sure to wait for any pending cache-operations before
+ // proceeding with destructive actions (bug #620660)
+ (void)SyncWithCacheIOThread();
+
+ if (gService->mOfflineDevice && gService->mEnableOfflineDevice) {
+ gService->mOfflineDevice->Shutdown();
+ }
+ for (auto iter = gService->mCustomOfflineDevices.Iter(); !iter.Done();
+ iter.Next()) {
+ iter.Data()->Shutdown();
+ iter.Remove();
+ }
+
+ gService->mEnableOfflineDevice = false;
+
+ gService->mClearingEntries = false;
+}
+
+void nsCacheService::OnProfileChanged() {
+ if (!gService) return;
+
+ CACHE_LOG_DEBUG(("nsCacheService::OnProfileChanged"));
+
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_ONPROFILECHANGED));
+
+ gService->mEnableOfflineDevice = gService->mObserver->OfflineCacheEnabled();
+
+ if (gService->mOfflineDevice) {
+ gService->mOfflineDevice->SetCacheParentDirectory(
+ gService->mObserver->OfflineCacheParentDirectory());
+ gService->mOfflineDevice->SetCapacity(
+ gService->mObserver->OfflineCacheCapacity());
+
+    // XXX initialization of mOfflineDevice could be done lazily if
+    // mEnableOfflineDevice is false
+ nsresult rv =
+ gService->mOfflineDevice->InitWithSqlite(gService->mStorageService);
+ if (NS_FAILED(rv)) {
+ NS_ERROR(
+ "nsCacheService::OnProfileChanged: Re-initializing offline device "
+ "failed");
+ gService->mEnableOfflineDevice = false;
+ // XXX delete mOfflineDevice?
+ }
+ }
+}
+
+void nsCacheService::SetOfflineCacheEnabled(bool enabled) {
+ if (!gService) return;
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHESERVICE_SETOFFLINECACHEENABLED));
+ gService->mEnableOfflineDevice = enabled;
+}
+
+void nsCacheService::SetOfflineCacheCapacity(int32_t capacity) {
+ if (!gService) return;
+ nsCacheServiceAutoLock lock(
+ LOCK_TELEM(NSCACHESERVICE_SETOFFLINECACHECAPACITY));
+
+ if (gService->mOfflineDevice) {
+ gService->mOfflineDevice->SetCapacity(capacity);
+ }
+
+ gService->mEnableOfflineDevice = gService->mObserver->OfflineCacheEnabled();
+}
+
+/******************************************************************************
+ * static methods for nsCacheEntryDescriptor
+ *****************************************************************************/
+void nsCacheService::CloseDescriptor(nsCacheEntryDescriptor* descriptor) {
+ // ask entry to remove descriptor
+ nsCacheEntry* entry = descriptor->CacheEntry();
+ bool doomEntry;
+ bool stillActive = entry->RemoveDescriptor(descriptor, &doomEntry);
+
+ if (!entry->IsValid()) {
+ gService->ProcessPendingRequests(entry);
+ }
+
+ if (doomEntry) {
+ gService->DoomEntry_Internal(entry, true);
+ return;
+ }
+
+ if (!stillActive) {
+ gService->DeactivateEntry(entry);
+ }
+}
+
+nsresult nsCacheService::GetFileForEntry(nsCacheEntry* entry,
+ nsIFile** result) {
+ nsCacheDevice* device = gService->EnsureEntryHasDevice(entry);
+ if (!device) return NS_ERROR_UNEXPECTED;
+
+ return device->GetFileForEntry(entry, result);
+}
+
+nsresult nsCacheService::OpenInputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIInputStream** result) {
+ nsCacheDevice* device = gService->EnsureEntryHasDevice(entry);
+ if (!device) return NS_ERROR_UNEXPECTED;
+
+ return device->OpenInputStreamForEntry(entry, mode, offset, result);
+}
+
+nsresult nsCacheService::OpenOutputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIOutputStream** result) {
+ nsCacheDevice* device = gService->EnsureEntryHasDevice(entry);
+ if (!device) return NS_ERROR_UNEXPECTED;
+
+ return device->OpenOutputStreamForEntry(entry, mode, offset, result);
+}
+
+nsresult nsCacheService::OnDataSizeChange(nsCacheEntry* entry,
+ int32_t deltaSize) {
+ nsCacheDevice* device = gService->EnsureEntryHasDevice(entry);
+ if (!device) return NS_ERROR_UNEXPECTED;
+
+ return device->OnDataSizeChange(entry, deltaSize);
+}
+
+void nsCacheService::LockAcquired() {
+ MutexAutoLock lock(mTimeStampLock);
+ mLockAcquiredTimeStamp = TimeStamp::Now();
+}
+
+void nsCacheService::LockReleased() {
+ MutexAutoLock lock(mTimeStampLock);
+ mLockAcquiredTimeStamp = TimeStamp();
+}
+
+void nsCacheService::Lock() {
+ gService->mLock.Lock();
+ gService->LockAcquired();
+}
+
+void nsCacheService::Lock(mozilla::Telemetry::HistogramID mainThreadLockerID) {
+ mozilla::Telemetry::HistogramID lockerID;
+ mozilla::Telemetry::HistogramID generalID;
+
+ if (NS_IsMainThread()) {
+ lockerID = mainThreadLockerID;
+ generalID = mozilla::Telemetry::CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_2;
+ } else {
+ lockerID = mozilla::Telemetry::HistogramCount;
+ generalID = mozilla::Telemetry::CACHE_SERVICE_LOCK_WAIT_2;
+ }
+
+ TimeStamp start(TimeStamp::Now());
+
+ nsCacheService::Lock();
+
+ TimeStamp stop(TimeStamp::Now());
+
+ // Telemetry isn't thread safe on its own, but this is OK because we're
+ // protecting it with the cache lock.
+ if (lockerID != mozilla::Telemetry::HistogramCount) {
+ mozilla::Telemetry::AccumulateTimeDelta(lockerID, start, stop);
+ }
+ mozilla::Telemetry::AccumulateTimeDelta(generalID, start, stop);
+}
+
+void nsCacheService::Unlock() {
+ gService->mLock.AssertCurrentThreadOwns();
+
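+  // Objects queued via ReleaseObject_Locked() on this thread are released
+  // only after the lock is dropped below, so their destructors never run
+  // while the cache service lock is held.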
+ nsTArray<nsISupports*> doomed = std::move(gService->mDoomedObjects);
+
+ gService->LockReleased();
+ gService->mLock.Unlock();
+
+ for (uint32_t i = 0; i < doomed.Length(); ++i) doomed[i]->Release();
+}
+
+void nsCacheService::ReleaseObject_Locked(nsISupports* obj,
+ nsIEventTarget* target) {
+ gService->mLock.AssertCurrentThreadOwns();
+
+ bool isCur;
+ if (!target || (NS_SUCCEEDED(target->IsOnCurrentThread(&isCur)) && isCur)) {
+ gService->mDoomedObjects.AppendElement(obj);
+ } else {
+ NS_ProxyRelease("nsCacheService::ReleaseObject_Locked::obj", target,
+ dont_AddRef(obj));
+ }
+}
+
+nsresult nsCacheService::SetCacheElement(nsCacheEntry* entry,
+ nsISupports* element) {
+ entry->SetData(element);
+ entry->TouchData();
+ return NS_OK;
+}
+
+nsresult nsCacheService::ValidateEntry(nsCacheEntry* entry) {
+ nsCacheDevice* device = gService->EnsureEntryHasDevice(entry);
+ if (!device) return NS_ERROR_UNEXPECTED;
+
+ entry->MarkValid();
+ nsresult rv = gService->ProcessPendingRequests(entry);
+ NS_ASSERTION(rv == NS_OK, "ProcessPendingRequests failed.");
+ // XXX what else should be done?
+
+ return rv;
+}
+
+int32_t nsCacheService::CacheCompressionLevel() {
+ int32_t level = gService->mObserver->CacheCompressionLevel();
+ return level;
+}
+
+void nsCacheService::DeactivateEntry(nsCacheEntry* entry) {
+ CACHE_LOG_DEBUG(("Deactivating entry %p\n", entry));
+ nsresult rv = NS_OK;
+ NS_ASSERTION(entry->IsNotInUse(), "### deactivating an entry while in use!");
+ nsCacheDevice* device = nullptr;
+
+ if (mMaxDataSize < entry->DataSize()) mMaxDataSize = entry->DataSize();
+ if (mMaxMetaSize < entry->MetaDataSize())
+ mMaxMetaSize = entry->MetaDataSize();
+
+ if (entry->IsDoomed()) {
+ // remove from Doomed list
+ PR_REMOVE_AND_INIT_LINK(entry);
+ } else if (entry->IsActive()) {
+ // remove from active entries
+ mActiveEntries.RemoveEntry(entry);
+ CACHE_LOG_DEBUG(
+ ("Removed deactivated entry %p from mActiveEntries\n", entry));
+ entry->MarkInactive();
+
+ // bind entry if necessary to store meta-data
+ device = EnsureEntryHasDevice(entry);
+ if (!device) {
+ CACHE_LOG_DEBUG(
+ ("DeactivateEntry: unable to bind active "
+ "entry %p\n",
+ entry));
+ NS_WARNING("DeactivateEntry: unable to bind active entry\n");
+ return;
+ }
+ } else {
+ // if mInitialized == false,
+ // then we're shutting down and this state is okay.
+ NS_ASSERTION(!mInitialized, "DeactivateEntry: bad cache entry state.");
+ }
+
+ device = entry->CacheDevice();
+ if (device) {
+ rv = device->DeactivateEntry(entry);
+ if (NS_FAILED(rv)) {
+ // increment deactivate failure count
+ ++mDeactivateFailures;
+ }
+ } else {
+ // increment deactivating unbound entry statistic
+ ++mDeactivatedUnboundEntries;
+ delete entry; // because no one else will
+ }
+}
+
+nsresult nsCacheService::ProcessPendingRequests(nsCacheEntry* entry) {
+ nsresult rv = NS_OK;
+ nsCacheRequest* request = (nsCacheRequest*)PR_LIST_HEAD(&entry->mRequestQ);
+ nsCacheRequest* nextRequest;
+ bool newWriter = false;
+
+ CACHE_LOG_DEBUG((
+ "ProcessPendingRequests for %sinitialized %s %salid entry %p\n",
+ (entry->IsInitialized() ? "" : "Un"), (entry->IsDoomed() ? "DOOMED" : ""),
+ (entry->IsValid() ? "V" : "Inv"), entry));
+
+ if (request == &entry->mRequestQ) return NS_OK; // no queued requests
+
+ if (!entry->IsDoomed() && entry->IsInvalid()) {
+ // 1st descriptor closed w/o MarkValid()
+ NS_ASSERTION(PR_CLIST_IS_EMPTY(&entry->mDescriptorQ),
+ "shouldn't be here with open descriptors");
+
+#if DEBUG
+    // verify no ACCESS_WRITE requests (shouldn't have any of these)
+ while (request != &entry->mRequestQ) {
+ NS_ASSERTION(request->AccessRequested() != nsICache::ACCESS_WRITE,
+ "ACCESS_WRITE request should have been given a new entry");
+ request = (nsCacheRequest*)PR_NEXT_LINK(request);
+ }
+ request = (nsCacheRequest*)PR_LIST_HEAD(&entry->mRequestQ);
+#endif
+ // find first request with ACCESS_READ_WRITE (if any) and promote it to 1st
+ // writer
+ while (request != &entry->mRequestQ) {
+ if (request->AccessRequested() == nsICache::ACCESS_READ_WRITE) {
+ newWriter = true;
+ CACHE_LOG_DEBUG((" promoting request %p to 1st writer\n", request));
+ break;
+ }
+
+ request = (nsCacheRequest*)PR_NEXT_LINK(request);
+ }
+
+ if (request == &entry->mRequestQ) // no requests asked for
+ // ACCESS_READ_WRITE, back to top
+ request = (nsCacheRequest*)PR_LIST_HEAD(&entry->mRequestQ);
+
+ // XXX what should we do if there are only READ requests in queue?
+ // XXX serialize their accesses, give them only read access, but force them
+ // to check validate flag?
+ // XXX or do readers simply presume the entry is valid
+ // See fix for bug #467392 below
+ }
+
+ nsCacheAccessMode accessGranted = nsICache::ACCESS_NONE;
+
+ while (request != &entry->mRequestQ) {
+ nextRequest = (nsCacheRequest*)PR_NEXT_LINK(request);
+ CACHE_LOG_DEBUG((" %sync request %p for %p\n",
+ (request->mListener ? "As" : "S"), request, entry));
+
+ if (request->mListener) {
+ // Async request
+ PR_REMOVE_AND_INIT_LINK(request);
+
+ if (entry->IsDoomed()) {
+ rv = ProcessRequest(request, false, nullptr);
+ if (rv == NS_ERROR_CACHE_WAIT_FOR_VALIDATION)
+ rv = NS_OK;
+ else
+ delete request;
+
+ if (NS_FAILED(rv)) {
+ // XXX what to do?
+ }
+ } else if (entry->IsValid() || newWriter) {
+ rv = entry->RequestAccess(request, &accessGranted);
+ NS_ASSERTION(NS_SUCCEEDED(rv),
+ "if entry is valid, RequestAccess must succeed.");
+ // XXX if (newWriter) {
+ // NS_ASSERTION( accessGranted ==
+ // request->AccessRequested(), "why not?");
+ // }
+
+ // entry->CreateDescriptor dequeues request, and queues descriptor
+ nsICacheEntryDescriptor* descriptor = nullptr;
+ rv = entry->CreateDescriptor(request, accessGranted, &descriptor);
+
+ // post call to listener to report error or descriptor
+ rv = NotifyListener(request, descriptor, accessGranted, rv);
+ delete request;
+ if (NS_FAILED(rv)) {
+ // XXX what to do?
+ }
+
+ } else {
+ // read-only request to an invalid entry - need to wait for
+ // the entry to become valid so we post an event to process
+ // the request again later (bug #467392)
+ nsCOMPtr<nsIRunnable> ev = new nsProcessRequestEvent(request);
+ rv = DispatchToCacheIOThread(ev);
+ if (NS_FAILED(rv)) {
+ delete request; // avoid leak
+ }
+ }
+ } else {
+ // Synchronous request
+ request->WakeUp();
+ }
+ if (newWriter) break; // process remaining requests after validation
+ request = nextRequest;
+ }
+
+ return NS_OK;
+}
+
+bool nsCacheService::IsDoomListEmpty() {
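+  // mDoomedEntries is a PRCList sentinel: the list is empty when its first
+  // link points back at the sentinel itself.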
+ nsCacheEntry* entry = (nsCacheEntry*)PR_LIST_HEAD(&mDoomedEntries);
+ return &mDoomedEntries == entry;
+}
+
+void nsCacheService::ClearDoomList() {
+ nsCacheEntry* entry = (nsCacheEntry*)PR_LIST_HEAD(&mDoomedEntries);
+
+ while (entry != &mDoomedEntries) {
+ nsCacheEntry* next = (nsCacheEntry*)PR_NEXT_LINK(entry);
+
+ entry->DetachDescriptors();
+ DeactivateEntry(entry);
+ entry = next;
+ }
+}
+
+void nsCacheService::DoomActiveEntries(DoomCheckFn check) {
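+  // Matching entries are collected (and removed from mActiveEntries) during
+  // the iteration below; dooming happens afterwards, outside the hash-table
+  // walk.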
+ AutoTArray<nsCacheEntry*, 8> array;
+
+ for (auto iter = mActiveEntries.Iter(); !iter.Done(); iter.Next()) {
+ nsCacheEntry* entry =
+ static_cast<nsCacheEntryHashTableEntry*>(iter.Get())->cacheEntry;
+
+ if (check && !check(entry)) {
+ continue;
+ }
+
+ array.AppendElement(entry);
+
+ // entry is being removed from the active entry list
+ entry->MarkInactive();
+ iter.Remove();
+ }
+
+ uint32_t count = array.Length();
+ for (uint32_t i = 0; i < count; ++i) {
+ DoomEntry_Internal(array[i], true);
+ }
+}
+
+void nsCacheService::CloseAllStreams() {
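+  // Collect the input/output stream wrappers while holding the cache lock,
+  // then close them after the lock is released below.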
+ nsTArray<RefPtr<nsCacheEntryDescriptor::nsInputStreamWrapper> > inputs;
+ nsTArray<RefPtr<nsCacheEntryDescriptor::nsOutputStreamWrapper> > outputs;
+
+ {
+ nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_CLOSEALLSTREAMS));
+
+ nsTArray<nsCacheEntry*> entries;
+
+#if DEBUG
+ // make sure there is no active entry
+ for (auto iter = mActiveEntries.Iter(); !iter.Done(); iter.Next()) {
+ auto entry = static_cast<nsCacheEntryHashTableEntry*>(iter.Get());
+ entries.AppendElement(entry->cacheEntry);
+ }
+ NS_ASSERTION(entries.IsEmpty(), "Bad state");
+#endif
+
+ // Get doomed entries
+ nsCacheEntry* entry = (nsCacheEntry*)PR_LIST_HEAD(&mDoomedEntries);
+ while (entry != &mDoomedEntries) {
+ nsCacheEntry* next = (nsCacheEntry*)PR_NEXT_LINK(entry);
+ entries.AppendElement(entry);
+ entry = next;
+ }
+
+ // Iterate through all entries and collect input and output streams
+ for (size_t i = 0; i < entries.Length(); i++) {
+ entry = entries.ElementAt(i);
+
+ nsTArray<RefPtr<nsCacheEntryDescriptor> > descs;
+ entry->GetDescriptors(descs);
+
+ for (uint32_t j = 0; j < descs.Length(); j++) {
+ if (descs[j]->mOutputWrapper)
+ outputs.AppendElement(descs[j]->mOutputWrapper);
+
+ for (size_t k = 0; k < descs[j]->mInputWrappers.Length(); k++)
+ inputs.AppendElement(descs[j]->mInputWrappers[k]);
+ }
+ }
+ }
+
+ uint32_t i;
+ for (i = 0; i < inputs.Length(); i++) inputs[i]->Close();
+
+ for (i = 0; i < outputs.Length(); i++) outputs[i]->Close();
+}
+
+bool nsCacheService::GetClearingEntries() {
+ AssertOwnsLock();
+ return gService->mClearingEntries;
+}
+
+// static
+void nsCacheService::GetCacheBaseDirectoty(nsIFile** result) {
+ *result = nullptr;
+ if (!gService || !gService->mObserver) return;
+
+ nsCOMPtr<nsIFile> directory = gService->mObserver->DiskCacheParentDirectory();
+ if (!directory) return;
+
+ directory->Clone(result);
+}
+
+// static
+void nsCacheService::GetDiskCacheDirectory(nsIFile** result) {
+ nsCOMPtr<nsIFile> directory;
+ GetCacheBaseDirectoty(getter_AddRefs(directory));
+ if (!directory) return;
+
+ nsresult rv = directory->AppendNative("Cache"_ns);
+ if (NS_FAILED(rv)) return;
+
+ directory.forget(result);
+}
+
+// static
+void nsCacheService::GetAppCacheDirectory(nsIFile** result) {
+ nsCOMPtr<nsIFile> directory;
+ GetCacheBaseDirectoty(getter_AddRefs(directory));
+ if (!directory) return;
+
+ nsresult rv = directory->AppendNative("OfflineCache"_ns);
+ if (NS_FAILED(rv)) return;
+
+ directory.forget(result);
+}
+
+void nsCacheService::LogCacheStatistics() {
+ uint32_t hitPercentage = 0;
+ double sum = (double)(mCacheHits + mCacheMisses);
+ if (sum != 0) {
+ hitPercentage = (uint32_t)((((double)mCacheHits) / sum) * 100);
+ }
+ CACHE_LOG_INFO(("\nCache Service Statistics:\n\n"));
+ CACHE_LOG_INFO((" TotalEntries = %d\n", mTotalEntries));
+ CACHE_LOG_INFO((" Cache Hits = %d\n", mCacheHits));
+ CACHE_LOG_INFO((" Cache Misses = %d\n", mCacheMisses));
+ CACHE_LOG_INFO((" Cache Hit %% = %d%%\n", hitPercentage));
+ CACHE_LOG_INFO((" Max Key Length = %d\n", mMaxKeyLength));
+ CACHE_LOG_INFO((" Max Meta Size = %d\n", mMaxMetaSize));
+ CACHE_LOG_INFO((" Max Data Size = %d\n", mMaxDataSize));
+ CACHE_LOG_INFO(("\n"));
+ CACHE_LOG_INFO(
+ (" Deactivate Failures = %d\n", mDeactivateFailures));
+ CACHE_LOG_INFO(
+ (" Deactivated Unbound Entries = %d\n", mDeactivatedUnboundEntries));
+}
+
+void nsCacheService::MoveOrRemoveDiskCache(nsIFile* aOldCacheDir,
+ nsIFile* aNewCacheDir,
+ const char* aCacheSubdir) {
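+  // If the old and new cache directories differ, try to move aCacheSubdir to
+  // the new location; when the move is not possible, the old copy is
+  // scheduled for delayed deletion at the end of this method.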
+ bool same;
+ if (NS_FAILED(aOldCacheDir->Equals(aNewCacheDir, &same)) || same) return;
+
+ nsCOMPtr<nsIFile> aOldCacheSubdir;
+ aOldCacheDir->Clone(getter_AddRefs(aOldCacheSubdir));
+
+ nsresult rv = aOldCacheSubdir->AppendNative(nsDependentCString(aCacheSubdir));
+ if (NS_FAILED(rv)) return;
+
+ bool exists;
+ if (NS_FAILED(aOldCacheSubdir->Exists(&exists)) || !exists) return;
+
+ nsCOMPtr<nsIFile> aNewCacheSubdir;
+ aNewCacheDir->Clone(getter_AddRefs(aNewCacheSubdir));
+
+ rv = aNewCacheSubdir->AppendNative(nsDependentCString(aCacheSubdir));
+ if (NS_FAILED(rv)) return;
+
+ PathString newPath = aNewCacheSubdir->NativePath();
+
+ if (NS_SUCCEEDED(aNewCacheSubdir->Exists(&exists)) && !exists) {
+ // New cache directory does not exist, try to move the old one here
+ // rename needs an empty target directory
+
+ // Make sure the parent of the target sub-dir exists
+ rv = aNewCacheDir->Create(nsIFile::DIRECTORY_TYPE, 0777);
+ if (NS_SUCCEEDED(rv) || NS_ERROR_FILE_ALREADY_EXISTS == rv) {
+ PathString oldPath = aOldCacheSubdir->NativePath();
+#ifdef XP_WIN
+ if (MoveFileW(oldPath.get(), newPath.get()))
+#else
+ if (rename(oldPath.get(), newPath.get()) == 0)
+#endif
+ {
+ return;
+ }
+ }
+ }
+
+ // Delay delete by 1 minute to avoid IO thrash on startup.
+ nsDeleteDir::DeleteDir(aOldCacheSubdir, false, 60000);
+}
+
+static bool IsEntryPrivate(nsCacheEntry* entry) { return entry->IsPrivate(); }
+
+void nsCacheService::LeavePrivateBrowsing() {
+ nsCacheServiceAutoLock lock;
+
+ gService->DoomActiveEntries(IsEntryPrivate);
+}
diff --git a/netwerk/cache/nsCacheService.h b/netwerk/cache/nsCacheService.h
new file mode 100644
index 0000000000..2f9ca28af0
--- /dev/null
+++ b/netwerk/cache/nsCacheService.h
@@ -0,0 +1,331 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheService_h_
+#define _nsCacheService_h_
+
+#include "nsICacheService.h"
+#include "nsCacheSession.h"
+#include "nsCacheDevice.h"
+#include "nsCacheEntry.h"
+#include "nsThreadUtils.h"
+#include "nsICacheListener.h"
+
+#include "prthread.h"
+#include "nsString.h"
+#include "nsTArray.h"
+#include "nsRefPtrHashtable.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Telemetry.h"
+
+class nsCacheRequest;
+class nsCacheProfilePrefObserver;
+class nsOfflineCacheDevice;
+class nsCacheServiceAutoLock;
+class nsITimer;
+class mozIStorageService;
+
+/******************************************************************************
+ * nsNotifyDoomListener
+ *****************************************************************************/
+
+class nsNotifyDoomListener : public mozilla::Runnable {
+ public:
+ nsNotifyDoomListener(nsICacheListener* listener, nsresult status)
+      : mozilla::Runnable("nsNotifyDoomListener"),
+        mListener(listener),  // transfers reference
+        mStatus(status) {}
+
+ NS_IMETHOD Run() override {
+ mListener->OnCacheEntryDoomed(mStatus);
+ NS_RELEASE(mListener);
+ return NS_OK;
+ }
+
+ private:
+ nsICacheListener* mListener;
+ nsresult mStatus;
+};
+
+/******************************************************************************
+ * nsCacheService
+ ******************************************************************************/
+
+class nsCacheService final : public nsICacheServiceInternal {
+ virtual ~nsCacheService();
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHESERVICE
+ NS_DECL_NSICACHESERVICEINTERNAL
+
+ nsCacheService();
+
+ // Define a Create method to be used with a factory:
+ static nsresult Create(nsISupports* outer, const nsIID& iid, void** result);
+
+ /**
+ * Methods called by nsCacheSession
+ */
+ static nsresult OpenCacheEntry(nsCacheSession* session, const nsACString& key,
+ nsCacheAccessMode accessRequested,
+ bool blockingMode, nsICacheListener* listener,
+ nsICacheEntryDescriptor** result);
+
+ static nsresult EvictEntriesForSession(nsCacheSession* session);
+
+ static nsresult IsStorageEnabledForPolicy(nsCacheStoragePolicy storagePolicy,
+ bool* result);
+
+ static nsresult DoomEntry(nsCacheSession* session, const nsACString& key,
+ nsICacheListener* listener);
+
+ /**
+ * Methods called by nsCacheEntryDescriptor
+ */
+
+ static void CloseDescriptor(nsCacheEntryDescriptor* descriptor);
+
+ static nsresult GetFileForEntry(nsCacheEntry* entry, nsIFile** result);
+
+ static nsresult OpenInputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIInputStream** result);
+
+ static nsresult OpenOutputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIOutputStream** result);
+
+ static nsresult OnDataSizeChange(nsCacheEntry* entry, int32_t deltaSize);
+
+ static nsresult SetCacheElement(nsCacheEntry* entry, nsISupports* element);
+
+ static nsresult ValidateEntry(nsCacheEntry* entry);
+
+ static int32_t CacheCompressionLevel();
+
+ static bool GetClearingEntries();
+
+ static void GetCacheBaseDirectoty(nsIFile** result);
+ static void GetDiskCacheDirectory(nsIFile** result);
+ static void GetAppCacheDirectory(nsIFile** result);
+
+ /**
+ * Methods called by any cache classes
+ */
+
+ static nsCacheService* GlobalInstance() { return gService; }
+
+ static nsresult DoomEntry(nsCacheEntry* entry);
+
+ static bool IsStorageEnabledForPolicy_Locked(nsCacheStoragePolicy policy);
+
+ /**
+ * Methods called by nsApplicationCacheService
+ */
+
+ nsresult GetOfflineDevice(nsOfflineCacheDevice** aDevice);
+
+ /**
+ * Creates an offline cache device that works over a specific profile
+   * directory. This is used to preload the offline cache for profiles other
+   * than the current application's profile directory.
+ */
+ nsresult GetCustomOfflineDevice(nsIFile* aProfileDir, int32_t aQuota,
+ nsOfflineCacheDevice** aDevice);
+
+ // This method may be called to release an object while the cache service
+ // lock is being held. If a non-null target is specified and the target
+ // does not correspond to the current thread, then the release will be
+ // proxied to the specified target. Otherwise, the object will be added to
+ // the list of objects to be released when the cache service is unlocked.
+ static void ReleaseObject_Locked(nsISupports* object,
+ nsIEventTarget* target = nullptr);
+
+ static nsresult DispatchToCacheIOThread(nsIRunnable* event);
+
+  // Calling this method will block the calling thread until all pending
+  // events on the cache-io thread have finished. The calling thread must
+  // hold the cache lock.
+ static nsresult SyncWithCacheIOThread();
+
+ /**
+ * Methods called by nsCacheProfilePrefObserver
+ */
+ static void OnProfileShutdown();
+ static void OnProfileChanged();
+
+ static void SetOfflineCacheEnabled(bool enabled);
+ // Sets the offline cache capacity (in kilobytes)
+ static void SetOfflineCacheCapacity(int32_t capacity);
+
+ static void MoveOrRemoveDiskCache(nsIFile* aOldCacheDir,
+ nsIFile* aNewCacheDir,
+ const char* aCacheSubdir);
+
+ nsresult Init();
+ void Shutdown();
+
+ static bool IsInitialized() {
+ if (!gService) {
+ return false;
+ }
+ return gService->mInitialized;
+ }
+
+ static void AssertOwnsLock() { gService->mLock.AssertCurrentThreadOwns(); }
+
+ static void LeavePrivateBrowsing();
+ bool IsDoomListEmpty();
+
+ typedef bool (*DoomCheckFn)(nsCacheEntry* entry);
+
+ // Accessors to the disabled functionality
+ nsresult CreateSessionInternal(const char* clientID,
+ nsCacheStoragePolicy storagePolicy,
+ bool streamBased, nsICacheSession** result);
+ nsresult VisitEntriesInternal(nsICacheVisitor* visitor);
+ nsresult EvictEntriesInternal(nsCacheStoragePolicy storagePolicy);
+
+ private:
+ friend class nsCacheServiceAutoLock;
+ friend class nsOfflineCacheDevice;
+ friend class nsProcessRequestEvent;
+ friend class nsBlockOnCacheThreadEvent;
+ friend class nsDoomEvent;
+ friend class nsAsyncDoomEvent;
+ friend class nsCacheEntryDescriptor;
+
+ /**
+ * Internal Methods
+ */
+
+ static void Lock();
+ static void Lock(::mozilla::Telemetry::HistogramID mainThreadLockerID);
+ static void Unlock();
+ void LockAcquired();
+ void LockReleased();
+
+ nsresult CreateDiskDevice();
+ nsresult CreateOfflineDevice();
+ nsresult CreateCustomOfflineDevice(nsIFile* aProfileDir, int32_t aQuota,
+ nsOfflineCacheDevice** aDevice);
+ nsresult CreateMemoryDevice();
+
+ nsresult RemoveCustomOfflineDevice(nsOfflineCacheDevice* aDevice);
+
+ nsresult CreateRequest(nsCacheSession* session, const nsACString& clientKey,
+ nsCacheAccessMode accessRequested, bool blockingMode,
+ nsICacheListener* listener, nsCacheRequest** request);
+
+ nsresult DoomEntry_Internal(nsCacheEntry* entry,
+ bool doProcessPendingRequests);
+
+ nsresult EvictEntriesForClient(const char* clientID,
+ nsCacheStoragePolicy storagePolicy);
+
+ // Notifies request listener asynchronously on the request's thread, and
+ // releases the descriptor on the request's thread. If this method fails,
+ // the descriptor is not released.
+ nsresult NotifyListener(nsCacheRequest* request,
+ nsICacheEntryDescriptor* descriptor,
+ nsCacheAccessMode accessGranted, nsresult error);
+
+ nsresult ActivateEntry(nsCacheRequest* request, nsCacheEntry** entry,
+ nsCacheEntry** doomedEntry);
+
+ nsCacheDevice* EnsureEntryHasDevice(nsCacheEntry* entry);
+
+ nsCacheEntry* SearchCacheDevices(nsCString* key, nsCacheStoragePolicy policy,
+ bool* collision);
+
+ void DeactivateEntry(nsCacheEntry* entry);
+
+ nsresult ProcessRequest(nsCacheRequest* request,
+ bool calledFromOpenCacheEntry,
+ nsICacheEntryDescriptor** result);
+
+ nsresult ProcessPendingRequests(nsCacheEntry* entry);
+
+ void ClearDoomList(void);
+ void DoomActiveEntries(DoomCheckFn check);
+ void CloseAllStreams();
+ void FireClearNetworkCacheStoredAnywhereNotification();
+
+ void LogCacheStatistics();
+
+ /**
+ * Data Members
+ */
+
+ static nsCacheService* gService; // there can be only one...
+
+ nsCOMPtr<mozIStorageService> mStorageService;
+
+ nsCacheProfilePrefObserver* mObserver;
+
+ mozilla::Mutex mLock;
+ mozilla::CondVar mCondVar;
+ bool mNotified;
+
+ mozilla::Mutex mTimeStampLock;
+ mozilla::TimeStamp mLockAcquiredTimeStamp;
+
+ nsCOMPtr<nsIThread> mCacheIOThread;
+
+ nsTArray<nsISupports*> mDoomedObjects;
+
+ bool mInitialized;
+ bool mClearingEntries;
+
+ bool mEnableOfflineDevice;
+
+ nsOfflineCacheDevice* mOfflineDevice;
+
+ nsRefPtrHashtable<nsStringHashKey, nsOfflineCacheDevice>
+ mCustomOfflineDevices;
+
+ nsCacheEntryHashTable mActiveEntries;
+ PRCList mDoomedEntries;
+
+ // stats
+
+ uint32_t mTotalEntries;
+ uint32_t mCacheHits;
+ uint32_t mCacheMisses;
+ uint32_t mMaxKeyLength;
+ uint32_t mMaxDataSize;
+ uint32_t mMaxMetaSize;
+
+ // Unexpected error totals
+ uint32_t mDeactivateFailures;
+ uint32_t mDeactivatedUnboundEntries;
+};
+
+/******************************************************************************
+ * nsCacheServiceAutoLock
+ ******************************************************************************/
+
+#define LOCK_TELEM(x) \
+ (::mozilla::Telemetry::CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_##x)
+
+// Instantiate this class to acquire the cache service lock for a particular
+// execution scope.
+class nsCacheServiceAutoLock {
+ public:
+ nsCacheServiceAutoLock() { nsCacheService::Lock(); }
+ explicit nsCacheServiceAutoLock(
+ mozilla::Telemetry::HistogramID mainThreadLockerID) {
+ nsCacheService::Lock(mainThreadLockerID);
+ }
+ ~nsCacheServiceAutoLock() { nsCacheService::Unlock(); }
+};
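+
+// Typical usage (sketch, mirroring call sites in nsCacheService.cpp):
+//   {
+//     nsCacheServiceAutoLock lock(LOCK_TELEM(NSCACHESERVICE_SHUTDOWN));
+//     // ... operations that require the cache service lock ...
+//   }  // lock released here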
+
+#endif // _nsCacheService_h_
diff --git a/netwerk/cache/nsCacheSession.cpp b/netwerk/cache/nsCacheSession.cpp
new file mode 100644
index 0000000000..3e4a1f92da
--- /dev/null
+++ b/netwerk/cache/nsCacheSession.cpp
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCacheSession.h"
+#include "nsCacheService.h"
+#include "nsCRT.h"
+#include "nsThreadUtils.h"
+
+NS_IMPL_ISUPPORTS(nsCacheSession, nsICacheSession)
+
+nsCacheSession::nsCacheSession(const char* clientID,
+ nsCacheStoragePolicy storagePolicy,
+ bool streamBased)
+ : mClientID(clientID), mInfo(0) {
+ SetStoragePolicy(storagePolicy);
+
+ if (streamBased)
+ MarkStreamBased();
+ else
+ SetStoragePolicy(nsICache::STORE_IN_MEMORY);
+
+ MarkPublic();
+
+ MarkDoomEntriesIfExpired();
+}
+
+nsCacheSession::~nsCacheSession() {
+ /* destructor code */
+ // notify service we are going away?
+}
+
+NS_IMETHODIMP nsCacheSession::GetDoomEntriesIfExpired(bool* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = WillDoomEntriesIfExpired();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheSession::SetProfileDirectory(nsIFile* profileDir) {
+ if (StoragePolicy() != nsICache::STORE_OFFLINE && profileDir) {
+ // Profile directory override is currently implemented only for
+ // offline cache. This is an early failure to prevent the request
+ // being processed before it would fail later because of inability
+ // to assign a cache base dir.
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ mProfileDir = profileDir;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheSession::GetProfileDirectory(nsIFile** profileDir) {
+ *profileDir = do_AddRef(mProfileDir).take();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheSession::SetDoomEntriesIfExpired(
+ bool doomEntriesIfExpired) {
+ if (doomEntriesIfExpired)
+ MarkDoomEntriesIfExpired();
+ else
+ ClearDoomEntriesIfExpired();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsCacheSession::OpenCacheEntry(const nsACString& key,
+ nsCacheAccessMode accessRequested,
+ bool blockingMode,
+ nsICacheEntryDescriptor** result) {
+ nsresult rv;
+
+ if (NS_IsMainThread())
+ rv = NS_ERROR_NOT_AVAILABLE;
+ else
+ rv =
+ nsCacheService::OpenCacheEntry(this, key, accessRequested, blockingMode,
+ nullptr, // no listener
+ result);
+ return rv;
+}
+
+NS_IMETHODIMP nsCacheSession::AsyncOpenCacheEntry(
+ const nsACString& key, nsCacheAccessMode accessRequested,
+ nsICacheListener* listener, bool noWait) {
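+  // noWait is inverted into the blockingMode argument below: a caller passing
+  // noWait == true issues a non-blocking request.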
+ nsresult rv;
+ rv = nsCacheService::OpenCacheEntry(this, key, accessRequested, !noWait,
+ listener,
+ nullptr); // no result
+
+ if (rv == NS_ERROR_CACHE_WAIT_FOR_VALIDATION) rv = NS_OK;
+ return rv;
+}
+
+NS_IMETHODIMP nsCacheSession::EvictEntries() {
+ return nsCacheService::EvictEntriesForSession(this);
+}
+
+NS_IMETHODIMP nsCacheSession::IsStorageEnabled(bool* result) {
+ return nsCacheService::IsStorageEnabledForPolicy(StoragePolicy(), result);
+}
+
+NS_IMETHODIMP nsCacheSession::DoomEntry(const nsACString& key,
+ nsICacheListener* listener) {
+ return nsCacheService::DoomEntry(this, key, listener);
+}
+
+NS_IMETHODIMP nsCacheSession::GetIsPrivate(bool* aPrivate) {
+ *aPrivate = IsPrivate();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsCacheSession::SetIsPrivate(bool aPrivate) {
+ if (aPrivate)
+ MarkPrivate();
+ else
+ MarkPublic();
+ return NS_OK;
+}
diff --git a/netwerk/cache/nsCacheSession.h b/netwerk/cache/nsCacheSession.h
new file mode 100644
index 0000000000..59bb748231
--- /dev/null
+++ b/netwerk/cache/nsCacheSession.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheSession_h_
+#define _nsCacheSession_h_
+
+#include "nspr.h"
+#include "nsError.h"
+#include "nsCOMPtr.h"
+#include "nsICacheSession.h"
+#include "nsIFile.h"
+#include "nsString.h"
+
+class nsCacheSession : public nsICacheSession {
+ virtual ~nsCacheSession();
+
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSICACHESESSION
+
+ nsCacheSession(const char* clientID, nsCacheStoragePolicy storagePolicy,
+ bool streamBased);
+
+ nsCString* ClientID() { return &mClientID; }
+
+ enum SessionInfo {
+ eStoragePolicyMask = 0x000000FF,
+ eStreamBasedMask = 0x00000100,
+ eDoomEntriesIfExpiredMask = 0x00001000,
+ ePrivateMask = 0x00010000
+ };
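+
+  // mInfo packs all session state into one word: the low byte stores the
+  // nsCacheStoragePolicy and the remaining mask bits flag stream-based,
+  // doom-entries-if-expired and private sessions.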
+
+ void MarkStreamBased() { mInfo |= eStreamBasedMask; }
+ void ClearStreamBased() { mInfo &= ~eStreamBasedMask; }
+ bool IsStreamBased() { return (mInfo & eStreamBasedMask) != 0; }
+
+ void MarkDoomEntriesIfExpired() { mInfo |= eDoomEntriesIfExpiredMask; }
+ void ClearDoomEntriesIfExpired() { mInfo &= ~eDoomEntriesIfExpiredMask; }
+ bool WillDoomEntriesIfExpired() {
+ return (0 != (mInfo & eDoomEntriesIfExpiredMask));
+ }
+
+ void MarkPrivate() { mInfo |= ePrivateMask; }
+ void MarkPublic() { mInfo &= ~ePrivateMask; }
+ bool IsPrivate() { return (mInfo & ePrivateMask) != 0; }
+ nsCacheStoragePolicy StoragePolicy() {
+ return (nsCacheStoragePolicy)(mInfo & eStoragePolicyMask);
+ }
+
+ void SetStoragePolicy(nsCacheStoragePolicy policy) {
+ NS_ASSERTION(policy <= 0xFF, "too many bits in nsCacheStoragePolicy");
+ mInfo &= ~eStoragePolicyMask; // clear storage policy bits
+ mInfo |= policy;
+ }
+
+ nsIFile* ProfileDir() { return mProfileDir; }
+
+ private:
+ nsCString mClientID;
+ uint32_t mInfo;
+ nsCOMPtr<nsIFile> mProfileDir;
+};
+
+#endif // _nsCacheSession_h_
diff --git a/netwerk/cache/nsCacheUtils.cpp b/netwerk/cache/nsCacheUtils.cpp
new file mode 100644
index 0000000000..88bcabce12
--- /dev/null
+++ b/netwerk/cache/nsCacheUtils.cpp
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsCache.h"
+#include "nsCacheUtils.h"
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+
+class nsDestroyThreadEvent : public Runnable {
+ public:
+ explicit nsDestroyThreadEvent(nsIThread* thread)
+ : mozilla::Runnable("nsDestroyThreadEvent"), mThread(thread) {}
+ NS_IMETHOD Run() override {
+ mThread->Shutdown();
+ return NS_OK;
+ }
+
+ private:
+ nsCOMPtr<nsIThread> mThread;
+};
+
+nsShutdownThread::nsShutdownThread(nsIThread* aThread)
+ : mozilla::Runnable("nsShutdownThread"),
+ mMonitor("nsShutdownThread.mMonitor"),
+ mShuttingDown(false),
+ mThread(aThread) {}
+
+nsresult nsShutdownThread::Shutdown(nsIThread* aThread) {
+ nsresult rv;
+ RefPtr<nsDestroyThreadEvent> ev = new nsDestroyThreadEvent(aThread);
+ rv = NS_DispatchToMainThread(ev);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Dispatching event in nsShutdownThread::Shutdown failed!");
+ }
+ return rv;
+}
+
+nsresult nsShutdownThread::BlockingShutdown(nsIThread* aThread) {
+ nsresult rv;
+
+ RefPtr<nsShutdownThread> st = new nsShutdownThread(aThread);
+ nsCOMPtr<nsIThread> workerThread;
+
+ rv = NS_NewNamedThread("thread shutdown", getter_AddRefs(workerThread));
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Can't create nsShutDownThread worker thread!");
+ return rv;
+ }
+
+ {
+ MonitorAutoLock mon(st->mMonitor);
+ rv = workerThread->Dispatch(st, NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_WARNING(
+ "Dispatching event in nsShutdownThread::BlockingShutdown failed!");
+ } else {
+ st->mShuttingDown = true;
+ while (st->mShuttingDown) {
+ mon.Wait();
+ }
+ }
+ }
+
+ return Shutdown(workerThread);
+}
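+
+// Illustrative usage (sketch; the thread variable is hypothetical): the cache
+// code can retire an I/O thread without spinning the caller's event loop:
+//
+//   nsShutdownThread::BlockingShutdown(mCacheIOThread);
+//
+// The caller blocks on the monitor until the worker thread has finished
+// aThread->Shutdown(); the worker itself is then shut down from the main
+// thread via Shutdown().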
+
+NS_IMETHODIMP
+nsShutdownThread::Run() {
+ MonitorAutoLock mon(mMonitor);
+ mThread->Shutdown();
+ mShuttingDown = false;
+ mon.Notify();
+ return NS_OK;
+}
diff --git a/netwerk/cache/nsCacheUtils.h b/netwerk/cache/nsCacheUtils.h
new file mode 100644
index 0000000000..71f3056f89
--- /dev/null
+++ b/netwerk/cache/nsCacheUtils.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et tw=80 : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsCacheUtils_h_
+#define _nsCacheUtils_h_
+
+#include "nsThreadUtils.h"
+#include "nsCOMPtr.h"
+#include "mozilla/Monitor.h"
+
+class nsIThread;
+
+/**
+ * A class with utility methods for shutting down nsIThreads easily.
+ */
+class nsShutdownThread : public mozilla::Runnable {
+ public:
+ explicit nsShutdownThread(nsIThread* aThread);
+ ~nsShutdownThread() = default;
+
+ NS_IMETHOD Run() override;
+
+ /**
+   * Shutdown ensures that aThread->Shutdown() is called on the main thread.
+ */
+ static nsresult Shutdown(nsIThread* aThread);
+
+ /**
+   * BlockingShutdown ensures that by the time it returns, aThread->Shutdown()
+   * has completed and that no pending events on the current thread have been
+   * processed while waiting (the caller blocks on a monitor rather than
+   * spinning its event loop).
+ */
+ static nsresult BlockingShutdown(nsIThread* aThread);
+
+ private:
+ mozilla::Monitor mMonitor;
+ bool mShuttingDown;
+ nsCOMPtr<nsIThread> mThread;
+};
+
+#endif
diff --git a/netwerk/cache/nsDeleteDir.cpp b/netwerk/cache/nsDeleteDir.cpp
new file mode 100644
index 0000000000..f99896b721
--- /dev/null
+++ b/netwerk/cache/nsDeleteDir.cpp
@@ -0,0 +1,362 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsDeleteDir.h"
+#include "nsIFile.h"
+#include "nsString.h"
+#include "mozilla/Telemetry.h"
+#include "nsITimer.h"
+#include "nsThreadUtils.h"
+#include "nsISupportsPriority.h"
+#include "nsCacheUtils.h"
+#include "prtime.h"
+#include <time.h>
+
+using namespace mozilla;
+
+class nsBlockOnBackgroundThreadEvent : public Runnable {
+ public:
+ nsBlockOnBackgroundThreadEvent()
+ : mozilla::Runnable("nsBlockOnBackgroundThreadEvent") {}
+ NS_IMETHOD Run() override {
+ MutexAutoLock lock(nsDeleteDir::gInstance->mLock);
+ nsDeleteDir::gInstance->mNotified = true;
+ nsDeleteDir::gInstance->mCondVar.Notify();
+ return NS_OK;
+ }
+};
+
+nsDeleteDir* nsDeleteDir::gInstance = nullptr;
+
+nsDeleteDir::nsDeleteDir()
+ : mLock("nsDeleteDir.mLock"),
+ mCondVar(mLock, "nsDeleteDir.mCondVar"),
+ mNotified(false),
+ mShutdownPending(false),
+ mStopDeleting(false) {
+  NS_ASSERTION(gInstance == nullptr, "multiple nsDeleteDir instances!");
+}
+
+nsDeleteDir::~nsDeleteDir() { gInstance = nullptr; }
+
+nsresult nsDeleteDir::Init() {
+ if (gInstance) return NS_ERROR_ALREADY_INITIALIZED;
+
+ gInstance = new nsDeleteDir();
+ return NS_OK;
+}
+
+nsresult nsDeleteDir::Shutdown(bool finishDeleting) {
+ if (!gInstance) return NS_ERROR_NOT_INITIALIZED;
+
+ nsCOMArray<nsIFile> dirsToRemove;
+ nsCOMPtr<nsISerialEventTarget> eventTarget;
+ {
+ MutexAutoLock lock(gInstance->mLock);
+ NS_ASSERTION(!gInstance->mShutdownPending,
+ "Unexpected state in nsDeleteDir::Shutdown()");
+ gInstance->mShutdownPending = true;
+
+ if (!finishDeleting) gInstance->mStopDeleting = true;
+
+ // remove all pending timers
+ for (int32_t i = gInstance->mTimers.Count(); i > 0; i--) {
+ nsCOMPtr<nsITimer> timer = gInstance->mTimers[i - 1];
+ gInstance->mTimers.RemoveObjectAt(i - 1);
+
+ nsCOMArray<nsIFile>* arg;
+ timer->GetClosure((reinterpret_cast<void**>(&arg)));
+ timer->Cancel();
+
+ if (finishDeleting) dirsToRemove.AppendObjects(*arg);
+
+ // delete argument passed to the timer
+ delete arg;
+ }
+
+ eventTarget.swap(gInstance->mBackgroundET);
+ if (eventTarget) {
+      // Dispatch the event and wait for it to run and notify us, so we know
+      // the thread has completed all work and can be shut down.
+ nsCOMPtr<nsIRunnable> event = new nsBlockOnBackgroundThreadEvent();
+ nsresult rv = eventTarget->Dispatch(event, NS_DISPATCH_EVENT_MAY_BLOCK);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Failed dispatching block-event");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ gInstance->mNotified = false;
+ while (!gInstance->mNotified) {
+ gInstance->mCondVar.Wait();
+ }
+ }
+ }
+
+ delete gInstance;
+
+ for (int32_t i = 0; i < dirsToRemove.Count(); i++)
+ dirsToRemove[i]->Remove(true);
+
+ return NS_OK;
+}
+
+nsresult nsDeleteDir::InitThread() {
+ if (mBackgroundET) return NS_OK;
+
+ nsresult rv = NS_CreateBackgroundTaskQueue("Cache Deleter",
+ getter_AddRefs(mBackgroundET));
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Can't create background task queue");
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+void nsDeleteDir::DestroyThread() {
+ if (!mBackgroundET) return;
+
+ if (mTimers.Count())
+ // more work to do, so don't delete thread.
+ return;
+
+ mBackgroundET = nullptr;
+}
+
+void nsDeleteDir::TimerCallback(nsITimer* aTimer, void* arg) {
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_DELETEDIR> timer;
+ {
+ MutexAutoLock lock(gInstance->mLock);
+
+ int32_t idx = gInstance->mTimers.IndexOf(aTimer);
+ if (idx == -1) {
+ // Timer was canceled and removed during shutdown.
+ return;
+ }
+
+ gInstance->mTimers.RemoveObjectAt(idx);
+ }
+
+ UniquePtr<nsCOMArray<nsIFile>> dirList;
+ dirList.reset(static_cast<nsCOMArray<nsIFile>*>(arg));
+
+ bool shuttingDown = false;
+
+  // Intentional extra braces to control variable scope.
+ {
+ // Low IO priority can only be set when running in the context of the
+ // current thread. So this shouldn't be moved to where we set the priority
+ // of the Cache deleter thread using the nsThread's NSPR priority constants.
+ nsAutoLowPriorityIO autoLowPriority;
+ for (int32_t i = 0; i < dirList->Count() && !shuttingDown; i++) {
+ gInstance->RemoveDir((*dirList)[i], &shuttingDown);
+ }
+ }
+
+ {
+ MutexAutoLock lock(gInstance->mLock);
+ gInstance->DestroyThread();
+ }
+}
+
+nsresult nsDeleteDir::DeleteDir(nsIFile* dirIn, bool moveToTrash,
+ uint32_t delay) {
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_TRASHRENAME> timer;
+
+ if (!gInstance) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv;
+ nsCOMPtr<nsIFile> trash, dir;
+
+ // Need to make a clone of this since we don't want to modify the input
+ // file object.
+ rv = dirIn->Clone(getter_AddRefs(dir));
+ if (NS_FAILED(rv)) return rv;
+
+ if (moveToTrash) {
+ rv = GetTrashDir(dir, &trash);
+ if (NS_FAILED(rv)) return rv;
+ nsAutoCString origLeaf;
+ rv = trash->GetNativeLeafName(origLeaf);
+ if (NS_FAILED(rv)) return rv;
+
+ // Append random number to the trash directory and check if it exists.
+ srand(static_cast<unsigned>(PR_Now()));
+ nsAutoCString leaf;
+ for (int32_t i = 0; i < 10; i++) {
+ leaf = origLeaf;
+ leaf.AppendInt(rand());
+ rv = trash->SetNativeLeafName(leaf);
+ if (NS_FAILED(rv)) return rv;
+
+ bool exists;
+ if (NS_SUCCEEDED(trash->Exists(&exists)) && !exists) {
+ break;
+ }
+
+ leaf.Truncate();
+ }
+
+    // Fail if we didn't find an unused trash directory within the limit.
+ if (!leaf.Length()) return NS_ERROR_FAILURE;
+
+#if defined(MOZ_WIDGET_ANDROID)
+ nsCOMPtr<nsIFile> parent;
+ rv = trash->GetParent(getter_AddRefs(parent));
+ if (NS_FAILED(rv)) return rv;
+ rv = dir->MoveToNative(parent, leaf);
+#else
+    // Important: we must rename the directory without changing its parent
+    // directory; otherwise, on NTFS, we would wait (holding the cache lock)
+    // while nsIFile's ACL reset walks the file tree, which used to hang the
+    // GUI for *minutes* on large cache directories.
+ rv = dir->MoveToNative(nullptr, leaf);
+#endif
+ if (NS_FAILED(rv)) return rv;
+ } else {
+ // we want to pass a clone of the original off to the worker thread.
+ trash.swap(dir);
+ }
+
+ UniquePtr<nsCOMArray<nsIFile>> arg(new nsCOMArray<nsIFile>);
+ arg->AppendObject(trash);
+
+ rv = gInstance->PostTimer(arg.get(), delay);
+ if (NS_FAILED(rv)) return rv;
+
+ Unused << arg.release();
+ return NS_OK;
+}
+
+nsresult nsDeleteDir::GetTrashDir(nsIFile* target, nsCOMPtr<nsIFile>* result) {
+ nsresult rv;
+#if defined(MOZ_WIDGET_ANDROID)
+ // Try to use the app cache folder for cache trash on Android
+ char* cachePath = getenv("CACHE_DIRECTORY");
+ if (cachePath) {
+ rv = NS_NewNativeLocalFile(nsDependentCString(cachePath), true,
+ getter_AddRefs(*result));
+ if (NS_FAILED(rv)) return rv;
+
+ // Add a sub folder with the cache folder name
+ nsAutoCString leaf;
+ rv = target->GetNativeLeafName(leaf);
+ (*result)->AppendNative(leaf);
+ } else
+#endif
+ {
+ rv = target->Clone(getter_AddRefs(*result));
+ }
+ if (NS_FAILED(rv)) return rv;
+
+ nsAutoCString leaf;
+ rv = (*result)->GetNativeLeafName(leaf);
+ if (NS_FAILED(rv)) return rv;
+ leaf.AppendLiteral(".Trash");
+
+ return (*result)->SetNativeLeafName(leaf);
+}
+
+nsresult nsDeleteDir::RemoveOldTrashes(nsIFile* cacheDir) {
+ if (!gInstance) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv;
+
+ nsCOMPtr<nsIFile> trash;
+ rv = GetTrashDir(cacheDir, &trash);
+ if (NS_FAILED(rv)) return rv;
+
+ nsAutoString trashName;
+ rv = trash->GetLeafName(trashName);
+ if (NS_FAILED(rv)) return rv;
+
+ nsCOMPtr<nsIFile> parent;
+#if defined(MOZ_WIDGET_ANDROID)
+ rv = trash->GetParent(getter_AddRefs(parent));
+#else
+ rv = cacheDir->GetParent(getter_AddRefs(parent));
+#endif
+ if (NS_FAILED(rv)) return rv;
+
+ nsCOMPtr<nsIDirectoryEnumerator> iter;
+ rv = parent->GetDirectoryEntries(getter_AddRefs(iter));
+ if (NS_FAILED(rv)) return rv;
+
+ UniquePtr<nsCOMArray<nsIFile>> dirList;
+
+ nsCOMPtr<nsIFile> file;
+ while (NS_SUCCEEDED(iter->GetNextFile(getter_AddRefs(file))) && file) {
+ nsAutoString leafName;
+ rv = file->GetLeafName(leafName);
+ if (NS_FAILED(rv)) continue;
+
+    // Match all names that begin with the trash name (e.g. "Cache.Trash").
+ if (Substring(leafName, 0, trashName.Length()).Equals(trashName)) {
+ if (!dirList) dirList = MakeUnique<nsCOMArray<nsIFile>>();
+ dirList->AppendObject(file);
+ }
+ }
+
+ if (dirList) {
+ rv = gInstance->PostTimer(dirList.get(), 90000);
+ if (NS_FAILED(rv)) return rv;
+
+ Unused << dirList.release();
+ }
+
+ return NS_OK;
+}
+
+nsresult nsDeleteDir::PostTimer(void* arg, uint32_t delay) {
+ nsresult rv;
+
+ MutexAutoLock lock(mLock);
+
+ rv = InitThread();
+ if (NS_FAILED(rv)) return rv;
+
+ nsCOMPtr<nsITimer> timer;
+ rv = NS_NewTimerWithFuncCallback(getter_AddRefs(timer), TimerCallback, arg,
+ delay, nsITimer::TYPE_ONE_SHOT,
+ "nsDeleteDir::PostTimer", mBackgroundET);
+ if (NS_FAILED(rv)) return rv;
+
+ mTimers.AppendObject(timer);
+ return NS_OK;
+}
+
+nsresult nsDeleteDir::RemoveDir(nsIFile* file, bool* stopDeleting) {
+ nsresult rv;
+ bool isLink;
+
+ rv = file->IsSymlink(&isLink);
+ if (NS_FAILED(rv) || isLink) return NS_ERROR_UNEXPECTED;
+
+ bool isDir;
+ rv = file->IsDirectory(&isDir);
+ if (NS_FAILED(rv)) return rv;
+
+ if (isDir) {
+ nsCOMPtr<nsIDirectoryEnumerator> iter;
+ rv = file->GetDirectoryEntries(getter_AddRefs(iter));
+ if (NS_FAILED(rv)) return rv;
+
+ nsCOMPtr<nsIFile> file2;
+ while (NS_SUCCEEDED(iter->GetNextFile(getter_AddRefs(file2))) && file2) {
+ RemoveDir(file2, stopDeleting);
+      // Errors are deliberately ignored so we remove as much as possible.
+
+ if (*stopDeleting) return NS_OK;
+ }
+ }
+
+ file->Remove(false);
+  // Errors are deliberately ignored so we remove as much as possible.
+
+ MutexAutoLock lock(mLock);
+ if (mStopDeleting) *stopDeleting = true;
+
+ return NS_OK;
+}
diff --git a/netwerk/cache/nsDeleteDir.h b/netwerk/cache/nsDeleteDir.h
new file mode 100644
index 0000000000..cf6de26dde
--- /dev/null
+++ b/netwerk/cache/nsDeleteDir.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsDeleteDir_h__
+#define nsDeleteDir_h__
+
+#include "nsCOMPtr.h"
+#include "nsCOMArray.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/CondVar.h"
+
+class nsIFile;
+class nsISerialEventTarget;
+class nsITimer;
+
+class nsDeleteDir {
+ public:
+ nsDeleteDir();
+ ~nsDeleteDir();
+
+ static nsresult Init();
+ static nsresult Shutdown(bool finishDeleting);
+
+ /**
+ * This routine attempts to delete a directory that may contain some files
+ * that are still in use. This latter point is only an issue on Windows and
+ * a few other systems.
+ *
+ * If the moveToTrash parameter is true we first rename the given directory
+ * "foo.Trash123" (where "foo" is the original directory name, and "123" is
+ * a random number, in order to support multiple concurrent deletes). The
+ * directory is then deleted, file-by-file, on a background thread.
+ *
+ * If the moveToTrash parameter is false, then the given directory is deleted
+ * directly.
+ *
+ * If 'delay' is non-zero, the directory will not be deleted until the
+ * specified number of milliseconds have passed. (The directory is still
+   * renamed immediately if 'moveToTrash' is true, so upon return it is safe
+   * to create a directory with the same name.)
+ */
+ static nsresult DeleteDir(nsIFile* dir, bool moveToTrash, uint32_t delay = 0);
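+
+  // For example (illustrative; "Cache" and the delay are arbitrary):
+  //   nsDeleteDir::DeleteDir(cacheDir, /* moveToTrash = */ true,
+  //                          /* delay = */ 90000);
+  // renames a directory whose leaf name is "Cache" to something like
+  // "Cache.Trash12345" immediately and deletes it on a background task queue
+  // 90 seconds later.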
+
+ /**
+ * Returns the trash directory corresponding to the given directory.
+ */
+ static nsresult GetTrashDir(nsIFile* dir, nsCOMPtr<nsIFile>* result);
+
+ /**
+   * Remove all trash directories left over from a previous run. This function
+   * does nothing when called a second or subsequent time.
+ */
+ static nsresult RemoveOldTrashes(nsIFile* cacheDir);
+
+ static void TimerCallback(nsITimer* aTimer, void* arg);
+
+ private:
+ friend class nsBlockOnBackgroundThreadEvent;
+ friend class nsDestroyThreadEvent;
+
+ nsresult InitThread();
+ void DestroyThread();
+ nsresult PostTimer(void* arg, uint32_t delay);
+ nsresult RemoveDir(nsIFile* file, bool* stopDeleting);
+
+ static nsDeleteDir* gInstance;
+ mozilla::Mutex mLock;
+ mozilla::CondVar mCondVar;
+ bool mNotified;
+ nsCOMArray<nsITimer> mTimers;
+ nsCOMPtr<nsISerialEventTarget> mBackgroundET;
+ bool mShutdownPending;
+ bool mStopDeleting;
+};
+
+#endif // nsDeleteDir_h__
diff --git a/netwerk/cache/nsDiskCache.h b/netwerk/cache/nsDiskCache.h
new file mode 100644
index 0000000000..e01860b7bf
--- /dev/null
+++ b/netwerk/cache/nsDiskCache.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsDiskCache_h_
+#define _nsDiskCache_h_
+
+#include "nsCacheEntry.h"
+
+#ifdef XP_WIN
+# include <winsock.h> // for htonl/ntohl
+#endif
+
+class nsDiskCache {
+ public:
+ enum {
+ kCurrentVersion =
+ 0x00010013 // format = 16 bits major version/16 bits minor version
+ };
+
+ enum { kData, kMetaData };
+
+ // Stores the reason why the cache is corrupt.
+ // Note: I'm only listing the enum values explicitly for easy mapping when
+ // looking at telemetry data.
+ enum CorruptCacheInfo {
+ kNotCorrupt = 0,
+ kInvalidArgPointer = 1,
+ kUnexpectedError = 2,
+ kOpenCacheMapError = 3,
+ kBlockFilesShouldNotExist = 4,
+ kOutOfMemory = 5,
+ kCreateCacheSubdirectories = 6,
+ kBlockFilesShouldExist = 7,
+ kHeaderSizeNotRead = 8,
+ kHeaderIsDirty = 9,
+ kVersionMismatch = 10,
+ kRecordsIncomplete = 11,
+ kHeaderIncomplete = 12,
+ kNotEnoughToRead = 13,
+ kEntryCountIncorrect = 14,
+ kCouldNotGetBlockFileForIndex = 15,
+ kCouldNotCreateBlockFile = 16,
+ kBlockFileSizeError = 17,
+ kBlockFileBitMapWriteError = 18,
+ kBlockFileSizeLessThanBitMap = 19,
+ kBlockFileBitMapReadError = 20,
+ kBlockFileEstimatedSizeError = 21,
+ kFlushHeaderError = 22,
+ kCacheCleanFilePathError = 23,
+ kCacheCleanOpenFileError = 24,
+ kCacheCleanTimerError = 25
+ };
+
+  // The initval parameter initializes the internal state of the hash
+  // function. Hash values for the same text differ when different initval
+  // values are used; initval can be any random number.
+ //
+ // It can be used for generating 64-bit hash value:
+ // (uint64_t(Hash(key, initval1)) << 32) | Hash(key, initval2)
+ //
+ // It can be also used to hash multiple strings:
+ // h = Hash(string1, 0);
+ // h = Hash(string2, h);
+ // ...
+ static PLDHashNumber Hash(const char* key, PLDHashNumber initval = 0);
+ static nsresult Truncate(PRFileDesc* fd, uint32_t newEOF);
+};
+
+#endif // _nsDiskCache_h_
diff --git a/netwerk/cache/nsDiskCacheDeviceSQL.cpp b/netwerk/cache/nsDiskCacheDeviceSQL.cpp
new file mode 100644
index 0000000000..b2523a4d3f
--- /dev/null
+++ b/netwerk/cache/nsDiskCacheDeviceSQL.cpp
@@ -0,0 +1,2608 @@
+/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cin: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <inttypes.h>
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "nsCache.h"
+#include "nsDiskCache.h"
+#include "nsDiskCacheDeviceSQL.h"
+#include "nsCacheService.h"
+#include "nsApplicationCache.h"
+#include "../cache2/CacheHashUtils.h"
+
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsIURI.h"
+#include "nsEscape.h"
+#include "nsIPrefBranch.h"
+#include "nsString.h"
+#include "nsPrintfCString.h"
+#include "nsCRT.h"
+#include "nsArrayUtils.h"
+#include "nsIArray.h"
+#include "nsIVariant.h"
+#include "nsILoadContextInfo.h"
+#include "nsThreadUtils.h"
+#include "nsISerializable.h"
+#include "nsIInputStream.h"
+#include "nsIOutputStream.h"
+#include "nsSerializationHelper.h"
+#include "nsMemory.h"
+
+#include "mozIStorageService.h"
+#include "mozIStorageStatement.h"
+#include "mozIStorageFunction.h"
+#include "mozStorageHelper.h"
+
+#include "nsICacheVisitor.h"
+#include "nsISeekableStream.h"
+
+#include "mozilla/Telemetry.h"
+
+#include "mozilla/storage.h"
+#include "nsVariant.h"
+#include "mozilla/BasePrincipal.h"
+
+using namespace mozilla;
+using namespace mozilla::storage;
+using mozilla::OriginAttributes;
+
+static const char OFFLINE_CACHE_DEVICE_ID[] = {"offline"};
+
+#define LOG(args) CACHE_LOG_DEBUG(args)
+
+static uint32_t gNextTemporaryClientID = 0;
+
+/*****************************************************************************
+ * helpers
+ */
+
+static nsresult EnsureDir(nsIFile* dir) {
+ bool exists;
+ nsresult rv = dir->Exists(&exists);
+ if (NS_SUCCEEDED(rv) && !exists)
+ rv = dir->Create(nsIFile::DIRECTORY_TYPE, 0700);
+ return rv;
+}
+
+static bool DecomposeCacheEntryKey(const nsCString* fullKey, const char** cid,
+ const char** key, nsCString& buf) {
+ buf = *fullKey;
+
+ int32_t colon = buf.FindChar(':');
+ if (colon == kNotFound) {
+ NS_ERROR("Invalid key");
+ return false;
+ }
+ buf.SetCharAt('\0', colon);
+
+ *cid = buf.get();
+ *key = buf.get() + colon + 1;
+
+ return true;
+}
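+
+// For example, a full key of the form "<clientID>:<key>" such as
+// "someClient:https://example.com/" (the clientID value is illustrative)
+// decomposes into cid == "someClient" and key == "https://example.com/";
+// buf keeps the modified copy alive so both pointers remain valid.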
+
+class AutoResetStatement {
+ public:
+ explicit AutoResetStatement(mozIStorageStatement* s) : mStatement(s) {}
+ ~AutoResetStatement() { mStatement->Reset(); }
+ mozIStorageStatement* operator->() MOZ_NO_ADDREF_RELEASE_ON_RETURN {
+ return mStatement;
+ }
+
+ private:
+ mozIStorageStatement* mStatement;
+};
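+
+// Typical use (sketch, mirroring the callers below): wrap a prepared
+// statement so it is Reset() automatically when the scope ends, e.g.
+//
+//   AutoResetStatement statement(mStatement_EntryCount);
+//   bool hasRows;
+//   statement->ExecuteStep(&hasRows);  // statement is reset on scope exit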
+
+class EvictionObserver {
+ public:
+ EvictionObserver(mozIStorageConnection* db,
+ nsOfflineCacheEvictionFunction* evictionFunction)
+ : mDB(db), mEvictionFunction(evictionFunction) {
+ mEvictionFunction->Init();
+ mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE TEMP TRIGGER cache_on_delete BEFORE DELETE"
+ " ON moz_cache FOR EACH ROW BEGIN SELECT"
+ " cache_eviction_observer("
+ " OLD.ClientID, OLD.key, OLD.generation);"
+ " END;"));
+ }
+
+ ~EvictionObserver() {
+ mDB->ExecuteSimpleSQL("DROP TRIGGER cache_on_delete;"_ns);
+ mEvictionFunction->Reset();
+ }
+
+ void Apply() { return mEvictionFunction->Apply(); }
+
+ private:
+ mozIStorageConnection* mDB;
+ RefPtr<nsOfflineCacheEvictionFunction> mEvictionFunction;
+};
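+
+// Typical use (sketch; the literal ClientID is illustrative): instantiate an
+// EvictionObserver before running DELETE statements, then Apply() to remove
+// the data files collected by the TEMP trigger:
+//
+//   EvictionObserver observer(mDB, mEvictionFunction);
+//   mDB->ExecuteSimpleSQL("DELETE FROM moz_cache WHERE ClientID = 'x';"_ns);
+//   observer.Apply();
+//
+// The destructor drops the trigger and resets the eviction function's
+// thread-local item list.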
+
+#define DCACHE_HASH_MAX INT64_MAX
+#define DCACHE_HASH_BITS 64
+
+/**
+ * DCacheHash(const char* key)
+ *
+ * The algorithm used by this method implies that nsOfflineCacheRecords will
+ * be stored in a certain order on disk. If the algorithm changes, existing
+ * cache map files may become invalid, and therefore the kCurrentVersion needs
+ * to be revised.
+ */
+static uint64_t DCacheHash(const char* key) {
+ // initval 0x7416f295 was chosen randomly
+ return (uint64_t(CacheHash::Hash(key, strlen(key), 0)) << 32) |
+ CacheHash::Hash(key, strlen(key), 0x7416f295);
+}
+
+/******************************************************************************
+ * nsOfflineCacheEvictionFunction
+ */
+
+NS_IMPL_ISUPPORTS(nsOfflineCacheEvictionFunction, mozIStorageFunction)
+
+// helper function for directly exposing the same data file binding
+// path algorithm used in nsOfflineCacheBinding::Create
+static nsresult GetCacheDataFile(nsIFile* cacheDir, const char* key,
+ int generation, nsCOMPtr<nsIFile>& file) {
+ cacheDir->Clone(getter_AddRefs(file));
+ if (!file) return NS_ERROR_OUT_OF_MEMORY;
+
+ uint64_t hash = DCacheHash(key);
+
+ uint32_t dir1 = (uint32_t)(hash & 0x0F);
+ uint32_t dir2 = (uint32_t)((hash & 0xF0) >> 4);
+
+ hash >>= 8;
+
+ file->AppendNative(nsPrintfCString("%X", dir1));
+ file->AppendNative(nsPrintfCString("%X", dir2));
+
+ char leaf[64];
+ SprintfLiteral(leaf, "%014" PRIX64 "-%X", hash, generation);
+ return file->AppendNative(nsDependentCString(leaf));
+}
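+
+// For example (illustrative numbers): a key hashing to 0x123456789ABCDEF2
+// with generation 0 maps to <cacheDir>/2/F/123456789ABCDE-0: the low nibble
+// selects the first directory, the next nibble the second, and the remaining
+// 56 bits (zero-padded to 14 hex digits) plus the generation form the leaf.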
+
+namespace appcachedetail {
+
+typedef nsCOMArray<nsIFile> FileArray;
+static MOZ_THREAD_LOCAL(FileArray*) tlsEvictionItems;
+
+} // namespace appcachedetail
+
+NS_IMETHODIMP
+nsOfflineCacheEvictionFunction::OnFunctionCall(mozIStorageValueArray* values,
+ nsIVariant** _retval) {
+ LOG(("nsOfflineCacheEvictionFunction::OnFunctionCall\n"));
+
+ *_retval = nullptr;
+
+ uint32_t numEntries;
+ nsresult rv = values->GetNumEntries(&numEntries);
+ NS_ENSURE_SUCCESS(rv, rv);
+ NS_ASSERTION(numEntries == 3, "unexpected number of arguments");
+
+ uint32_t valueLen;
+ const char* clientID = values->AsSharedUTF8String(0, &valueLen);
+ const char* key = values->AsSharedUTF8String(1, &valueLen);
+ nsAutoCString fullKey(clientID);
+ fullKey.Append(':');
+ fullKey.Append(key);
+ int generation = values->AsInt32(2);
+
+ // If the key is currently locked, refuse to delete this row.
+ if (mDevice->IsLocked(fullKey)) {
+ // This code thought it was performing the equivalent of invoking the SQL
+ // "RAISE(IGNORE)" function. It was not. Please see bug 1470961 and any
+ // follow-ups to understand the plan for correcting this bug.
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetCacheDataFile(mDevice->CacheDirectory(), key, generation, file);
+ if (NS_FAILED(rv)) {
+ LOG(("GetCacheDataFile [key=%s generation=%d] failed [rv=%" PRIx32 "]!\n",
+ key, generation, static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ appcachedetail::FileArray* items = appcachedetail::tlsEvictionItems.get();
+ MOZ_ASSERT(items);
+ if (items) {
+ items->AppendObject(file);
+ }
+
+ return NS_OK;
+}
+
+nsOfflineCacheEvictionFunction::nsOfflineCacheEvictionFunction(
+ nsOfflineCacheDevice* device)
+ : mDevice(device) {
+ mTLSInited = appcachedetail::tlsEvictionItems.init();
+}
+
+void nsOfflineCacheEvictionFunction::Init() {
+ if (mTLSInited) {
+ appcachedetail::tlsEvictionItems.set(new appcachedetail::FileArray());
+ }
+}
+
+void nsOfflineCacheEvictionFunction::Reset() {
+ if (!mTLSInited) {
+ return;
+ }
+
+ appcachedetail::FileArray* items = appcachedetail::tlsEvictionItems.get();
+ if (!items) {
+ return;
+ }
+
+ appcachedetail::tlsEvictionItems.set(nullptr);
+ delete items;
+}
+
+void nsOfflineCacheEvictionFunction::Apply() {
+ LOG(("nsOfflineCacheEvictionFunction::Apply\n"));
+
+ if (!mTLSInited) {
+ return;
+ }
+
+ appcachedetail::FileArray* pitems = appcachedetail::tlsEvictionItems.get();
+ if (!pitems) {
+ return;
+ }
+
+ appcachedetail::FileArray items = std::move(*pitems);
+
+ for (int32_t i = 0; i < items.Count(); i++) {
+ if (MOZ_LOG_TEST(gCacheLog, LogLevel::Debug)) {
+ LOG((" removing %s\n", items[i]->HumanReadablePath().get()));
+ }
+
+ items[i]->Remove(false);
+ }
+}
+
+class nsOfflineCacheDiscardCache : public Runnable {
+ public:
+ nsOfflineCacheDiscardCache(nsOfflineCacheDevice* device, nsCString& group,
+ nsCString& clientID)
+ : mozilla::Runnable("nsOfflineCacheDiscardCache"),
+ mDevice(device),
+ mGroup(group),
+ mClientID(clientID) {}
+
+ NS_IMETHOD Run() override {
+ if (mDevice->IsActiveCache(mGroup, mClientID)) {
+ mDevice->DeactivateGroup(mGroup);
+ }
+
+ return mDevice->EvictEntries(mClientID.get());
+ }
+
+ private:
+ RefPtr<nsOfflineCacheDevice> mDevice;
+ nsCString mGroup;
+ nsCString mClientID;
+};
+
+/******************************************************************************
+ * nsOfflineCacheDeviceInfo
+ */
+
+class nsOfflineCacheDeviceInfo final : public nsICacheDeviceInfo {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSICACHEDEVICEINFO
+
+ explicit nsOfflineCacheDeviceInfo(nsOfflineCacheDevice* device)
+ : mDevice(device) {}
+
+ private:
+ ~nsOfflineCacheDeviceInfo() = default;
+
+ nsOfflineCacheDevice* mDevice;
+};
+
+NS_IMPL_ISUPPORTS(nsOfflineCacheDeviceInfo, nsICacheDeviceInfo)
+
+NS_IMETHODIMP
+nsOfflineCacheDeviceInfo::GetDescription(nsACString& aDescription) {
+ aDescription.AssignLiteral("Offline cache device");
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheDeviceInfo::GetUsageReport(nsACString& aUsageReport) {
+ nsAutoCString buffer;
+ buffer.AssignLiteral(
+ " <tr>\n"
+ " <th>Cache Directory:</th>\n"
+ " <td>");
+ nsIFile* cacheDir = mDevice->CacheDirectory();
+ if (!cacheDir) return NS_OK;
+
+ nsAutoString path;
+ nsresult rv = cacheDir->GetPath(path);
+ if (NS_SUCCEEDED(rv))
+ AppendUTF16toUTF8(path, buffer);
+ else
+ buffer.AppendLiteral("directory unavailable");
+
+ buffer.AppendLiteral(
+ "</td>\n"
+ " </tr>\n");
+
+ aUsageReport.Assign(buffer);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheDeviceInfo::GetEntryCount(uint32_t* aEntryCount) {
+ *aEntryCount = mDevice->EntryCount();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheDeviceInfo::GetTotalSize(uint32_t* aTotalSize) {
+ *aTotalSize = mDevice->CacheSize();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheDeviceInfo::GetMaximumSize(uint32_t* aMaximumSize) {
+ *aMaximumSize = mDevice->CacheCapacity();
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsOfflineCacheBinding
+ */
+
+class nsOfflineCacheBinding final : public nsISupports {
+ ~nsOfflineCacheBinding() = default;
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ static nsOfflineCacheBinding* Create(nsIFile* cacheDir, const nsCString* key,
+ int generation);
+
+ enum { FLAG_NEW_ENTRY = 1 };
+
+ nsCOMPtr<nsIFile> mDataFile;
+ int mGeneration;
+ int mFlags;
+
+ bool IsNewEntry() { return mFlags & FLAG_NEW_ENTRY; }
+ void MarkNewEntry() { mFlags |= FLAG_NEW_ENTRY; }
+ void ClearNewEntry() { mFlags &= ~FLAG_NEW_ENTRY; }
+};
+
+NS_IMPL_ISUPPORTS0(nsOfflineCacheBinding)
+
+nsOfflineCacheBinding* nsOfflineCacheBinding::Create(nsIFile* cacheDir,
+ const nsCString* fullKey,
+ int generation) {
+ nsCOMPtr<nsIFile> file;
+ cacheDir->Clone(getter_AddRefs(file));
+ if (!file) return nullptr;
+
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+ if (!DecomposeCacheEntryKey(fullKey, &cid, &key, keyBuf)) return nullptr;
+
+ uint64_t hash = DCacheHash(key);
+
+ uint32_t dir1 = (uint32_t)(hash & 0x0F);
+ uint32_t dir2 = (uint32_t)((hash & 0xF0) >> 4);
+
+ hash >>= 8;
+
+ // XXX we might want to create these directories up-front
+
+ file->AppendNative(nsPrintfCString("%X", dir1));
+ Unused << file->Create(nsIFile::DIRECTORY_TYPE, 00700);
+
+ file->AppendNative(nsPrintfCString("%X", dir2));
+ Unused << file->Create(nsIFile::DIRECTORY_TYPE, 00700);
+
+ nsresult rv;
+ char leaf[64];
+
+ if (generation == -1) {
+ file->AppendNative("placeholder"_ns);
+
+ for (generation = 0;; ++generation) {
+ SprintfLiteral(leaf, "%014" PRIX64 "-%X", hash, generation);
+
+ rv = file->SetNativeLeafName(nsDependentCString(leaf));
+ if (NS_FAILED(rv)) return nullptr;
+ rv = file->Create(nsIFile::NORMAL_FILE_TYPE, 00600);
+ if (NS_FAILED(rv) && rv != NS_ERROR_FILE_ALREADY_EXISTS) return nullptr;
+ if (NS_SUCCEEDED(rv)) break;
+ }
+ } else {
+ SprintfLiteral(leaf, "%014" PRIX64 "-%X", hash, generation);
+ rv = file->AppendNative(nsDependentCString(leaf));
+ if (NS_FAILED(rv)) return nullptr;
+ }
+
+ nsOfflineCacheBinding* binding = new nsOfflineCacheBinding;
+ if (!binding) return nullptr;
+
+ binding->mDataFile.swap(file);
+ binding->mGeneration = generation;
+ binding->mFlags = 0;
+ return binding;
+}
+
+/******************************************************************************
+ * nsOfflineCacheRecord
+ */
+
+struct nsOfflineCacheRecord {
+ const char* clientID;
+ const char* key;
+ const uint8_t* metaData;
+ uint32_t metaDataLen;
+ int32_t generation;
+ int32_t dataSize;
+ int32_t fetchCount;
+ int64_t lastFetched;
+ int64_t lastModified;
+ int64_t expirationTime;
+};
+
+static nsCacheEntry* CreateCacheEntry(nsOfflineCacheDevice* device,
+ const nsCString* fullKey,
+ const nsOfflineCacheRecord& rec) {
+ nsCacheEntry* entry;
+
+ if (device->IsLocked(*fullKey)) {
+ return nullptr;
+ }
+
+ nsresult rv = nsCacheEntry::Create(fullKey->get(), // XXX enable sharing
+ nsICache::STREAM_BASED,
+ nsICache::STORE_OFFLINE, device, &entry);
+ if (NS_FAILED(rv)) return nullptr;
+
+ entry->SetFetchCount((uint32_t)rec.fetchCount);
+ entry->SetLastFetched(SecondsFromPRTime(rec.lastFetched));
+ entry->SetLastModified(SecondsFromPRTime(rec.lastModified));
+ entry->SetExpirationTime(SecondsFromPRTime(rec.expirationTime));
+ entry->SetDataSize((uint32_t)rec.dataSize);
+
+ entry->UnflattenMetaData((const char*)rec.metaData, rec.metaDataLen);
+
+ // Restore security info, if present
+ const char* info = entry->GetMetaDataElement("security-info");
+ if (info) {
+ nsCOMPtr<nsISupports> infoObj;
+ rv =
+ NS_DeserializeObject(nsDependentCString(info), getter_AddRefs(infoObj));
+ if (NS_FAILED(rv)) {
+ delete entry;
+ return nullptr;
+ }
+ entry->SetSecurityInfo(infoObj);
+ }
+
+ // create a binding object for this entry
+ nsOfflineCacheBinding* binding = nsOfflineCacheBinding::Create(
+ device->CacheDirectory(), fullKey, rec.generation);
+ if (!binding) {
+ delete entry;
+ return nullptr;
+ }
+ entry->SetData(binding);
+
+ return entry;
+}
+
+/******************************************************************************
+ * nsOfflineCacheEntryInfo
+ */
+
+class nsOfflineCacheEntryInfo final : public nsICacheEntryInfo {
+ ~nsOfflineCacheEntryInfo() = default;
+
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSICACHEENTRYINFO
+
+ nsOfflineCacheRecord* mRec;
+};
+
+NS_IMPL_ISUPPORTS(nsOfflineCacheEntryInfo, nsICacheEntryInfo)
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetClientID(nsACString& aClientID) {
+ aClientID.Assign(mRec->clientID);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetDeviceID(nsACString& aDeviceID) {
+ aDeviceID.Assign(OFFLINE_CACHE_DEVICE_ID);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetKey(nsACString& clientKey) {
+ clientKey.Assign(mRec->key);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetFetchCount(int32_t* aFetchCount) {
+ *aFetchCount = mRec->fetchCount;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetLastFetched(uint32_t* aLastFetched) {
+ *aLastFetched = SecondsFromPRTime(mRec->lastFetched);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetLastModified(uint32_t* aLastModified) {
+ *aLastModified = SecondsFromPRTime(mRec->lastModified);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetExpirationTime(uint32_t* aExpirationTime) {
+ *aExpirationTime = SecondsFromPRTime(mRec->expirationTime);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::IsStreamBased(bool* aStreamBased) {
+ *aStreamBased = true;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsOfflineCacheEntryInfo::GetDataSize(uint32_t* aDataSize) {
+ *aDataSize = mRec->dataSize;
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsApplicationCacheNamespace
+ */
+
+NS_IMPL_ISUPPORTS(nsApplicationCacheNamespace, nsIApplicationCacheNamespace)
+
+NS_IMETHODIMP
+nsApplicationCacheNamespace::Init(uint32_t itemType,
+ const nsACString& namespaceSpec,
+ const nsACString& data) {
+ mItemType = itemType;
+ mNamespaceSpec = namespaceSpec;
+ mData = data;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCacheNamespace::GetItemType(uint32_t* out) {
+ *out = mItemType;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCacheNamespace::GetNamespaceSpec(nsACString& out) {
+ out = mNamespaceSpec;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCacheNamespace::GetData(nsACString& out) {
+ out = mData;
+ return NS_OK;
+}
+
+/******************************************************************************
+ * nsApplicationCache
+ */
+
+NS_IMPL_ISUPPORTS(nsApplicationCache, nsIApplicationCache,
+ nsISupportsWeakReference)
+
+nsApplicationCache::nsApplicationCache() : mDevice(nullptr), mValid(true) {}
+
+nsApplicationCache::nsApplicationCache(nsOfflineCacheDevice* device,
+ const nsACString& group,
+ const nsACString& clientID)
+ : mDevice(device), mGroup(group), mClientID(clientID), mValid(true) {}
+
+nsApplicationCache::~nsApplicationCache() {
+ if (!mDevice) return;
+
+ {
+ MutexAutoLock lock(mDevice->mLock);
+ mDevice->mCaches.Remove(mClientID);
+ }
+
+ // If this isn't an active cache anymore, it can be destroyed.
+ if (mValid && !mDevice->IsActiveCache(mGroup, mClientID)) Discard();
+}
+
+void nsApplicationCache::MarkInvalid() { mValid = false; }
+
+NS_IMETHODIMP
+nsApplicationCache::InitAsHandle(const nsACString& groupId,
+ const nsACString& clientId) {
+ NS_ENSURE_FALSE(mDevice, NS_ERROR_ALREADY_INITIALIZED);
+ NS_ENSURE_TRUE(mGroup.IsEmpty(), NS_ERROR_ALREADY_INITIALIZED);
+
+ mGroup = groupId;
+ mClientID = clientId;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetManifestURI(nsIURI** out) {
+ nsCOMPtr<nsIURI> uri;
+ nsresult rv = NS_NewURI(getter_AddRefs(uri), mGroup);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = NS_GetURIWithNewRef(uri, ""_ns, out);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetGroupID(nsACString& out) {
+ out = mGroup;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetClientID(nsACString& out) {
+ out = mClientID;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetProfileDirectory(nsIFile** out) {
+ *out = do_AddRef(mDevice->BaseDirectory()).take();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetActive(bool* out) {
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ *out = mDevice->IsActiveCache(mGroup, mClientID);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::Activate() {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ mDevice->ActivateCache(mGroup, mClientID);
+
+ if (mDevice->AutoShutdown(this)) mDevice = nullptr;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::Discard() {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ mValid = false;
+
+ nsCOMPtr<nsIRunnable> ev =
+ new nsOfflineCacheDiscardCache(mDevice, mGroup, mClientID);
+ nsresult rv = nsCacheService::DispatchToCacheIOThread(ev);
+ return rv;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::MarkEntry(const nsACString& key, uint32_t typeBits) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->MarkEntry(mClientID, key, typeBits);
+}
+
+NS_IMETHODIMP
+nsApplicationCache::UnmarkEntry(const nsACString& key, uint32_t typeBits) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->UnmarkEntry(mClientID, key, typeBits);
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetTypes(const nsACString& key, uint32_t* typeBits) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->GetTypes(mClientID, key, typeBits);
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GatherEntries(uint32_t typeBits,
+ nsTArray<nsCString>& keys) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->GatherEntries(mClientID, typeBits, keys);
+}
+
+NS_IMETHODIMP
+nsApplicationCache::AddNamespaces(nsIArray* namespaces) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ if (!namespaces) return NS_OK;
+
+ mozStorageTransaction transaction(mDevice->mDB, false);
+
+ uint32_t length;
+ nsresult rv = namespaces->GetLength(&length);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ for (uint32_t i = 0; i < length; i++) {
+ nsCOMPtr<nsIApplicationCacheNamespace> ns =
+ do_QueryElementAt(namespaces, i);
+ if (ns) {
+ rv = mDevice->AddNamespace(mClientID, ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+ }
+
+ rv = transaction.Commit();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetMatchingNamespace(const nsACString& key,
+ nsIApplicationCacheNamespace** out)
+
+{
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->GetMatchingNamespace(mClientID, key, out);
+}
+
+NS_IMETHODIMP
+nsApplicationCache::GetUsage(uint32_t* usage) {
+ NS_ENSURE_TRUE(mValid, NS_ERROR_NOT_AVAILABLE);
+ NS_ENSURE_TRUE(mDevice, NS_ERROR_NOT_AVAILABLE);
+
+ return mDevice->GetUsage(mClientID, usage);
+}
+
+/******************************************************************************
+ * nsCloseDBEvent
+ *****************************************************************************/
+
+class nsCloseDBEvent : public Runnable {
+ public:
+ explicit nsCloseDBEvent(mozIStorageConnection* aDB)
+ : mozilla::Runnable("nsCloseDBEvent") {
+ mDB = aDB;
+ }
+
+ NS_IMETHOD Run() override {
+ mDB->Close();
+ return NS_OK;
+ }
+
+ protected:
+ virtual ~nsCloseDBEvent() = default;
+
+ private:
+ nsCOMPtr<mozIStorageConnection> mDB;
+};
+
+/******************************************************************************
+ * nsOfflineCacheDevice
+ */
+
+NS_IMPL_ISUPPORTS0(nsOfflineCacheDevice)
+
+nsOfflineCacheDevice::nsOfflineCacheDevice()
+ : mDB(nullptr),
+ mCacheCapacity(0),
+ mDeltaCounter(0),
+ mAutoShutdown(false),
+ mLock("nsOfflineCacheDevice.lock"),
+ mActiveCaches(4),
+ mLockedEntries(32) {}
+
+/* static */
+bool nsOfflineCacheDevice::GetStrictFileOriginPolicy() {
+ nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
+
+ bool retval;
+ if (prefs && NS_SUCCEEDED(prefs->GetBoolPref(
+ "security.fileuri.strict_origin_policy", &retval)))
+ return retval;
+
+  // Use true as the default value (be more strict).
+ return true;
+}
+
+uint32_t nsOfflineCacheDevice::CacheSize() {
+ NS_ENSURE_TRUE(Initialized(), 0);
+
+ AutoResetStatement statement(mStatement_CacheSize);
+
+ bool hasRows;
+ nsresult rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_TRUE(NS_SUCCEEDED(rv) && hasRows, 0);
+
+ return (uint32_t)statement->AsInt32(0);
+}
+
+uint32_t nsOfflineCacheDevice::EntryCount() {
+ NS_ENSURE_TRUE(Initialized(), 0);
+
+ AutoResetStatement statement(mStatement_EntryCount);
+
+ bool hasRows;
+ nsresult rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_TRUE(NS_SUCCEEDED(rv) && hasRows, 0);
+
+ return (uint32_t)statement->AsInt32(0);
+}
+
+nsresult nsOfflineCacheDevice::UpdateEntry(nsCacheEntry* entry) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ // Decompose the key into "ClientID" and "Key"
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+
+ if (!DecomposeCacheEntryKey(entry->Key(), &cid, &key, keyBuf))
+ return NS_ERROR_UNEXPECTED;
+
+ // Store security info, if it is serializable
+ nsCOMPtr<nsISupports> infoObj = entry->SecurityInfo();
+ nsCOMPtr<nsISerializable> serializable = do_QueryInterface(infoObj);
+ if (infoObj && !serializable) return NS_ERROR_UNEXPECTED;
+
+ if (serializable) {
+ nsCString info;
+ nsresult rv = NS_SerializeToString(serializable, info);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = entry->SetMetaDataElement("security-info", info.get());
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ nsCString metaDataBuf;
+ uint32_t mdSize = entry->MetaDataSize();
+ if (!metaDataBuf.SetLength(mdSize, fallible)) return NS_ERROR_OUT_OF_MEMORY;
+ char* md = metaDataBuf.BeginWriting();
+ entry->FlattenMetaData(md, mdSize);
+
+ nsOfflineCacheRecord rec;
+ rec.metaData = (const uint8_t*)md;
+ rec.metaDataLen = mdSize;
+ rec.dataSize = entry->DataSize();
+ rec.fetchCount = entry->FetchCount();
+ rec.lastFetched = PRTimeFromSeconds(entry->LastFetched());
+ rec.lastModified = PRTimeFromSeconds(entry->LastModified());
+ rec.expirationTime = PRTimeFromSeconds(entry->ExpirationTime());
+
+ AutoResetStatement statement(mStatement_UpdateEntry);
+
+ nsresult rv;
+ rv = statement->BindBlobByIndex(0, rec.metaData, rec.metaDataLen);
+ nsresult tmp = statement->BindInt32ByIndex(1, rec.dataSize);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt32ByIndex(2, rec.fetchCount);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(3, rec.lastFetched);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(4, rec.lastModified);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(5, rec.expirationTime);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindUTF8StringByIndex(6, nsDependentCString(cid));
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindUTF8StringByIndex(7, nsDependentCString(key));
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ NS_ASSERTION(!hasRows, "UPDATE should not result in output");
+ return rv;
+}
+
+nsresult nsOfflineCacheDevice::UpdateEntrySize(nsCacheEntry* entry,
+ uint32_t newSize) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ // Decompose the key into "ClientID" and "Key"
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+ if (!DecomposeCacheEntryKey(entry->Key(), &cid, &key, keyBuf))
+ return NS_ERROR_UNEXPECTED;
+
+ AutoResetStatement statement(mStatement_UpdateEntrySize);
+
+ nsresult rv = statement->BindInt32ByIndex(0, newSize);
+ nsresult tmp = statement->BindUTF8StringByIndex(1, nsDependentCString(cid));
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindUTF8StringByIndex(2, nsDependentCString(key));
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ NS_ASSERTION(!hasRows, "UPDATE should not result in output");
+ return rv;
+}
+
+nsresult nsOfflineCacheDevice::DeleteEntry(nsCacheEntry* entry,
+ bool deleteData) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ if (deleteData) {
+ nsresult rv = DeleteData(entry);
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ // Decompose the key into "ClientID" and "Key"
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+ if (!DecomposeCacheEntryKey(entry->Key(), &cid, &key, keyBuf))
+ return NS_ERROR_UNEXPECTED;
+
+ AutoResetStatement statement(mStatement_DeleteEntry);
+
+ nsresult rv = statement->BindUTF8StringByIndex(0, nsDependentCString(cid));
+ nsresult rv2 = statement->BindUTF8StringByIndex(1, nsDependentCString(key));
+ NS_ENSURE_SUCCESS(rv, rv);
+ NS_ENSURE_SUCCESS(rv2, rv2);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ NS_ASSERTION(!hasRows, "DELETE should not result in output");
+ return rv;
+}
+
+nsresult nsOfflineCacheDevice::DeleteData(nsCacheEntry* entry) {
+ nsOfflineCacheBinding* binding = (nsOfflineCacheBinding*)entry->Data();
+ NS_ENSURE_STATE(binding);
+
+ return binding->mDataFile->Remove(false);
+}
+
+/**
+ * nsCacheDevice implementation
+ */
+
+// This struct is used only by nsOfflineCacheDevice::InitWithSqlite, but it
+// cannot be declared locally there because ISO C++98 doesn't allow a template
+// (mozilla::ArrayLength) to be instantiated with a local type. Boo-urns!
+struct StatementSql {
+ nsCOMPtr<mozIStorageStatement>& statement;
+ const char* sql;
+ StatementSql(nsCOMPtr<mozIStorageStatement>& aStatement, const char* aSql)
+ : statement(aStatement), sql(aSql) {}
+};
+
+nsresult nsOfflineCacheDevice::Init() {
+ MOZ_ASSERT(false, "Need to be initialized with sqlite");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsOfflineCacheDevice::InitWithSqlite(mozIStorageService* ss) {
+ NS_ENSURE_TRUE(!mDB, NS_ERROR_ALREADY_INITIALIZED);
+
+ // SetCacheParentDirectory must have been called
+ NS_ENSURE_TRUE(mCacheDirectory, NS_ERROR_UNEXPECTED);
+
+ // make sure the cache directory exists
+ nsresult rv = EnsureDir(mCacheDirectory);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // build path to index file
+ nsCOMPtr<nsIFile> indexFile;
+ rv = mCacheDirectory->Clone(getter_AddRefs(indexFile));
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = indexFile->AppendNative("index.sqlite"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MOZ_ASSERT(ss,
+ "nsOfflineCacheDevice::InitWithSqlite called before "
+ "nsCacheService::Init() ?");
+ NS_ENSURE_TRUE(ss, NS_ERROR_UNEXPECTED);
+
+ rv = ss->OpenDatabase(indexFile, getter_AddRefs(mDB));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mInitEventTarget = GetCurrentEventTarget();
+
+ mDB->ExecuteSimpleSQL("PRAGMA synchronous = OFF;"_ns);
+
+ // XXX ... other initialization steps
+
+ // XXX in the future we may wish to verify the schema for moz_cache
+ // perhaps using "PRAGMA table_info" ?
+
+ // build the table
+ //
+ // "Generation" is the data file generation number.
+ //
+ rv = mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE TABLE IF NOT EXISTS moz_cache (\n"
+ " ClientID TEXT,\n"
+ " Key TEXT,\n"
+ " MetaData BLOB,\n"
+ " Generation INTEGER,\n"
+ " DataSize INTEGER,\n"
+ " FetchCount INTEGER,\n"
+ " LastFetched INTEGER,\n"
+ " LastModified INTEGER,\n"
+ " ExpirationTime INTEGER,\n"
+ " ItemType INTEGER DEFAULT 0\n"
+ ");\n"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Databases from 1.9.0 don't have the ItemType column. Add the column
+ // here, but don't worry about failures (the column probably already exists)
+ mDB->ExecuteSimpleSQL(
+ "ALTER TABLE moz_cache ADD ItemType INTEGER DEFAULT 0"_ns);
+
+ // Create the table for storing cache groups. All actions on
+ // moz_cache_groups use the GroupID, so use it as the primary key.
+ rv = mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE TABLE IF NOT EXISTS moz_cache_groups (\n"
+ " GroupID TEXT PRIMARY KEY,\n"
+ " ActiveClientID TEXT\n"
+ ");\n"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mDB->ExecuteSimpleSQL(
+ nsLiteralCString("ALTER TABLE moz_cache_groups "
+ "ADD ActivateTimeStamp INTEGER DEFAULT 0"));
+
+ // ClientID: clientID joining moz_cache and moz_cache_namespaces
+ // tables.
+ // Data: Data associated with this namespace (e.g. a fallback URI
+ // for fallback entries).
+ // ItemType: the type of namespace.
+ rv =
+ mDB->ExecuteSimpleSQL(nsLiteralCString("CREATE TABLE IF NOT EXISTS"
+ " moz_cache_namespaces (\n"
+ " ClientID TEXT,\n"
+ " NameSpace TEXT,\n"
+ " Data TEXT,\n"
+ " ItemType INTEGER\n"
+ ");\n"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Databases from 1.9.0 have a moz_cache_index that should be dropped
+ rv = mDB->ExecuteSimpleSQL("DROP INDEX IF EXISTS moz_cache_index"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Key/ClientID pairs should be unique in the database. All queries
+ // against moz_cache use the Key (which is also the most unique), so
+ // use it as the primary key for this index.
+ rv = mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE UNIQUE INDEX IF NOT EXISTS "
+ " moz_cache_key_clientid_index"
+ " ON moz_cache (Key, ClientID);"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Used for ClientID lookups and to keep ClientID/NameSpace pairs unique.
+ rv = mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE UNIQUE INDEX IF NOT EXISTS"
+ " moz_cache_namespaces_clientid_index"
+ " ON moz_cache_namespaces (ClientID, NameSpace);"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Used for namespace lookups.
+ rv = mDB->ExecuteSimpleSQL(
+ nsLiteralCString("CREATE INDEX IF NOT EXISTS"
+ " moz_cache_namespaces_namespace_index"
+ " ON moz_cache_namespaces (NameSpace);"));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mEvictionFunction = new nsOfflineCacheEvictionFunction(this);
+ if (!mEvictionFunction) return NS_ERROR_OUT_OF_MEMORY;
+
+ rv = mDB->CreateFunction("cache_eviction_observer"_ns, 3, mEvictionFunction);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // create all (most) of our statements up front
+ StatementSql prepared[] = {
+ // clang-format off
+ StatementSql ( mStatement_CacheSize, "SELECT Sum(DataSize) from moz_cache;" ),
+ StatementSql ( mStatement_ApplicationCacheSize, "SELECT Sum(DataSize) from moz_cache WHERE ClientID = ?;" ),
+ StatementSql ( mStatement_EntryCount, "SELECT count(*) from moz_cache;" ),
+ StatementSql ( mStatement_UpdateEntry, "UPDATE moz_cache SET MetaData = ?, DataSize = ?, FetchCount = ?, LastFetched = ?, LastModified = ?, ExpirationTime = ? WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_UpdateEntrySize, "UPDATE moz_cache SET DataSize = ? WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_DeleteEntry, "DELETE FROM moz_cache WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_FindEntry, "SELECT MetaData, Generation, DataSize, FetchCount, LastFetched, LastModified, ExpirationTime, ItemType FROM moz_cache WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_BindEntry, "INSERT INTO moz_cache (ClientID, Key, MetaData, Generation, DataSize, FetchCount, LastFetched, LastModified, ExpirationTime) VALUES(?,?,?,?,?,?,?,?,?);" ),
+
+ StatementSql ( mStatement_MarkEntry, "UPDATE moz_cache SET ItemType = (ItemType | ?) WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_UnmarkEntry, "UPDATE moz_cache SET ItemType = (ItemType & ~?) WHERE ClientID = ? AND Key = ?;" ),
+ StatementSql ( mStatement_GetTypes, "SELECT ItemType FROM moz_cache WHERE ClientID = ? AND Key = ?;"),
+ StatementSql ( mStatement_CleanupUnmarked, "DELETE FROM moz_cache WHERE ClientID = ? AND Key = ? AND ItemType = 0;" ),
+ StatementSql ( mStatement_GatherEntries, "SELECT Key FROM moz_cache WHERE ClientID = ? AND (ItemType & ?) > 0;" ),
+
+ StatementSql ( mStatement_ActivateClient, "INSERT OR REPLACE INTO moz_cache_groups (GroupID, ActiveClientID, ActivateTimeStamp) VALUES (?, ?, ?);" ),
+ StatementSql ( mStatement_DeactivateGroup, "DELETE FROM moz_cache_groups WHERE GroupID = ?;" ),
+ StatementSql ( mStatement_FindClient, "/* do not warn (bug 1293375) */ SELECT ClientID, ItemType FROM moz_cache WHERE Key = ? ORDER BY LastFetched DESC, LastModified DESC;" ),
+
+      // Search for namespaces that match the URI. Use the <= operator
+      // to ensure that we use the index on moz_cache_namespaces (a worked
+      // example follows this statement table).
+ StatementSql ( mStatement_FindClientByNamespace, "/* do not warn (bug 1293375) */ SELECT ns.ClientID, ns.ItemType FROM"
+ " moz_cache_namespaces AS ns JOIN moz_cache_groups AS groups"
+ " ON ns.ClientID = groups.ActiveClientID"
+ " WHERE ns.NameSpace <= ?1 AND ?1 GLOB ns.NameSpace || '*'"
+ " ORDER BY ns.NameSpace DESC, groups.ActivateTimeStamp DESC;"),
+ StatementSql ( mStatement_FindNamespaceEntry, "SELECT NameSpace, Data, ItemType FROM moz_cache_namespaces"
+ " WHERE ClientID = ?1"
+ " AND NameSpace <= ?2 AND ?2 GLOB NameSpace || '*'"
+ " ORDER BY NameSpace DESC;"),
+ StatementSql ( mStatement_InsertNamespaceEntry, "INSERT INTO moz_cache_namespaces (ClientID, NameSpace, Data, ItemType) VALUES(?, ?, ?, ?);"),
+ StatementSql ( mStatement_EnumerateApps, "SELECT GroupID, ActiveClientID FROM moz_cache_groups WHERE GroupID LIKE ?1;"),
+ StatementSql ( mStatement_EnumerateGroups, "SELECT GroupID, ActiveClientID FROM moz_cache_groups;"),
+ StatementSql ( mStatement_EnumerateGroupsTimeOrder, "SELECT GroupID, ActiveClientID FROM moz_cache_groups ORDER BY ActivateTimeStamp;")
+ // clang-format on
+ };
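+
+  // Worked example (illustrative values) for the namespace lookups above:
+  // with ?1 = "https://example.com/app/page.html", a stored NameSpace of
+  // "https://example.com/app/" satisfies both conditions, because the
+  // namespace sorts <= the key and the key GLOB-matches NameSpace || '*';
+  // ORDER BY NameSpace DESC returns the longest (most specific) match first.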
+ for (uint32_t i = 0; NS_SUCCEEDED(rv) && i < ArrayLength(prepared); ++i) {
+ LOG(("Creating statement: %s\n", prepared[i].sql));
+
+ rv = mDB->CreateStatement(nsDependentCString(prepared[i].sql),
+ getter_AddRefs(prepared[i].statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ rv = InitActiveCaches();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+namespace {
+
+nsresult GetGroupForCache(const nsACString& clientID, nsCString& group) {
+ group.Assign(clientID);
+ group.Truncate(group.FindChar('|'));
+ NS_UnescapeURL(group);
+
+ return NS_OK;
+}
+
+} // namespace
+
+// static
+nsresult nsOfflineCacheDevice::BuildApplicationCacheGroupID(
+ nsIURI* aManifestURL, nsACString const& aOriginSuffix,
+ nsACString& _result) {
+ nsCOMPtr<nsIURI> newURI;
+ nsresult rv =
+ NS_GetURIWithNewRef(aManifestURL, ""_ns, getter_AddRefs(newURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString manifestSpec;
+ rv = newURI->GetAsciiSpec(manifestSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ _result.Assign(manifestSpec);
+ _result.Append('#');
+ _result.Append(aOriginSuffix);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::InitActiveCaches() {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ MutexAutoLock lock(mLock);
+
+ AutoResetStatement statement(mStatement_EnumerateGroups);
+
+ bool hasRows;
+ nsresult rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ while (hasRows) {
+ nsAutoCString group;
+ statement->GetUTF8String(0, group);
+ nsCString clientID;
+ statement->GetUTF8String(1, clientID);
+
+ mActiveCaches.PutEntry(clientID);
+ mActiveCachesByGroup.Put(group, new nsCString(clientID));
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::Shutdown() {
+ NS_ENSURE_TRUE(mDB, NS_ERROR_NOT_INITIALIZED);
+
+ {
+ MutexAutoLock lock(mLock);
+ for (auto iter = mCaches.Iter(); !iter.Done(); iter.Next()) {
+ nsCOMPtr<nsIApplicationCache> obj = do_QueryReferent(iter.UserData());
+ if (obj) {
+ auto appCache = static_cast<nsApplicationCache*>(obj.get());
+ appCache->MarkInvalid();
+ }
+ }
+ }
+
+ {
+ EvictionObserver evictionObserver(mDB, mEvictionFunction);
+
+ // Delete all rows whose clientID is not an active clientID.
+ nsresult rv = mDB->ExecuteSimpleSQL(nsLiteralCString(
+ "DELETE FROM moz_cache WHERE rowid IN"
+ " (SELECT moz_cache.rowid FROM"
+ " moz_cache LEFT OUTER JOIN moz_cache_groups ON"
+ " (moz_cache.ClientID = moz_cache_groups.ActiveClientID)"
+ " WHERE moz_cache_groups.GroupID ISNULL)"));
+
+ if (NS_FAILED(rv))
+ NS_WARNING("Failed to clean up unused application caches.");
+ else
+ evictionObserver.Apply();
+
+ // Delete all namespaces whose clientID is not an active clientID.
+ rv = mDB->ExecuteSimpleSQL(nsLiteralCString(
+ "DELETE FROM moz_cache_namespaces WHERE rowid IN"
+ " (SELECT moz_cache_namespaces.rowid FROM"
+ " moz_cache_namespaces LEFT OUTER JOIN moz_cache_groups ON"
+ " (moz_cache_namespaces.ClientID = "
+ "moz_cache_groups.ActiveClientID)"
+ " WHERE moz_cache_groups.GroupID ISNULL)"));
+
+ if (NS_FAILED(rv)) NS_WARNING("Failed to clean up namespaces.");
+
+ mEvictionFunction = nullptr;
+
+ mStatement_CacheSize = nullptr;
+ mStatement_ApplicationCacheSize = nullptr;
+ mStatement_EntryCount = nullptr;
+ mStatement_UpdateEntry = nullptr;
+ mStatement_UpdateEntrySize = nullptr;
+ mStatement_DeleteEntry = nullptr;
+ mStatement_FindEntry = nullptr;
+ mStatement_BindEntry = nullptr;
+ mStatement_ClearDomain = nullptr;
+ mStatement_MarkEntry = nullptr;
+ mStatement_UnmarkEntry = nullptr;
+ mStatement_GetTypes = nullptr;
+ mStatement_FindNamespaceEntry = nullptr;
+ mStatement_InsertNamespaceEntry = nullptr;
+ mStatement_CleanupUnmarked = nullptr;
+ mStatement_GatherEntries = nullptr;
+ mStatement_ActivateClient = nullptr;
+ mStatement_DeactivateGroup = nullptr;
+ mStatement_FindClient = nullptr;
+ mStatement_FindClientByNamespace = nullptr;
+ mStatement_EnumerateApps = nullptr;
+ mStatement_EnumerateGroups = nullptr;
+ mStatement_EnumerateGroupsTimeOrder = nullptr;
+ }
+
+ // Close Database on the correct thread
+ bool isOnCurrentThread = true;
+ if (mInitEventTarget)
+ isOnCurrentThread = mInitEventTarget->IsOnCurrentThread();
+
+ if (!isOnCurrentThread) {
+ nsCOMPtr<nsIRunnable> ev = new nsCloseDBEvent(mDB);
+
+ if (ev) {
+ mInitEventTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
+ }
+ } else {
+ mDB->Close();
+ }
+
+ mDB = nullptr;
+ mInitEventTarget = nullptr;
+
+ return NS_OK;
+}
+
+const char* nsOfflineCacheDevice::GetDeviceID() {
+ return OFFLINE_CACHE_DEVICE_ID;
+}
+
+nsCacheEntry* nsOfflineCacheDevice::FindEntry(nsCString* fullKey,
+ bool* collision) {
+ NS_ENSURE_TRUE(Initialized(), nullptr);
+
+ mozilla::Telemetry::AutoTimer<mozilla::Telemetry::CACHE_OFFLINE_SEARCH_2>
+ timer;
+ LOG(("nsOfflineCacheDevice::FindEntry [key=%s]\n", fullKey->get()));
+
+ // SELECT * FROM moz_cache WHERE key = ?
+
+ // Decompose the key into "ClientID" and "Key"
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+ if (!DecomposeCacheEntryKey(fullKey, &cid, &key, keyBuf)) return nullptr;
+
+ AutoResetStatement statement(mStatement_FindEntry);
+
+ nsresult rv = statement->BindUTF8StringByIndex(0, nsDependentCString(cid));
+ nsresult rv2 = statement->BindUTF8StringByIndex(1, nsDependentCString(key));
+ NS_ENSURE_SUCCESS(rv, nullptr);
+ NS_ENSURE_SUCCESS(rv2, nullptr);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ if (NS_FAILED(rv) || !hasRows) return nullptr; // entry not found
+
+ nsOfflineCacheRecord rec;
+ statement->GetSharedBlob(0, &rec.metaDataLen, (const uint8_t**)&rec.metaData);
+ rec.generation = statement->AsInt32(1);
+ rec.dataSize = statement->AsInt32(2);
+ rec.fetchCount = statement->AsInt32(3);
+ rec.lastFetched = statement->AsInt64(4);
+ rec.lastModified = statement->AsInt64(5);
+ rec.expirationTime = statement->AsInt64(6);
+
+ LOG(("entry: [%u %d %d %d %" PRId64 " %" PRId64 " %" PRId64 "]\n",
+ rec.metaDataLen, rec.generation, rec.dataSize, rec.fetchCount,
+ rec.lastFetched, rec.lastModified, rec.expirationTime));
+
+ nsCacheEntry* entry = CreateCacheEntry(this, fullKey, rec);
+
+ if (entry) {
+ // make sure that the data file exists
+ nsOfflineCacheBinding* binding = (nsOfflineCacheBinding*)entry->Data();
+ bool isFile;
+ rv = binding->mDataFile->IsFile(&isFile);
+ if (NS_FAILED(rv) || !isFile) {
+ DeleteEntry(entry, false);
+ delete entry;
+ return nullptr;
+ }
+
+ // lock the entry
+ Lock(*fullKey);
+ }
+
+ return entry;
+}
+
+nsresult nsOfflineCacheDevice::DeactivateEntry(nsCacheEntry* entry) {
+ LOG(("nsOfflineCacheDevice::DeactivateEntry [key=%s]\n",
+ entry->Key()->get()));
+
+ // This method is called to inform us that the nsCacheEntry object is going
+ // away. We should persist anything that needs to be persisted, or if the
+ // entry is doomed, we can go ahead and clear its storage.
+
+ if (entry->IsDoomed()) {
+ // remove corresponding row and file if they exist
+
+ // the row should have been removed in DoomEntry... we could assert that
+ // that happened. otherwise, all we have to do here is delete the file
+ // on disk.
+ DeleteData(entry);
+ } else if (((nsOfflineCacheBinding*)entry->Data())->IsNewEntry()) {
+ // UPDATE the database row
+
+    // Only new entries are updated, since the offline cache is updated in
+    // transactions. New entries are those that were returned from
+    // BindEntry().
+
+ LOG(("nsOfflineCacheDevice::DeactivateEntry updating new entry\n"));
+ UpdateEntry(entry);
+ } else {
+ LOG(
+ ("nsOfflineCacheDevice::DeactivateEntry "
+ "skipping update since entry is not dirty\n"));
+ }
+
+ // Unlock the entry
+ Unlock(*entry->Key());
+
+ delete entry;
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::BindEntry(nsCacheEntry* entry) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::BindEntry [key=%s]\n", entry->Key()->get()));
+
+ NS_ENSURE_STATE(!entry->Data());
+
+ // This method is called to inform us that we have a new entry. The entry
+ // may collide with an existing entry in our DB, but if that happens we can
+ // assume that the entry is not being used.
+
+ // INSERT the database row
+
+ // XXX Assumption: if the row already exists, then FindEntry would have
+  // returned it. If that entry was doomed, then DoomEntry would have removed
+  // it from the table. So we should always be inserting a new row at this
+  // point.
+
+ // Decompose the key into "ClientID" and "Key"
+ nsAutoCString keyBuf;
+ const char *cid, *key;
+ if (!DecomposeCacheEntryKey(entry->Key(), &cid, &key, keyBuf))
+ return NS_ERROR_UNEXPECTED;
+
+ // create binding, pick best generation number
+ RefPtr<nsOfflineCacheBinding> binding =
+ nsOfflineCacheBinding::Create(mCacheDirectory, entry->Key(), -1);
+ if (!binding) return NS_ERROR_OUT_OF_MEMORY;
+ binding->MarkNewEntry();
+
+ nsOfflineCacheRecord rec;
+ rec.clientID = cid;
+ rec.key = key;
+ rec.metaData = nullptr; // don't write any metadata now.
+ rec.metaDataLen = 0;
+ rec.generation = binding->mGeneration;
+ rec.dataSize = 0;
+ rec.fetchCount = entry->FetchCount();
+ rec.lastFetched = PRTimeFromSeconds(entry->LastFetched());
+ rec.lastModified = PRTimeFromSeconds(entry->LastModified());
+ rec.expirationTime = PRTimeFromSeconds(entry->ExpirationTime());
+
+ AutoResetStatement statement(mStatement_BindEntry);
+
+ nsresult rv =
+ statement->BindUTF8StringByIndex(0, nsDependentCString(rec.clientID));
+ nsresult tmp =
+ statement->BindUTF8StringByIndex(1, nsDependentCString(rec.key));
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindBlobByIndex(2, rec.metaData, rec.metaDataLen);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt32ByIndex(3, rec.generation);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt32ByIndex(4, rec.dataSize);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt32ByIndex(5, rec.fetchCount);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(6, rec.lastFetched);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(7, rec.lastModified);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ tmp = statement->BindInt64ByIndex(8, rec.expirationTime);
+ if (NS_FAILED(tmp)) {
+ rv = tmp;
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ NS_ASSERTION(!hasRows, "INSERT should not result in output");
+
+ entry->SetData(binding);
+
+ // lock the entry
+ Lock(*entry->Key());
+
+ return NS_OK;
+}
+
+void nsOfflineCacheDevice::DoomEntry(nsCacheEntry* entry) {
+ LOG(("nsOfflineCacheDevice::DoomEntry [key=%s]\n", entry->Key()->get()));
+
+ // This method is called to inform us that we should mark the entry to be
+ // deleted when it is no longer in use.
+
+ // We can go ahead and delete the corresponding row in our table,
+ // but we must not delete the file on disk until we are deactivated.
+  // In other words, the file should only be deleted once the entry has
+  // been deactivated.
+
+ DeleteEntry(entry, !entry->IsActive());
+}
+
+nsresult nsOfflineCacheDevice::OpenInputStreamForEntry(
+ nsCacheEntry* entry, nsCacheAccessMode mode, uint32_t offset,
+ nsIInputStream** result) {
+ LOG(("nsOfflineCacheDevice::OpenInputStreamForEntry [key=%s]\n",
+ entry->Key()->get()));
+
+ *result = nullptr;
+
+ NS_ENSURE_TRUE(!offset || (offset < entry->DataSize()), NS_ERROR_INVALID_ARG);
+
+ // return an input stream to the entry's data file. the stream
+ // may be read on a background thread.
+
+ nsOfflineCacheBinding* binding = (nsOfflineCacheBinding*)entry->Data();
+ NS_ENSURE_STATE(binding);
+
+ nsCOMPtr<nsIInputStream> in;
+ NS_NewLocalFileInputStream(getter_AddRefs(in), binding->mDataFile, PR_RDONLY);
+ if (!in) return NS_ERROR_UNEXPECTED;
+
+ // respect |offset| param
+ if (offset != 0) {
+ nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(in);
+ NS_ENSURE_TRUE(seekable, NS_ERROR_UNEXPECTED);
+
+ seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
+ }
+
+ in.swap(*result);
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::OpenOutputStreamForEntry(
+ nsCacheEntry* entry, nsCacheAccessMode mode, uint32_t offset,
+ nsIOutputStream** result) {
+ LOG(("nsOfflineCacheDevice::OpenOutputStreamForEntry [key=%s]\n",
+ entry->Key()->get()));
+
+ *result = nullptr;
+
+ NS_ENSURE_TRUE(offset <= entry->DataSize(), NS_ERROR_INVALID_ARG);
+
+ // return an output stream to the entry's data file. we can assume
+ // that the output stream will only be used on the main thread.
+
+ nsOfflineCacheBinding* binding = (nsOfflineCacheBinding*)entry->Data();
+ NS_ENSURE_STATE(binding);
+
+ nsCOMPtr<nsIOutputStream> out;
+ NS_NewLocalFileOutputStream(getter_AddRefs(out), binding->mDataFile,
+ PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE, 00600);
+ if (!out) return NS_ERROR_UNEXPECTED;
+
+ // respect |offset| param
+ nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(out);
+ NS_ENSURE_TRUE(seekable, NS_ERROR_UNEXPECTED);
+ if (offset != 0) seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
+
+ // truncate the file at the given offset
+ seekable->SetEOF();
+
+ nsCOMPtr<nsIOutputStream> bufferedOut;
+ nsresult rv = NS_NewBufferedOutputStream(getter_AddRefs(bufferedOut),
+ out.forget(), 16 * 1024);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bufferedOut.forget(result);
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetFileForEntry(nsCacheEntry* entry,
+ nsIFile** result) {
+ LOG(("nsOfflineCacheDevice::GetFileForEntry [key=%s]\n",
+ entry->Key()->get()));
+
+ nsOfflineCacheBinding* binding = (nsOfflineCacheBinding*)entry->Data();
+ NS_ENSURE_STATE(binding);
+
+ NS_IF_ADDREF(*result = binding->mDataFile);
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::OnDataSizeChange(nsCacheEntry* entry,
+ int32_t deltaSize) {
+ LOG(("nsOfflineCacheDevice::OnDataSizeChange [key=%s delta=%d]\n",
+ entry->Key()->get(), deltaSize));
+
+ const int32_t DELTA_THRESHOLD = 1 << 14; // 16k
+
+ // called to notify us of an impending change in the total size of the
+ // specified entry.
+
+ uint32_t oldSize = entry->DataSize();
+ NS_ASSERTION(deltaSize >= 0 || int32_t(oldSize) + deltaSize >= 0, "oops");
+ uint32_t newSize = int32_t(oldSize) + deltaSize;
+ UpdateEntrySize(entry, newSize);
+
+ mDeltaCounter += deltaSize; // this may go negative
+
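+  // The CacheSize() check below is comparatively expensive (it consults the
+  // database), so it is only performed after at least DELTA_THRESHOLD
+  // (16 KiB) of net growth has accumulated since the last check.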
+ if (mDeltaCounter >= DELTA_THRESHOLD) {
+ if (CacheSize() > mCacheCapacity) {
+ // the entry will overrun the cache capacity, doom the entry
+ // and abort
+#ifdef DEBUG
+ nsresult rv =
+#endif
+ nsCacheService::DoomEntry(entry);
+ NS_ASSERTION(NS_SUCCEEDED(rv), "DoomEntry() failed.");
+ return NS_ERROR_ABORT;
+ }
+
+ mDeltaCounter = 0; // reset counter
+ }
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::Visit(nsICacheVisitor* visitor) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ // called to enumerate the offline cache.
+
+ nsCOMPtr<nsICacheDeviceInfo> deviceInfo = new nsOfflineCacheDeviceInfo(this);
+
+ bool keepGoing;
+ nsresult rv =
+ visitor->VisitDevice(OFFLINE_CACHE_DEVICE_ID, deviceInfo, &keepGoing);
+ if (NS_FAILED(rv)) return rv;
+
+ if (!keepGoing) return NS_OK;
+
+ // SELECT * from moz_cache;
+
+ nsOfflineCacheRecord rec;
+ RefPtr<nsOfflineCacheEntryInfo> info = new nsOfflineCacheEntryInfo;
+ if (!info) return NS_ERROR_OUT_OF_MEMORY;
+ info->mRec = &rec;
+
+ // XXX may want to list columns explicitly
+ nsCOMPtr<mozIStorageStatement> statement;
+ rv = mDB->CreateStatement("SELECT * FROM moz_cache;"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ for (;;) {
+ rv = statement->ExecuteStep(&hasRows);
+ if (NS_FAILED(rv) || !hasRows) break;
+
+ statement->GetSharedUTF8String(0, nullptr, &rec.clientID);
+ statement->GetSharedUTF8String(1, nullptr, &rec.key);
+ statement->GetSharedBlob(2, &rec.metaDataLen,
+ (const uint8_t**)&rec.metaData);
+ rec.generation = statement->AsInt32(3);
+ rec.dataSize = statement->AsInt32(4);
+ rec.fetchCount = statement->AsInt32(5);
+ rec.lastFetched = statement->AsInt64(6);
+ rec.lastModified = statement->AsInt64(7);
+ rec.expirationTime = statement->AsInt64(8);
+
+ bool keepGoing;
+ rv = visitor->VisitEntry(OFFLINE_CACHE_DEVICE_ID, info, &keepGoing);
+ if (NS_FAILED(rv) || !keepGoing) break;
+ }
+
+ info->mRec = nullptr;
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::EvictEntries(const char* clientID) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::EvictEntries [cid=%s]\n",
+ clientID ? clientID : ""));
+
+ // called to evict all entries matching the given clientID.
+
+ // need trigger to fire user defined function after a row is deleted
+ // so we can delete the corresponding data file.
+ EvictionObserver evictionObserver(mDB, mEvictionFunction);
+
+ nsCOMPtr<mozIStorageStatement> statement;
+ nsresult rv;
+ if (clientID) {
+ rv = mDB->CreateStatement("DELETE FROM moz_cache WHERE ClientID=?;"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindUTF8StringByIndex(0, nsDependentCString(clientID));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = mDB->CreateStatement(
+ nsLiteralCString(
+ "DELETE FROM moz_cache_groups WHERE ActiveClientID=?;"),
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindUTF8StringByIndex(0, nsDependentCString(clientID));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // TODO - Should update internal hashtables.
+ // Low priority, since this API is not widely used.
+ } else {
+ rv = mDB->CreateStatement("DELETE FROM moz_cache;"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = mDB->CreateStatement("DELETE FROM moz_cache_groups;"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MutexAutoLock lock(mLock);
+ mCaches.Clear();
+ mActiveCaches.Clear();
+ mActiveCachesByGroup.Clear();
+ }
+
+ evictionObserver.Apply();
+
+ statement = nullptr;
+ // Also evict any namespaces associated with this clientID.
+ if (clientID) {
+ rv = mDB->CreateStatement(
+ "DELETE FROM moz_cache_namespaces WHERE ClientID=?"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindUTF8StringByIndex(0, nsDependentCString(clientID));
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ rv = mDB->CreateStatement("DELETE FROM moz_cache_namespaces;"_ns,
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::MarkEntry(const nsCString& clientID,
+ const nsACString& key,
+ uint32_t typeBits) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::MarkEntry [cid=%s, key=%s, typeBits=%d]\n",
+ clientID.get(), PromiseFlatCString(key).get(), typeBits));
+
+ AutoResetStatement statement(mStatement_MarkEntry);
+ nsresult rv = statement->BindInt32ByIndex(0, typeBits);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(1, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(2, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::UnmarkEntry(const nsCString& clientID,
+ const nsACString& key,
+ uint32_t typeBits) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::UnmarkEntry [cid=%s, key=%s, typeBits=%d]\n",
+ clientID.get(), PromiseFlatCString(key).get(), typeBits));
+
+ AutoResetStatement statement(mStatement_UnmarkEntry);
+ nsresult rv = statement->BindInt32ByIndex(0, typeBits);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(1, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(2, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Remove the entry if it is now empty.
+
+ EvictionObserver evictionObserver(mDB, mEvictionFunction);
+
+ AutoResetStatement cleanupStatement(mStatement_CleanupUnmarked);
+ rv = cleanupStatement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = cleanupStatement->BindUTF8StringByIndex(1, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = cleanupStatement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ evictionObserver.Apply();
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetMatchingNamespace(
+ const nsCString& clientID, const nsACString& key,
+ nsIApplicationCacheNamespace** out) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GetMatchingNamespace [cid=%s, key=%s]\n",
+ clientID.get(), PromiseFlatCString(key).get()));
+
+ nsresult rv;
+
+ AutoResetStatement statement(mStatement_FindNamespaceEntry);
+
+ rv = statement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(1, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *out = nullptr;
+
+ bool found = false;
+ nsCString nsSpec;
+ int32_t nsType = 0;
+ nsCString nsData;
+
+ while (hasRows) {
+ int32_t itemType;
+ rv = statement->GetInt32(2, &itemType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!found || itemType > nsType) {
+ nsType = itemType;
+
+ rv = statement->GetUTF8String(0, nsSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->GetUTF8String(1, nsData);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ found = true;
+ }
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (found) {
+ nsCOMPtr<nsIApplicationCacheNamespace> ns =
+ new nsApplicationCacheNamespace();
+ if (!ns) return NS_ERROR_OUT_OF_MEMORY;
+ rv = ns->Init(nsType, nsSpec, nsData);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ ns.swap(*out);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::CacheOpportunistically(const nsCString& clientID,
+ const nsACString& key) {
+ // XXX: We should also be propagating this cache entry to other matching
+ // caches. See bug 444807.
+
+ return MarkEntry(clientID, key, nsIApplicationCache::ITEM_OPPORTUNISTIC);
+}
+
+nsresult nsOfflineCacheDevice::GetTypes(const nsCString& clientID,
+ const nsACString& key,
+ uint32_t* typeBits) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GetTypes [cid=%s, key=%s]\n", clientID.get(),
+ PromiseFlatCString(key).get()));
+
+ AutoResetStatement statement(mStatement_GetTypes);
+ nsresult rv = statement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(1, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!hasRows) return NS_ERROR_CACHE_KEY_NOT_FOUND;
+
+ *typeBits = statement->AsInt32(0);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GatherEntries(const nsCString& clientID,
+ uint32_t typeBits,
+ nsTArray<nsCString>& keys) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GatherEntries [cid=%s, typeBits=%X]\n",
+ clientID.get(), typeBits));
+
+ AutoResetStatement statement(mStatement_GatherEntries);
+ nsresult rv = statement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindInt32ByIndex(1, typeBits);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return RunSimpleQuery(mStatement_GatherEntries, 0, keys);
+}
+
+nsresult nsOfflineCacheDevice::AddNamespace(const nsCString& clientID,
+ nsIApplicationCacheNamespace* ns) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ nsCString namespaceSpec;
+ nsresult rv = ns->GetNamespaceSpec(namespaceSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCString data;
+ rv = ns->GetData(data);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t itemType;
+ rv = ns->GetItemType(&itemType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ LOG(("nsOfflineCacheDevice::AddNamespace [cid=%s, ns=%s, data=%s, type=%d]",
+ clientID.get(), namespaceSpec.get(), data.get(), itemType));
+
+ AutoResetStatement statement(mStatement_InsertNamespaceEntry);
+
+ rv = statement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindUTF8StringByIndex(1, namespaceSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindUTF8StringByIndex(2, data);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->BindInt32ByIndex(3, itemType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetUsage(const nsACString& clientID,
+ uint32_t* usage) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GetUsage [cid=%s]\n",
+ PromiseFlatCString(clientID).get()));
+
+ *usage = 0;
+
+ AutoResetStatement statement(mStatement_ApplicationCacheSize);
+
+ nsresult rv = statement->BindUTF8StringByIndex(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!hasRows) return NS_OK;
+
+ *usage = static_cast<uint32_t>(statement->AsInt32(0));
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetGroups(nsTArray<nsCString>& keys) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GetGroups"));
+
+ return RunSimpleQuery(mStatement_EnumerateGroups, 0, keys);
+}
+
+nsresult nsOfflineCacheDevice::GetGroupsTimeOrdered(nsTArray<nsCString>& keys) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ LOG(("nsOfflineCacheDevice::GetGroupsTimeOrder"));
+
+ return RunSimpleQuery(mStatement_EnumerateGroupsTimeOrder, 0, keys);
+}
+
+bool nsOfflineCacheDevice::IsLocked(const nsACString& key) {
+ MutexAutoLock lock(mLock);
+ return mLockedEntries.GetEntry(key);
+}
+
+void nsOfflineCacheDevice::Lock(const nsACString& key) {
+ MutexAutoLock lock(mLock);
+ mLockedEntries.PutEntry(key);
+}
+
+void nsOfflineCacheDevice::Unlock(const nsACString& key) {
+ MutexAutoLock lock(mLock);
+ mLockedEntries.RemoveEntry(key);
+}
+
+nsresult nsOfflineCacheDevice::RunSimpleQuery(mozIStorageStatement* statement,
+ uint32_t resultIndex,
+ nsTArray<nsCString>& values) {
+ bool hasRows;
+ nsresult rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsTArray<nsCString> valArray;
+ while (hasRows) {
+ uint32_t length;
+ valArray.AppendElement(nsDependentCString(
+ statement->AsSharedUTF8String(resultIndex, &length)));
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ values = std::move(valArray);
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::CreateApplicationCache(
+ const nsACString& group, nsIApplicationCache** out) {
+ *out = nullptr;
+
+ nsCString clientID;
+ // Some characters are special in the clientID. Escape the groupID
+  // before putting it into the client key.
+ if (!NS_Escape(nsCString(group), clientID, url_Path)) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ PRTime now = PR_Now();
+
+ // Include the timestamp to guarantee uniqueness across runs, and
+ // the gNextTemporaryClientID for uniqueness within a second.
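+  // The resulting clientID therefore has the form
+  // "<escaped group>|<zero-padded seconds since the epoch>|<counter>";
+  // GetGroupForCache() later recovers the group by truncating at the first
+  // '|' and unescaping.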
+ clientID.Append(nsPrintfCString("|%016" PRId64 "|%d", now / PR_USEC_PER_SEC,
+ gNextTemporaryClientID++));
+
+ nsCOMPtr<nsIApplicationCache> cache =
+ new nsApplicationCache(this, group, clientID);
+ if (!cache) return NS_ERROR_OUT_OF_MEMORY;
+
+ nsWeakPtr weak = do_GetWeakReference(cache);
+ if (!weak) return NS_ERROR_OUT_OF_MEMORY;
+
+ MutexAutoLock lock(mLock);
+ mCaches.Put(clientID, weak);
+
+ cache.swap(*out);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetApplicationCache(const nsACString& clientID,
+ nsIApplicationCache** out) {
+ MutexAutoLock lock(mLock);
+ return GetApplicationCache_Unlocked(clientID, out);
+}
+
+nsresult nsOfflineCacheDevice::GetApplicationCache_Unlocked(
+ const nsACString& clientID, nsIApplicationCache** out) {
+ *out = nullptr;
+
+ nsCOMPtr<nsIApplicationCache> cache;
+
+ nsWeakPtr weak;
+ if (mCaches.Get(clientID, getter_AddRefs(weak)))
+ cache = do_QueryReferent(weak);
+
+ if (!cache) {
+ nsCString group;
+ nsresult rv = GetGroupForCache(clientID, group);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (group.IsEmpty()) {
+ return NS_OK;
+ }
+
+ cache = new nsApplicationCache(this, group, clientID);
+ weak = do_GetWeakReference(cache);
+ if (!weak) return NS_ERROR_OUT_OF_MEMORY;
+
+ mCaches.Put(clientID, weak);
+ }
+
+ cache.swap(*out);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::GetActiveCache(const nsACString& group,
+ nsIApplicationCache** out) {
+ *out = nullptr;
+
+ MutexAutoLock lock(mLock);
+
+ nsCString* clientID;
+ if (mActiveCachesByGroup.Get(group, &clientID))
+ return GetApplicationCache_Unlocked(*clientID, out);
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::DeactivateGroup(const nsACString& group) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ nsCString* active = nullptr;
+
+ AutoResetStatement statement(mStatement_DeactivateGroup);
+ nsresult rv = statement->BindUTF8StringByIndex(0, group);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MutexAutoLock lock(mLock);
+
+ if (mActiveCachesByGroup.Get(group, &active)) {
+ mActiveCaches.RemoveEntry(*active);
+ mActiveCachesByGroup.Remove(group);
+ active = nullptr;
+ }
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::Evict(nsILoadContextInfo* aInfo) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_ARG(aInfo);
+
+ nsresult rv;
+
+ mozilla::OriginAttributes const* oa = aInfo->OriginAttributesPtr();
+
+ if (oa->mInIsolatedMozBrowser == false) {
+ nsCOMPtr<nsICacheService> serv = do_GetService(kCacheServiceCID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return nsCacheService::GlobalInstance()->EvictEntriesInternal(
+ nsICache::STORE_OFFLINE);
+ }
+
+ nsAutoCString jaridsuffix;
+ jaridsuffix.Append('%');
+
+ nsAutoCString suffix;
+ oa->CreateSuffix(suffix);
+ jaridsuffix.Append('#');
+ jaridsuffix.Append(suffix);
+
+ AutoResetStatement statement(mStatement_EnumerateApps);
+ rv = statement->BindUTF8StringByIndex(0, jaridsuffix);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ while (hasRows) {
+ nsAutoCString group;
+ rv = statement->GetUTF8String(0, group);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCString clientID;
+ rv = statement->GetUTF8String(1, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIRunnable> ev =
+ new nsOfflineCacheDiscardCache(this, group, clientID);
+
+ rv = nsCacheService::DispatchToCacheIOThread(ev);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+namespace { // anon
+
+class OriginMatch final : public mozIStorageFunction {
+ ~OriginMatch() = default;
+ mozilla::OriginAttributesPattern const mPattern;
+
+ NS_DECL_ISUPPORTS
+ NS_DECL_MOZISTORAGEFUNCTION
+ explicit OriginMatch(mozilla::OriginAttributesPattern const& aPattern)
+ : mPattern(aPattern) {}
+};
+
+NS_IMPL_ISUPPORTS(OriginMatch, mozIStorageFunction)
+
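+// Called from SQL as ORIGIN_MATCH(GroupID). Illustrative example (values
+// assumed): for a GroupID such as
+// "https://example.com/app.manifest#^userContextId=2", the text after the
+// '#' is parsed as an origin-attributes suffix and matched against the
+// pattern supplied to Evict(); the function returns 1 for a match and 0
+// otherwise.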
+NS_IMETHODIMP
+OriginMatch::OnFunctionCall(mozIStorageValueArray* aFunctionArguments,
+ nsIVariant** aResult) {
+ nsresult rv;
+
+ nsAutoCString groupId;
+ rv = aFunctionArguments->GetUTF8String(0, groupId);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ int32_t hash = groupId.Find("#"_ns);
+ if (hash == kNotFound) {
+ // Just ignore...
+ return NS_OK;
+ }
+
+ ++hash;
+
+ nsDependentCSubstring suffix(groupId.BeginReading() + hash,
+ groupId.Length() - hash);
+
+ mozilla::OriginAttributes oa;
+ bool ok = oa.PopulateFromSuffix(suffix);
+ NS_ENSURE_TRUE(ok, NS_ERROR_UNEXPECTED);
+
+ bool match = mPattern.Matches(oa);
+
+ RefPtr<nsVariant> outVar(new nsVariant());
+ rv = outVar->SetAsUint32(match ? 1 : 0);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ outVar.forget(aResult);
+ return NS_OK;
+}
+
+} // namespace
+
+nsresult nsOfflineCacheDevice::Evict(
+ mozilla::OriginAttributesPattern const& aPattern) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ nsresult rv;
+
+ nsCOMPtr<mozIStorageFunction> function1(new OriginMatch(aPattern));
+ rv = mDB->CreateFunction("ORIGIN_MATCH"_ns, 1, function1);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ class AutoRemoveFunc {
+ public:
+ mozIStorageConnection* mDB;
+ explicit AutoRemoveFunc(mozIStorageConnection* aDB) : mDB(aDB) {}
+ ~AutoRemoveFunc() { mDB->RemoveFunction("ORIGIN_MATCH"_ns); }
+ };
+ AutoRemoveFunc autoRemove(mDB);
+
+ nsCOMPtr<mozIStorageStatement> statement;
+ rv = mDB->CreateStatement(
+ nsLiteralCString("SELECT GroupID, ActiveClientID FROM moz_cache_groups "
+ "WHERE ORIGIN_MATCH(GroupID);"),
+ getter_AddRefs(statement));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ AutoResetStatement statementScope(statement);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ while (hasRows) {
+ nsAutoCString group;
+ rv = statement->GetUTF8String(0, group);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCString clientID;
+ rv = statement->GetUTF8String(1, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIRunnable> ev =
+ new nsOfflineCacheDiscardCache(this, group, clientID);
+
+ rv = nsCacheService::DispatchToCacheIOThread(ev);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+bool nsOfflineCacheDevice::CanUseCache(nsIURI* keyURI,
+ const nsACString& clientID,
+ nsILoadContextInfo* loadContextInfo) {
+ {
+ MutexAutoLock lock(mLock);
+ if (!mActiveCaches.Contains(clientID)) return false;
+ }
+
+ nsAutoCString groupID;
+ nsresult rv = GetGroupForCache(clientID, groupID);
+ NS_ENSURE_SUCCESS(rv, false);
+
+ nsCOMPtr<nsIURI> groupURI;
+ rv = NS_NewURI(getter_AddRefs(groupURI), groupID);
+ if (NS_FAILED(rv)) {
+ return false;
+ }
+
+ // When we are choosing an initial cache to load the top
+ // level document from, the URL of that document must have
+ // the same origin as the manifest, according to the spec.
+ // The following check is here because explicit, fallback
+  // and dynamic entries might have an origin different from the
+ // manifest origin.
+ if (!NS_SecurityCompareURIs(keyURI, groupURI, GetStrictFileOriginPolicy())) {
+ return false;
+ }
+
+  // Check that the groupID we found equals the groupID derived from the
+  // load context demanding the load from the app cache. This is a check
+  // of the extended origin.
+
+ nsAutoCString originSuffix;
+ loadContextInfo->OriginAttributesPtr()->CreateSuffix(originSuffix);
+
+ nsAutoCString demandedGroupID;
+ rv = BuildApplicationCacheGroupID(groupURI, originSuffix, demandedGroupID);
+ NS_ENSURE_SUCCESS(rv, false);
+
+ if (groupID != demandedGroupID) {
+ return false;
+ }
+
+ return true;
+}
+
+nsresult nsOfflineCacheDevice::ChooseApplicationCache(
+ const nsACString& key, nsILoadContextInfo* loadContextInfo,
+ nsIApplicationCache** out) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_ARG(loadContextInfo);
+
+ nsresult rv;
+
+ *out = nullptr;
+
+ nsCOMPtr<nsIURI> keyURI;
+ rv = NS_NewURI(getter_AddRefs(keyURI), key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // First try to find a matching cache entry.
+ AutoResetStatement statement(mStatement_FindClient);
+ rv = statement->BindUTF8StringByIndex(0, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasRows;
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ while (hasRows) {
+ int32_t itemType;
+ rv = statement->GetInt32(1, &itemType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!(itemType & nsIApplicationCache::ITEM_FOREIGN)) {
+ nsAutoCString clientID;
+ rv = statement->GetUTF8String(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (CanUseCache(keyURI, clientID, loadContextInfo)) {
+ return GetApplicationCache(clientID, out);
+ }
+ }
+
+ rv = statement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ // OK, we didn't find an exact match. Search for a client with a
+ // matching namespace.
+
+ AutoResetStatement nsstatement(mStatement_FindClientByNamespace);
+
+ rv = nsstatement->BindUTF8StringByIndex(0, key);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = nsstatement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ while (hasRows) {
+ int32_t itemType;
+ rv = nsstatement->GetInt32(1, &itemType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Don't associate with a cache based solely on a whitelist entry
+ if (!(itemType & nsIApplicationCacheNamespace::NAMESPACE_BYPASS)) {
+ nsAutoCString clientID;
+ rv = nsstatement->GetUTF8String(0, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (CanUseCache(keyURI, clientID, loadContextInfo)) {
+ return GetApplicationCache(clientID, out);
+ }
+ }
+
+ rv = nsstatement->ExecuteStep(&hasRows);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsOfflineCacheDevice::CacheOpportunistically(
+ nsIApplicationCache* cache, const nsACString& key) {
+ NS_ENSURE_ARG_POINTER(cache);
+
+ nsresult rv;
+
+ nsAutoCString clientID;
+ rv = cache->GetClientID(clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return CacheOpportunistically(clientID, key);
+}
+
+nsresult nsOfflineCacheDevice::ActivateCache(const nsACString& group,
+ const nsACString& clientID) {
+ NS_ENSURE_TRUE(Initialized(), NS_ERROR_NOT_INITIALIZED);
+
+ AutoResetStatement statement(mStatement_ActivateClient);
+ nsresult rv = statement->BindUTF8StringByIndex(0, group);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindUTF8StringByIndex(1, clientID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = statement->BindInt32ByIndex(2, SecondsFromPRTime(PR_Now()));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = statement->Execute();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MutexAutoLock lock(mLock);
+
+ nsCString* active;
+ if (mActiveCachesByGroup.Get(group, &active)) {
+ mActiveCaches.RemoveEntry(*active);
+ mActiveCachesByGroup.Remove(group);
+ active = nullptr;
+ }
+
+ if (!clientID.IsEmpty()) {
+ mActiveCaches.PutEntry(clientID);
+ mActiveCachesByGroup.Put(group, new nsCString(clientID));
+ }
+
+ return NS_OK;
+}
+
+bool nsOfflineCacheDevice::IsActiveCache(const nsACString& group,
+ const nsACString& clientID) {
+ nsCString* active = nullptr;
+ MutexAutoLock lock(mLock);
+ return mActiveCachesByGroup.Get(group, &active) && *active == clientID;
+}
+
+/**
+ * Preference accessors
+ */
+
+void nsOfflineCacheDevice::SetCacheParentDirectory(nsIFile* parentDir) {
+ if (Initialized()) {
+ NS_ERROR("cannot switch cache directory once initialized");
+ return;
+ }
+
+ if (!parentDir) {
+ mCacheDirectory = nullptr;
+ return;
+ }
+
+ // ensure parent directory exists
+ nsresult rv = EnsureDir(parentDir);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("unable to create parent directory");
+ return;
+ }
+
+ mBaseDirectory = parentDir;
+
+ // cache dir may not exist, but that's ok
+ nsCOMPtr<nsIFile> dir;
+ rv = parentDir->Clone(getter_AddRefs(dir));
+ if (NS_FAILED(rv)) return;
+ rv = dir->AppendNative("OfflineCache"_ns);
+ if (NS_FAILED(rv)) return;
+
+ mCacheDirectory = dir;
+}
+
+void nsOfflineCacheDevice::SetCapacity(uint32_t capacity) {
+ mCacheCapacity = capacity * 1024;
+}
+
+bool nsOfflineCacheDevice::AutoShutdown(nsIApplicationCache* aAppCache) {
+ if (!mAutoShutdown) return false;
+
+ mAutoShutdown = false;
+
+ Shutdown();
+
+ nsCOMPtr<nsICacheService> serv = do_GetService(kCacheServiceCID);
+ RefPtr<nsCacheService> cacheService = nsCacheService::GlobalInstance();
+ cacheService->RemoveCustomOfflineDevice(this);
+
+ nsAutoCString clientID;
+ aAppCache->GetClientID(clientID);
+
+ MutexAutoLock lock(mLock);
+ mCaches.Remove(clientID);
+
+ return true;
+}
diff --git a/netwerk/cache/nsDiskCacheDeviceSQL.h b/netwerk/cache/nsDiskCacheDeviceSQL.h
new file mode 100644
index 0000000000..772bc21b83
--- /dev/null
+++ b/netwerk/cache/nsDiskCacheDeviceSQL.h
@@ -0,0 +1,263 @@
+/* vim:set ts=2 sw=2 sts=2 et cin: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsOfflineCacheDevice_h__
+#define nsOfflineCacheDevice_h__
+
+#include "nsCacheDevice.h"
+#include "nsIApplicationCache.h"
+#include "mozIStorageConnection.h"
+#include "mozIStorageFunction.h"
+#include "nsIFile.h"
+#include "nsCOMPtr.h"
+#include "nsCOMArray.h"
+#include "nsInterfaceHashtable.h"
+#include "nsClassHashtable.h"
+#include "nsIWeakReference.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+
+class nsIURI;
+class nsOfflineCacheDevice;
+class mozIStorageService;
+class nsILoadContextInfo;
+namespace mozilla {
+class OriginAttributesPattern;
+}
+
+class nsApplicationCacheNamespace final : public nsIApplicationCacheNamespace {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIAPPLICATIONCACHENAMESPACE
+
+ nsApplicationCacheNamespace() : mItemType(0) {}
+
+ private:
+ ~nsApplicationCacheNamespace() = default;
+
+ uint32_t mItemType;
+ nsCString mNamespaceSpec;
+ nsCString mData;
+};
+
+class nsOfflineCacheEvictionFunction final : public mozIStorageFunction {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_MOZISTORAGEFUNCTION
+
+ explicit nsOfflineCacheEvictionFunction(nsOfflineCacheDevice* device);
+
+ void Init();
+ void Reset();
+ void Apply();
+
+ private:
+ ~nsOfflineCacheEvictionFunction() = default;
+
+ nsOfflineCacheDevice* mDevice;
+ bool mTLSInited;
+};
+
+class nsOfflineCacheDevice final : public nsCacheDevice, public nsISupports {
+ public:
+ nsOfflineCacheDevice();
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ /**
+ * nsCacheDevice methods
+ */
+
+ virtual nsresult Init() override;
+ nsresult InitWithSqlite(mozIStorageService* ss);
+ virtual nsresult Shutdown() override;
+
+ virtual const char* GetDeviceID(void) override;
+ virtual nsCacheEntry* FindEntry(nsCString* key, bool* collision) override;
+ virtual nsresult DeactivateEntry(nsCacheEntry* entry) override;
+ virtual nsresult BindEntry(nsCacheEntry* entry) override;
+ virtual void DoomEntry(nsCacheEntry* entry) override;
+
+ virtual nsresult OpenInputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIInputStream** result) override;
+
+ virtual nsresult OpenOutputStreamForEntry(nsCacheEntry* entry,
+ nsCacheAccessMode mode,
+ uint32_t offset,
+ nsIOutputStream** result) override;
+
+ virtual nsresult GetFileForEntry(nsCacheEntry* entry,
+ nsIFile** result) override;
+
+ virtual nsresult OnDataSizeChange(nsCacheEntry* entry,
+ int32_t deltaSize) override;
+
+ virtual nsresult Visit(nsICacheVisitor* visitor) override;
+
+ virtual nsresult EvictEntries(const char* clientID) override;
+
+ /* Entry ownership */
+ nsresult GetOwnerDomains(const char* clientID, uint32_t* count,
+ char*** domains);
+ nsresult GetOwnerURIs(const char* clientID, const nsACString& ownerDomain,
+ uint32_t* count, char*** uris);
+ nsresult SetOwnedKeys(const char* clientID, const nsACString& ownerDomain,
+ const nsACString& ownerUrl, uint32_t count,
+ const char** keys);
+ nsresult GetOwnedKeys(const char* clientID, const nsACString& ownerDomain,
+ const nsACString& ownerUrl, uint32_t* count,
+ char*** keys);
+ nsresult AddOwnedKey(const char* clientID, const nsACString& ownerDomain,
+ const nsACString& ownerURI, const nsACString& key);
+ nsresult RemoveOwnedKey(const char* clientID, const nsACString& ownerDomain,
+ const nsACString& ownerURI, const nsACString& key);
+ nsresult KeyIsOwned(const char* clientID, const nsACString& ownerDomain,
+ const nsACString& ownerURI, const nsACString& key,
+ bool* isOwned);
+
+ nsresult ClearKeysOwnedByDomain(const char* clientID,
+ const nsACString& ownerDomain);
+ nsresult EvictUnownedEntries(const char* clientID);
+
+ static nsresult BuildApplicationCacheGroupID(nsIURI* aManifestURL,
+ nsACString const& aOriginSuffix,
+ nsACString& _result);
+
+ nsresult ActivateCache(const nsACString& group, const nsACString& clientID);
+ bool IsActiveCache(const nsACString& group, const nsACString& clientID);
+ nsresult CreateApplicationCache(const nsACString& group,
+ nsIApplicationCache** out);
+
+ nsresult GetApplicationCache(const nsACString& clientID,
+ nsIApplicationCache** out);
+ nsresult GetApplicationCache_Unlocked(const nsACString& clientID,
+ nsIApplicationCache** out);
+
+ nsresult GetActiveCache(const nsACString& group, nsIApplicationCache** out);
+
+ nsresult DeactivateGroup(const nsACString& group);
+
+ nsresult ChooseApplicationCache(const nsACString& key,
+ nsILoadContextInfo* loadContext,
+ nsIApplicationCache** out);
+
+ nsresult CacheOpportunistically(nsIApplicationCache* cache,
+ const nsACString& key);
+
+ nsresult Evict(nsILoadContextInfo* aInfo);
+ nsresult Evict(mozilla::OriginAttributesPattern const& aPattern);
+
+ nsresult GetGroups(nsTArray<nsCString>& keys);
+
+ nsresult GetGroupsTimeOrdered(nsTArray<nsCString>& keys);
+
+ bool IsLocked(const nsACString& key);
+ void Lock(const nsACString& key);
+ void Unlock(const nsACString& key);
+
+ /**
+ * Preference accessors
+ */
+
+ void SetCacheParentDirectory(nsIFile* parentDir);
+ void SetCapacity(uint32_t capacity);
+ void SetAutoShutdown() { mAutoShutdown = true; }
+ bool AutoShutdown(nsIApplicationCache* aAppCache);
+
+ nsIFile* BaseDirectory() { return mBaseDirectory; }
+ nsIFile* CacheDirectory() { return mCacheDirectory; }
+ uint32_t CacheCapacity() { return mCacheCapacity; }
+ uint32_t CacheSize();
+ uint32_t EntryCount();
+
+ private:
+ ~nsOfflineCacheDevice() = default;
+
+ friend class nsApplicationCache;
+
+ static bool GetStrictFileOriginPolicy();
+
+ bool Initialized() { return mDB != nullptr; }
+
+ nsresult InitActiveCaches();
+ nsresult UpdateEntry(nsCacheEntry* entry);
+ nsresult UpdateEntrySize(nsCacheEntry* entry, uint32_t newSize);
+ nsresult DeleteEntry(nsCacheEntry* entry, bool deleteData);
+ nsresult DeleteData(nsCacheEntry* entry);
+ nsresult EnableEvictionObserver();
+ nsresult DisableEvictionObserver();
+
+ bool CanUseCache(nsIURI* keyURI, const nsACString& clientID,
+ nsILoadContextInfo* loadContext);
+
+ nsresult MarkEntry(const nsCString& clientID, const nsACString& key,
+ uint32_t typeBits);
+ nsresult UnmarkEntry(const nsCString& clientID, const nsACString& key,
+ uint32_t typeBits);
+
+ nsresult CacheOpportunistically(const nsCString& clientID,
+ const nsACString& key);
+ nsresult GetTypes(const nsCString& clientID, const nsACString& key,
+ uint32_t* typeBits);
+
+ nsresult GetMatchingNamespace(const nsCString& clientID,
+ const nsACString& key,
+ nsIApplicationCacheNamespace** out);
+ nsresult GatherEntries(const nsCString& clientID, uint32_t typeBits,
+ nsTArray<nsCString>& keys);
+ nsresult AddNamespace(const nsCString& clientID,
+ nsIApplicationCacheNamespace* ns);
+
+ nsresult GetUsage(const nsACString& clientID, uint32_t* usage);
+
+  nsresult RunSimpleQuery(mozIStorageStatement* statement,
+                          uint32_t resultIndex, nsTArray<nsCString>& values);
+
+ nsCOMPtr<mozIStorageConnection> mDB;
+ RefPtr<nsOfflineCacheEvictionFunction> mEvictionFunction;
+
+ nsCOMPtr<mozIStorageStatement> mStatement_CacheSize;
+ nsCOMPtr<mozIStorageStatement> mStatement_ApplicationCacheSize;
+ nsCOMPtr<mozIStorageStatement> mStatement_EntryCount;
+ nsCOMPtr<mozIStorageStatement> mStatement_UpdateEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_UpdateEntrySize;
+ nsCOMPtr<mozIStorageStatement> mStatement_DeleteEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_FindEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_BindEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_ClearDomain;
+ nsCOMPtr<mozIStorageStatement> mStatement_MarkEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_UnmarkEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_GetTypes;
+ nsCOMPtr<mozIStorageStatement> mStatement_FindNamespaceEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_InsertNamespaceEntry;
+ nsCOMPtr<mozIStorageStatement> mStatement_CleanupUnmarked;
+ nsCOMPtr<mozIStorageStatement> mStatement_GatherEntries;
+ nsCOMPtr<mozIStorageStatement> mStatement_ActivateClient;
+ nsCOMPtr<mozIStorageStatement> mStatement_DeactivateGroup;
+ nsCOMPtr<mozIStorageStatement> mStatement_FindClient;
+ nsCOMPtr<mozIStorageStatement> mStatement_FindClientByNamespace;
+ nsCOMPtr<mozIStorageStatement> mStatement_EnumerateApps;
+ nsCOMPtr<mozIStorageStatement> mStatement_EnumerateGroups;
+ nsCOMPtr<mozIStorageStatement> mStatement_EnumerateGroupsTimeOrder;
+
+ nsCOMPtr<nsIFile> mBaseDirectory;
+ nsCOMPtr<nsIFile> mCacheDirectory;
+ uint32_t mCacheCapacity; // in bytes
+ int32_t mDeltaCounter;
+ bool mAutoShutdown;
+
+ mozilla::Mutex mLock;
+
+ nsInterfaceHashtable<nsCStringHashKey, nsIWeakReference> mCaches;
+ nsClassHashtable<nsCStringHashKey, nsCString> mActiveCachesByGroup;
+ nsTHashtable<nsCStringHashKey> mActiveCaches;
+ nsTHashtable<nsCStringHashKey> mLockedEntries;
+
+ nsCOMPtr<nsIEventTarget> mInitEventTarget;
+};
+
+#endif // nsOfflineCacheDevice_h__
diff --git a/netwerk/cache/nsICache.idl b/netwerk/cache/nsICache.idl
new file mode 100644
index 0000000000..cd14c98312
--- /dev/null
+++ b/netwerk/cache/nsICache.idl
@@ -0,0 +1,142 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+typedef long nsCacheStoragePolicy;
+typedef long nsCacheAccessMode;
+
+/**
+ * nsICache is a namespace for various cache constants. It does not represent
+ * an actual object.
+ */
+[builtinclass, uuid(d6c67f38-b39a-4582-8a48-4c4f8a56dfd0)]
+interface nsICache : nsISupports
+{
+ /**
+ * Access Modes
+ *
+ *
+ * Mode Requested | Not Cached | Cached
+ * ------------------------------------------------------------------------
+ * READ | KEY_NOT_FOUND | NS_OK
+ * | Mode = NONE | Mode = READ
+ * | No Descriptor | Descriptor
+ * ------------------------------------------------------------------------
+ * WRITE | NS_OK | NS_OK (Cache service
+ * | Mode = WRITE | Mode = WRITE dooms existing
+ * | Descriptor | Descriptor cache entry)
+ * ------------------------------------------------------------------------
+ * READ_WRITE | NS_OK | NS_OK
+ * (1st req.) | Mode = WRITE | Mode = READ_WRITE
+ * | Descriptor | Descriptor
+ * ------------------------------------------------------------------------
+ * READ_WRITE | N/A | NS_OK
+ * (Nth req.) | | Mode = READ
+ * | | Descriptor
+ * ------------------------------------------------------------------------
+ *
+ *
+ * Access Requested:
+ *
+ * READ - I only want to READ, if there isn't an entry just fail
+ * WRITE - I have something new I want to write into the cache, make
+ * me a new entry and doom the old one, if any.
+ * READ_WRITE - I want to READ, but I'm willing to update an existing
+ * entry if necessary, or create a new one if none exists.
+ *
+ *
+ * Access Granted:
+ *
+ * NONE - No descriptor is provided. You get zilch. Nada. Nothing.
+ * READ - You can READ from this descriptor.
+ * WRITE - You must WRITE to this descriptor because the cache entry
+ * was just created for you.
+ * READ_WRITE - You can READ the descriptor to determine if it's valid,
+ * you may WRITE if it needs updating.
+ *
+ *
+ * Comments:
+ *
+ * If you think that you might need to modify cached data or meta data,
+ * then you must open a cache entry requesting WRITE access. Only one
+ * cache entry descriptor, per cache entry, will be granted WRITE access.
+ *
+ * Usually, you will request READ_WRITE access in order to first test the
+ * meta data and informational fields to determine if a write (ie. going
+ * to the net) may actually be necessary. If you determine that it is
+ * not, then you would mark the cache entry as valid (using MarkValid) and
+ * then simply read the data from the cache.
+ *
+ * A descriptor granted WRITE access has exclusive access to the cache
+ * entry up to the point at which it marks it as valid. Once the cache
+ * entry has been "validated", other descriptors with READ access may be
+ * opened to the cache entry.
+ *
+ * If you make a request for READ_WRITE access to a cache entry, the cache
+ * service will downgrade your access to READ if there is already a
+ * cache entry descriptor open with WRITE access.
+ *
+ * If you make a request for only WRITE access to a cache entry and another
+ * descriptor with WRITE access is currently open, then the existing cache
+ * entry will be 'doomed', and you will be given a descriptor (with WRITE
+ * access only) to a new cache entry.
+ *
+ */
+ const nsCacheAccessMode ACCESS_NONE = 0;
+ const nsCacheAccessMode ACCESS_READ = 1;
+ const nsCacheAccessMode ACCESS_WRITE = 2;
+ const nsCacheAccessMode ACCESS_READ_WRITE = 3;
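+
+    /**
+     * Illustrative sketch (not part of the original documentation): a
+     * typical READ_WRITE consumer, assuming the openCacheEntry() method
+     * declared in nsICacheSession.idl (key, access mode, blocking flag),
+     * looks roughly like this C++:
+     *
+     *   nsCOMPtr<nsICacheEntryDescriptor> desc;
+     *   session->OpenCacheEntry(key, nsICache::ACCESS_READ_WRITE,
+     *                           /* blocking = */ true, getter_AddRefs(desc));
+     *   nsCacheAccessMode granted;
+     *   desc->GetAccessGranted(&granted);
+     *   if (granted == nsICache::ACCESS_WRITE) {
+     *     // The entry was just created for us: write the data, then call
+     *     // desc->MarkValid() so readers can be granted access.
+     *   } else {
+     *     // The entry already exists: validate its meta data and either
+     *     // MarkValid() it or update it.
+     *   }
+     */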
+
+ /**
+ * Storage Policy
+ *
+ * The storage policy of a cache entry determines the device(s) to which
+ * it belongs. See nsICacheSession and nsICacheEntryDescriptor for more
+ * details.
+ *
+ * STORE_ANYWHERE - Allows the cache entry to be stored in any device.
+ * The cache service decides which cache device to use
+ * based on "some resource management calculation."
+ * STORE_IN_MEMORY - Requires the cache entry to reside in non-persistent
+ * storage (ie. typically in system RAM).
+ * STORE_ON_DISK - Requires the cache entry to reside in persistent
+ * storage (ie. typically on a system's hard disk).
+ * STORE_OFFLINE - Requires the cache entry to reside in persistent,
+ * reliable storage for offline use.
+ */
+ const nsCacheStoragePolicy STORE_ANYWHERE = 0;
+ const nsCacheStoragePolicy STORE_IN_MEMORY = 1;
+ const nsCacheStoragePolicy STORE_ON_DISK = 2;
+ // value 3 was used by STORE_ON_DISK_AS_FILE which was removed
+ const nsCacheStoragePolicy STORE_OFFLINE = 4;
+
+ /**
+ * All entries for a cache session are stored as streams of data or
+     * as objects. These constants may be used to specify the type of entries
+ * when calling nsICacheService::CreateSession().
+ */
+ const long NOT_STREAM_BASED = 0;
+ const long STREAM_BASED = 1;
+
+ /**
+ * The synchronous OpenCacheEntry() may be blocking or non-blocking. If a cache entry is
+     * waiting to be validated by another cache descriptor (so no new cache descriptors for that
+     * key can be created), OpenCacheEntry() will return NS_ERROR_CACHE_WAIT_FOR_VALIDATION in
+ * non-blocking mode. In blocking mode, it will wait until the cache entry for the key has
+ * been validated or doomed. If the cache entry is validated, then a descriptor for that
+ * entry will be created and returned. If the cache entry was doomed, then a descriptor
+ * will be created for a new cache entry for the key.
+ */
+ const long NON_BLOCKING = 0;
+ const long BLOCKING = 1;
+
+ /**
+ * Constant meaning no expiration time.
+ */
+ const unsigned long NO_EXPIRATION_TIME = 0xFFFFFFFF;
+};
+
diff --git a/netwerk/cache/nsICacheEntryDescriptor.idl b/netwerk/cache/nsICacheEntryDescriptor.idl
new file mode 100644
index 0000000000..085ab388a8
--- /dev/null
+++ b/netwerk/cache/nsICacheEntryDescriptor.idl
@@ -0,0 +1,164 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsICacheVisitor.idl"
+#include "nsICache.idl"
+
+interface nsISimpleEnumerator;
+interface nsICacheListener;
+interface nsIInputStream;
+interface nsIOutputStream;
+interface nsIFile;
+interface nsICacheMetaDataVisitor;
+
+
+[scriptable, uuid(90b17d31-46aa-4fb1-a206-473c966cbc18)]
+interface nsICacheEntryDescriptor : nsICacheEntryInfo
+{
+ /**
+ * Set the time at which the cache entry should be considered invalid (in
+ * seconds since the Epoch).
+ */
+ void setExpirationTime(in uint32_t expirationTime);
+
+ /**
+ * Set the cache entry data size. This will fail if the cache entry
+ * IS stream based.
+ */
+ void setDataSize(in unsigned long size);
+
+ /**
+ * Open blocking input stream to cache data. This will fail if the cache
+ * entry IS NOT stream based. Use the stream transport service to
+ * asynchronously read this stream on a background thread. The returned
+ * stream MAY implement nsISeekableStream.
+ *
+ * @param offset
+ * read starting from this offset into the cached data. an offset
+ * beyond the end of the stream has undefined consequences.
+ *
+ * @return blocking, unbuffered input stream.
+ */
+ nsIInputStream openInputStream(in unsigned long offset);
+
+ /**
+ * Open blocking output stream to cache data. This will fail if the cache
+ * entry IS NOT stream based. Use the stream transport service to
+ * asynchronously write to this stream on a background thread. The returned
+ * stream MAY implement nsISeekableStream.
+ *
+ * If opening an output stream to existing cached data, the data will be
+ * truncated to the specified offset.
+ *
+ * @param offset
+ * write starting from this offset into the cached data. an offset
+ * beyond the end of the stream has undefined consequences.
+ *
+ * @return blocking, unbuffered output stream.
+ */
+ nsIOutputStream openOutputStream(in unsigned long offset);
+
+ /**
+ * Get/set the cache data element. This will fail if the cache entry
+ * IS stream based. The cache entry holds a strong reference to this
+ * object. The object will be released when the cache entry is destroyed.
+ */
+ attribute nsISupports cacheElement;
+
+ /**
+ * Stores the Content-Length specified in the HTTP header for this
+ * entry. Checked before we write to the cache entry, to prevent ever
+ * taking up space in the cache for an entry that we know up front
+ * is going to have to be evicted anyway. See bug 588507.
+ */
+ attribute int64_t predictedDataSize;
+
+ /**
+ * Get the access granted to this descriptor. See nsICache.idl for the
+ * definitions of the access modes and a thorough description of their
+ * corresponding meanings.
+ */
+ readonly attribute nsCacheAccessMode accessGranted;
+
+ /**
+ * Get/set the storage policy of the cache entry. See nsICache.idl for
+ * the definitions of the storage policies.
+ */
+ attribute nsCacheStoragePolicy storagePolicy;
+
+ /**
+ * Get the disk file associated with the cache entry.
+ */
+ readonly attribute nsIFile file;
+
+ /**
+ * Get/set security info on the cache entry for this descriptor. This fails
+ * if the storage policy is not STORE_IN_MEMORY.
+ */
+ attribute nsISupports securityInfo;
+
+ /**
+ * Get the size of the cache entry data, as stored. This may differ
+ * from the entry's dataSize, if the entry is compressed.
+ */
+ readonly attribute unsigned long storageDataSize;
+
+ /**
+ * Doom the cache entry this descriptor references in order to slate it for
+ * removal. Once doomed a cache entry cannot be undoomed.
+ *
+ * A descriptor with WRITE access can doom the cache entry and choose to
+ * fail pending requests. This means that pending requests will not get
+ * a cache descriptor. This is meant as a tool for clients that wish to
+ * instruct pending requests to skip the cache.
+ */
+ void doom();
+ void doomAndFailPendingRequests(in nsresult status);
+
+ /**
+ * Asynchronously doom an entry. Listener will be notified about the status
+ * of the operation. Null may be passed if caller doesn't care about the
+ * result.
+ */
+ void asyncDoom(in nsICacheListener listener);
+
+ /**
+ * A writer must validate this cache object before any readers are given
+ * a descriptor to the object.
+ */
+ void markValid();
+
+ /**
+ * Explicitly close the descriptor (optional).
+ */
+
+ void close();
+
+ /**
+ * Methods for accessing meta data. Meta data is a table of key/value
+ * string pairs. The strings do not have to conform to any particular
+ * charset, but they must be null terminated.
+ */
+ string getMetaDataElement(in string key);
+ void setMetaDataElement(in string key, in string value);
+
+ /**
+ * Visitor will be called with key/value pair for each meta data element.
+ */
+ void visitMetaData(in nsICacheMetaDataVisitor visitor);
+};
+
+
+
+[scriptable, uuid(22f9a49c-3cf8-4c23-8006-54efb11ac562)]
+interface nsICacheMetaDataVisitor : nsISupports
+{
+ /**
+ * Called for each key/value pair in the meta data for a cache entry
+ */
+ boolean visitMetaDataElement(in string key,
+ in string value);
+};
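+
+/**
+ * Illustrative sketch, not part of the original API documentation: a C++
+ * consumer holding a descriptor could use the meta data methods above
+ * roughly as follows. MetaDataDumper is a hypothetical class name and the
+ * keys shown are examples only.
+ *
+ *   class MetaDataDumper final : public nsICacheMetaDataVisitor {
+ *    public:
+ *     NS_DECL_ISUPPORTS
+ *     NS_IMETHOD VisitMetaDataElement(const char* aKey, const char* aValue,
+ *                                     bool* aKeepVisiting) override {
+ *       printf("%s: %s\n", aKey, aValue);
+ *       *aKeepVisiting = true;  // assumed: true continues the enumeration
+ *       return NS_OK;
+ *     }
+ *    private:
+ *     ~MetaDataDumper() = default;
+ *   };
+ *   // NS_IMPL_ISUPPORTS(MetaDataDumper, nsICacheMetaDataVisitor) in the .cpp
+ *
+ *   // descriptor is an nsICacheEntryDescriptor obtained from a cache session
+ *   descriptor->SetMetaDataElement("request-method", "GET");
+ *   RefPtr<MetaDataDumper> dumper = new MetaDataDumper();
+ *   descriptor->VisitMetaData(dumper);
+ */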
diff --git a/netwerk/cache/nsICacheListener.idl b/netwerk/cache/nsICacheListener.idl
new file mode 100644
index 0000000000..cfb2dd4708
--- /dev/null
+++ b/netwerk/cache/nsICacheListener.idl
@@ -0,0 +1,32 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#include "nsISupports.idl"
+#include "nsICache.idl"
+
+
+interface nsICacheEntryDescriptor;
+
+[scriptable, uuid(8eadf2ed-8cac-4961-8025-6da6d5827e74)]
+interface nsICacheListener : nsISupports
+{
+ /**
+ * Called when the requested access (or appropriate subset) is
+ * acquired. The status parameter equals NS_OK on success.
+     * See nsICache.idl for the accessGranted values.
+ */
+ void onCacheEntryAvailable(in nsICacheEntryDescriptor descriptor,
+ in nsCacheAccessMode accessGranted,
+ in nsresult status);
+
+ /**
+ * Called when nsCacheSession::DoomEntry() is completed. The status
+ * parameter is NS_OK when the entry was doomed, or NS_ERROR_NOT_AVAILABLE
+ * when there is no such entry.
+ */
+ void onCacheEntryDoomed(in nsresult status);
+};
diff --git a/netwerk/cache/nsICacheService.idl b/netwerk/cache/nsICacheService.idl
new file mode 100644
index 0000000000..c062a7910f
--- /dev/null
+++ b/netwerk/cache/nsICacheService.idl
@@ -0,0 +1,98 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsICache.idl"
+
+interface nsISimpleEnumerator;
+interface nsICacheListener;
+interface nsICacheSession;
+interface nsICacheVisitor;
+interface nsIEventTarget;
+
+/**
+ * @deprecated
+ *
+ * IMPORTANT NOTE: THIS INTERFACE IS NO LONGER SUPPORTED AND IS PLANNED TO BE
+ * REMOVED SOON. WE STRONGLY ENCOURAGE YOU TO MIGRATE EXISTING CODE AND, FOR
+ * NEW CODE, TO USE ONLY THE NEW HTTP CACHE API IN netwerk/cache2/.
+ */
+[scriptable, uuid(14dbe1e9-f3bc-45af-92f4-2c574fcd4e39)]
+interface nsICacheService : nsISupports
+{
+ /**
+   * @throws NS_ERROR_NOT_IMPLEMENTED when cache v2 is preferred.
+ *
+ * Create a cache session
+ *
+ * A cache session represents a client's access into the cache. The cache
+ * session is not "owned" by the cache service. Hence, it is possible to
+ * create duplicate cache sessions. Entries created by a cache session
+ * are invisible to other cache sessions, unless the cache sessions are
+ * equivalent.
+ *
+ * @param clientID - Specifies the name of the client using the cache.
+ * @param storagePolicy - Limits the storage policy for all entries
+ * accessed via the returned session. As a result, devices excluded
+ * by the storage policy will not be searched when opening entries
+ * from the returned session.
+ * @param streamBased - Indicates whether or not the data being cached
+ * can be represented as a stream. The storagePolicy must be
+ * consistent with the value of this field. For example, a non-stream-
+ * based cache entry can only have a storage policy of STORE_IN_MEMORY.
+ * @return new cache session.
+ */
+ nsICacheSession createSession(in string clientID,
+ in nsCacheStoragePolicy storagePolicy,
+ in boolean streamBased);
+
+ /**
+   * @throws NS_ERROR_NOT_IMPLEMENTED when cache v2 is preferred.
+ *
+ * Visit entries stored in the cache. Used to implement about:cache.
+ */
+ void visitEntries(in nsICacheVisitor visitor);
+
+ /**
+   * @throws NS_ERROR_NOT_IMPLEMENTED when cache v2 is preferred.
+ *
+ * Evicts all entries in all devices implied by the storage policy.
+ *
+ * @note This function may evict some items but will throw if it fails to evict
+ * everything.
+ */
+ void evictEntries(in nsCacheStoragePolicy storagePolicy);
+
+ /**
+ * Event target which is used for I/O operations
+ */
+ readonly attribute nsIEventTarget cacheIOTarget;
+};
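+
+/**
+ * Illustrative sketch, not part of the original API documentation: before
+ * this interface was deprecated, a C++ caller would typically obtain a
+ * session along these lines. The client ID "HTTP" is an example only; the
+ * storage policy constant comes from nsICache.idl.
+ *
+ *   nsresult rv;
+ *   nsCOMPtr<nsICacheService> cacheService =
+ *       do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ *   if (NS_FAILED(rv)) return rv;
+ *
+ *   nsCOMPtr<nsICacheSession> session;
+ *   rv = cacheService->CreateSession("HTTP", nsICache::STORE_ANYWHERE,
+ *                                    true,  // streamBased
+ *                                    getter_AddRefs(session));
+ *   if (NS_FAILED(rv)) return rv;
+ */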
+
+%{C++
+/**
+ * Observer service notification that is sent when
+ * nsICacheService::evictEntries() or nsICacheSession::evictEntries()
+ * is called.
+ */
+#define NS_CACHESERVICE_EMPTYCACHE_TOPIC_ID "cacheservice:empty-cache"
+%}
+
+[scriptable, builtinclass, uuid(d0fc8d38-db80-4928-bf1c-b0085ddfa9dc)]
+interface nsICacheServiceInternal : nsICacheService
+{
+ /**
+ * This is an internal interface. It changes so frequently that it probably
+ * went away while you were reading this.
+ */
+
+ /**
+ * Milliseconds for which the service lock has been held. 0 if unlocked.
+ */
+ readonly attribute double lockHeldTime;
+};
+
+
diff --git a/netwerk/cache/nsICacheSession.idl b/netwerk/cache/nsICacheSession.idl
new file mode 100644
index 0000000000..cb8871c28d
--- /dev/null
+++ b/netwerk/cache/nsICacheSession.idl
@@ -0,0 +1,88 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsICache.idl"
+
+interface nsICacheEntryDescriptor;
+interface nsICacheListener;
+interface nsIFile;
+
+[scriptable, uuid(1dd7708c-de48-4ffe-b5aa-cd218c762887)]
+interface nsICacheSession : nsISupports
+{
+ /**
+ * Expired entries will be doomed or evicted if this attribute is set to
+ * true. If false, expired entries will be returned (useful for offline-
+ * mode and clients, such as HTTP, that can update the valid lifetime of
+ * cached content). This attribute defaults to true.
+ */
+ attribute boolean doomEntriesIfExpired;
+
+ /**
+   * When set, entries created with this session will be placed in a cache
+   * based at this directory. Use when storing entries to a different
+   * profile than the active profile of the currently running application
+   * process.
+ */
+ attribute nsIFile profileDirectory;
+
+ /**
+ * A cache session can only give out one descriptor with WRITE access
+ * to a given cache entry at a time. Until the client calls MarkValid on
+ * its descriptor, other attempts to open the same cache entry will block.
+ */
+
+ /**
+ * Synchronous cache access. This method fails if it is called on the main
+ * thread. Use asyncOpenCacheEntry() instead. This returns a unique
+ * descriptor each time it is called, even if the same key is specified.
+ * When called by multiple threads for write access, only one writable
+ * descriptor will be granted. If 'blockingMode' is set to false, it will
+ * return NS_ERROR_CACHE_WAIT_FOR_VALIDATION rather than block when another
+ * descriptor has been given WRITE access but hasn't validated the entry yet.
+ */
+ nsICacheEntryDescriptor openCacheEntry(in ACString key,
+ in nsCacheAccessMode accessRequested,
+ in boolean blockingMode);
+
+ /**
+ * Asynchronous cache access. Does not block the calling thread. Instead,
+ * the listener will be notified when the descriptor is available. If
+ * 'noWait' is set to true, the listener will be notified immediately with
+ * status NS_ERROR_CACHE_WAIT_FOR_VALIDATION rather than queuing the request
+ * when another descriptor has been given WRITE access but hasn't validated
+ * the entry yet.
+ */
+ void asyncOpenCacheEntry(in ACString key,
+ in nsCacheAccessMode accessRequested,
+ in nsICacheListener listener,
+ [optional] in boolean noWait);
+
+ /**
+ * Evict all entries for this session's clientID according to its storagePolicy.
+ */
+ void evictEntries();
+
+ /**
+ * Return whether any of the cache devices implied by the session storage policy
+ * are currently enabled for instantiation if they don't already exist.
+ */
+ boolean isStorageEnabled();
+
+ /**
+ * Asynchronously doom an entry specified by the key. Listener will be
+ * notified about the status of the operation. Null may be passed if caller
+ * doesn't care about the result.
+ */
+ void doomEntry(in ACString key, in nsICacheListener listener);
+
+ /**
+ * Private entries will be doomed when the last private browsing session
+ * finishes.
+ */
+ attribute boolean isPrivate;
+};
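+
+/**
+ * Illustrative sketch, not part of the original API documentation: a C++
+ * consumer would normally pair asyncOpenCacheEntry() with an
+ * nsICacheListener implementation (see nsICacheListener.idl) along these
+ * lines. MyCacheListener and the key "mykey" are example names; the access
+ * mode constant comes from nsICache.idl.
+ *
+ *   class MyCacheListener final : public nsICacheListener {
+ *    public:
+ *     NS_DECL_ISUPPORTS
+ *     NS_IMETHOD OnCacheEntryAvailable(nsICacheEntryDescriptor* aDescriptor,
+ *                                      nsCacheAccessMode aAccessGranted,
+ *                                      nsresult aStatus) override {
+ *       if (NS_SUCCEEDED(aStatus)) {
+ *         // read from or write to aDescriptor here
+ *       }
+ *       return NS_OK;
+ *     }
+ *     NS_IMETHOD OnCacheEntryDoomed(nsresult aStatus) override {
+ *       return NS_OK;
+ *     }
+ *    private:
+ *     ~MyCacheListener() = default;
+ *   };
+ *   // NS_IMPL_ISUPPORTS(MyCacheListener, nsICacheListener) in the .cpp
+ *
+ *   RefPtr<MyCacheListener> listener = new MyCacheListener();
+ *   session->AsyncOpenCacheEntry("mykey"_ns, nsICache::ACCESS_READ_WRITE,
+ *                                listener, false);  // noWait = false
+ */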
diff --git a/netwerk/cache/nsICacheVisitor.idl b/netwerk/cache/nsICacheVisitor.idl
new file mode 100644
index 0000000000..56b503d430
--- /dev/null
+++ b/netwerk/cache/nsICacheVisitor.idl
@@ -0,0 +1,123 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/* XXX we should define device and entry info as well (stats, etc) */
+
+interface nsICacheDeviceInfo;
+interface nsICacheEntryInfo;
+
+
+[scriptable, uuid(f8c08c4b-d778-49d1-a59b-866fdc500d95)]
+interface nsICacheVisitor : nsISupports
+{
+ /**
+ * Called to provide information about a cache device.
+ *
+ * @param deviceID - specifies the device being visited.
+ * @param deviceInfo - specifies information about this device.
+ *
+ * @return true to start visiting all entries for this device.
+ * @return false to advance to the next device.
+ */
+ boolean visitDevice(in string deviceID,
+ in nsICacheDeviceInfo deviceInfo);
+
+ /**
+ * Called to provide information about a cache entry.
+ *
+ * @param deviceID - specifies the device being visited.
+ * @param entryInfo - specifies information about this entry.
+ *
+ * @return true to visit the next entry on the current device, or if the
+ * end of the device has been reached, advance to the next device.
+ * @return false to advance to the next device.
+ */
+ boolean visitEntry(in string deviceID,
+ in nsICacheEntryInfo entryInfo);
+};
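+
+/**
+ * Illustrative sketch, not part of the original API documentation: an
+ * about:cache-style consumer could implement this visitor roughly as
+ * follows; EntryCounter is a hypothetical class name.
+ *
+ *   class EntryCounter final : public nsICacheVisitor {
+ *    public:
+ *     NS_DECL_ISUPPORTS
+ *     NS_IMETHOD VisitDevice(const char* aDeviceID,
+ *                            nsICacheDeviceInfo* aInfo,
+ *                            bool* aVisitEntries) override {
+ *       *aVisitEntries = true;  // descend into this device's entries
+ *       return NS_OK;
+ *     }
+ *     NS_IMETHOD VisitEntry(const char* aDeviceID, nsICacheEntryInfo* aInfo,
+ *                           bool* aKeepVisiting) override {
+ *       ++mCount;
+ *       *aKeepVisiting = true;  // continue with the next entry
+ *       return NS_OK;
+ *     }
+ *     uint32_t mCount = 0;
+ *    private:
+ *     ~EntryCounter() = default;
+ *   };
+ */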
+
+
+[scriptable, uuid(31d1c294-1dd2-11b2-be3a-c79230dca297)]
+interface nsICacheDeviceInfo : nsISupports
+{
+ /**
+ * Get a human readable description of the cache device.
+ */
+ readonly attribute ACString description;
+
+ /**
+ * Get a usage report, statistics, miscellaneous data about
+ * the cache device.
+ */
+ readonly attribute ACString usageReport;
+
+ /**
+ * Get the number of stored cache entries.
+ */
+ readonly attribute unsigned long entryCount;
+
+ /**
+ * Get the total size of the stored cache entries.
+ */
+ readonly attribute unsigned long totalSize;
+
+ /**
+ * Get the upper limit of the size of the data the cache can store.
+ */
+ readonly attribute unsigned long maximumSize;
+};
+
+
+[scriptable, uuid(fab51c92-95c3-4468-b317-7de4d7588254)]
+interface nsICacheEntryInfo : nsISupports
+{
+ /**
+ * Get the client id associated with this cache entry.
+ */
+ readonly attribute ACString clientID;
+
+ /**
+ * Get the id for the device that stores this cache entry.
+ */
+ readonly attribute ACString deviceID;
+
+ /**
+ * Get the key identifying the cache entry.
+ */
+ readonly attribute ACString key;
+
+ /**
+ * Get the number of times the cache entry has been opened.
+ */
+ readonly attribute long fetchCount;
+
+ /**
+ * Get the last time the cache entry was opened (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t lastFetched;
+
+ /**
+ * Get the last time the cache entry was modified (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t lastModified;
+
+ /**
+ * Get the expiration time of the cache entry (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t expirationTime;
+
+ /**
+ * Get the cache entry data size.
+ */
+ readonly attribute unsigned long dataSize;
+
+ /**
+ * Find out whether or not the cache entry is stream based.
+ */
+ boolean isStreamBased();
+};
diff --git a/netwerk/cache2/AppCacheStorage.cpp b/netwerk/cache2/AppCacheStorage.cpp
new file mode 100644
index 0000000000..3bff3bc086
--- /dev/null
+++ b/netwerk/cache2/AppCacheStorage.cpp
@@ -0,0 +1,185 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "AppCacheStorage.h"
+#include "CacheStorageService.h"
+
+#include "OldWrappers.h"
+
+#include "nsICacheEntryDoomCallback.h"
+
+#include "nsCacheService.h"
+#include "nsIApplicationCache.h"
+#include "nsIApplicationCacheService.h"
+#include "nsIURI.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsServiceManagerUtils.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla::net {
+
+AppCacheStorage::AppCacheStorage(nsILoadContextInfo* aInfo,
+ nsIApplicationCache* aAppCache)
+ : CacheStorage(aInfo, true /* disk */, false /* lookup app cache */,
+ false /* skip size check */, false /* pin */),
+ mAppCache(aAppCache) {}
+
+AppCacheStorage::~AppCacheStorage() {
+ ProxyReleaseMainThread("AppCacheStorage::mAppCache", mAppCache);
+}
+
+NS_IMETHODIMP AppCacheStorage::AsyncOpenURI(
+ nsIURI* aURI, const nsACString& aIdExtension, uint32_t aFlags,
+ nsICacheEntryOpenCallback* aCallback) {
+ if (!CacheStorageService::Self()) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (!LoadInfo()) {
+ return NS_ERROR_CACHE_KEY_NOT_FOUND;
+ }
+
+ NS_ENSURE_ARG(aURI);
+ NS_ENSURE_ARG(aCallback);
+
+ nsresult rv;
+
+ nsCOMPtr<nsIApplicationCache> appCache = mAppCache;
+
+ if (!appCache) {
+ rv = ChooseApplicationCache(aURI, getter_AddRefs(appCache));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (!appCache) {
+ LOG(
+ ("AppCacheStorage::AsyncOpenURI entry not found in any appcache, "
+ "giving up"));
+ aCallback->OnCacheEntryAvailable(nullptr, false, nullptr,
+ NS_ERROR_CACHE_KEY_NOT_FOUND);
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString cacheKey;
+ rv = noRefURI->GetAsciiSpec(cacheKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+  // This is the only way to recognize anonymous appcache data: by tagging
+  // the cache key. There is no way to switch to e.g. a different session,
+  // because there is just a single session for an appcache version
+  // (identified by the client id).
+ if (LoadInfo()->IsAnonymous()) {
+ cacheKey = "anon&"_ns + cacheKey;
+ }
+
+ nsAutoCString scheme;
+ rv = noRefURI->GetScheme(scheme);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<_OldCacheLoad> appCacheLoad = new _OldCacheLoad(
+ scheme, cacheKey, aCallback, appCache, LoadInfo(), WriteToDisk(), aFlags);
+ rv = appCacheLoad->Start();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP AppCacheStorage::OpenTruncate(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntry** aCacheEntry) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP AppCacheStorage::Exists(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ bool* aResult) {
+ *aResult = false;
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP AppCacheStorage::AsyncDoomURI(
+ nsIURI* aURI, const nsACString& aIdExtension,
+ nsICacheEntryDoomCallback* aCallback) {
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ if (!mAppCache) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ if (!LoadInfo()) {
+ return NS_ERROR_CACHE_KEY_NOT_FOUND;
+ }
+
+ RefPtr<_OldStorage> old = new _OldStorage(LoadInfo(), WriteToDisk(),
+ LookupAppCache(), true, mAppCache);
+ return old->AsyncDoomURI(aURI, aIdExtension, aCallback);
+}
+
+NS_IMETHODIMP AppCacheStorage::AsyncEvictStorage(
+ nsICacheEntryDoomCallback* aCallback) {
+ if (!CacheStorageService::Self()) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (!LoadInfo()) {
+ return NS_ERROR_CACHE_KEY_NOT_FOUND;
+ }
+
+ nsresult rv;
+
+ if (!mAppCache) {
+ // Discard everything under this storage context
+ nsCOMPtr<nsIApplicationCacheService> appCacheService =
+ do_GetService(NS_APPLICATIONCACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = appCacheService->Evict(LoadInfo());
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ // Discard the group
+ RefPtr<_OldStorage> old = new _OldStorage(
+ LoadInfo(), WriteToDisk(), LookupAppCache(), true, mAppCache);
+ rv = old->AsyncEvictStorage(aCallback);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+ }
+
+ if (aCallback) aCallback->OnCacheEntryDoomed(NS_OK);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP AppCacheStorage::AsyncVisitStorage(
+ nsICacheStorageVisitor* aVisitor, bool aVisitEntries) {
+ if (!CacheStorageService::Self()) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ LOG(("AppCacheStorage::AsyncVisitStorage [this=%p, cb=%p]", this, aVisitor));
+
+ nsresult rv;
+
+ nsCOMPtr<nsICacheService> serv =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<_OldVisitCallbackWrapper> cb = new _OldVisitCallbackWrapper(
+ "offline", aVisitor, aVisitEntries, LoadInfo());
+ rv = nsCacheService::GlobalInstance()->VisitEntriesInternal(cb);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP AppCacheStorage::GetCacheIndexEntryAttrs(
+ nsIURI* aURI, const nsACString& aIdExtension, bool* aHasAltData,
+ uint32_t* aSizeInKB) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+} // namespace mozilla::net
diff --git a/netwerk/cache2/AppCacheStorage.h b/netwerk/cache2/AppCacheStorage.h
new file mode 100644
index 0000000000..b8dca276d1
--- /dev/null
+++ b/netwerk/cache2/AppCacheStorage.h
@@ -0,0 +1,35 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AppCacheStorage__h__
+#define AppCacheStorage__h__
+
+#include "CacheStorage.h"
+
+#include "nsCOMPtr.h"
+#include "nsILoadContextInfo.h"
+#include "nsIApplicationCache.h"
+
+class nsIApplicationCache;
+
+namespace mozilla {
+namespace net {
+
+class AppCacheStorage : public CacheStorage {
+ NS_INLINE_DECL_REFCOUNTING_INHERITED(AppCacheStorage, CacheStorage)
+ NS_DECL_NSICACHESTORAGE
+
+ public:
+ AppCacheStorage(nsILoadContextInfo* aInfo, nsIApplicationCache* aAppCache);
+
+ private:
+ virtual ~AppCacheStorage();
+
+ nsCOMPtr<nsIApplicationCache> mAppCache;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheEntry.cpp b/netwerk/cache2/CacheEntry.cpp
new file mode 100644
index 0000000000..d5a7bee65f
--- /dev/null
+++ b/netwerk/cache2/CacheEntry.cpp
@@ -0,0 +1,1913 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheEntry.h"
+#include "CacheStorageService.h"
+#include "CacheObserver.h"
+#include "CacheFileUtils.h"
+#include "CacheIndex.h"
+
+#include "nsIInputStream.h"
+#include "nsIOutputStream.h"
+#include "nsISeekableStream.h"
+#include "nsIURI.h"
+#include "nsICacheEntryOpenCallback.h"
+#include "nsICacheStorage.h"
+#include "nsISerializable.h"
+#include "nsISizeOf.h"
+
+#include "nsComponentManagerUtils.h"
+#include "nsServiceManagerUtils.h"
+#include "nsString.h"
+#include "nsProxyRelease.h"
+#include "nsSerializationHelper.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include <math.h>
+#include <algorithm>
+
+namespace mozilla {
+namespace net {
+
+static uint32_t const ENTRY_WANTED = nsICacheEntryOpenCallback::ENTRY_WANTED;
+static uint32_t const RECHECK_AFTER_WRITE_FINISHED =
+ nsICacheEntryOpenCallback::RECHECK_AFTER_WRITE_FINISHED;
+static uint32_t const ENTRY_NEEDS_REVALIDATION =
+ nsICacheEntryOpenCallback::ENTRY_NEEDS_REVALIDATION;
+static uint32_t const ENTRY_NOT_WANTED =
+ nsICacheEntryOpenCallback::ENTRY_NOT_WANTED;
+
+NS_IMPL_ISUPPORTS(CacheEntryHandle, nsICacheEntry)
+
+// CacheEntryHandle
+
+CacheEntryHandle::CacheEntryHandle(CacheEntry* aEntry)
+ : mEntry(aEntry), mClosed(false) {
+#ifdef DEBUG
+ if (!mEntry->HandlesCount()) {
+ // CacheEntry.mHandlesCount must go from zero to one only under
+ // the service lock. Can access CacheStorageService::Self() w/o a check
+    // since CacheEntry holds a reference to it.
+ CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
+ }
+#endif
+
+ mEntry->AddHandleRef();
+
+ LOG(("New CacheEntryHandle %p for entry %p", this, aEntry));
+}
+
+NS_IMETHODIMP CacheEntryHandle::Dismiss() {
+ LOG(("CacheEntryHandle::Dismiss %p", this));
+
+ if (mClosed.compareExchange(false, true)) {
+ mEntry->OnHandleClosed(this);
+ return NS_OK;
+ }
+
+ LOG((" already dropped"));
+ return NS_ERROR_UNEXPECTED;
+}
+
+CacheEntryHandle::~CacheEntryHandle() {
+ mEntry->ReleaseHandleRef();
+ Dismiss();
+
+ LOG(("CacheEntryHandle::~CacheEntryHandle %p", this));
+}
+
+// CacheEntry::Callback
+
+CacheEntry::Callback::Callback(CacheEntry* aEntry,
+ nsICacheEntryOpenCallback* aCallback,
+ bool aReadOnly, bool aCheckOnAnyThread,
+ bool aSecret)
+ : mEntry(aEntry),
+ mCallback(aCallback),
+ mTarget(GetCurrentEventTarget()),
+ mReadOnly(aReadOnly),
+ mRevalidating(false),
+ mCheckOnAnyThread(aCheckOnAnyThread),
+ mRecheckAfterWrite(false),
+ mNotWanted(false),
+ mSecret(aSecret),
+ mDoomWhenFoundPinned(false),
+ mDoomWhenFoundNonPinned(false) {
+ MOZ_COUNT_CTOR(CacheEntry::Callback);
+
+  // The counter may go from zero to non-zero only under the service lock,
+ // but here we expect it to be already positive.
+ MOZ_ASSERT(mEntry->HandlesCount());
+ mEntry->AddHandleRef();
+}
+
+CacheEntry::Callback::Callback(CacheEntry* aEntry,
+ bool aDoomWhenFoundInPinStatus)
+ : mEntry(aEntry),
+ mReadOnly(false),
+ mRevalidating(false),
+ mCheckOnAnyThread(true),
+ mRecheckAfterWrite(false),
+ mNotWanted(false),
+ mSecret(false),
+ mDoomWhenFoundPinned(aDoomWhenFoundInPinStatus == true),
+ mDoomWhenFoundNonPinned(aDoomWhenFoundInPinStatus == false) {
+ MOZ_COUNT_CTOR(CacheEntry::Callback);
+ MOZ_ASSERT(mEntry->HandlesCount());
+ mEntry->AddHandleRef();
+}
+
+CacheEntry::Callback::Callback(CacheEntry::Callback const& aThat)
+ : mEntry(aThat.mEntry),
+ mCallback(aThat.mCallback),
+ mTarget(aThat.mTarget),
+ mReadOnly(aThat.mReadOnly),
+ mRevalidating(aThat.mRevalidating),
+ mCheckOnAnyThread(aThat.mCheckOnAnyThread),
+ mRecheckAfterWrite(aThat.mRecheckAfterWrite),
+ mNotWanted(aThat.mNotWanted),
+ mSecret(aThat.mSecret),
+ mDoomWhenFoundPinned(aThat.mDoomWhenFoundPinned),
+ mDoomWhenFoundNonPinned(aThat.mDoomWhenFoundNonPinned) {
+ MOZ_COUNT_CTOR(CacheEntry::Callback);
+
+  // The counter may go from zero to non-zero only under the service lock,
+ // but here we expect it to be already positive.
+ MOZ_ASSERT(mEntry->HandlesCount());
+ mEntry->AddHandleRef();
+}
+
+CacheEntry::Callback::~Callback() {
+ ProxyRelease("CacheEntry::Callback::mCallback", mCallback, mTarget);
+
+ mEntry->ReleaseHandleRef();
+ MOZ_COUNT_DTOR(CacheEntry::Callback);
+}
+
+void CacheEntry::Callback::ExchangeEntry(CacheEntry* aEntry) {
+ if (mEntry == aEntry) return;
+
+  // The counter may go from zero to non-zero only under the service lock,
+ // but here we expect it to be already positive.
+ MOZ_ASSERT(aEntry->HandlesCount());
+ aEntry->AddHandleRef();
+ mEntry->ReleaseHandleRef();
+ mEntry = aEntry;
+}
+
+bool CacheEntry::Callback::DeferDoom(bool* aDoom) const {
+ MOZ_ASSERT(mEntry->mPinningKnown);
+
+ if (MOZ_UNLIKELY(mDoomWhenFoundNonPinned) ||
+ MOZ_UNLIKELY(mDoomWhenFoundPinned)) {
+ *aDoom =
+ (MOZ_UNLIKELY(mDoomWhenFoundNonPinned) &&
+ MOZ_LIKELY(!mEntry->mPinned)) ||
+ (MOZ_UNLIKELY(mDoomWhenFoundPinned) && MOZ_UNLIKELY(mEntry->mPinned));
+
+ return true;
+ }
+
+ return false;
+}
+
+nsresult CacheEntry::Callback::OnCheckThread(bool* aOnCheckThread) const {
+ if (!mCheckOnAnyThread) {
+ // Check we are on the target
+ return mTarget->IsOnCurrentThread(aOnCheckThread);
+ }
+
+ // We can invoke check anywhere
+ *aOnCheckThread = true;
+ return NS_OK;
+}
+
+nsresult CacheEntry::Callback::OnAvailThread(bool* aOnAvailThread) const {
+ return mTarget->IsOnCurrentThread(aOnAvailThread);
+}
+
+// CacheEntry
+
+NS_IMPL_ISUPPORTS(CacheEntry, nsIRunnable, CacheFileListener)
+
+/* static */
+uint64_t CacheEntry::GetNextId() {
+ static Atomic<uint64_t, Relaxed> id(0);
+ return ++id;
+}
+
+CacheEntry::CacheEntry(const nsACString& aStorageID, const nsACString& aURI,
+ const nsACString& aEnhanceID, bool aUseDisk,
+ bool aSkipSizeCheck, bool aPin)
+ : mFrecency(0),
+ mSortingExpirationTime(uint32_t(-1)),
+ mLock("CacheEntry"),
+ mFileStatus(NS_ERROR_NOT_INITIALIZED),
+ mURI(aURI),
+ mEnhanceID(aEnhanceID),
+ mStorageID(aStorageID),
+ mUseDisk(aUseDisk),
+ mSkipSizeCheck(aSkipSizeCheck),
+ mIsDoomed(false),
+ mSecurityInfoLoaded(false),
+ mPreventCallbacks(false),
+ mHasData(false),
+ mPinned(aPin),
+ mPinningKnown(false),
+ mState(NOTLOADED),
+ mRegistration(NEVERREGISTERED),
+ mWriter(nullptr),
+ mUseCount(0),
+ mCacheEntryId(GetNextId()) {
+ LOG(("CacheEntry::CacheEntry [this=%p]", this));
+
+ mService = CacheStorageService::Self();
+
+ CacheStorageService::Self()->RecordMemoryOnlyEntry(this, !aUseDisk,
+ true /* overwrite */);
+}
+
+CacheEntry::~CacheEntry() { LOG(("CacheEntry::~CacheEntry [this=%p]", this)); }
+
+char const* CacheEntry::StateString(uint32_t aState) {
+ switch (aState) {
+ case NOTLOADED:
+ return "NOTLOADED";
+ case LOADING:
+ return "LOADING";
+ case EMPTY:
+ return "EMPTY";
+ case WRITING:
+ return "WRITING";
+ case READY:
+ return "READY";
+ case REVALIDATING:
+ return "REVALIDATING";
+ }
+
+ return "?";
+}
+
+nsresult CacheEntry::HashingKeyWithStorage(nsACString& aResult) const {
+ return HashingKey(mStorageID, mEnhanceID, mURI, aResult);
+}
+
+nsresult CacheEntry::HashingKey(nsACString& aResult) const {
+ return HashingKey(""_ns, mEnhanceID, mURI, aResult);
+}
+
+// static
+nsresult CacheEntry::HashingKey(const nsACString& aStorageID,
+ const nsACString& aEnhanceID, nsIURI* aURI,
+ nsACString& aResult) {
+ nsAutoCString spec;
+ nsresult rv = aURI->GetAsciiSpec(spec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return HashingKey(aStorageID, aEnhanceID, spec, aResult);
+}
+
+// static
+nsresult CacheEntry::HashingKey(const nsACString& aStorageID,
+ const nsACString& aEnhanceID,
+ const nsACString& aURISpec,
+ nsACString& aResult) {
+ /**
+   * This key is used to salt the hash that is the basis for the disk file
+   * name. Changing it would make us unable to find existing files on disk.
+ */
+
+ aResult.Assign(aStorageID);
+
+ if (!aEnhanceID.IsEmpty()) {
+ CacheFileUtils::AppendTagWithValue(aResult, '~', aEnhanceID);
+ }
+
+ // Appending directly
+ aResult.Append(':');
+ aResult.Append(aURISpec);
+
+ return NS_OK;
+}
+
+void CacheEntry::AsyncOpen(nsICacheEntryOpenCallback* aCallback,
+ uint32_t aFlags) {
+ LOG(("CacheEntry::AsyncOpen [this=%p, state=%s, flags=%d, callback=%p]", this,
+ StateString(mState), aFlags, aCallback));
+
+ bool readonly = aFlags & nsICacheStorage::OPEN_READONLY;
+ bool bypassIfBusy = aFlags & nsICacheStorage::OPEN_BYPASS_IF_BUSY;
+ bool truncate = aFlags & nsICacheStorage::OPEN_TRUNCATE;
+ bool priority = aFlags & nsICacheStorage::OPEN_PRIORITY;
+ bool multithread = aFlags & nsICacheStorage::CHECK_MULTITHREADED;
+ bool secret = aFlags & nsICacheStorage::OPEN_SECRETLY;
+
+ MOZ_ASSERT(!readonly || !truncate, "Bad flags combination");
+ MOZ_ASSERT(!(truncate && mState > LOADING),
+ "Must not call truncate on already loaded entry");
+
+ Callback callback(this, aCallback, readonly, multithread, secret);
+
+ if (!Open(callback, truncate, priority, bypassIfBusy)) {
+ // We get here when the callback wants to bypass cache when it's busy.
+ LOG((" writing or revalidating, callback wants to bypass cache"));
+ callback.mNotWanted = true;
+ InvokeAvailableCallback(callback);
+ }
+}
+
+bool CacheEntry::Open(Callback& aCallback, bool aTruncate, bool aPriority,
+ bool aBypassIfBusy) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ // Check state under the lock
+ if (aBypassIfBusy && (mState == WRITING || mState == REVALIDATING)) {
+ return false;
+ }
+
+ RememberCallback(aCallback);
+
+  // Load() temporarily releases mLock internally
+ if (Load(aTruncate, aPriority)) {
+ // Loading is in progress...
+ return true;
+ }
+
+ InvokeCallbacks();
+
+ return true;
+}
+
+bool CacheEntry::Load(bool aTruncate, bool aPriority) {
+ LOG(("CacheEntry::Load [this=%p, trunc=%d]", this, aTruncate));
+
+ mLock.AssertCurrentThreadOwns();
+
+ if (mState > LOADING) {
+ LOG((" already loaded"));
+ return false;
+ }
+
+ if (mState == LOADING) {
+ LOG((" already loading"));
+ return true;
+ }
+
+ mState = LOADING;
+
+ MOZ_ASSERT(!mFile);
+
+ nsresult rv;
+
+ nsAutoCString fileKey;
+ rv = HashingKeyWithStorage(fileKey);
+
+ bool reportMiss = false;
+
+ // Check the index under two conditions for two states and take appropriate
+ // action:
+ // 1. When this is a disk entry and not told to truncate, check there is a
+ // disk file.
+ // If not, set the 'truncate' flag to true so that this entry will open
+ // instantly as a new one.
+ // 2. When this is a memory-only entry, check there is a disk file.
+ // If there is or could be, doom that file.
+ if ((!aTruncate || !mUseDisk) && NS_SUCCEEDED(rv)) {
+ // Check the index right now to know we have or have not the entry
+ // as soon as possible.
+ CacheIndex::EntryStatus status;
+ if (NS_SUCCEEDED(CacheIndex::HasEntry(fileKey, &status))) {
+ switch (status) {
+ case CacheIndex::DOES_NOT_EXIST:
+ // Doesn't apply to memory-only entries, Load() is called only once
+ // for them and never again for their session lifetime.
+ if (!aTruncate && mUseDisk) {
+ LOG(
+ (" entry doesn't exist according information from the index, "
+ "truncating"));
+ reportMiss = true;
+ aTruncate = true;
+ }
+ break;
+ case CacheIndex::EXISTS:
+ case CacheIndex::DO_NOT_KNOW:
+ if (!mUseDisk) {
+ LOG(
+ (" entry open as memory-only, but there is a file, status=%d, "
+ "dooming it",
+ status));
+ CacheFileIOManager::DoomFileByKey(fileKey, nullptr);
+ }
+ break;
+ }
+ }
+ }
+
+ mFile = new CacheFile();
+
+ BackgroundOp(Ops::REGISTER);
+
+ bool directLoad = aTruncate || !mUseDisk;
+ if (directLoad) {
+    // mLoadStart will be used to calculate telemetry for the lifetime of this
+    // entry. Low resolution is sufficient here.
+ mLoadStart = TimeStamp::NowLoRes();
+ mPinningKnown = true;
+ } else {
+ mLoadStart = TimeStamp::Now();
+ }
+
+ {
+ mozilla::MutexAutoUnlock unlock(mLock);
+
+ if (reportMiss) {
+ CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
+ CacheFileUtils::DetailedCacheHitTelemetry::MISS, mLoadStart);
+ }
+
+ LOG((" performing load, file=%p", mFile.get()));
+ if (NS_SUCCEEDED(rv)) {
+ rv = mFile->Init(fileKey, aTruncate, !mUseDisk, mSkipSizeCheck, aPriority,
+ mPinned, directLoad ? nullptr : this);
+ }
+
+ if (NS_FAILED(rv)) {
+ mFileStatus = rv;
+ AsyncDoom(nullptr);
+ return false;
+ }
+ }
+
+ if (directLoad) {
+ // Just fake the load has already been done as "new".
+ mFileStatus = NS_OK;
+ mState = EMPTY;
+ }
+
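+  // True only when an asynchronous file load is in flight and OnFileReady()
+  // will advance the state later; false when the load was done directly
+  // (truncate or memory-only) and the entry is already in the EMPTY state.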
+ return mState == LOADING;
+}
+
+NS_IMETHODIMP CacheEntry::OnFileReady(nsresult aResult, bool aIsNew) {
+ LOG(("CacheEntry::OnFileReady [this=%p, rv=0x%08" PRIx32 ", new=%d]", this,
+ static_cast<uint32_t>(aResult), aIsNew));
+
+ MOZ_ASSERT(!mLoadStart.IsNull());
+
+ if (NS_SUCCEEDED(aResult)) {
+ if (aIsNew) {
+ CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
+ CacheFileUtils::DetailedCacheHitTelemetry::MISS, mLoadStart);
+ } else {
+ CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
+ CacheFileUtils::DetailedCacheHitTelemetry::HIT, mLoadStart);
+ }
+ }
+
+  // OnFileReady is the only code that can transition the entry from LOADING
+  // to any follow-on state, and it can only be invoked once on an entry.
+ // Until this moment there is no consumer that could manipulate
+ // the entry state.
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ MOZ_ASSERT(mState == LOADING);
+
+ mState = (aIsNew || NS_FAILED(aResult)) ? EMPTY : READY;
+
+ mFileStatus = aResult;
+
+  mPinned = mFile->IsPinned();
+ mPinningKnown = true;
+ LOG((" pinning=%d", mPinned));
+
+ if (mState == READY) {
+ mHasData = true;
+
+ uint32_t frecency;
+ mFile->GetFrecency(&frecency);
+    // mFrecency is held in a double to increase computational precision.
+ // It is ok to persist frecency only as a uint32 with some math involved.
+ mFrecency = INT2FRECENCY(frecency);
+ }
+
+ InvokeCallbacks();
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheEntry::OnFileDoomed(nsresult aResult) {
+ if (mDoomCallback) {
+ RefPtr<DoomCallbackRunnable> event =
+ new DoomCallbackRunnable(this, aResult);
+ NS_DispatchToMainThread(event);
+ }
+
+ return NS_OK;
+}
+
+already_AddRefed<CacheEntryHandle> CacheEntry::ReopenTruncated(
+ bool aMemoryOnly, nsICacheEntryOpenCallback* aCallback) {
+ LOG(("CacheEntry::ReopenTruncated [this=%p]", this));
+
+ mLock.AssertCurrentThreadOwns();
+
+  // Hold callback invocation; AddStorageEntry would invoke it from doom
+  // prematurely.
+ mPreventCallbacks = true;
+
+ RefPtr<CacheEntryHandle> handle;
+ RefPtr<CacheEntry> newEntry;
+ {
+ if (mPinned) {
+ MOZ_ASSERT(mUseDisk);
+      // We want to pin even no-store entries (the case where we recreate a
+      // disk entry as a memory-only entry).
+ aMemoryOnly = false;
+ }
+
+ mozilla::MutexAutoUnlock unlock(mLock);
+
+ // The following call dooms this entry (calls DoomAlreadyRemoved on us)
+ nsresult rv = CacheStorageService::Self()->AddStorageEntry(
+ GetStorageID(), GetURI(), GetEnhanceID(), mUseDisk && !aMemoryOnly,
+ mSkipSizeCheck, mPinned,
+ true, // truncate existing (this one)
+ getter_AddRefs(handle));
+
+ if (NS_SUCCEEDED(rv)) {
+ newEntry = handle->Entry();
+ LOG((" exchanged entry %p by entry %p, rv=0x%08" PRIx32, this,
+ newEntry.get(), static_cast<uint32_t>(rv)));
+ newEntry->AsyncOpen(aCallback, nsICacheStorage::OPEN_TRUNCATE);
+ } else {
+ LOG((" exchanged of entry %p failed, rv=0x%08" PRIx32, this,
+ static_cast<uint32_t>(rv)));
+ AsyncDoom(nullptr);
+ }
+ }
+
+ mPreventCallbacks = false;
+
+ if (!newEntry) return nullptr;
+
+ newEntry->TransferCallbacks(*this);
+ mCallbacks.Clear();
+
+ // Must return a new write handle, since the consumer is expected to
+ // write to this newly recreated entry. The |handle| is only a common
+ // reference counter and doesn't revert entry state back when write
+ // fails and also doesn't update the entry frecency. Not updating
+ // frecency causes entries to not be purged from our memory pools.
+ RefPtr<CacheEntryHandle> writeHandle = newEntry->NewWriteHandle();
+ return writeHandle.forget();
+}
+
+void CacheEntry::TransferCallbacks(CacheEntry& aFromEntry) {
+ mozilla::MutexAutoLock lock(mLock);
+
+ LOG(("CacheEntry::TransferCallbacks [entry=%p, from=%p]", this, &aFromEntry));
+
+ if (!mCallbacks.Length())
+ mCallbacks.SwapElements(aFromEntry.mCallbacks);
+ else
+ mCallbacks.AppendElements(aFromEntry.mCallbacks);
+
+ uint32_t callbacksLength = mCallbacks.Length();
+ if (callbacksLength) {
+ // Carry the entry reference (unfortunately, needs to be done manually...)
+ for (uint32_t i = 0; i < callbacksLength; ++i)
+ mCallbacks[i].ExchangeEntry(this);
+
+ BackgroundOp(Ops::CALLBACKS, true);
+ }
+}
+
+void CacheEntry::RememberCallback(Callback& aCallback) {
+ mLock.AssertCurrentThreadOwns();
+
+ LOG(("CacheEntry::RememberCallback [this=%p, cb=%p, state=%s]", this,
+ aCallback.mCallback.get(), StateString(mState)));
+
+ mCallbacks.AppendElement(aCallback);
+}
+
+void CacheEntry::InvokeCallbacksLock() {
+ mozilla::MutexAutoLock lock(mLock);
+ InvokeCallbacks();
+}
+
+void CacheEntry::InvokeCallbacks() {
+ mLock.AssertCurrentThreadOwns();
+
+ LOG(("CacheEntry::InvokeCallbacks BEGIN [this=%p]", this));
+
+ // Invoke first all r/w callbacks, then all r/o callbacks.
+ if (InvokeCallbacks(false)) InvokeCallbacks(true);
+
+ LOG(("CacheEntry::InvokeCallbacks END [this=%p]", this));
+}
+
+bool CacheEntry::InvokeCallbacks(bool aReadOnly) {
+ mLock.AssertCurrentThreadOwns();
+
+ RefPtr<CacheEntryHandle> recreatedHandle;
+
+ uint32_t i = 0;
+ while (i < mCallbacks.Length()) {
+ if (mPreventCallbacks) {
+ LOG((" callbacks prevented!"));
+ return false;
+ }
+
+ if (!mIsDoomed && (mState == WRITING || mState == REVALIDATING)) {
+ LOG((" entry is being written/revalidated"));
+ return false;
+ }
+
+ bool recreate;
+ if (mCallbacks[i].DeferDoom(&recreate)) {
+ mCallbacks.RemoveElementAt(i);
+ if (!recreate) {
+ continue;
+ }
+
+ LOG((" defer doom marker callback hit positive, recreating"));
+ recreatedHandle = ReopenTruncated(!mUseDisk, nullptr);
+ break;
+ }
+
+ if (mCallbacks[i].mReadOnly != aReadOnly) {
+      // Callback doesn't match the current r/w vs. r/o pass, go to another
+      // one in line
+ ++i;
+ continue;
+ }
+
+ bool onCheckThread;
+ nsresult rv = mCallbacks[i].OnCheckThread(&onCheckThread);
+
+ if (NS_SUCCEEDED(rv) && !onCheckThread) {
+ // Redispatch to the target thread
+ rv = mCallbacks[i].mTarget->Dispatch(
+ NewRunnableMethod("net::CacheEntry::InvokeCallbacksLock", this,
+ &CacheEntry::InvokeCallbacksLock),
+ nsIEventTarget::DISPATCH_NORMAL);
+ if (NS_SUCCEEDED(rv)) {
+ LOG((" re-dispatching to target thread"));
+ return false;
+ }
+ }
+
+ Callback callback = mCallbacks[i];
+ mCallbacks.RemoveElementAt(i);
+
+ if (NS_SUCCEEDED(rv) && !InvokeCallback(callback)) {
+ // Callback didn't fire, put it back and go to another one in line.
+      // The only reason InvokeCallback returns false is that onCacheEntryCheck
+      // returned RECHECK_AFTER_WRITE_FINISHED. If we stopped the loop, other
+ // readers or potential writers would be unnecessarily kept from being
+ // invoked.
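+      // InvokeCallback may temporarily release mLock, so mCallbacks can have
+      // changed in the meantime; clamp the position so the re-insert stays
+      // within bounds.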
+ size_t pos = std::min(mCallbacks.Length(), static_cast<size_t>(i));
+ mCallbacks.InsertElementAt(pos, callback);
+ ++i;
+ }
+ }
+
+ if (recreatedHandle) {
+ // Must be released outside of the lock, enters InvokeCallback on the new
+ // entry
+ mozilla::MutexAutoUnlock unlock(mLock);
+ recreatedHandle = nullptr;
+ }
+
+ return true;
+}
+
+bool CacheEntry::InvokeCallback(Callback& aCallback) {
+ LOG(("CacheEntry::InvokeCallback [this=%p, state=%s, cb=%p]", this,
+ StateString(mState), aCallback.mCallback.get()));
+
+ mLock.AssertCurrentThreadOwns();
+
+  // When this entry is doomed we want to notify the callback in any case
+ if (!mIsDoomed) {
+ // When we are here, the entry must be loaded from disk
+ MOZ_ASSERT(mState > LOADING);
+
+ if (mState == WRITING || mState == REVALIDATING) {
+ // Prevent invoking other callbacks since one of them is now writing
+ // or revalidating this entry. No consumers should get this entry
+ // until metadata are filled with values downloaded from the server
+ // or the entry revalidated and output stream has been opened.
+ LOG((" entry is being written/revalidated, callback bypassed"));
+ return false;
+ }
+
+    // If the mRecheckAfterWrite flag is already set, the callback has already
+    // passed the onCacheEntryCheck call. Until the current write is finished,
+    // this callback will be bypassed.
+ if (!aCallback.mRecheckAfterWrite) {
+ if (!aCallback.mReadOnly) {
+ if (mState == EMPTY) {
+          // Advance to the writing state, we expect to invoke the callback and
+          // let it fill the content of this entry. Must set and check the
+          // state here to prevent more than one writer at a time.
+ mState = WRITING;
+ LOG((" advancing to WRITING state"));
+ }
+
+ if (!aCallback.mCallback) {
+ // We can be given no callback only in case of recreate, it is ok
+ // to advance to WRITING state since the caller of recreate is
+ // expected to write this entry now.
+ return true;
+ }
+ }
+
+ if (mState == READY) {
+ // Metadata present, validate the entry
+ uint32_t checkResult;
+ {
+          // mayhemer: TODO check and solve any potential races of concurrent
+ // OnCacheEntryCheck
+ mozilla::MutexAutoUnlock unlock(mLock);
+
+ RefPtr<CacheEntryHandle> handle = NewHandle();
+
+ nsresult rv = aCallback.mCallback->OnCacheEntryCheck(handle, nullptr,
+ &checkResult);
+ LOG((" OnCacheEntryCheck: rv=0x%08" PRIx32 ", result=%" PRId32,
+ static_cast<uint32_t>(rv), static_cast<uint32_t>(checkResult)));
+
+ if (NS_FAILED(rv)) checkResult = ENTRY_NOT_WANTED;
+ }
+
+ aCallback.mRevalidating = checkResult == ENTRY_NEEDS_REVALIDATION;
+
+ switch (checkResult) {
+ case ENTRY_WANTED:
+              // Nothing more to do here, the consumer is responsible for
+              // handling the result of OnCacheEntryCheck itself.
+ // Proceed to callback...
+ break;
+
+ case RECHECK_AFTER_WRITE_FINISHED:
+ LOG(
+ (" consumer will check on the entry again after write is "
+ "done"));
+ // The consumer wants the entry to complete first.
+ aCallback.mRecheckAfterWrite = true;
+ break;
+
+ case ENTRY_NEEDS_REVALIDATION:
+ LOG((" will be holding callbacks until entry is revalidated"));
+              // State is READY now, and from that state the entry cannot
+              // transition to any state other than REVALIDATING, for which
+              // concurrency is not an issue. Potentially no need to lock here.
+ mState = REVALIDATING;
+ break;
+
+ case ENTRY_NOT_WANTED:
+ LOG((" consumer not interested in the entry"));
+ // Do not give this entry to the consumer, it is not interested in
+ // us.
+ aCallback.mNotWanted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (aCallback.mCallback) {
+ if (!mIsDoomed && aCallback.mRecheckAfterWrite) {
+ // If we don't have data and the callback wants a complete entry,
+ // don't invoke now.
+ bool bypass = !mHasData;
+ if (!bypass && NS_SUCCEEDED(mFileStatus)) {
+ int64_t _unused;
+ bypass = !mFile->DataSize(&_unused);
+ }
+
+ if (bypass) {
+ LOG((" bypassing, entry data still being written"));
+ return false;
+ }
+
+ // Entry is complete now, do the check+avail call again
+ aCallback.mRecheckAfterWrite = false;
+ return InvokeCallback(aCallback);
+ }
+
+ mozilla::MutexAutoUnlock unlock(mLock);
+ InvokeAvailableCallback(aCallback);
+ }
+
+ return true;
+}
+
+void CacheEntry::InvokeAvailableCallback(Callback const& aCallback) {
+ LOG(
+ ("CacheEntry::InvokeAvailableCallback [this=%p, state=%s, cb=%p, r/o=%d, "
+ "n/w=%d]",
+ this, StateString(mState), aCallback.mCallback.get(),
+ aCallback.mReadOnly, aCallback.mNotWanted));
+
+ nsresult rv;
+
+ uint32_t state;
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ state = mState;
+ }
+
+ // When we are here, the entry must be loaded from disk
+ MOZ_ASSERT(state > LOADING || mIsDoomed);
+
+ bool onAvailThread;
+ rv = aCallback.OnAvailThread(&onAvailThread);
+ if (NS_FAILED(rv)) {
+ LOG((" target thread dead?"));
+ return;
+ }
+
+ if (!onAvailThread) {
+ // Dispatch to the right thread
+ RefPtr<AvailableCallbackRunnable> event =
+ new AvailableCallbackRunnable(this, aCallback);
+
+ rv = aCallback.mTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+ LOG((" redispatched, (rv = 0x%08" PRIx32 ")", static_cast<uint32_t>(rv)));
+ return;
+ }
+
+ if (mIsDoomed || aCallback.mNotWanted) {
+ LOG(
+ (" doomed or not wanted, notifying OCEA with "
+ "NS_ERROR_CACHE_KEY_NOT_FOUND"));
+ aCallback.mCallback->OnCacheEntryAvailable(nullptr, false, nullptr,
+ NS_ERROR_CACHE_KEY_NOT_FOUND);
+ return;
+ }
+
+ if (state == READY) {
+ LOG((" ready/has-meta, notifying OCEA with entry and NS_OK"));
+
+ if (!aCallback.mSecret) {
+ mozilla::MutexAutoLock lock(mLock);
+ BackgroundOp(Ops::FRECENCYUPDATE);
+ }
+
+ OnFetched(aCallback);
+
+ RefPtr<CacheEntryHandle> handle = NewHandle();
+ aCallback.mCallback->OnCacheEntryAvailable(handle, false, nullptr, NS_OK);
+ return;
+ }
+
+ // R/O callbacks may do revalidation, let them fall through
+ if (aCallback.mReadOnly && !aCallback.mRevalidating) {
+ LOG(
+ (" r/o and not ready, notifying OCEA with "
+ "NS_ERROR_CACHE_KEY_NOT_FOUND"));
+ aCallback.mCallback->OnCacheEntryAvailable(nullptr, false, nullptr,
+ NS_ERROR_CACHE_KEY_NOT_FOUND);
+ return;
+ }
+
+ // This is a new or potentially non-valid entry and needs to be fetched first.
+ // The CacheEntryHandle blocks other consumers until the channel
+ // either releases the entry or marks metadata as filled or whole entry valid,
+ // i.e. until MetaDataReady() or SetValid() on the entry is called
+ // respectively.
+
+ // Consumer will be responsible to fill or validate the entry metadata and
+ // data.
+
+ OnFetched(aCallback);
+
+ RefPtr<CacheEntryHandle> handle = NewWriteHandle();
+ rv = aCallback.mCallback->OnCacheEntryAvailable(handle, state == WRITING,
+ nullptr, NS_OK);
+
+ if (NS_FAILED(rv)) {
+ LOG((" writing/revalidating failed (0x%08" PRIx32 ")",
+ static_cast<uint32_t>(rv)));
+
+ // Consumer given a new entry failed to take care of the entry.
+ OnHandleClosed(handle);
+ return;
+ }
+
+ LOG((" writing/revalidating"));
+}
+
+void CacheEntry::OnFetched(Callback const& aCallback) {
+ if (NS_SUCCEEDED(mFileStatus) && !aCallback.mSecret) {
+ // Let the last-fetched and fetch-count properties be updated.
+ mFile->OnFetched();
+ }
+}
+
+CacheEntryHandle* CacheEntry::NewHandle() { return new CacheEntryHandle(this); }
+
+CacheEntryHandle* CacheEntry::NewWriteHandle() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ // Ignore the OPEN_SECRETLY flag on purpose here, which should actually be
+ // used only along with OPEN_READONLY, but there is no need to enforce that.
+ BackgroundOp(Ops::FRECENCYUPDATE);
+
+ return (mWriter = NewHandle());
+}
+
+void CacheEntry::OnHandleClosed(CacheEntryHandle const* aHandle) {
+ LOG(("CacheEntry::OnHandleClosed [this=%p, state=%s, handle=%p]", this,
+ StateString(mState), aHandle));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (IsDoomed() && NS_SUCCEEDED(mFileStatus) &&
+ // Note: mHandlesCount is dropped before this method is called
+ (mHandlesCount == 0 ||
+ (mHandlesCount == 1 && mWriter && mWriter != aHandle))) {
+ // This entry is no longer referenced from outside and is doomed.
+ // We can do this also when there is just reference from the writer,
+ // no one else could ever reach the written data.
+ // Tell the file to kill the handle, i.e. bypass any I/O operations
+ // on it except removing the file.
+ mFile->Kill();
+ }
+
+ if (mWriter != aHandle) {
+ LOG((" not the writer"));
+ return;
+ }
+
+ if (mOutputStream) {
+ LOG((" abandoning phantom output stream"));
+    // No one took our internal output stream, so there is no data, and the
+    // output stream has to be opened simultaneously with the input stream
+    // on this entry again.
+ mHasData = false;
+ // This asynchronously ends up invoking callbacks on this entry
+ // through OnOutputClosed() call.
+ mOutputStream->Close();
+ mOutputStream = nullptr;
+ } else {
+ // We must always redispatch, otherwise there is a risk of stack
+ // overflow. This code can recurse deeply. It won't execute sooner
+ // than we release mLock.
+ BackgroundOp(Ops::CALLBACKS, true);
+ }
+
+ mWriter = nullptr;
+
+ if (mState == WRITING) {
+ LOG((" reverting to state EMPTY - write failed"));
+ mState = EMPTY;
+ } else if (mState == REVALIDATING) {
+ LOG((" reverting to state READY - reval failed"));
+ mState = READY;
+ }
+
+ if (mState == READY && !mHasData) {
+    // We may get to this state when the following steps happen:
+    // 1. a new entry is given to a consumer
+    // 2. the consumer calls MetaDataReady(), we transition to READY
+    // 3. the consumer abandons the entry w/o opening the output stream,
+    //    leaving mHasData false
+    //
+    // In this case any following consumer will get a ready entry (with
+    // metadata) but in a state as if the entry data write were still in
+    // progress, and will wait indefinitely for the entry data or even the
+    // entry itself when RECHECK_AFTER_WRITE is returned from onCacheEntryCheck.
+    LOG(
+        (" we are in READY state, pretend we have data even though it"
+         " has actually never been touched"));
+ mHasData = true;
+ }
+}
+
+void CacheEntry::OnOutputClosed() {
+ // Called when the file's output stream is closed. Invoke any callbacks
+ // waiting for complete entry.
+
+ mozilla::MutexAutoLock lock(mLock);
+ InvokeCallbacks();
+}
+
+bool CacheEntry::IsReferenced() const {
+ CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
+
+  // Increasing this counter from 0 to non-zero and this check both happen only
+ // under the service lock.
+ return mHandlesCount > 0;
+}
+
+bool CacheEntry::IsFileDoomed() {
+ if (NS_SUCCEEDED(mFileStatus)) {
+ return mFile->IsDoomed();
+ }
+
+ return false;
+}
+
+uint32_t CacheEntry::GetMetadataMemoryConsumption() {
+ NS_ENSURE_SUCCESS(mFileStatus, 0);
+
+ uint32_t size;
+ if (NS_FAILED(mFile->ElementsSize(&size))) return 0;
+
+ return size;
+}
+
+// nsICacheEntry
+
+nsresult CacheEntry::GetPersistent(bool* aPersistToDisk) {
+ // No need to sync when only reading.
+ // When consumer needs to be consistent with state of the memory storage
+ // entries table, then let it use GetUseDisk getter that must be called under
+ // the service lock.
+ *aPersistToDisk = mUseDisk;
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetKey(nsACString& aKey) {
+ aKey.Assign(mURI);
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetCacheEntryId(uint64_t* aCacheEntryId) {
+ *aCacheEntryId = mCacheEntryId;
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetFetchCount(int32_t* aFetchCount) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->GetFetchCount(reinterpret_cast<uint32_t*>(aFetchCount));
+}
+
+nsresult CacheEntry::GetLastFetched(uint32_t* aLastFetched) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->GetLastFetched(aLastFetched);
+}
+
+nsresult CacheEntry::GetLastModified(uint32_t* aLastModified) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->GetLastModified(aLastModified);
+}
+
+nsresult CacheEntry::GetExpirationTime(uint32_t* aExpirationTime) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->GetExpirationTime(aExpirationTime);
+}
+
+nsresult CacheEntry::GetOnStartTime(uint64_t* aTime) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+ return mFile->GetOnStartTime(aTime);
+}
+
+nsresult CacheEntry::GetOnStopTime(uint64_t* aTime) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+ return mFile->GetOnStopTime(aTime);
+}
+
+nsresult CacheEntry::SetNetworkTimes(uint64_t aOnStartTime,
+ uint64_t aOnStopTime) {
+ if (NS_SUCCEEDED(mFileStatus)) {
+ return mFile->SetNetworkTimes(aOnStartTime, aOnStopTime);
+ }
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+nsresult CacheEntry::SetContentType(uint8_t aContentType) {
+ NS_ENSURE_ARG_MAX(aContentType, nsICacheEntry::CONTENT_TYPE_LAST - 1);
+
+ if (NS_SUCCEEDED(mFileStatus)) {
+ return mFile->SetContentType(aContentType);
+ }
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+nsresult CacheEntry::GetIsForcedValid(bool* aIsForcedValid) {
+ NS_ENSURE_ARG(aIsForcedValid);
+
+ MOZ_ASSERT(mState > LOADING);
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ if (mPinned) {
+ *aIsForcedValid = true;
+ return NS_OK;
+ }
+ }
+
+ nsAutoCString key;
+ nsresult rv = HashingKey(key);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ *aIsForcedValid =
+ CacheStorageService::Self()->IsForcedValidEntry(mStorageID, key);
+ LOG(("CacheEntry::GetIsForcedValid [this=%p, IsForcedValid=%d]", this,
+ *aIsForcedValid));
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::ForceValidFor(uint32_t aSecondsToTheFuture) {
+ LOG(("CacheEntry::ForceValidFor [this=%p, aSecondsToTheFuture=%d]", this,
+ aSecondsToTheFuture));
+
+ nsAutoCString key;
+ nsresult rv = HashingKey(key);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ CacheStorageService::Self()->ForceEntryValidFor(mStorageID, key,
+ aSecondsToTheFuture);
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::SetExpirationTime(uint32_t aExpirationTime) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ nsresult rv = mFile->SetExpirationTime(aExpirationTime);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Aligned assignment, thus atomic.
+ mSortingExpirationTime = aExpirationTime;
+ return NS_OK;
+}
+
+nsresult CacheEntry::OpenInputStream(int64_t offset, nsIInputStream** _retval) {
+ LOG(("CacheEntry::OpenInputStream [this=%p]", this));
+ return OpenInputStreamInternal(offset, nullptr, _retval);
+}
+
+nsresult CacheEntry::OpenAlternativeInputStream(const nsACString& type,
+ nsIInputStream** _retval) {
+ LOG(("CacheEntry::OpenAlternativeInputStream [this=%p, type=%s]", this,
+ PromiseFlatCString(type).get()));
+ return OpenInputStreamInternal(0, PromiseFlatCString(type).get(), _retval);
+}
+
+nsresult CacheEntry::OpenInputStreamInternal(int64_t offset,
+ const char* aAltDataType,
+ nsIInputStream** _retval) {
+ LOG(("CacheEntry::OpenInputStreamInternal [this=%p]", this));
+
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ nsresult rv;
+
+ RefPtr<CacheEntryHandle> selfHandle = NewHandle();
+
+ nsCOMPtr<nsIInputStream> stream;
+ if (aAltDataType) {
+ rv = mFile->OpenAlternativeInputStream(selfHandle, aAltDataType,
+ getter_AddRefs(stream));
+ if (NS_FAILED(rv)) {
+ // Failure of this method may be legal when the alternative data requested
+      // is not available or is of a different type. Console error logs are
+ // ensured by CacheFile::OpenAlternativeInputStream.
+ return rv;
+ }
+ } else {
+ rv = mFile->OpenInputStream(selfHandle, getter_AddRefs(stream));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(stream, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (!mHasData) {
+    // The output stream on this new entry has not been opened so far; do it now.
+ LOG((" creating phantom output stream"));
+ rv = OpenOutputStreamInternal(0, getter_AddRefs(mOutputStream));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ stream.forget(_retval);
+ return NS_OK;
+}
+
+nsresult CacheEntry::OpenOutputStream(int64_t offset, int64_t predictedSize,
+ nsIOutputStream** _retval) {
+ LOG(("CacheEntry::OpenOutputStream [this=%p]", this));
+
+ nsresult rv;
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ MOZ_ASSERT(mState > EMPTY);
+
+ if (mFile->EntryWouldExceedLimit(0, predictedSize, false)) {
+ LOG((" entry would exceed size limit"));
+ return NS_ERROR_FILE_TOO_BIG;
+ }
+
+ if (mOutputStream && !mIsDoomed) {
+ LOG((" giving phantom output stream"));
+ mOutputStream.forget(_retval);
+ } else {
+ rv = OpenOutputStreamInternal(offset, _retval);
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ // Entry considered ready when writer opens output stream.
+ if (mState < READY) mState = READY;
+
+ // Invoke any pending readers now.
+ InvokeCallbacks();
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::OpenAlternativeOutputStream(
+ const nsACString& type, int64_t predictedSize,
+ nsIAsyncOutputStream** _retval) {
+ LOG(("CacheEntry::OpenAlternativeOutputStream [this=%p, type=%s]", this,
+ PromiseFlatCString(type).get()));
+
+ nsresult rv;
+
+ if (type.IsEmpty()) {
+ // The empty string is reserved to mean no alt-data available.
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (!mHasData || mState < READY || mOutputStream || mIsDoomed) {
+ LOG((" entry not in state to write alt-data"));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mFile->EntryWouldExceedLimit(0, predictedSize, true)) {
+ LOG((" entry would exceed size limit"));
+ return NS_ERROR_FILE_TOO_BIG;
+ }
+
+ nsCOMPtr<nsIAsyncOutputStream> stream;
+ rv = mFile->OpenAlternativeOutputStream(
+ nullptr, PromiseFlatCString(type).get(), getter_AddRefs(stream));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ stream.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheEntry::OpenOutputStreamInternal(int64_t offset,
+ nsIOutputStream** _retval) {
+ LOG(("CacheEntry::OpenOutputStreamInternal [this=%p]", this));
+
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ mLock.AssertCurrentThreadOwns();
+
+ if (mIsDoomed) {
+ LOG((" doomed..."));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ MOZ_ASSERT(mState > LOADING);
+
+ nsresult rv;
+
+ // No need to sync on mUseDisk here, we don't need to be consistent
+ // with content of the memory storage entries hash table.
+ if (!mUseDisk) {
+ rv = mFile->SetMemoryOnly();
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ RefPtr<CacheOutputCloseListener> listener =
+ new CacheOutputCloseListener(this);
+
+ nsCOMPtr<nsIOutputStream> stream;
+ rv = mFile->OpenOutputStream(listener, getter_AddRefs(stream));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(stream, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Prevent opening output stream again.
+ mHasData = true;
+
+ stream.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetSecurityInfo(nsISupports** aSecurityInfo) {
+ {
+ mozilla::MutexAutoLock lock(mLock);
+ if (mSecurityInfoLoaded) {
+ NS_IF_ADDREF(*aSecurityInfo = mSecurityInfo);
+ return NS_OK;
+ }
+ }
+
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
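+  // The element is read and deserialized outside mLock. Two racing callers
+  // may both deserialize; the later swap below simply wins, which is benign
+  // since both results come from the same stored bytes.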
+ nsCString info;
+ nsCOMPtr<nsISupports> secInfo;
+ nsresult rv;
+
+ rv = mFile->GetElement("security-info", getter_Copies(info));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!info.IsVoid()) {
+ rv = NS_DeserializeObject(info, getter_AddRefs(secInfo));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ mSecurityInfo.swap(secInfo);
+ mSecurityInfoLoaded = true;
+
+ NS_IF_ADDREF(*aSecurityInfo = mSecurityInfo);
+ }
+
+ return NS_OK;
+}
+nsresult CacheEntry::SetSecurityInfo(nsISupports* aSecurityInfo) {
+ nsresult rv;
+
+ NS_ENSURE_SUCCESS(mFileStatus, mFileStatus);
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ mSecurityInfo = aSecurityInfo;
+ mSecurityInfoLoaded = true;
+ }
+
+ nsCOMPtr<nsISerializable> serializable = do_QueryInterface(aSecurityInfo);
+ if (aSecurityInfo && !serializable) return NS_ERROR_UNEXPECTED;
+
+ nsCString info;
+ if (serializable) {
+ rv = NS_SerializeToString(serializable, info);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ rv = mFile->SetElement("security-info", info.Length() ? info.get() : nullptr);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetStorageDataSize(uint32_t* aStorageDataSize) {
+ NS_ENSURE_ARG(aStorageDataSize);
+
+ int64_t dataSize;
+ nsresult rv = GetDataSize(&dataSize);
+ if (NS_FAILED(rv)) return rv;
+
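+  // The storage data size is reported as a 32-bit value, so clamp the 64-bit
+  // data size to UINT32_MAX.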
+ *aStorageDataSize = (uint32_t)std::min(int64_t(uint32_t(-1)), dataSize);
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::AsyncDoom(nsICacheEntryDoomCallback* aCallback) {
+ LOG(("CacheEntry::AsyncDoom [this=%p]", this));
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mIsDoomed || mDoomCallback)
+      return NS_ERROR_IN_PROGRESS;  // to aggregate we'd need a DOOMING state
+
+ RemoveForcedValidity();
+
+ mIsDoomed = true;
+ mDoomCallback = aCallback;
+ }
+
+ // This immediately removes the entry from the master hashtable and also
+ // immediately dooms the file. This way we make sure that any consumer
+ // after this point asking for the same entry won't get
+ // a) this entry
+ // b) a new entry with the same file
+ PurgeAndDoom();
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetMetaDataElement(const char* aKey, char** aRetval) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->GetElement(aKey, aRetval);
+}
+
+nsresult CacheEntry::SetMetaDataElement(const char* aKey, const char* aValue) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->SetElement(aKey, aValue);
+}
+
+nsresult CacheEntry::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
+ NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
+
+ return mFile->VisitMetaData(aVisitor);
+}
+
+nsresult CacheEntry::MetaDataReady() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ LOG(("CacheEntry::MetaDataReady [this=%p, state=%s]", this,
+ StateString(mState)));
+
+ MOZ_ASSERT(mState > EMPTY);
+
+ if (mState == WRITING) mState = READY;
+
+ InvokeCallbacks();
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::SetValid() {
+ LOG(("CacheEntry::SetValid [this=%p, state=%s]", this, StateString(mState)));
+
+ nsCOMPtr<nsIOutputStream> outputStream;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ MOZ_ASSERT(mState > EMPTY);
+
+ mState = READY;
+ mHasData = true;
+
+ InvokeCallbacks();
+
+ outputStream.swap(mOutputStream);
+ }
+
+ if (outputStream) {
+ LOG((" abandoning phantom output stream"));
+ outputStream->Close();
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheEntry::Recreate(bool aMemoryOnly, nsICacheEntry** _retval) {
+ LOG(("CacheEntry::Recreate [this=%p, state=%s]", this, StateString(mState)));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ RefPtr<CacheEntryHandle> handle = ReopenTruncated(aMemoryOnly, nullptr);
+ if (handle) {
+ handle.forget(_retval);
+ return NS_OK;
+ }
+
+ BackgroundOp(Ops::CALLBACKS, true);
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+nsresult CacheEntry::GetDataSize(int64_t* aDataSize) {
+ LOG(("CacheEntry::GetDataSize [this=%p]", this));
+ *aDataSize = 0;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (!mHasData) {
+ LOG((" write in progress (no data)"));
+ return NS_ERROR_IN_PROGRESS;
+ }
+ }
+
+ NS_ENSURE_SUCCESS(mFileStatus, mFileStatus);
+
+ // mayhemer: TODO Problem with compression?
+ if (!mFile->DataSize(aDataSize)) {
+ LOG((" write in progress (stream active)"));
+ return NS_ERROR_IN_PROGRESS;
+ }
+
+ LOG((" size=%" PRId64, *aDataSize));
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetAltDataSize(int64_t* aDataSize) {
+ LOG(("CacheEntry::GetAltDataSize [this=%p]", this));
+ if (NS_FAILED(mFileStatus)) {
+ return mFileStatus;
+ }
+ return mFile->GetAltDataSize(aDataSize);
+}
+
+nsresult CacheEntry::GetAltDataType(nsACString& aType) {
+ LOG(("CacheEntry::GetAltDataType [this=%p]", this));
+ if (NS_FAILED(mFileStatus)) {
+ return mFileStatus;
+ }
+ return mFile->GetAltDataType(aType);
+}
+
+nsresult CacheEntry::MarkValid() {
+ // NOT IMPLEMENTED ACTUALLY
+ return NS_OK;
+}
+
+nsresult CacheEntry::MaybeMarkValid() {
+ // NOT IMPLEMENTED ACTUALLY
+ return NS_OK;
+}
+
+nsresult CacheEntry::HasWriteAccess(bool aWriteAllowed, bool* aWriteAccess) {
+ *aWriteAccess = aWriteAllowed;
+ return NS_OK;
+}
+
+nsresult CacheEntry::Close() {
+ // NOT IMPLEMENTED ACTUALLY
+ return NS_OK;
+}
+
+nsresult CacheEntry::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
+ if (NS_FAILED(mFileStatus)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return mFile->GetDiskStorageSizeInKB(aDiskStorageSize);
+}
+
+nsresult CacheEntry::GetLoadContextInfo(nsILoadContextInfo** aInfo) {
+ nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(mStorageID);
+ if (!info) {
+ return NS_ERROR_FAILURE;
+ }
+
+ info.forget(aInfo);
+
+ return NS_OK;
+}
+
+// nsIRunnable
+
+NS_IMETHODIMP CacheEntry::Run() {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ BackgroundOp(mBackgroundOperations.Grab());
+ return NS_OK;
+}
+
+// Management methods
+
+double CacheEntry::GetFrecency() const {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+ return mFrecency;
+}
+
+uint32_t CacheEntry::GetExpirationTime() const {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+ return mSortingExpirationTime;
+}
+
+bool CacheEntry::IsRegistered() const {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+ return mRegistration == REGISTERED;
+}
+
+bool CacheEntry::CanRegister() const {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+ return mRegistration == NEVERREGISTERED;
+}
+
+void CacheEntry::SetRegistered(bool aRegistered) {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+ if (aRegistered) {
+ MOZ_ASSERT(mRegistration == NEVERREGISTERED);
+ mRegistration = REGISTERED;
+ } else {
+ MOZ_ASSERT(mRegistration == REGISTERED);
+ mRegistration = DEREGISTERED;
+ }
+}
+
+bool CacheEntry::DeferOrBypassRemovalOnPinStatus(bool aPinned) {
+ LOG(("CacheEntry::DeferOrBypassRemovalOnPinStatus [this=%p]", this));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mPinningKnown) {
+ LOG((" pinned=%d, caller=%d", mPinned, aPinned));
+ // Bypass when the pin status of this entry doesn't match the pin status
+ // caller wants to remove
+ return mPinned != aPinned;
+ }
+
+ LOG((" pinning unknown, caller=%d", aPinned));
+  // Otherwise, remember to doom after the pin status is determined for any
+  // callback opening the entry after this point...
+ Callback c(this, aPinned);
+ RememberCallback(c);
+ // ...and always bypass
+ return true;
+}
+
+bool CacheEntry::Purge(uint32_t aWhat) {
+ LOG(("CacheEntry::Purge [this=%p, what=%d]", this, aWhat));
+
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+ switch (aWhat) {
+ case PURGE_DATA_ONLY_DISK_BACKED:
+ case PURGE_WHOLE_ONLY_DISK_BACKED:
+ // This is an in-memory only entry, don't purge it
+ if (!mUseDisk) {
+ LOG((" not using disk"));
+ return false;
+ }
+ }
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mState == WRITING || mState == LOADING || mFrecency == 0) {
+      // In-progress (write or load) entries should (at least for consistency
+      // and from the logical point of view) stay in memory. Zero-frecency
+      // entries are those that have never been handed to any consumer; they
+      // are actually very fresh and should not be evicted just because their
+      // frecency has not been set yet.
+ LOG((" state=%s, frecency=%1.10f", StateString(mState), mFrecency));
+ return false;
+ }
+ }
+
+ if (NS_SUCCEEDED(mFileStatus) && mFile->IsWriteInProgress()) {
+    // The file is in use when there are open streams or chunks/metadata still
+    // waiting to be written. In this case the entry cannot be purged,
+    // otherwise a reopened entry might not even find the data on disk -
+    // the CacheFile is not shared and cannot be left orphaned while its job
+    // is not done, hence keep the whole entry.
+ LOG((" file still under use"));
+ return false;
+ }
+
+ switch (aWhat) {
+ case PURGE_WHOLE_ONLY_DISK_BACKED:
+ case PURGE_WHOLE: {
+ if (!CacheStorageService::Self()->RemoveEntry(this, true)) {
+ LOG((" not purging, still referenced"));
+ return false;
+ }
+
+ CacheStorageService::Self()->UnregisterEntry(this);
+
+      // The entry removed itself from the control arrays, return true.
+ return true;
+ }
+
+ case PURGE_DATA_ONLY_DISK_BACKED: {
+ NS_ENSURE_SUCCESS(mFileStatus, false);
+
+ mFile->ThrowMemoryCachedData();
+
+ // Entry has been left in control arrays, return false (not purged)
+ return false;
+ }
+ }
+
+ LOG((" ?"));
+ return false;
+}
+
+void CacheEntry::PurgeAndDoom() {
+ LOG(("CacheEntry::PurgeAndDoom [this=%p]", this));
+
+ CacheStorageService::Self()->RemoveEntry(this);
+ DoomAlreadyRemoved();
+}
+
+void CacheEntry::DoomAlreadyRemoved() {
+ LOG(("CacheEntry::DoomAlreadyRemoved [this=%p]", this));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ RemoveForcedValidity();
+
+ mIsDoomed = true;
+
+  // Pretend the pinning state is known. This entry is now doomed for good,
+  // so don't bother deferring the doom because of an unknown pinning state
+  // any more.
+ mPinningKnown = true;
+
+  // This schedules dooming of the file. Dooming is guaranteed to happen
+  // before any demand to open the same file made after this point,
+  // so that no newer opened entry gets this file.
+ DoomFile();
+
+ // Must force post here since may be indirectly called from
+ // InvokeCallbacks of this entry and we don't want reentrancy here.
+ BackgroundOp(Ops::CALLBACKS, true);
+ // Process immediately when on the management thread.
+ BackgroundOp(Ops::UNREGISTER);
+}
+
+void CacheEntry::DoomFile() {
+ nsresult rv = NS_ERROR_NOT_AVAILABLE;
+
+ if (NS_SUCCEEDED(mFileStatus)) {
+ if (mHandlesCount == 0 || (mHandlesCount == 1 && mWriter)) {
+      // We kill the file also when the only reference is from the writer,
+      // since no one else could ever reach the written data. Obviously also
+      // when there is no reference at all (should we ever end up here
+      // in that case).
+ // Tell the file to kill the handle, i.e. bypass any I/O operations
+ // on it except removing the file.
+ mFile->Kill();
+ }
+
+ // Always calls the callback asynchronously.
+ rv = mFile->Doom(mDoomCallback ? this : nullptr);
+ if (NS_SUCCEEDED(rv)) {
+ LOG((" file doomed"));
+ return;
+ }
+
+ if (NS_ERROR_FILE_NOT_FOUND == rv) {
+      // The file is set to be memory-only, notify the callbacks
+      // and pretend dooming has succeeded. From the point of view of
+      // the entry it actually did - the data is gone and cannot be
+      // reused.
+ rv = NS_OK;
+ }
+ }
+
+ // Always posts to the main thread.
+ OnFileDoomed(rv);
+}
+
+void CacheEntry::RemoveForcedValidity() {
+ mLock.AssertCurrentThreadOwns();
+
+ nsresult rv;
+
+ if (mIsDoomed) {
+ return;
+ }
+
+ nsAutoCString entryKey;
+ rv = HashingKey(entryKey);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return;
+ }
+
+ CacheStorageService::Self()->RemoveEntryForceValid(mStorageID, entryKey);
+}
+
+void CacheEntry::BackgroundOp(uint32_t aOperations, bool aForceAsync) {
+ mLock.AssertCurrentThreadOwns();
+
+ if (!CacheStorageService::IsOnManagementThread() || aForceAsync) {
+ if (mBackgroundOperations.Set(aOperations))
+ CacheStorageService::Self()->Dispatch(this);
+
+ LOG(("CacheEntry::BackgroundOp this=%p dipatch of %x", this, aOperations));
+ return;
+ }
+
+ {
+ mozilla::MutexAutoUnlock unlock(mLock);
+
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+ if (aOperations & Ops::FRECENCYUPDATE) {
+ ++mUseCount;
+
+#ifndef M_LN2
+# define M_LN2 0.69314718055994530942
+#endif
+
+ // Half-life is dynamic, in seconds.
+ static double half_life = CacheObserver::HalfLifeSeconds();
+      // Must convert from seconds to microseconds since PR_Now() gives usecs.
+ static double const decay =
+ (M_LN2 / half_life) / static_cast<double>(PR_USEC_PER_SEC);
+
+ double now_decay = static_cast<double>(PR_Now()) * decay;
+
+ if (mFrecency == 0) {
+ mFrecency = now_decay;
+ } else {
+        // TODO: once C++11 is enabled, use std::log1p(n), which equals
+        // log(n + 1) but is more precise.
+ mFrecency = log(exp(mFrecency - now_decay) + 1) + now_decay;
+ }
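+      // The recurrence above is a numerically stable log-sum-exp: with the
+      // use timestamps t_1..t_n (in usecs) it maintains
+      //   mFrecency = ln(exp(decay * t_1) + ... + exp(decay * t_n))
+      // without ever evaluating the huge exp(decay * t_i) terms directly,
+      // so entries used more recently and more often compare higher.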
+ LOG(("CacheEntry FRECENCYUPDATE [this=%p, frecency=%1.10f]", this,
+ mFrecency));
+
+      // Because CacheFile::Set*() is not thread-safe to use (it uses
+      // a WeakReference that is not thread-safe) we must post to the main
+      // thread...
+ NS_DispatchToMainThread(
+ NewRunnableMethod<double>("net::CacheEntry::StoreFrecency", this,
+ &CacheEntry::StoreFrecency, mFrecency));
+ }
+
+ if (aOperations & Ops::REGISTER) {
+ LOG(("CacheEntry REGISTER [this=%p]", this));
+
+ CacheStorageService::Self()->RegisterEntry(this);
+ }
+
+ if (aOperations & Ops::UNREGISTER) {
+ LOG(("CacheEntry UNREGISTER [this=%p]", this));
+
+ CacheStorageService::Self()->UnregisterEntry(this);
+ }
+ } // unlock
+
+ if (aOperations & Ops::CALLBACKS) {
+ LOG(("CacheEntry CALLBACKS (invoke) [this=%p]", this));
+
+ InvokeCallbacks();
+ }
+}
+
+void CacheEntry::StoreFrecency(double aFrecency) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if (NS_SUCCEEDED(mFileStatus)) {
+ mFile->SetFrecency(FRECENCY2INT(aFrecency));
+ }
+}
+
+// CacheOutputCloseListener
+
+CacheOutputCloseListener::CacheOutputCloseListener(CacheEntry* aEntry)
+ : Runnable("net::CacheOutputCloseListener"), mEntry(aEntry) {}
+
+void CacheOutputCloseListener::OnOutputClosed() {
+  // We need this class and the redispatch because this callback is invoked
+  // under the file's lock, while doing the job requires entering the entry's
+  // lock too; taking it directly here could lead to a deadlock.
+ NS_DispatchToCurrentThread(this);
+}
+
+NS_IMETHODIMP CacheOutputCloseListener::Run() {
+ mEntry->OnOutputClosed();
+ return NS_OK;
+}
+
+// Memory reporting
+
+size_t CacheEntry::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = 0;
+
+ n += mCallbacks.ShallowSizeOfExcludingThis(mallocSizeOf);
+ if (mFile) {
+ n += mFile->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ n += mURI.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ n += mEnhanceID.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ n += mStorageID.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+
+ // mDoomCallback is an arbitrary class that is probably reported elsewhere.
+ // mOutputStream is reported in mFile.
+ // mWriter is one of many handles we create, but (intentionally) not keep
+ // any reference to, so those unfortunately cannot be reported. Handles are
+ // small, though.
+ // mSecurityInfo doesn't impl nsISizeOf.
+
+ return n;
+}
+
+size_t CacheEntry::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheEntry.h b/netwerk/cache2/CacheEntry.h
new file mode 100644
index 0000000000..ce93bbb105
--- /dev/null
+++ b/netwerk/cache2/CacheEntry.h
@@ -0,0 +1,573 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheEntry__h__
+#define CacheEntry__h__
+
+#include "nsICacheEntry.h"
+#include "CacheFile.h"
+
+#include "nsIRunnable.h"
+#include "nsIOutputStream.h"
+#include "nsICacheEntryOpenCallback.h"
+#include "nsICacheEntryDoomCallback.h"
+
+#include "nsCOMPtr.h"
+#include "nsRefPtrHashtable.h"
+#include "nsDataHashtable.h"
+#include "nsHashKeys.h"
+#include "nsString.h"
+#include "nsCOMArray.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/TimeStamp.h"
+
+static inline uint32_t PRTimeToSeconds(PRTime t_usec) {
+ return uint32_t(t_usec / PR_USEC_PER_SEC);
+}
+
+#define NowInSeconds() PRTimeToSeconds(PR_Now())
+
+class nsIOutputStream;
+class nsIURI;
+class nsIThread;
+
+namespace mozilla {
+namespace net {
+
+class CacheStorageService;
+class CacheStorage;
+class CacheOutputCloseListener;
+class CacheEntryHandle;
+
+class CacheEntry final : public nsIRunnable, public CacheFileListener {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIRUNNABLE
+
+ static uint64_t GetNextId();
+
+ CacheEntry(const nsACString& aStorageID, const nsACString& aURI,
+ const nsACString& aEnhanceID, bool aUseDisk, bool aSkipSizeCheck,
+ bool aPin);
+
+ void AsyncOpen(nsICacheEntryOpenCallback* aCallback, uint32_t aFlags);
+
+ CacheEntryHandle* NewHandle();
+  // For a new or recreated entry w/o a callback, we need to wrap it
+  // with a handle to detect when the writing consumer is gone.
+ CacheEntryHandle* NewWriteHandle();
+
+ // Forwarded to from CacheEntryHandle : nsICacheEntry
+ nsresult GetKey(nsACString& aKey);
+ nsresult GetCacheEntryId(uint64_t* aCacheEntryId);
+ nsresult GetPersistent(bool* aPersistent);
+ nsresult GetFetchCount(int32_t* aFetchCount);
+ nsresult GetLastFetched(uint32_t* aLastFetched);
+ nsresult GetLastModified(uint32_t* aLastModified);
+ nsresult GetExpirationTime(uint32_t* aExpirationTime);
+ nsresult SetExpirationTime(uint32_t expirationTime);
+ nsresult GetOnStartTime(uint64_t* aOnStartTime);
+ nsresult GetOnStopTime(uint64_t* aOnStopTime);
+ nsresult SetNetworkTimes(uint64_t onStartTime, uint64_t onStopTime);
+ nsresult SetContentType(uint8_t aContentType);
+ nsresult ForceValidFor(uint32_t aSecondsToTheFuture);
+ nsresult GetIsForcedValid(bool* aIsForcedValid);
+ nsresult OpenInputStream(int64_t offset, nsIInputStream** _retval);
+ nsresult OpenOutputStream(int64_t offset, int64_t predictedSize,
+ nsIOutputStream** _retval);
+ nsresult GetSecurityInfo(nsISupports** aSecurityInfo);
+ nsresult SetSecurityInfo(nsISupports* aSecurityInfo);
+ nsresult GetStorageDataSize(uint32_t* aStorageDataSize);
+ nsresult AsyncDoom(nsICacheEntryDoomCallback* listener);
+ nsresult GetMetaDataElement(const char* key, char** _retval);
+ nsresult SetMetaDataElement(const char* key, const char* value);
+ nsresult VisitMetaData(nsICacheEntryMetaDataVisitor* visitor);
+ nsresult MetaDataReady(void);
+ nsresult SetValid(void);
+ nsresult GetDiskStorageSizeInKB(uint32_t* aDiskStorageSizeInKB);
+ nsresult Recreate(bool aMemoryOnly, nsICacheEntry** _retval);
+ nsresult GetDataSize(int64_t* aDataSize);
+ nsresult GetAltDataSize(int64_t* aAltDataSize);
+ nsresult GetAltDataType(nsACString& aAltDataType);
+ nsresult OpenAlternativeOutputStream(const nsACString& type,
+ int64_t predictedSize,
+ nsIAsyncOutputStream** _retval);
+ nsresult OpenAlternativeInputStream(const nsACString& type,
+ nsIInputStream** _retval);
+ nsresult GetLoadContextInfo(nsILoadContextInfo** aLoadContextInfo);
+ nsresult Close(void);
+ nsresult MarkValid(void);
+ nsresult MaybeMarkValid(void);
+ nsresult HasWriteAccess(bool aWriteAllowed, bool* _retval);
+
+ public:
+ uint32_t GetMetadataMemoryConsumption();
+ nsCString const& GetStorageID() const { return mStorageID; }
+ nsCString const& GetEnhanceID() const { return mEnhanceID; }
+ nsCString const& GetURI() const { return mURI; }
+ // Accessible at any time
+ bool IsUsingDisk() const { return mUseDisk; }
+ bool IsReferenced() const;
+ bool IsFileDoomed();
+ bool IsDoomed() const { return mIsDoomed; }
+ bool IsPinned() const { return mPinned; }
+
+ // Methods for entry management (eviction from memory),
+ // called only on the management thread.
+
+ // TODO make these inline
+ double GetFrecency() const;
+ uint32_t GetExpirationTime() const;
+ uint32_t UseCount() const { return mUseCount; }
+
+ bool IsRegistered() const;
+ bool CanRegister() const;
+ void SetRegistered(bool aRegistered);
+
+ TimeStamp const& LoadStart() const { return mLoadStart; }
+
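+  // Purge modes accepted by Purge():
+  // - PURGE_DATA_ONLY_DISK_BACKED drops only the memory-cached data of a
+  //   disk-backed entry; the entry itself stays registered.
+  // - PURGE_WHOLE_ONLY_DISK_BACKED evicts a disk-backed entry completely from
+  //   the memory pool.
+  // - PURGE_WHOLE evicts the entry whether or not it is disk-backed.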
+ enum EPurge {
+ PURGE_DATA_ONLY_DISK_BACKED,
+ PURGE_WHOLE_ONLY_DISK_BACKED,
+ PURGE_WHOLE,
+ };
+
+ bool DeferOrBypassRemovalOnPinStatus(bool aPinned);
+ bool Purge(uint32_t aWhat);
+ void PurgeAndDoom();
+ void DoomAlreadyRemoved();
+
+ nsresult HashingKeyWithStorage(nsACString& aResult) const;
+ nsresult HashingKey(nsACString& aResult) const;
+
+ static nsresult HashingKey(const nsACString& aStorageID,
+ const nsACString& aEnhanceID, nsIURI* aURI,
+ nsACString& aResult);
+
+ static nsresult HashingKey(const nsACString& aStorageID,
+ const nsACString& aEnhanceID,
+ const nsACString& aURISpec, nsACString& aResult);
+
+ // Accessed only on the service management thread
+ double mFrecency;
+ ::mozilla::Atomic<uint32_t, ::mozilla::Relaxed> mSortingExpirationTime;
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ virtual ~CacheEntry();
+
+ // CacheFileListener
+ NS_IMETHOD OnFileReady(nsresult aResult, bool aIsNew) override;
+ NS_IMETHOD OnFileDoomed(nsresult aResult) override;
+
+ // Keep the service alive during life-time of an entry
+ RefPtr<CacheStorageService> mService;
+
+  // We must monitor when a cache entry whose consumer is responsible
+  // for writing it for the first time gets released. We must then invoke
+  // the waiting callbacks so as not to break the chain.
+ class Callback {
+ public:
+ Callback(CacheEntry* aEntry, nsICacheEntryOpenCallback* aCallback,
+ bool aReadOnly, bool aCheckOnAnyThread, bool aSecret);
+ // Special constructor for Callback objects added to the chain
+ // just to ensure proper defer dooming (recreation) of this entry.
+ Callback(CacheEntry* aEntry, bool aDoomWhenFoundInPinStatus);
+ Callback(Callback const& aThat);
+ ~Callback();
+
+    // Called when this callback record changes its owning entry,
+    // mainly during recreation.
+ void ExchangeEntry(CacheEntry* aEntry);
+
+ // Returns true when an entry is about to be "defer" doomed and this is
+ // a "defer" callback.
+ bool DeferDoom(bool* aDoom) const;
+
+    // We raise the reference count here to account for the pending
+    // callback (which virtually holds a ref to this entry before it gets
+    // its pointer).
+ RefPtr<CacheEntry> mEntry;
+ nsCOMPtr<nsICacheEntryOpenCallback> mCallback;
+ nsCOMPtr<nsIEventTarget> mTarget;
+ bool mReadOnly : 1;
+ bool mRevalidating : 1;
+ bool mCheckOnAnyThread : 1;
+ bool mRecheckAfterWrite : 1;
+ bool mNotWanted : 1;
+ bool mSecret : 1;
+
+    // These are set only for the defer-doomer Callback instance inserted
+    // into the callback chain. When any of these is set and the corresponding
+    // flag on the entry is also set, this callback will indicate
+    // (via DeferDoom()) that the entry has to be recreated/doomed.
+ bool mDoomWhenFoundPinned : 1;
+ bool mDoomWhenFoundNonPinned : 1;
+
+ nsresult OnCheckThread(bool* aOnCheckThread) const;
+ nsresult OnAvailThread(bool* aOnAvailThread) const;
+ };
+
+ // Since OnCacheEntryAvailable must be invoked on the main thread
+ // we need a runnable for it...
+ class AvailableCallbackRunnable : public Runnable {
+ public:
+ AvailableCallbackRunnable(CacheEntry* aEntry, Callback const& aCallback)
+ : Runnable("CacheEntry::AvailableCallbackRunnable"),
+ mEntry(aEntry),
+ mCallback(aCallback) {}
+
+ private:
+ NS_IMETHOD Run() override {
+ mEntry->InvokeAvailableCallback(mCallback);
+ return NS_OK;
+ }
+
+ RefPtr<CacheEntry> mEntry;
+ Callback mCallback;
+ };
+
+ // Since OnCacheEntryDoomed must be invoked on the main thread
+ // we need a runnable for it...
+ class DoomCallbackRunnable : public Runnable {
+ public:
+ DoomCallbackRunnable(CacheEntry* aEntry, nsresult aRv)
+ : Runnable("net::CacheEntry::DoomCallbackRunnable"),
+ mEntry(aEntry),
+ mRv(aRv) {}
+
+ private:
+ NS_IMETHOD Run() override {
+ nsCOMPtr<nsICacheEntryDoomCallback> callback;
+ {
+ mozilla::MutexAutoLock lock(mEntry->mLock);
+ mEntry->mDoomCallback.swap(callback);
+ }
+
+ if (callback) callback->OnCacheEntryDoomed(mRv);
+ return NS_OK;
+ }
+
+ RefPtr<CacheEntry> mEntry;
+ nsresult mRv;
+ };
+
+ // Starts the load or just invokes the callback, bypasses (when required)
+ // if busy. Returns true on job done, false on bypass.
+ bool Open(Callback& aCallback, bool aTruncate, bool aPriority,
+ bool aBypassIfBusy);
+ // Loads from disk asynchronously
+ bool Load(bool aTruncate, bool aPriority);
+
+ void RememberCallback(Callback& aCallback);
+ void InvokeCallbacksLock();
+ void InvokeCallbacks();
+ bool InvokeCallbacks(bool aReadOnly);
+ bool InvokeCallback(Callback& aCallback);
+ void InvokeAvailableCallback(Callback const& aCallback);
+ void OnFetched(Callback const& aCallback);
+
+ nsresult OpenOutputStreamInternal(int64_t offset, nsIOutputStream** _retval);
+ nsresult OpenInputStreamInternal(int64_t offset, const char* aAltDataType,
+ nsIInputStream** _retval);
+
+ void OnHandleClosed(CacheEntryHandle const* aHandle);
+
+ private:
+ friend class CacheEntryHandle;
+ // Increment/decrements the number of handles keeping this entry.
+ void AddHandleRef() { ++mHandlesCount; }
+ void ReleaseHandleRef() { --mHandlesCount; }
+ // Current number of handles keeping this entry.
+ uint32_t HandlesCount() const { return mHandlesCount; }
+
+ private:
+ friend class CacheOutputCloseListener;
+ void OnOutputClosed();
+
+ private:
+  // Schedules a background operation on the management thread.
+  // When called on the management thread directly, the operation(s)
+  // are executed immediately.
+ void BackgroundOp(uint32_t aOperation, bool aForceAsync = false);
+ void StoreFrecency(double aFrecency);
+
+ // Called only from DoomAlreadyRemoved()
+ void DoomFile();
+ // When this entry is doomed the first time, this method removes
+ // any force-valid timing info for this entry.
+ void RemoveForcedValidity();
+
+ already_AddRefed<CacheEntryHandle> ReopenTruncated(
+ bool aMemoryOnly, nsICacheEntryOpenCallback* aCallback);
+ void TransferCallbacks(CacheEntry& aFromEntry);
+
+ mozilla::Mutex mLock;
+
+ // Reflects the number of existing handles for this entry
+ ::mozilla::ThreadSafeAutoRefCnt mHandlesCount;
+
+ nsTArray<Callback> mCallbacks;
+ nsCOMPtr<nsICacheEntryDoomCallback> mDoomCallback;
+
+ RefPtr<CacheFile> mFile;
+
+  // Using ReleaseAcquire since we only control access to mFile with this.
+  // When mFileStatus is read and found successful, it is guaranteed that mFile
+  // exists and that Init() has already been called on it successfully.
+ ::mozilla::Atomic<nsresult, ::mozilla::ReleaseAcquire> mFileStatus;
+ nsCString mURI;
+ nsCString mEnhanceID;
+ nsCString mStorageID;
+
+ // mUseDisk, mSkipSizeCheck, mIsDoomed are plain "bool", not "bool:1",
+ // so as to avoid bitfield races with the byte containing
+ // mSecurityInfoLoaded et al. See bug 1278524.
+ //
+ // Whether it's allowed to persist the data to disk
+ bool const mUseDisk;
+ // Whether it should skip max size check.
+ bool const mSkipSizeCheck;
+ // Set when entry is doomed with AsyncDoom() or DoomAlreadyRemoved().
+ bool mIsDoomed;
+
+ // Following flags are all synchronized with the cache entry lock.
+
+ // Whether security info has already been looked up in metadata.
+ bool mSecurityInfoLoaded : 1;
+ // Prevents any callback invocation
+ bool mPreventCallbacks : 1;
+  // true: after a load with an existing file, or after an output stream has
+  //       been opened.
+  //       note - when an input stream is opened while this flag is false, an
+  //       output stream is opened along with it; this makes input streams on
+  //       new entries behave correctly when EOF is reached (WOULD_BLOCK is
+  //       returned).
+  // false: after a load with a new file, or dropped back to false when a
+  //        writer fails to open an output stream.
+ bool mHasData : 1;
+ // The indication of pinning this entry was open with
+ bool mPinned : 1;
+  // Whether the pinning state of the entry is known (i.e. equal to the actual
+  // state of the cache file).
+ bool mPinningKnown : 1;
+
+ static char const* StateString(uint32_t aState);
+
+ enum EState { // transiting to:
+ NOTLOADED = 0, // -> LOADING | EMPTY
+ LOADING = 1, // -> EMPTY | READY
+ EMPTY = 2, // -> WRITING
+ WRITING = 3, // -> EMPTY | READY
+ READY = 4, // -> REVALIDATING
+ REVALIDATING = 5 // -> READY
+ };
+
+ // State of this entry.
+ EState mState;
+
+ enum ERegistration {
+ NEVERREGISTERED = 0, // The entry has never been registered
+ REGISTERED = 1, // The entry is stored in the memory pool index
+ DEREGISTERED = 2 // The entry has been removed from the pool
+ };
+
+  // Accessed only on the management thread. Records the registration state of
+  // this entry in the memory pool intermediate cache.
+ ERegistration mRegistration;
+
+  // If a new (empty) entry is asked to open an input stream before an
+  // output stream has been opened, we must open an output stream internally
+  // on the CacheFile and hold it until the writer releases the entry or opens
+  // the output stream itself (then we hand mOutputStream over to it).
+ nsCOMPtr<nsIOutputStream> mOutputStream;
+
+  // Weak reference to the current writer. There can be more than one
+ // writer at a time and OnHandleClosed() must be processed only for the
+ // current one.
+ CacheEntryHandle* mWriter;
+
+  // Background thread scheduled operations. Set (under the lock) one
+  // of these flags to tell the background thread what to do.
+ class Ops {
+ public:
+ static uint32_t const REGISTER = 1 << 0;
+ static uint32_t const FRECENCYUPDATE = 1 << 1;
+ static uint32_t const CALLBACKS = 1 << 2;
+ static uint32_t const UNREGISTER = 1 << 3;
+
+ Ops() : mFlags(0) {}
+ uint32_t Grab() {
+ uint32_t flags = mFlags;
+ mFlags = 0;
+ return flags;
+ }
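+    // Returns true when none of aFlags has been scheduled yet, i.e. when the
+    // caller needs to dispatch this entry to the management thread.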
+ bool Set(uint32_t aFlags) {
+ if (mFlags & aFlags) return false;
+ mFlags |= aFlags;
+ return true;
+ }
+
+ private:
+ uint32_t mFlags;
+ } mBackgroundOperations;
+
+ nsCOMPtr<nsISupports> mSecurityInfo;
+ mozilla::TimeStamp mLoadStart;
+ uint32_t mUseCount;
+
+ const uint64_t mCacheEntryId;
+};
+
+class CacheEntryHandle final : public nsICacheEntry {
+ public:
+ explicit CacheEntryHandle(CacheEntry* aEntry);
+ CacheEntry* Entry() const { return mEntry; }
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+  // The default implementation simply forwards safely to the wrapped entry.
+ NS_IMETHOD GetKey(nsACString& aKey) override { return mEntry->GetKey(aKey); }
+ NS_IMETHOD GetCacheEntryId(uint64_t* aCacheEntryId) override {
+ return mEntry->GetCacheEntryId(aCacheEntryId);
+ }
+ NS_IMETHOD GetPersistent(bool* aPersistent) override {
+ return mEntry->GetPersistent(aPersistent);
+ }
+ NS_IMETHOD GetFetchCount(int32_t* aFetchCount) override {
+ return mEntry->GetFetchCount(aFetchCount);
+ }
+ NS_IMETHOD GetLastFetched(uint32_t* aLastFetched) override {
+ return mEntry->GetLastFetched(aLastFetched);
+ }
+ NS_IMETHOD GetLastModified(uint32_t* aLastModified) override {
+ return mEntry->GetLastModified(aLastModified);
+ }
+ NS_IMETHOD GetExpirationTime(uint32_t* aExpirationTime) override {
+ return mEntry->GetExpirationTime(aExpirationTime);
+ }
+ NS_IMETHOD SetExpirationTime(uint32_t expirationTime) override {
+ return mEntry->SetExpirationTime(expirationTime);
+ }
+ NS_IMETHOD GetOnStartTime(uint64_t* aOnStartTime) override {
+ return mEntry->GetOnStartTime(aOnStartTime);
+ }
+ NS_IMETHOD GetOnStopTime(uint64_t* aOnStopTime) override {
+ return mEntry->GetOnStopTime(aOnStopTime);
+ }
+ NS_IMETHOD SetNetworkTimes(uint64_t onStartTime,
+ uint64_t onStopTime) override {
+ return mEntry->SetNetworkTimes(onStartTime, onStopTime);
+ }
+ NS_IMETHOD SetContentType(uint8_t contentType) override {
+ return mEntry->SetContentType(contentType);
+ }
+ NS_IMETHOD ForceValidFor(uint32_t aSecondsToTheFuture) override {
+ return mEntry->ForceValidFor(aSecondsToTheFuture);
+ }
+ NS_IMETHOD GetIsForcedValid(bool* aIsForcedValid) override {
+ return mEntry->GetIsForcedValid(aIsForcedValid);
+ }
+ NS_IMETHOD OpenInputStream(int64_t offset,
+ nsIInputStream** _retval) override {
+ return mEntry->OpenInputStream(offset, _retval);
+ }
+ NS_IMETHOD OpenOutputStream(int64_t offset, int64_t predictedSize,
+ nsIOutputStream** _retval) override {
+ return mEntry->OpenOutputStream(offset, predictedSize, _retval);
+ }
+ NS_IMETHOD GetSecurityInfo(nsISupports** aSecurityInfo) override {
+ return mEntry->GetSecurityInfo(aSecurityInfo);
+ }
+ NS_IMETHOD SetSecurityInfo(nsISupports* aSecurityInfo) override {
+ return mEntry->SetSecurityInfo(aSecurityInfo);
+ }
+ NS_IMETHOD GetStorageDataSize(uint32_t* aStorageDataSize) override {
+ return mEntry->GetStorageDataSize(aStorageDataSize);
+ }
+ NS_IMETHOD AsyncDoom(nsICacheEntryDoomCallback* listener) override {
+ return mEntry->AsyncDoom(listener);
+ }
+ NS_IMETHOD GetMetaDataElement(const char* key, char** _retval) override {
+ return mEntry->GetMetaDataElement(key, _retval);
+ }
+ NS_IMETHOD SetMetaDataElement(const char* key, const char* value) override {
+ return mEntry->SetMetaDataElement(key, value);
+ }
+ NS_IMETHOD VisitMetaData(nsICacheEntryMetaDataVisitor* visitor) override {
+ return mEntry->VisitMetaData(visitor);
+ }
+ NS_IMETHOD MetaDataReady(void) override { return mEntry->MetaDataReady(); }
+ NS_IMETHOD SetValid(void) override { return mEntry->SetValid(); }
+ NS_IMETHOD GetDiskStorageSizeInKB(uint32_t* aDiskStorageSizeInKB) override {
+ return mEntry->GetDiskStorageSizeInKB(aDiskStorageSizeInKB);
+ }
+ NS_IMETHOD Recreate(bool aMemoryOnly, nsICacheEntry** _retval) override {
+ return mEntry->Recreate(aMemoryOnly, _retval);
+ }
+ NS_IMETHOD GetDataSize(int64_t* aDataSize) override {
+ return mEntry->GetDataSize(aDataSize);
+ }
+ NS_IMETHOD GetAltDataSize(int64_t* aAltDataSize) override {
+ return mEntry->GetAltDataSize(aAltDataSize);
+ }
+ NS_IMETHOD GetAltDataType(nsACString& aType) override {
+ return mEntry->GetAltDataType(aType);
+ }
+ NS_IMETHOD OpenAlternativeOutputStream(
+ const nsACString& type, int64_t predictedSize,
+ nsIAsyncOutputStream** _retval) override {
+ return mEntry->OpenAlternativeOutputStream(type, predictedSize, _retval);
+ }
+ NS_IMETHOD OpenAlternativeInputStream(const nsACString& type,
+ nsIInputStream** _retval) override {
+ return mEntry->OpenAlternativeInputStream(type, _retval);
+ }
+ NS_IMETHOD GetLoadContextInfo(
+ nsILoadContextInfo** aLoadContextInfo) override {
+ return mEntry->GetLoadContextInfo(aLoadContextInfo);
+ }
+ NS_IMETHOD Close(void) override { return mEntry->Close(); }
+ NS_IMETHOD MarkValid(void) override { return mEntry->MarkValid(); }
+ NS_IMETHOD MaybeMarkValid(void) override { return mEntry->MaybeMarkValid(); }
+ NS_IMETHOD HasWriteAccess(bool aWriteAllowed, bool* _retval) override {
+ return mEntry->HasWriteAccess(aWriteAllowed, _retval);
+ }
+
+ // Specific implementation:
+ NS_IMETHOD Dismiss() override;
+
+ private:
+ virtual ~CacheEntryHandle();
+ RefPtr<CacheEntry> mEntry;
+
+  // This is |false| until Dismiss() is called; it prevents OnHandleClosed()
+  // from being called more than once.
+ Atomic<bool, ReleaseAcquire> mClosed;
+};
+
+class CacheOutputCloseListener final : public Runnable {
+ public:
+ void OnOutputClosed();
+
+ private:
+ friend class CacheEntry;
+
+ virtual ~CacheOutputCloseListener() = default;
+
+ NS_DECL_NSIRUNNABLE
+ explicit CacheOutputCloseListener(CacheEntry* aEntry);
+
+ private:
+ RefPtr<CacheEntry> mEntry;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFile.cpp b/netwerk/cache2/CacheFile.cpp
new file mode 100644
index 0000000000..262c2a762c
--- /dev/null
+++ b/netwerk/cache2/CacheFile.cpp
@@ -0,0 +1,2596 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheFile.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "CacheFileChunk.h"
+#include "CacheFileInputStream.h"
+#include "CacheFileOutputStream.h"
+#include "CacheLog.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Telemetry.h"
+#include "nsComponentManagerUtils.h"
+#include "nsProxyRelease.h"
+#include "nsThreadUtils.h"
+
+// When CACHE_CHUNKS is defined we always cache unused chunks in mCachedChunks.
+// When it is not defined, we release unused chunks as soon as possible and
+// cache them only when:
+// - CacheFile is memory-only
+// - CacheFile is still waiting for the handle
+// - the chunk is preloaded
+
+//#define CACHE_CHUNKS
+
+namespace mozilla {
+namespace net {
+
+class NotifyCacheFileListenerEvent : public Runnable {
+ public:
+ NotifyCacheFileListenerEvent(CacheFileListener* aCallback, nsresult aResult,
+ bool aIsNew)
+ : Runnable("net::NotifyCacheFileListenerEvent"),
+ mCallback(aCallback),
+ mRV(aResult),
+ mIsNew(aIsNew) {
+ LOG(
+ ("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
+ "[this=%p]",
+ this));
+ }
+
+ protected:
+ ~NotifyCacheFileListenerEvent() {
+ LOG(
+ ("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
+ "[this=%p]",
+ this));
+ }
+
+ public:
+ NS_IMETHOD Run() override {
+ LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));
+
+ mCallback->OnFileReady(mRV, mIsNew);
+ return NS_OK;
+ }
+
+ protected:
+ nsCOMPtr<CacheFileListener> mCallback;
+ nsresult mRV;
+ bool mIsNew;
+};
+
+class NotifyChunkListenerEvent : public Runnable {
+ public:
+ NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, nsresult aResult,
+ uint32_t aChunkIdx, CacheFileChunk* aChunk)
+ : Runnable("net::NotifyChunkListenerEvent"),
+ mCallback(aCallback),
+ mRV(aResult),
+ mChunkIdx(aChunkIdx),
+ mChunk(aChunk) {
+ LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
+ this));
+ }
+
+ protected:
+ ~NotifyChunkListenerEvent() {
+ LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
+ this));
+ }
+
+ public:
+ NS_IMETHOD Run() override {
+ LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));
+
+ mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
+ return NS_OK;
+ }
+
+ protected:
+ nsCOMPtr<CacheFileChunkListener> mCallback;
+ nsresult mRV;
+ uint32_t mChunkIdx;
+ RefPtr<CacheFileChunk> mChunk;
+};
+
+class DoomFileHelper : public CacheFileIOListener {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ explicit DoomFileHelper(CacheFileListener* aListener)
+ : mListener(aListener) {}
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
+ MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override {
+ MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override {
+ MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
+ if (mListener) mListener->OnFileDoomed(aResult);
+ return NS_OK;
+ }
+
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
+ MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
+ nsresult aResult) override {
+ MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ private:
+ virtual ~DoomFileHelper() = default;
+
+ nsCOMPtr<CacheFileListener> mListener;
+};
+
+NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)
+
+NS_IMPL_ADDREF(CacheFile)
+NS_IMPL_RELEASE(CacheFile)
+NS_INTERFACE_MAP_BEGIN(CacheFile)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
+ mozilla::net::CacheFileChunkListener)
+NS_INTERFACE_MAP_END
+
+CacheFile::CacheFile()
+ : mLock("CacheFile.mLock"),
+ mOpeningFile(false),
+ mReady(false),
+ mMemoryOnly(false),
+ mSkipSizeCheck(false),
+ mOpenAsMemoryOnly(false),
+ mPinned(false),
+ mPriority(false),
+ mDataAccessed(false),
+ mDataIsDirty(false),
+ mWritingMetadata(false),
+ mPreloadWithoutInputStreams(true),
+ mPreloadChunkCount(0),
+ mStatus(NS_OK),
+ mDataSize(-1),
+ mAltDataOffset(-1),
+ mKill(false),
+ mOutput(nullptr) {
+ LOG(("CacheFile::CacheFile() [this=%p]", this));
+}
+
+CacheFile::~CacheFile() {
+ LOG(("CacheFile::~CacheFile() [this=%p]", this));
+
+ MutexAutoLock lock(mLock);
+ if (!mMemoryOnly && mReady && !mKill) {
+    // The mReady flag indicates we have metadata and are in a valid state.
+ WriteMetadataIfNeededLocked(true);
+ }
+}
+
+nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
+ bool aMemoryOnly, bool aSkipSizeCheck, bool aPriority,
+ bool aPinned, CacheFileListener* aCallback) {
+ MOZ_ASSERT(!mListener);
+ MOZ_ASSERT(!mHandle);
+
+ MOZ_ASSERT(!(aMemoryOnly && aPinned));
+
+ nsresult rv;
+
+ mKey = aKey;
+ mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
+ mSkipSizeCheck = aSkipSizeCheck;
+ mPriority = aPriority;
+ mPinned = aPinned;
+
+  // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
+  // as much data as was announced by Available().
+  // CacheFileInputStream::Available() also uses preloaded chunks to compute
+  // the number of available bytes in the input stream, so we have to make sure
+  // the preload chunk count won't change during the CacheFile's lifetime.
+  // Otherwise we could release cached chunks that were used to calculate the
+  // available bytes but would no longer be available during a later call to
+  // CacheFileInputStream::Read().
+ mPreloadChunkCount = CacheObserver::PreloadChunkCount();
+
+ LOG(
+ ("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
+ "priority=%d, listener=%p]",
+ this, mKey.get(), aCreateNew, aMemoryOnly, aPriority, aCallback));
+
+ if (mMemoryOnly) {
+ MOZ_ASSERT(!aCallback);
+
+ mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey);
+ mReady = true;
+ mDataSize = mMetadata->Offset();
+ return NS_OK;
+ }
+ uint32_t flags;
+ if (aCreateNew) {
+ MOZ_ASSERT(!aCallback);
+ flags = CacheFileIOManager::CREATE_NEW;
+
+ // make sure we can use this entry immediately
+ mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
+ mReady = true;
+ mDataSize = mMetadata->Offset();
+ } else {
+ flags = CacheFileIOManager::CREATE;
+ }
+
+ if (mPriority) {
+ flags |= CacheFileIOManager::PRIORITY;
+ }
+
+ if (mPinned) {
+ flags |= CacheFileIOManager::PINNED;
+ }
+
+ mOpeningFile = true;
+ mListener = aCallback;
+ rv = CacheFileIOManager::OpenFile(mKey, flags, this);
+ if (NS_FAILED(rv)) {
+ mListener = nullptr;
+ mOpeningFile = false;
+
+ if (mPinned) {
+ LOG(
+ ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
+ "but we want to pin, fail the file opening. [this=%p]",
+ this));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (aCreateNew) {
+ NS_WARNING("Forcing memory-only entry since OpenFile failed");
+ LOG(
+ ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
+ "synchronously. We can continue in memory-only mode since "
+ "aCreateNew == true. [this=%p]",
+ this));
+
+ mMemoryOnly = true;
+ } else if (rv == NS_ERROR_NOT_INITIALIZED) {
+ NS_WARNING(
+ "Forcing memory-only entry since CacheIOManager isn't "
+ "initialized.");
+ LOG(
+ ("CacheFile::Init() - CacheFileIOManager isn't initialized, "
+ "initializing entry as memory-only. [this=%p]",
+ this));
+
+ mMemoryOnly = true;
+ mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
+ mReady = true;
+ mDataSize = mMetadata->Offset();
+
+ RefPtr<NotifyCacheFileListenerEvent> ev;
+ ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
+ rv = NS_DispatchToCurrentThread(ev);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
+ CacheFileAutoLock lock(this);
+
+ nsresult rv;
+
+ uint32_t index = aChunk->Index();
+
+ LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32
+ ", chunk=%p, idx=%u]",
+ this, static_cast<uint32_t>(aResult), aChunk, index));
+
+ if (aChunk->mDiscardedChunk) {
+    // We discard only unused chunks, so it must still be unused when reading
+    // the data finishes.
+ MOZ_ASSERT(aChunk->mRefCnt == 2);
+ aChunk->mActiveChunk = false;
+ ReleaseOutsideLock(
+ RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
+
+ DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
+ MOZ_ASSERT(removed);
+ return NS_OK;
+ }
+
+ if (NS_FAILED(aResult)) {
+ SetError(aResult);
+ }
+
+ if (HaveChunkListeners(index)) {
+ rv = NotifyChunkListeners(index, aResult, aChunk);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) {
+  // In case the chunk was reused, made dirty and released between the calls to
+  // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
+  // the chunk to the disk again. When the chunk is unused and dirty, simply
+  // addref and release (outside the lock) the chunk, which ensures that
+  // CacheFile::DeactivateChunk() will be called again.
+ RefPtr<CacheFileChunk> deactivateChunkAgain;
+
+ CacheFileAutoLock lock(this);
+
+ nsresult rv;
+
+ LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32
+ ", chunk=%p, idx=%u]",
+ this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));
+
+ MOZ_ASSERT(!mMemoryOnly);
+ MOZ_ASSERT(!mOpeningFile);
+ MOZ_ASSERT(mHandle);
+
+ if (aChunk->mDiscardedChunk) {
+    // We discard only unused chunks, so it must still be unused when writing
+    // the data finishes.
+ MOZ_ASSERT(aChunk->mRefCnt == 2);
+ aChunk->mActiveChunk = false;
+ ReleaseOutsideLock(
+ RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
+
+ DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
+ MOZ_ASSERT(removed);
+ return NS_OK;
+ }
+
+ if (NS_FAILED(aResult)) {
+ SetError(aResult);
+ }
+
+ if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
+ // update hash value in metadata
+ mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
+ }
+
+ // notify listeners if there is any
+ if (HaveChunkListeners(aChunk->Index())) {
+ // don't release the chunk since there are some listeners queued
+ rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
+ if (NS_SUCCEEDED(rv)) {
+ MOZ_ASSERT(aChunk->mRefCnt != 2);
+ return NS_OK;
+ }
+ }
+
+ if (aChunk->mRefCnt != 2) {
+ LOG(
+ ("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
+ " refcnt=%" PRIuPTR "]",
+ this, aChunk, aChunk->mRefCnt.get()));
+
+ return NS_OK;
+ }
+
+ if (aChunk->IsDirty()) {
+ LOG(
+ ("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
+ "through deactivation again. [this=%p, chunk=%p]",
+ this, aChunk));
+
+ deactivateChunkAgain = aChunk;
+ return NS_OK;
+ }
+
+ bool keepChunk = false;
+ if (NS_SUCCEEDED(aResult)) {
+ keepChunk = ShouldCacheChunk(aChunk->Index());
+ LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
+ keepChunk ? "Caching" : "Releasing", this, aChunk));
+ } else {
+ LOG(
+ ("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
+ "chunk=%p]",
+ this, aChunk));
+ }
+
+ RemoveChunkInternal(aChunk, keepChunk);
+
+ WriteMetadataIfNeededLocked();
+
+ return NS_OK;
+}
+
+nsresult CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFile::OnChunkUpdated(CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
+  // Using an RAII helper class to doom the file or fail the listener
+  // outside the CacheFile's lock.
+ class AutoFailDoomListener {
+ public:
+ explicit AutoFailDoomListener(CacheFileHandle* aHandle)
+ : mHandle(aHandle), mAlreadyDoomed(false) {}
+ ~AutoFailDoomListener() {
+ if (!mListener) return;
+
+ if (mHandle) {
+ if (mAlreadyDoomed) {
+ mListener->OnFileDoomed(mHandle, NS_OK);
+ } else {
+ CacheFileIOManager::DoomFile(mHandle, mListener);
+ }
+ } else {
+ mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
+ }
+ }
+
+ CacheFileHandle* mHandle;
+ nsCOMPtr<CacheFileIOListener> mListener;
+ bool mAlreadyDoomed;
+ } autoDoom(aHandle);
+
+ nsCOMPtr<CacheFileListener> listener;
+ bool isNew = false;
+ nsresult retval = NS_OK;
+
+ {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mOpeningFile);
+ MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
+ (NS_FAILED(aResult) && !aHandle));
+ MOZ_ASSERT((mListener && !mMetadata) || // !createNew
+ (!mListener && mMetadata)); // createNew
+ MOZ_ASSERT(!mMemoryOnly || mMetadata); // memory-only was set on new entry
+
+ LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
+ this, static_cast<uint32_t>(aResult), aHandle));
+
+ mOpeningFile = false;
+
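+    // Hand any doom-after-open listener over to autoDoom; on leaving this
+    // scope it makes sure that listener gets notified, dooming the freshly
+    // opened file when needed.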
+ autoDoom.mListener.swap(mDoomAfterOpenListener);
+
+ if (mMemoryOnly) {
+      // We can be here only in case the entry was initialized as createNew and
+      // SetMemoryOnly() was called.
+
+ // Just don't store the handle into mHandle and exit
+ autoDoom.mAlreadyDoomed = true;
+ return NS_OK;
+ }
+
+ if (NS_FAILED(aResult)) {
+ if (mMetadata) {
+ // This entry was initialized as createNew, just switch to memory-only
+ // mode.
+ NS_WARNING("Forcing memory-only entry since OpenFile failed");
+ LOG(
+ ("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
+ "failed asynchronously. We can continue in memory-only mode since "
+ "aCreateNew == true. [this=%p]",
+ this));
+
+ mMemoryOnly = true;
+ return NS_OK;
+ }
+
+ if (aResult == NS_ERROR_FILE_INVALID_PATH) {
+ // CacheFileIOManager doesn't have mCacheDirectory, switch to
+ // memory-only mode.
+ NS_WARNING(
+ "Forcing memory-only entry since CacheFileIOManager doesn't "
+ "have mCacheDirectory.");
+ LOG(
+ ("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
+ "mCacheDirectory, initializing entry as memory-only. [this=%p]",
+ this));
+
+ mMemoryOnly = true;
+ mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
+ mReady = true;
+ mDataSize = mMetadata->Offset();
+
+ isNew = true;
+ retval = NS_OK;
+ } else {
+ // CacheFileIOManager::OpenFile() failed for another reason.
+ isNew = false;
+ retval = aResult;
+ }
+
+ mListener.swap(listener);
+ } else {
+ mHandle = aHandle;
+ if (NS_FAILED(mStatus)) {
+ CacheFileIOManager::DoomFile(mHandle, nullptr);
+ }
+
+ if (mMetadata) {
+ InitIndexEntry();
+
+ // The entry was initialized as createNew, don't try to read metadata.
+ mMetadata->SetHandle(mHandle);
+
+ // Write all cached chunks, otherwise they may stay unwritten.
+ for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
+ uint32_t idx = iter.Key();
+ RefPtr<CacheFileChunk>& chunk = iter.Data();
+
+ LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
+ this, idx, chunk.get()));
+
+ mChunks.Put(idx, RefPtr{chunk});
+ chunk->mFile = this;
+ chunk->mActiveChunk = true;
+
+ MOZ_ASSERT(chunk->IsReady());
+
+ // This would be cleaner if we had an nsRefPtr constructor that took
+ // a RefPtr<Derived>.
+ ReleaseOutsideLock(std::move(chunk));
+
+ iter.Remove();
+ }
+
+ return NS_OK;
+ }
+ }
+ }
+
+ if (listener) {
+ listener->OnFileReady(retval, isNew);
+ return NS_OK;
+ }
+
+ MOZ_ASSERT(NS_SUCCEEDED(aResult));
+ MOZ_ASSERT(!mMetadata);
+ MOZ_ASSERT(mListener);
+
+ mMetadata = new CacheFileMetadata(mHandle, mKey);
+ mMetadata->ReadMetadata(this);
+ return NS_OK;
+}
+
+nsresult CacheFile::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFile::OnDataRead should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFile::OnMetadataRead(nsresult aResult) {
+ MOZ_ASSERT(mListener);
+
+ LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
+ static_cast<uint32_t>(aResult)));
+
+ bool isNew = false;
+ if (NS_SUCCEEDED(aResult)) {
+ mPinned = mMetadata->Pinned();
+ mReady = true;
+ mDataSize = mMetadata->Offset();
+ if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
+ isNew = true;
+ mMetadata->MarkDirty();
+ } else {
+ const char* altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
+ if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
+ altData, &mAltDataOffset, &mAltDataType)) ||
+ (mAltDataOffset > mDataSize))) {
+ // alt-metadata cannot be parsed or alt-data offset is invalid
+ mMetadata->InitEmptyMetadata();
+ isNew = true;
+ mAltDataOffset = -1;
+ mAltDataType.Truncate();
+ mDataSize = 0;
+ } else {
+ CacheFileAutoLock lock(this);
+ PreloadChunks(0);
+ }
+ }
+
+ InitIndexEntry();
+ }
+
+ nsCOMPtr<CacheFileListener> listener;
+ mListener.swap(listener);
+ listener->OnFileReady(aResult, isNew);
+ return NS_OK;
+}
+
+nsresult CacheFile::OnMetadataWritten(nsresult aResult) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", this,
+ static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(mWritingMetadata);
+ mWritingMetadata = false;
+
+ MOZ_ASSERT(!mMemoryOnly);
+ MOZ_ASSERT(!mOpeningFile);
+
+ if (NS_WARN_IF(NS_FAILED(aResult))) {
+ // TODO close streams with an error ???
+ SetError(aResult);
+ }
+
+ if (mOutput || mInputs.Length() || mChunks.Count()) return NS_OK;
+
+ if (IsDirty()) WriteMetadataIfNeededLocked();
+
+ if (!mWritingMetadata) {
+ LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
+ this));
+ CacheFileIOManager::ReleaseNSPRHandle(mHandle);
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFile::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
+ nsCOMPtr<CacheFileListener> listener;
+
+ {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mListener);
+
+ LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
+ this, static_cast<uint32_t>(aResult), aHandle));
+
+ mListener.swap(listener);
+ }
+
+ listener->OnFileDoomed(aResult);
+ return NS_OK;
+}
+
+nsresult CacheFile::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFile::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+bool CacheFile::IsKilled() {
+ bool killed = mKill;
+ if (killed) {
+ LOG(("CacheFile is killed, this=%p", this));
+ }
+
+ return killed;
+}
+
+nsresult CacheFile::OpenInputStream(nsICacheEntry* aEntryHandle,
+ nsIInputStream** _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+
+ if (!mReady) {
+ LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
+ this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (NS_FAILED(mStatus)) {
+ LOG(
+ ("CacheFile::OpenInputStream() - CacheFile is in a failure state "
+ "[this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+    // Don't allow opening the input stream when this CacheFile is in
+    // a failed state. This is the only way to protect consumers correctly
+    // from reading a broken entry. When the file is in the failed state,
+    // it's also doomed, so reopening the entry won't make any difference -
+    // the data will still be inaccessible. Note that for merely doomed
+    // files, we must allow reading the data.
+ return mStatus;
+ }
+
+  // Once we open an input stream we no longer allow preloading of chunks
+  // without an input stream, i.e. we will no longer keep the first few chunks
+  // preloaded when the last input stream is closed.
+ mPreloadWithoutInputStreams = false;
+
+ CacheFileInputStream* input =
+ new CacheFileInputStream(this, aEntryHandle, false);
+ LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
+ input, this));
+
+ mInputs.AppendElement(input);
+ NS_ADDREF(input);
+
+ mDataAccessed = true;
+ *_retval = do_AddRef(input).take();
+ return NS_OK;
+}
+
+nsresult CacheFile::OpenAlternativeInputStream(nsICacheEntry* aEntryHandle,
+ const char* aAltDataType,
+ nsIInputStream** _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+
+ if (NS_WARN_IF(!mReady)) {
+ LOG(
+ ("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
+ "[this=%p]",
+ this));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mAltDataOffset == -1) {
+ LOG(
+ ("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
+ "available [this=%p]",
+ this));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (NS_FAILED(mStatus)) {
+ LOG(
+ ("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
+ "state [this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+    // Don't allow opening the input stream when this CacheFile is in
+    // a failed state. This is the only way to correctly protect consumers
+    // from reading a broken entry. When the file is in the failed state,
+    // it's also doomed, so reopening the entry won't make any difference -
+    // the data will still be inaccessible. Note that for merely doomed
+    // files, we must allow reading the data.
+ return mStatus;
+ }
+
+ if (mAltDataType != aAltDataType) {
+ LOG(
+ ("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
+ "different type than requested [this=%p, availableType=%s, "
+ "requestedType=%s]",
+ this, mAltDataType.get(), aAltDataType));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // Once we open input stream we no longer allow preloading of chunks without
+ // input stream, i.e. we will no longer keep first few chunks preloaded when
+ // the last input stream is closed.
+ mPreloadWithoutInputStreams = false;
+
+ CacheFileInputStream* input =
+ new CacheFileInputStream(this, aEntryHandle, true);
+
+ LOG(
+ ("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
+ "[this=%p]",
+ input, this));
+
+ mInputs.AppendElement(input);
+ NS_ADDREF(input);
+
+ mDataAccessed = true;
+ *_retval = do_AddRef(input).take();
+
+ return NS_OK;
+}
+
+nsresult CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener,
+ nsIOutputStream** _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+
+ nsresult rv;
+
+ if (!mReady) {
+ LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
+ this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mOutput) {
+ LOG(
+ ("CacheFile::OpenOutputStream() - We already have output stream %p "
+ "[this=%p]",
+ mOutput, this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (NS_FAILED(mStatus)) {
+ LOG(
+ ("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
+ "[this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+    // The CacheFile is already doomed. It makes no sense to allow writing any
+    // data to such an entry.
+ return mStatus;
+ }
+
+ // Fail if there is any input stream opened for alternative data
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ if (mInputs[i]->IsAlternativeData()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ }
+
+ if (mAltDataOffset != -1) {
+ // Remove alt-data
+ rv = Truncate(mAltDataOffset);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::OpenOutputStream() - Truncating alt-data failed "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+ SetAltMetadata(nullptr);
+ mAltDataOffset = -1;
+ mAltDataType.Truncate();
+ }
+
+ // Once we open output stream we no longer allow preloading of chunks without
+ // input stream. There is no reason to believe that some input stream will be
+ // opened soon. Otherwise we would cache unused chunks of all newly created
+ // entries until the CacheFile is destroyed.
+ mPreloadWithoutInputStreams = false;
+
+ mOutput = new CacheFileOutputStream(this, aCloseListener, false);
+
+ LOG(
+ ("CacheFile::OpenOutputStream() - Creating new output stream %p "
+ "[this=%p]",
+ mOutput, this));
+
+ mDataAccessed = true;
+ *_retval = do_AddRef(mOutput).take();
+ return NS_OK;
+}
+
+nsresult CacheFile::OpenAlternativeOutputStream(
+ CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
+ nsIAsyncOutputStream** _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+
+ if (!mReady) {
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
+ "[this=%p]",
+ this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mOutput) {
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - We already have output "
+ "stream %p [this=%p]",
+ mOutput, this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (NS_FAILED(mStatus)) {
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
+ "state [this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+    // The CacheFile is already doomed. It makes no sense to allow writing any
+    // data to such an entry.
+ return mStatus;
+ }
+
+ // Fail if there is any input stream opened for alternative data
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ if (mInputs[i]->IsAlternativeData()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ }
+
+ nsresult rv;
+
+ if (mAltDataOffset != -1) {
+ // Truncate old alt-data
+ rv = Truncate(mAltDataOffset);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
+ "failed [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+ } else {
+ mAltDataOffset = mDataSize;
+ }
+
+ nsAutoCString altMetadata;
+ CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
+ altMetadata);
+ rv = SetAltMetadata(altMetadata.get());
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
+ "failed [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ // Once we open output stream we no longer allow preloading of chunks without
+ // input stream. There is no reason to believe that some input stream will be
+ // opened soon. Otherwise we would cache unused chunks of all newly created
+ // entries until the CacheFile is destroyed.
+ mPreloadWithoutInputStreams = false;
+
+ mOutput = new CacheFileOutputStream(this, aCloseListener, true);
+
+ LOG(
+ ("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
+ "%p [this=%p]",
+ mOutput, this));
+
+ mDataAccessed = true;
+ mAltDataType = aAltDataType;
+ *_retval = do_AddRef(mOutput).take();
+ return NS_OK;
+}
+
+nsresult CacheFile::SetMemoryOnly() {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", mMemoryOnly,
+ this));
+
+ if (mMemoryOnly) return NS_OK;
+
+ MOZ_ASSERT(mReady);
+
+ if (!mReady) {
+ LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
+ this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mDataAccessed) {
+ LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]",
+ this));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+  // TODO: what to do when this isn't a new entry and already has existing metadata?
+ mMemoryOnly = true;
+ return NS_OK;
+}
+
+nsresult CacheFile::Doom(CacheFileListener* aCallback) {
+ LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback));
+
+ CacheFileAutoLock lock(this);
+
+ return DoomLocked(aCallback);
+}
+
+nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) {
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+
+ LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));
+
+ nsresult rv = NS_OK;
+
+ if (mMemoryOnly) {
+ return NS_ERROR_FILE_NOT_FOUND;
+ }
+
+ if (mHandle && mHandle->IsDoomed()) {
+ return NS_ERROR_FILE_NOT_FOUND;
+ }
+
+ nsCOMPtr<CacheFileIOListener> listener;
+ if (aCallback || !mHandle) {
+ listener = new DoomFileHelper(aCallback);
+ }
+ if (mHandle) {
+ rv = CacheFileIOManager::DoomFile(mHandle, listener);
+ } else if (mOpeningFile) {
+ mDoomAfterOpenListener = listener;
+ }
+
+ return rv;
+}
+
+nsresult CacheFile::ThrowMemoryCachedData() {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this));
+
+ if (mMemoryOnly) {
+    // This method should not be called when the CacheFile was initialized as
+    // memory-only, but it can be called when the CacheFile ends up as
+    // memory-only due to e.g. an IO failure, since CacheEntry doesn't know
+    // about it.
+ LOG(
+ ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
+ "entry is memory-only. [this=%p]",
+ this));
+
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mOpeningFile) {
+ // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading
+ // entries from being purged.
+
+ LOG(
+ ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
+ "entry is still opening the file [this=%p]",
+ this));
+
+ return NS_ERROR_ABORT;
+ }
+
+ // We cannot release all cached chunks since we need to keep preloaded chunks
+ // in memory. See initialization of mPreloadChunkCount for explanation.
+ CleanUpCachedChunks();
+
+ return NS_OK;
+}
+
+nsresult CacheFile::GetElement(const char* aKey, char** _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ const char* value;
+ value = mMetadata->GetElement(aKey);
+ if (!value) return NS_ERROR_NOT_AVAILABLE;
+
+ *_retval = NS_xstrdup(value);
+ return NS_OK;
+}
+
+nsresult CacheFile::SetElement(const char* aKey, const char* aValue) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetElement() this=%p", this));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
+ NS_ERROR(
+ "alt-data element is reserved for internal use and must not be "
+ "changed via CacheFile::SetElement()");
+ return NS_ERROR_FAILURE;
+ }
+
+ PostWriteTimer();
+ return mMetadata->SetElement(aKey, aValue);
+}
+
+nsresult CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ MOZ_ASSERT(mReady);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ mMetadata->Visit(aVisitor);
+ return NS_OK;
+}
+
+nsresult CacheFile::ElementsSize(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+
+ if (!mMetadata) return NS_ERROR_NOT_AVAILABLE;
+
+ *_retval = mMetadata->ElementsSize();
+ return NS_OK;
+}
+
+nsresult CacheFile::SetExpirationTime(uint32_t aExpirationTime) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", this,
+ aExpirationTime));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+ mMetadata->SetExpirationTime(aExpirationTime);
+ return NS_OK;
+}
+
+nsresult CacheFile::GetExpirationTime(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ *_retval = mMetadata->GetExpirationTime();
+ return NS_OK;
+}
+
+nsresult CacheFile::SetFrecency(uint32_t aFrecency) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", this, aFrecency));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+
+ if (mHandle && !mHandle->IsDoomed())
+ CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr,
+ nullptr, nullptr);
+
+ mMetadata->SetFrecency(aFrecency);
+ return NS_OK;
+}
+
+nsresult CacheFile::GetFrecency(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+ *_retval = mMetadata->GetFrecency();
+ return NS_OK;
+}
+
+nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime,
+ uint64_t aOnStopTime) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
+ ", aOnStopTime=%" PRIu64 "",
+ this, aOnStartTime, aOnStopTime));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+
+ nsAutoCString onStartTime;
+ onStartTime.AppendInt(aOnStartTime);
+ nsresult rv =
+ mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString onStopTime;
+ onStopTime.AppendInt(aOnStopTime);
+ rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
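+  // The cache index stores the response times as 16-bit values, so clamp
+  // anything larger than kIndexTimeOutOfBound before updating the index.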
+ uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound
+ ? aOnStartTime
+ : kIndexTimeOutOfBound;
+ uint16_t onStopTime16 =
+ aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;
+
+ if (mHandle && !mHandle->IsDoomed()) {
+ CacheFileIOManager::UpdateIndexEntry(
+ mHandle, nullptr, nullptr, &onStartTime16, &onStopTime16, nullptr);
+ }
+ return NS_OK;
+}
+
+nsresult CacheFile::GetOnStartTime(uint64_t* _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mMetadata);
+ const char* onStartTimeStr =
+ mMetadata->GetElement("net-response-time-onstart");
+ if (!onStartTimeStr) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ nsresult rv;
+ *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ return NS_OK;
+}
+
+nsresult CacheFile::GetOnStopTime(uint64_t* _retval) {
+ CacheFileAutoLock lock(this);
+
+ MOZ_ASSERT(mMetadata);
+ const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
+ if (!onStopTimeStr) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ nsresult rv;
+ *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ return NS_OK;
+}
+
+nsresult CacheFile::SetContentType(uint8_t aContentType) {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::SetContentType() this=%p, contentType=%u", this,
+ aContentType));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+
+  // Save the content type to metadata in case we need to rebuild the index.
+ nsAutoCString contentType;
+ contentType.AppendInt(aContentType);
+ nsresult rv = mMetadata->SetElement("ctid", contentType.get());
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ if (mHandle && !mHandle->IsDoomed()) {
+ CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
+ nullptr, &aContentType);
+ }
+ return NS_OK;
+}
+
+nsresult CacheFile::SetAltMetadata(const char* aAltMetadata) {
+ AssertOwnsLock();
+ LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", this,
+ aAltMetadata ? aAltMetadata : ""));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+
+ nsresult rv =
+ mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);
+
+ bool hasAltData = !!aAltMetadata;
+
+ if (NS_FAILED(rv)) {
+ // Removing element shouldn't fail because it doesn't allocate memory.
+ mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);
+
+ mAltDataOffset = -1;
+ mAltDataType.Truncate();
+ hasAltData = false;
+ }
+
+ if (mHandle && !mHandle->IsDoomed()) {
+ CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &hasAltData, nullptr,
+ nullptr, nullptr);
+ }
+ return rv;
+}
+
+nsresult CacheFile::GetLastModified(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ *_retval = mMetadata->GetLastModified();
+ return NS_OK;
+}
+
+nsresult CacheFile::GetLastFetched(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ *_retval = mMetadata->GetLastFetched();
+ return NS_OK;
+}
+
+nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
+ CacheFileAutoLock lock(this);
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+ *_retval = mMetadata->GetFetchCount();
+ return NS_OK;
+}
+
+nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
+ if (!mHandle) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *aDiskStorageSize = mHandle->FileSizeInK();
+ return NS_OK;
+}
+
+nsresult CacheFile::OnFetched() {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::OnFetched() this=%p", this));
+
+ MOZ_ASSERT(mMetadata);
+ NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
+
+ PostWriteTimer();
+
+ mMetadata->OnFetched();
+ return NS_OK;
+}
+
+void CacheFile::Lock() { mLock.Lock(); }
+
+void CacheFile::Unlock() {
+ // move the elements out of mObjsToRelease
+ // so that they can be released after we unlock
+ nsTArray<RefPtr<nsISupports>> objs = std::move(mObjsToRelease);
+
+ mLock.Unlock();
+}
+
+void CacheFile::AssertOwnsLock() const { mLock.AssertCurrentThreadOwns(); }
+
+void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
+ AssertOwnsLock();
+
+ mObjsToRelease.AppendElement(std::move(aObject));
+}
+
+nsresult CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
+ CacheFileChunkListener* aCallback,
+ CacheFileChunk** _retval) {
+ AssertOwnsLock();
+
+ LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
+ this, aIndex, aCaller, aCallback));
+
+ MOZ_ASSERT(mReady);
+ MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
+ MOZ_ASSERT((aCaller == READER && aCallback) ||
+ (aCaller == WRITER && !aCallback) ||
+ (aCaller == PRELOADER && !aCallback));
+
+  // Preload chunks from disk when this is a disk-backed entry and the caller
+  // is a reader.
+ bool preload = !mMemoryOnly && (aCaller == READER);
+
+ nsresult rv;
+
+ RefPtr<CacheFileChunk> chunk;
+ if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
+ LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
+ chunk.get(), this));
+
+ // Preloader calls this method to preload only non-loaded chunks.
+ MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
+
+    // We might get a failed chunk between releasing the lock in
+    // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read.
+ rv = chunk->GetStatus();
+ if (NS_FAILED(rv)) {
+ SetError(rv);
+ LOG(
+ ("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
+ "[this=%p]",
+ this));
+ return rv;
+ }
+
+ if (chunk->IsReady() || aCaller == WRITER) {
+ chunk.swap(*_retval);
+ } else {
+ QueueChunkListener(aIndex, aCallback);
+ }
+
+ if (preload) {
+ PreloadChunks(aIndex + 1);
+ }
+
+ return NS_OK;
+ }
+
+ if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
+ LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
+ chunk.get(), this));
+
+ // Preloader calls this method to preload only non-loaded chunks.
+ MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
+
+ mChunks.Put(aIndex, RefPtr{chunk});
+ mCachedChunks.Remove(aIndex);
+ chunk->mFile = this;
+ chunk->mActiveChunk = true;
+
+ MOZ_ASSERT(chunk->IsReady());
+
+ chunk.swap(*_retval);
+
+ if (preload) {
+ PreloadChunks(aIndex + 1);
+ }
+
+ return NS_OK;
+ }
+
+ int64_t off = aIndex * static_cast<int64_t>(kChunkSize);
+
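+  // The chunk is neither active nor cached. Three cases follow: the chunk
+  // exists on disk (off < mDataSize) and is read back, the chunk would start
+  // exactly at the end of data (off == mDataSize) and a writer may create it,
+  // or the chunk lies beyond the end of data (off > mDataSize) and a writer
+  // must first fill the gap with zeroes.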
+ if (off < mDataSize) {
+    // We cannot get here for a memory-only entry since the chunk must exist.
+    MOZ_ASSERT(!mMemoryOnly);
+    if (mMemoryOnly) {
+      // If this ever really happens, it is better to fail than to crash on
+      // a null handle.
+ LOG(
+ ("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
+ "for memory-only entry. [this=%p, off=%" PRId64
+ ", mDataSize=%" PRId64 "]",
+ this, off, mDataSize));
+
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
+ mChunks.Put(aIndex, RefPtr{chunk});
+ chunk->mActiveChunk = true;
+
+ LOG(
+ ("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
+ "the disk [this=%p]",
+ chunk.get(), this));
+
+ // Read the chunk from the disk
+ rv = chunk->Read(mHandle,
+ std::min(static_cast<uint32_t>(mDataSize - off),
+ static_cast<uint32_t>(kChunkSize)),
+ mMetadata->GetHash(aIndex), this);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ RemoveChunkInternal(chunk, false);
+ return rv;
+ }
+
+ if (aCaller == WRITER) {
+ chunk.swap(*_retval);
+ } else if (aCaller != PRELOADER) {
+ QueueChunkListener(aIndex, aCallback);
+ }
+
+ if (preload) {
+ PreloadChunks(aIndex + 1);
+ }
+
+ return NS_OK;
+ } else if (off == mDataSize) {
+ if (aCaller == WRITER) {
+ // this listener is going to write to the chunk
+ chunk = new CacheFileChunk(this, aIndex, true);
+ mChunks.Put(aIndex, RefPtr{chunk});
+ chunk->mActiveChunk = true;
+
+ LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
+ chunk.get(), this));
+
+ chunk->InitNew();
+ mMetadata->SetHash(aIndex, chunk->Hash());
+
+ if (HaveChunkListeners(aIndex)) {
+ rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ chunk.swap(*_retval);
+ return NS_OK;
+ }
+ } else {
+ if (aCaller == WRITER) {
+ // this chunk was requested by writer, but we need to fill the gap first
+
+ // Fill with zero the last chunk if it is incomplete
+ if (mDataSize % kChunkSize) {
+ rv = PadChunkWithZeroes(mDataSize / kChunkSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MOZ_ASSERT(!(mDataSize % kChunkSize));
+ }
+
+ uint32_t startChunk = mDataSize / kChunkSize;
+
+ if (mMemoryOnly) {
+        // We need to create all missing CacheFileChunks if this is a
+        // memory-only entry.
+ for (uint32_t i = startChunk; i < aIndex; i++) {
+ rv = PadChunkWithZeroes(i);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+ } else {
+        // We don't need to create a CacheFileChunk for other empty chunks
+        // unless there is some input stream waiting for this chunk.
+
+ if (startChunk != aIndex) {
+ // Make sure the file contains zeroes at the end of the file
+ rv = CacheFileIOManager::TruncateSeekSetEOF(
+ mHandle, startChunk * kChunkSize, aIndex * kChunkSize, nullptr);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ for (uint32_t i = startChunk; i < aIndex; i++) {
+ if (HaveChunkListeners(i)) {
+ rv = PadChunkWithZeroes(i);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ mMetadata->SetHash(i, kEmptyChunkHash);
+ mDataSize = (i + 1) * kChunkSize;
+ }
+ }
+ }
+
+ MOZ_ASSERT(mDataSize == off);
+ rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ chunk.swap(*_retval);
+ return NS_OK;
+ }
+ }
+
+  // We can get here only if the caller is a reader, since a writer always
+  // creates a new chunk above and the preloader calls this method only for
+  // chunks that are not loaded but do exist.
+ MOZ_ASSERT(aCaller == READER, "Unexpected!");
+
+ if (mOutput) {
+ // the chunk doesn't exist but mOutput may create it
+ QueueChunkListener(aIndex, aCallback);
+ } else {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return NS_OK;
+}
+
+void CacheFile::PreloadChunks(uint32_t aIndex) {
+ AssertOwnsLock();
+
+ uint32_t limit = aIndex + mPreloadChunkCount;
+
+ for (uint32_t i = aIndex; i < limit; ++i) {
+ int64_t off = i * static_cast<int64_t>(kChunkSize);
+
+ if (off >= mDataSize) {
+ // This chunk is beyond EOF.
+ return;
+ }
+
+ if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
+ // This chunk is already in memory or is being read right now.
+ continue;
+ }
+
+ LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
+ this, i));
+
+ RefPtr<CacheFileChunk> chunk;
+ GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
+    // We've checked that we don't have this chunk, so no chunk should be
+    // returned.
+ MOZ_ASSERT(!chunk);
+ }
+}
+
+bool CacheFile::ShouldCacheChunk(uint32_t aIndex) {
+ AssertOwnsLock();
+
+#ifdef CACHE_CHUNKS
+ // We cache all chunks.
+ return true;
+#else
+
+ if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
+ mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
+    // We don't have any input stream yet, but it is likely that some will be
+    // opened soon. Keep the first mPreloadChunkCount chunks in memory. The
+    // condition is here instead of in MustKeepCachedChunk() since these chunks
+    // are kept only as an optimization: they can be released at any time,
+    // until they count as preloaded chunks for some existing input stream.
+ return true;
+ }
+
+ // Cache only chunks that we really need to keep.
+ return MustKeepCachedChunk(aIndex);
+#endif
+}
+
+bool CacheFile::MustKeepCachedChunk(uint32_t aIndex) {
+ AssertOwnsLock();
+
+  // We must keep the chunk when this is a memory-only entry or we don't have
+  // a handle yet.
+ if (mMemoryOnly || mOpeningFile) {
+ return true;
+ }
+
+ if (mPreloadChunkCount == 0) {
+ // Preloading of chunks is disabled
+ return false;
+ }
+
+ // Check whether this chunk should be considered as preloaded chunk for any
+ // existing input stream.
+
+ // maxPos is the position of the last byte in the given chunk
+ int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
+
+ // minPos is the position of the first byte in a chunk that precedes the given
+ // chunk by mPreloadChunkCount chunks
+ int64_t minPos;
+ if (mPreloadChunkCount >= aIndex) {
+ minPos = 0;
+ } else {
+ minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
+ }
+
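+  // Keep the chunk if any input stream's position falls into [minPos, maxPos],
+  // i.e. the chunk is at most mPreloadChunkCount chunks ahead of that stream.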
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ int64_t inputPos = mInputs[i]->GetPosition();
+ if (inputPos >= minPos && inputPos <= maxPos) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+nsresult CacheFile::DeactivateChunk(CacheFileChunk* aChunk) {
+ nsresult rv;
+
+ // Avoid lock reentrancy by increasing the RefCnt
+ RefPtr<CacheFileChunk> chunk = aChunk;
+
+ {
+ CacheFileAutoLock lock(this);
+
+ LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", this,
+ aChunk, aChunk->Index()));
+
+ MOZ_ASSERT(mReady);
+ MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
+ (!mHandle && mMemoryOnly && !mOpeningFile) ||
+ (!mHandle && !mMemoryOnly && mOpeningFile));
+
+ if (aChunk->mRefCnt != 2) {
+ LOG(
+ ("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
+ "chunk=%p, refcnt=%" PRIuPTR "]",
+ this, aChunk, aChunk->mRefCnt.get()));
+
+ // somebody got the reference before the lock was acquired
+ return NS_OK;
+ }
+
+ if (aChunk->mDiscardedChunk) {
+ aChunk->mActiveChunk = false;
+ ReleaseOutsideLock(
+ RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
+
+ DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
+ MOZ_ASSERT(removed);
+ return NS_OK;
+ }
+
+#ifdef DEBUG
+ {
+ // We can be here iff the chunk is in the hash table
+ RefPtr<CacheFileChunk> chunkCheck;
+ mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
+ MOZ_ASSERT(chunkCheck == chunk);
+
+ // We also shouldn't have any queued listener for this chunk
+ ChunkListeners* listeners;
+ mChunkListeners.Get(chunk->Index(), &listeners);
+ MOZ_ASSERT(!listeners);
+ }
+#endif
+
+ if (NS_FAILED(chunk->GetStatus())) {
+ SetError(chunk->GetStatus());
+ }
+
+ if (NS_FAILED(mStatus)) {
+ // Don't write any chunk to disk since this entry will be doomed
+ LOG(
+ ("CacheFile::DeactivateChunk() - Releasing chunk because of status "
+ "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
+ this, chunk.get(), static_cast<uint32_t>(mStatus)));
+
+ RemoveChunkInternal(chunk, false);
+ return mStatus;
+ }
+
+ if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
+ LOG(
+ ("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
+ "[this=%p]",
+ this));
+
+ mDataIsDirty = true;
+
+ rv = chunk->Write(mHandle, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
+ "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32
+ "]",
+ this, chunk.get(), static_cast<uint32_t>(rv)));
+
+ RemoveChunkInternal(chunk, false);
+
+ SetError(rv);
+ return rv;
+ }
+
+ // Chunk will be removed in OnChunkWritten if it is still unused
+
+ // chunk needs to be released under the lock to be able to rely on
+ // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
+ chunk = nullptr;
+ return NS_OK;
+ }
+
+ bool keepChunk = ShouldCacheChunk(aChunk->Index());
+ LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
+ keepChunk ? "Caching" : "Releasing", this, chunk.get()));
+
+ RemoveChunkInternal(chunk, keepChunk);
+
+ if (!mMemoryOnly) WriteMetadataIfNeededLocked();
+ }
+
+ return NS_OK;
+}
+
+void CacheFile::RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk) {
+ AssertOwnsLock();
+
+ aChunk->mActiveChunk = false;
+ ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
+
+ if (aCacheChunk) {
+ mCachedChunks.Put(aChunk->Index(), RefPtr{aChunk});
+ }
+
+ mChunks.Remove(aChunk->Index());
+}
+
+bool CacheFile::OutputStreamExists(bool aAlternativeData) {
+ AssertOwnsLock();
+
+ if (!mOutput) {
+ return false;
+ }
+
+ return mOutput->IsAlternativeData() == aAlternativeData;
+}
+
+int64_t CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) {
+ AssertOwnsLock();
+
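+  // dataSize is the end of the data readable by the given stream: the whole
+  // entry for an alt-data stream, otherwise the start of alt-data (if any).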
+ int64_t dataSize;
+
+ if (mAltDataOffset != -1) {
+ if (aAlternativeData) {
+ dataSize = mDataSize;
+ } else {
+ dataSize = mAltDataOffset;
+ }
+ } else {
+ MOZ_ASSERT(!aAlternativeData);
+ dataSize = mDataSize;
+ }
+
+ if (!dataSize) {
+ return 0;
+ }
+
+ // Index of the last existing chunk.
+ uint32_t lastChunk = (dataSize - 1) / kChunkSize;
+ if (aIndex > lastChunk) {
+ return 0;
+ }
+
+ // We can use only preloaded chunks for the given stream to calculate
+ // available bytes if this is an entry stored on disk, since only those
+ // chunks are guaranteed not to be released.
+ uint32_t maxPreloadedChunk;
+ if (mMemoryOnly) {
+ maxPreloadedChunk = lastChunk;
+ } else {
+ maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk);
+ }
+
+ uint32_t i;
+ for (i = aIndex; i <= maxPreloadedChunk; ++i) {
+ CacheFileChunk* chunk;
+
+ chunk = mChunks.GetWeak(i);
+ if (chunk) {
+ MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
+ if (chunk->IsReady()) {
+ continue;
+ }
+
+      // don't search for this chunk in mCachedChunks
+ break;
+ }
+
+ chunk = mCachedChunks.GetWeak(i);
+ if (chunk) {
+ MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
+ continue;
+ }
+
+ break;
+ }
+
+  // theoretical number of bytes available in advance
+  int64_t advance = int64_t(i - aIndex) * kChunkSize;
+  // actual number of bytes remaining till the end of the data
+ int64_t tail = dataSize - (aIndex * kChunkSize);
+
+ return std::min(advance, tail);
+}
+
+nsresult CacheFile::Truncate(int64_t aOffset) {
+ AssertOwnsLock();
+
+ LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset));
+
+ nsresult rv;
+
+ // If we ever need to truncate on non alt-data boundary, we need to handle
+ // existing input streams.
+ MOZ_ASSERT(aOffset == mAltDataOffset,
+ "Truncating normal data not implemented");
+ MOZ_ASSERT(mReady);
+ MOZ_ASSERT(!mOutput);
+
+ uint32_t lastChunk = 0;
+ if (mDataSize > 0) {
+ lastChunk = (mDataSize - 1) / kChunkSize;
+ }
+
+ uint32_t newLastChunk = 0;
+ if (aOffset > 0) {
+ newLastChunk = (aOffset - 1) / kChunkSize;
+ }
+
+ uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
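+  // Note that bytesInNewLastChunk equals kChunkSize when aOffset is
+  // chunk-aligned; in that case the last chunk is left untouched below.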
+
+ LOG(
+ ("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, "
+ "bytesInNewLastChunk=%u",
+ lastChunk, newLastChunk, bytesInNewLastChunk));
+
+ // Remove all truncated chunks from mCachedChunks
+ for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
+ uint32_t idx = iter.Key();
+
+ if (idx > newLastChunk) {
+      // This is an unused chunk; simply remove it.
+ LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx));
+ iter.Remove();
+ }
+ }
+
+  // We need to make sure no input stream holds a reference to a chunk we're
+  // going to discard. In theory, if alt-data begins at a chunk boundary, an
+  // input stream for normal data can get the chunk containing only alt-data
+  // via an EnsureCorrectChunk() call. The input stream won't read the data
+  // from such a chunk, but it will keep the reference until the stream is
+  // closed, and we cannot simply discard this chunk.
+ int64_t maxInputChunk = -1;
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ int64_t inputChunk = mInputs[i]->GetChunkIdx();
+
+ if (maxInputChunk < inputChunk) {
+ maxInputChunk = inputChunk;
+ }
+
+ MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
+ }
+
+ MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
+ if (maxInputChunk == newLastChunk + 1) {
+ // Truncating must be done at chunk boundary
+ MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
+ newLastChunk++;
+ bytesInNewLastChunk = 0;
+ LOG(
+ ("CacheFile::Truncate() - chunk %p is still in use, using "
+ "newLastChunk=%u and bytesInNewLastChunk=%u",
+ mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk));
+ }
+
+ // Discard all truncated chunks in mChunks
+ for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
+ uint32_t idx = iter.Key();
+
+ if (idx > newLastChunk) {
+ RefPtr<CacheFileChunk>& chunk = iter.Data();
+ LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", idx,
+ chunk.get()));
+
+ if (HaveChunkListeners(idx)) {
+ NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
+ }
+
+ chunk->mDiscardedChunk = true;
+ mDiscardedChunks.AppendElement(chunk);
+ iter.Remove();
+ }
+ }
+
+ // Remove hashes of all removed chunks from the metadata
+ for (uint32_t i = lastChunk; i > newLastChunk; --i) {
+ mMetadata->RemoveHash(i);
+ }
+
+ // Truncate new last chunk
+ if (bytesInNewLastChunk == kChunkSize) {
+ LOG(("CacheFile::Truncate() - not truncating last chunk."));
+ } else {
+ RefPtr<CacheFileChunk> chunk;
+ if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
+ LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.",
+ chunk.get()));
+ } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
+ LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.",
+ chunk.get()));
+ } else {
+ // New last chunk isn't loaded but we need to update the hash.
+ MOZ_ASSERT(!mMemoryOnly);
+ MOZ_ASSERT(mHandle);
+
+ rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr,
+ getter_AddRefs(chunk));
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+      // We've checked that we don't have this chunk, so no chunk should be
+      // returned.
+ MOZ_ASSERT(!chunk);
+
+ if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
+ chunk.get()));
+ }
+
+ rv = chunk->GetStatus();
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::Truncate() - New last chunk is failed "
+ "[status=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ chunk->Truncate(bytesInNewLastChunk);
+
+    // If the chunk is ready, set the new hash now. If it's still being loaded,
+    // CacheFileChunk::Truncate() made the chunk dirty and the hash will be
+    // updated in OnChunkWritten().
+ if (chunk->IsReady()) {
+ mMetadata->SetHash(newLastChunk, chunk->Hash());
+ }
+ }
+
+ if (mHandle) {
+ rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset,
+ nullptr);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ mDataSize = aOffset;
+
+ return NS_OK;
+}
+
+static uint32_t StatusToTelemetryEnum(nsresult aStatus) {
+ if (NS_SUCCEEDED(aStatus)) {
+ return 0;
+ }
+
+ switch (aStatus) {
+ case NS_BASE_STREAM_CLOSED:
+ return 0; // Log this as a success
+ case NS_ERROR_OUT_OF_MEMORY:
+ return 2;
+ case NS_ERROR_FILE_DISK_FULL:
+ return 3;
+ case NS_ERROR_FILE_CORRUPTED:
+ return 4;
+ case NS_ERROR_FILE_NOT_FOUND:
+ return 5;
+ case NS_BINDING_ABORTED:
+ return 6;
+ default:
+ return 1; // other error
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+}
+
+void CacheFile::RemoveInput(CacheFileInputStream* aInput, nsresult aStatus) {
+ AssertOwnsLock();
+
+ LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]",
+ this, aInput, static_cast<uint32_t>(aStatus)));
+
+ DebugOnly<bool> found;
+ found = mInputs.RemoveElement(aInput);
+ MOZ_ASSERT(found);
+
+ ReleaseOutsideLock(
+ already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput)));
+
+ if (!mMemoryOnly) WriteMetadataIfNeededLocked();
+
+  // If the input didn't read all data, some preloaded chunks may be left
+  // around that won't be used anymore.
+ CleanUpCachedChunks();
+
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
+ StatusToTelemetryEnum(aStatus));
+}
+
+void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) {
+ AssertOwnsLock();
+
+ nsresult rv;
+
+ LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]",
+ this, aOutput, static_cast<uint32_t>(aStatus)));
+
+ if (mOutput != aOutput) {
+ LOG(
+ ("CacheFile::RemoveOutput() - This output was already removed, ignoring"
+ " call [this=%p]",
+ this));
+ return;
+ }
+
+ mOutput = nullptr;
+
+  // Cancel all queued chunk listeners and update listeners that cannot be
+  // satisfied
+ NotifyListenersAboutOutputRemoval();
+
+ if (!mMemoryOnly) WriteMetadataIfNeededLocked();
+
+ // Make sure the CacheFile status is set to a failure when the output stream
+  // is closed with a fatal error. This way we correctly, and without any race
+  // window, propagate the failure state of this entry to end consumers.
+ if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
+ aStatus != NS_BASE_STREAM_CLOSED) {
+ if (aOutput->IsAlternativeData()) {
+ MOZ_ASSERT(mAltDataOffset != -1);
+ // If there is no alt-data input stream truncate only alt-data, otherwise
+ // doom the entry.
+ bool altDataInputExists = false;
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ if (mInputs[i]->IsAlternativeData()) {
+ altDataInputExists = true;
+ break;
+ }
+ }
+ if (altDataInputExists) {
+ SetError(aStatus);
+ } else {
+ rv = Truncate(mAltDataOffset);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFile::RemoveOutput() - Truncating alt-data failed "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ SetError(aStatus);
+ } else {
+ SetAltMetadata(nullptr);
+ mAltDataOffset = -1;
+ mAltDataType.Truncate();
+ }
+ }
+ } else {
+ SetError(aStatus);
+ }
+ }
+
+ // Notify close listener as the last action
+ aOutput->NotifyCloseListener();
+
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
+ StatusToTelemetryEnum(aStatus));
+}
+
+nsresult CacheFile::NotifyChunkListener(CacheFileChunkListener* aCallback,
+ nsIEventTarget* aTarget,
+ nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) {
+ LOG(
+ ("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
+ "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]",
+ this, aCallback, aTarget, static_cast<uint32_t>(aResult), aChunkIdx,
+ aChunk));
+
+ RefPtr<NotifyChunkListenerEvent> ev;
+ ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
+ if (aTarget) {
+ return aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
+ }
+ return NS_DispatchToCurrentThread(ev);
+}
+
+void CacheFile::QueueChunkListener(uint32_t aIndex,
+ CacheFileChunkListener* aCallback) {
+ LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", this,
+ aIndex, aCallback));
+
+ AssertOwnsLock();
+
+ MOZ_ASSERT(aCallback);
+
+ ChunkListenerItem* item = new ChunkListenerItem();
+ item->mTarget = CacheFileIOManager::IOTarget();
+ if (!item->mTarget) {
+ LOG(
+ ("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
+ "main thread for callback."));
+ item->mTarget = GetMainThreadEventTarget();
+ }
+ item->mCallback = aCallback;
+
+ ChunkListeners* listeners;
+ if (!mChunkListeners.Get(aIndex, &listeners)) {
+ listeners = new ChunkListeners();
+ mChunkListeners.Put(aIndex, listeners);
+ }
+
+ listeners->mItems.AppendElement(item);
+}
+
+nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
+ CacheFileChunk* aChunk) {
+ LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32
+ ", "
+ "chunk=%p]",
+ this, aIndex, static_cast<uint32_t>(aResult), aChunk));
+
+ AssertOwnsLock();
+
+ nsresult rv, rv2;
+
+ ChunkListeners* listeners;
+ mChunkListeners.Get(aIndex, &listeners);
+ MOZ_ASSERT(listeners);
+
+ rv = NS_OK;
+ for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
+ ChunkListenerItem* item = listeners->mItems[i];
+ rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
+ aChunk);
+ if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
+ delete item;
+ }
+
+ mChunkListeners.Remove(aIndex);
+
+ return rv;
+}
+
+bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
+ ChunkListeners* listeners;
+ mChunkListeners.Get(aIndex, &listeners);
+ return !!listeners;
+}
+
+void CacheFile::NotifyListenersAboutOutputRemoval() {
+ LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));
+
+ AssertOwnsLock();
+
+ // First fail all chunk listeners that wait for non-existent chunk
+ for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
+ uint32_t idx = iter.Key();
+ auto listeners = iter.UserData();
+
+ LOG(
+ ("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
+ "[this=%p, idx=%u]",
+ this, idx));
+
+ RefPtr<CacheFileChunk> chunk;
+ mChunks.Get(idx, getter_AddRefs(chunk));
+ if (chunk) {
+      // Skip these listeners because the chunk is being read. We don't have an
+      // assertion here to check its state because it might already be in the
+      // READY state while CacheFile::OnChunkRead() is waiting on the Cache I/O
+      // thread for the lock, so the listeners haven't been notified yet. In any
+      // case, the listeners will be notified from CacheFile::OnChunkRead().
+ continue;
+ }
+
+ for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
+ ChunkListenerItem* item = listeners->mItems[i];
+ NotifyChunkListener(item->mCallback, item->mTarget,
+ NS_ERROR_NOT_AVAILABLE, idx, nullptr);
+ delete item;
+ }
+
+ iter.Remove();
+ }
+
+ // Fail all update listeners
+ for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
+ const RefPtr<CacheFileChunk>& chunk = iter.Data();
+ LOG(
+ ("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 "
+ "[this=%p, idx=%u]",
+ this, iter.Key()));
+
+ if (chunk->IsReady()) {
+ chunk->NotifyUpdateListeners();
+ }
+ }
+}
+
+bool CacheFile::DataSize(int64_t* aSize) {
+ CacheFileAutoLock lock(this);
+
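+  // The size is not known while the normal data is still being written.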
+ if (OutputStreamExists(false)) {
+ return false;
+ }
+
+ if (mAltDataOffset == -1) {
+ *aSize = mDataSize;
+ } else {
+ *aSize = mAltDataOffset;
+ }
+
+ return true;
+}
+
+nsresult CacheFile::GetAltDataSize(int64_t* aSize) {
+ CacheFileAutoLock lock(this);
+ if (mOutput) {
+ return NS_ERROR_IN_PROGRESS;
+ }
+
+ if (mAltDataOffset == -1) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *aSize = mDataSize - mAltDataOffset;
+ return NS_OK;
+}
+
+nsresult CacheFile::GetAltDataType(nsACString& aType) {
+ CacheFileAutoLock lock(this);
+
+ if (mAltDataOffset == -1) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ aType = mAltDataType;
+ return NS_OK;
+}
+
+bool CacheFile::IsDoomed() {
+ CacheFileAutoLock lock(this);
+
+ if (!mHandle) return false;
+
+ return mHandle->IsDoomed();
+}
+
+bool CacheFile::IsWriteInProgress() {
+ CacheFileAutoLock lock(this);
+
+ bool result = false;
+
+ if (!mMemoryOnly) {
+ result =
+ mDataIsDirty || (mMetadata && mMetadata->IsDirty()) || mWritingMetadata;
+ }
+
+ result = result || mOpeningFile || mOutput || mChunks.Count();
+
+ return result;
+}
+
+bool CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize,
+ bool aIsAltData) {
+ CacheFileAutoLock lock(this);
+
+ if (mSkipSizeCheck || aSize < 0) {
+ return false;
+ }
+
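+  // For alt-data the limit applies to the whole entry, so add the size of the
+  // original data to the amount being written.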
+ int64_t totalSize = aOffset + aSize;
+ if (aIsAltData) {
+ totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
+ }
+
+ if (CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool CacheFile::IsDirty() { return mDataIsDirty || mMetadata->IsDirty(); }
+
+void CacheFile::WriteMetadataIfNeeded() {
+ LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));
+
+ CacheFileAutoLock lock(this);
+
+ if (!mMemoryOnly) WriteMetadataIfNeededLocked();
+}
+
+void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
+ // When aFireAndForget is set to true, we are called from dtor.
+ // |this| must not be referenced after this method returns!
+
+ LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));
+
+ nsresult rv;
+
+ AssertOwnsLock();
+ MOZ_ASSERT(!mMemoryOnly);
+
+ if (!mMetadata) {
+ MOZ_CRASH("Must have metadata here");
+ return;
+ }
+
+ if (NS_FAILED(mStatus)) return;
+
+ if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
+ mWritingMetadata || mOpeningFile || mKill)
+ return;
+
+ if (!aFireAndForget) {
+    // If aFireAndForget is set, we are called from the dtor; in that case the
+    // write scheduler cannot have a write scheduled for us, because it holds
+    // a hard reference to the CacheFile, so we could not be in the dtor.
+ CacheFileIOManager::UnscheduleMetadataWrite(this);
+ }
+
+ LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
+ this));
+
+ rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
+ if (NS_SUCCEEDED(rv)) {
+ mWritingMetadata = true;
+ mDataIsDirty = false;
+ } else {
+ LOG(
+ ("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
+ "failed [this=%p]",
+ this));
+ // TODO: close streams with error
+ SetError(rv);
+ }
+}
+
+void CacheFile::PostWriteTimer() {
+ if (mMemoryOnly) return;
+
+ LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
+
+ CacheFileIOManager::ScheduleMetadataWrite(this);
+}
+
+void CacheFile::CleanUpCachedChunks() {
+ for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
+ uint32_t idx = iter.Key();
+ const RefPtr<CacheFileChunk>& chunk = iter.Data();
+
+ LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
+ idx, chunk.get()));
+
+ if (MustKeepCachedChunk(idx)) {
+ LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
+ continue;
+ }
+
+ LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
+ iter.Remove();
+ }
+}
+
+nsresult CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) {
+ AssertOwnsLock();
+
+  // This method is used to pad the last incomplete chunk with zeroes or to
+  // create a new chunk full of zeroes.
+ MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);
+
+ nsresult rv;
+ RefPtr<CacheFileChunk> chunk;
+ rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ LOG(
+ ("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
+ " [this=%p]",
+ aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));
+
+ CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
+ if (!hnd.Buf()) {
+ ReleaseOutsideLock(std::move(chunk));
+ SetError(NS_ERROR_OUT_OF_MEMORY);
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ uint32_t offset = hnd.DataSize();
+ memset(hnd.Buf() + offset, 0, kChunkSize - offset);
+ hnd.UpdateDataSize(offset, kChunkSize - offset);
+
+ ReleaseOutsideLock(std::move(chunk));
+
+ return NS_OK;
+}
+
+void CacheFile::SetError(nsresult aStatus) {
+ AssertOwnsLock();
+
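+  // Remember only the first failure and doom the file so that the broken
+  // entry cannot be reused.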
+ if (NS_SUCCEEDED(mStatus)) {
+ mStatus = aStatus;
+ if (mHandle) {
+ CacheFileIOManager::DoomFile(mHandle, nullptr);
+ }
+ }
+}
+
+nsresult CacheFile::InitIndexEntry() {
+ MOZ_ASSERT(mHandle);
+
+ if (mHandle->IsDoomed()) return NS_OK;
+
+ nsresult rv;
+
+ rv = CacheFileIOManager::InitIndexEntry(
+ mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
+ mMetadata->IsAnonymous(), mPinned);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t frecency = mMetadata->GetFrecency();
+
+ bool hasAltData =
+ mMetadata->GetElement(CacheFileUtils::kAltDataKey) ? true : false;
+
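+  // Parse a stored response time; clamp it to the 16-bit range used by the
+  // index, or return kIndexTimeNotAvailable when the element is missing.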
+ static auto toUint16 = [](const char* s) -> uint16_t {
+ if (s) {
+ nsresult rv;
+ uint64_t n64 = nsDependentCString(s).ToInteger64(&rv);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
+ }
+ return kIndexTimeNotAvailable;
+ };
+
+ const char* onStartTimeStr =
+ mMetadata->GetElement("net-response-time-onstart");
+ uint16_t onStartTime = toUint16(onStartTimeStr);
+
+ const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
+ uint16_t onStopTime = toUint16(onStopTimeStr);
+
+ const char* contentTypeStr = mMetadata->GetElement("ctid");
+ uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ if (contentTypeStr) {
+ int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
+ if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
+ n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
+ n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ }
+ contentType = n64;
+ }
+
+ rv = CacheFileIOManager::UpdateIndexEntry(
+ mHandle, &frecency, &hasAltData, &onStartTime, &onStopTime, &contentType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+size_t CacheFile::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ CacheFileAutoLock lock(const_cast<CacheFile*>(this));
+
+ size_t n = 0;
+ n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
+ for (auto iter = mChunks.ConstIter(); !iter.Done(); iter.Next()) {
+ n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
+ }
+ n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
+ for (auto iter = mCachedChunks.ConstIter(); !iter.Done(); iter.Next()) {
+ n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
+ }
+ // Ignore metadata if it's still being read. It's not safe to access buffers
+ // in CacheFileMetadata because they might be reallocated on another thread
+ // outside CacheFile's lock.
+ if (mMetadata && mReady) {
+ n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ // Input streams are not elsewhere reported.
+ n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
+ for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+ n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ // Output streams are not elsewhere reported.
+ if (mOutput) {
+ n += mOutput->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ // The listeners are usually classes reported just above.
+ n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
+ n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);
+
+ // mHandle reported directly from CacheFileIOManager.
+
+ return n;
+}
+
+size_t CacheFile::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheFile.h b/netwerk/cache2/CacheFile.h
new file mode 100644
index 0000000000..9b33c5dcd4
--- /dev/null
+++ b/netwerk/cache2/CacheFile.h
@@ -0,0 +1,264 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFile__h__
+#define CacheFile__h__
+
+#include "CacheFileChunk.h"
+#include "CacheFileIOManager.h"
+#include "CacheFileMetadata.h"
+#include "nsRefPtrHashtable.h"
+#include "nsClassHashtable.h"
+#include "mozilla/Mutex.h"
+
+class nsIInputStream;
+class nsIOutputStream;
+class nsICacheEntryMetaDataVisitor;
+
+namespace mozilla {
+namespace net {
+
+class CacheFileInputStream;
+class CacheFileOutputStream;
+class CacheOutputCloseListener;
+class MetadataWriteTimer;
+
+#define CACHEFILELISTENER_IID \
+ { /* 95e7f284-84ba-48f9-b1fc-3a7336b4c33c */ \
+ 0x95e7f284, 0x84ba, 0x48f9, { \
+ 0xb1, 0xfc, 0x3a, 0x73, 0x36, 0xb4, 0xc3, 0x3c \
+ } \
+ }
+
+class CacheFileListener : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(CACHEFILELISTENER_IID)
+
+ NS_IMETHOD OnFileReady(nsresult aResult, bool aIsNew) = 0;
+ NS_IMETHOD OnFileDoomed(nsresult aResult) = 0;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(CacheFileListener, CACHEFILELISTENER_IID)
+
+class CacheFile final : public CacheFileChunkListener,
+ public CacheFileIOListener,
+ public CacheFileMetadataListener {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ CacheFile();
+
+ nsresult Init(const nsACString& aKey, bool aCreateNew, bool aMemoryOnly,
+ bool aSkipSizeCheck, bool aPriority, bool aPinned,
+ CacheFileListener* aCallback);
+
+ NS_IMETHOD OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkUpdated(CacheFileChunk* aChunk) override;
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) override;
+ virtual bool IsKilled() override;
+
+ NS_IMETHOD OnMetadataRead(nsresult aResult) override;
+ NS_IMETHOD OnMetadataWritten(nsresult aResult) override;
+
+ NS_IMETHOD OpenInputStream(nsICacheEntry* aCacheEntryHandle,
+ nsIInputStream** _retval);
+ NS_IMETHOD OpenAlternativeInputStream(nsICacheEntry* aCacheEntryHandle,
+ const char* aAltDataType,
+ nsIInputStream** _retval);
+ NS_IMETHOD OpenOutputStream(CacheOutputCloseListener* aCloseListener,
+ nsIOutputStream** _retval);
+ NS_IMETHOD OpenAlternativeOutputStream(
+ CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
+ nsIAsyncOutputStream** _retval);
+ NS_IMETHOD SetMemoryOnly();
+ NS_IMETHOD Doom(CacheFileListener* aCallback);
+
+ void Kill() { mKill = true; }
+ nsresult ThrowMemoryCachedData();
+
+ nsresult GetAltDataSize(int64_t* aSize);
+ nsresult GetAltDataType(nsACString& aType);
+
+ // metadata forwarders
+ nsresult GetElement(const char* aKey, char** _retval);
+ nsresult SetElement(const char* aKey, const char* aValue);
+ nsresult VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor);
+ nsresult ElementsSize(uint32_t* _retval);
+ nsresult SetExpirationTime(uint32_t aExpirationTime);
+ nsresult GetExpirationTime(uint32_t* _retval);
+ nsresult SetFrecency(uint32_t aFrecency);
+ nsresult GetFrecency(uint32_t* _retval);
+ nsresult SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime);
+ nsresult SetContentType(uint8_t aContentType);
+ nsresult GetOnStartTime(uint64_t* _retval);
+ nsresult GetOnStopTime(uint64_t* _retval);
+ nsresult GetLastModified(uint32_t* _retval);
+ nsresult GetLastFetched(uint32_t* _retval);
+ nsresult GetFetchCount(uint32_t* _retval);
+ nsresult GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize);
+  // Called by upper layers to indicate that the entry has been fetched,
+ // i.e. delivered to the consumer.
+ nsresult OnFetched();
+
+ bool DataSize(int64_t* aSize);
+ void Key(nsACString& aKey) { aKey = mKey; }
+ bool IsDoomed();
+ bool IsPinned() const { return mPinned; }
+ // Returns true when there is a potentially unfinished write operation.
+ bool IsWriteInProgress();
+ bool EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData);
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ friend class CacheFileIOManager;
+ friend class CacheFileChunk;
+ friend class CacheFileInputStream;
+ friend class CacheFileOutputStream;
+ friend class CacheFileAutoLock;
+ friend class MetadataWriteTimer;
+
+ virtual ~CacheFile();
+
+ void Lock();
+ void Unlock();
+ void AssertOwnsLock() const;
+ void ReleaseOutsideLock(RefPtr<nsISupports> aObject);
+
+ enum ECallerType { READER = 0, WRITER = 1, PRELOADER = 2 };
+
+ nsresult DoomLocked(CacheFileListener* aCallback);
+
+ nsresult GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
+ CacheFileChunkListener* aCallback,
+ CacheFileChunk** _retval);
+
+ void PreloadChunks(uint32_t aIndex);
+ bool ShouldCacheChunk(uint32_t aIndex);
+ bool MustKeepCachedChunk(uint32_t aIndex);
+
+ nsresult DeactivateChunk(CacheFileChunk* aChunk);
+ void RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk);
+
+ bool OutputStreamExists(bool aAlternativeData);
+  // Returns the number of bytes that are available and can be read by an input
+  // stream without waiting for the data. The amount is counted from the start
+  // of the chunk at aIndex, and it is guaranteed that this data won't be
+  // released by CleanUpCachedChunks().
+ int64_t BytesFromChunk(uint32_t aIndex, bool aAlternativeData);
+ nsresult Truncate(int64_t aOffset);
+
+ void RemoveInput(CacheFileInputStream* aInput, nsresult aStatus);
+ void RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus);
+ nsresult NotifyChunkListener(CacheFileChunkListener* aCallback,
+ nsIEventTarget* aTarget, nsresult aResult,
+ uint32_t aChunkIdx, CacheFileChunk* aChunk);
+ void QueueChunkListener(uint32_t aIndex, CacheFileChunkListener* aCallback);
+ nsresult NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
+ CacheFileChunk* aChunk);
+ bool HaveChunkListeners(uint32_t aIndex);
+ void NotifyListenersAboutOutputRemoval();
+
+ bool IsDirty();
+ void WriteMetadataIfNeeded();
+ void WriteMetadataIfNeededLocked(bool aFireAndForget = false);
+ void PostWriteTimer();
+
+ void CleanUpCachedChunks();
+
+ nsresult PadChunkWithZeroes(uint32_t aChunkIdx);
+
+ void SetError(nsresult aStatus);
+ nsresult SetAltMetadata(const char* aAltMetadata);
+
+ nsresult InitIndexEntry();
+
+ mozilla::Mutex mLock;
+ bool mOpeningFile;
+ bool mReady;
+ bool mMemoryOnly;
+ bool mSkipSizeCheck;
+ bool mOpenAsMemoryOnly;
+ bool mPinned;
+ bool mPriority;
+ bool mDataAccessed;
+ bool mDataIsDirty;
+ bool mWritingMetadata;
+ bool mPreloadWithoutInputStreams;
+ uint32_t mPreloadChunkCount;
+ nsresult mStatus;
+  int64_t mDataSize;      // Size of the whole data, including any
+                          // alternative data representation.
+ int64_t mAltDataOffset; // If there is alternative data present, it
+ // contains size of the original data, i.e.
+ // offset where alternative data starts.
+ // Otherwise it is -1.
+ nsCString mKey;
+ nsCString mAltDataType; // The type of the saved alt-data. May be empty.
+
+ RefPtr<CacheFileHandle> mHandle;
+ RefPtr<CacheFileMetadata> mMetadata;
+ nsCOMPtr<CacheFileListener> mListener;
+ nsCOMPtr<CacheFileIOListener> mDoomAfterOpenListener;
+ Atomic<bool, Relaxed> mKill;
+
+ nsRefPtrHashtable<nsUint32HashKey, CacheFileChunk> mChunks;
+ nsClassHashtable<nsUint32HashKey, ChunkListeners> mChunkListeners;
+ nsRefPtrHashtable<nsUint32HashKey, CacheFileChunk> mCachedChunks;
+ // We can truncate data only if there is no input/output stream beyond the
+ // truncate position, so only unused chunks can be thrown away. But it can
+ // happen that we need to throw away a chunk that is still in mChunks (i.e.
+  // an active chunk) because deactivation happens with a small delay. We
+  // cannot delete such a chunk immediately, but we need to ensure that it
+  // won't be returned by GetChunkLocked, so we move it into mDiscardedChunks
+ // and mark it as discarded.
+ nsTArray<RefPtr<CacheFileChunk>> mDiscardedChunks;
+
+ nsTArray<CacheFileInputStream*> mInputs;
+ CacheFileOutputStream* mOutput;
+
+ nsTArray<RefPtr<nsISupports>> mObjsToRelease;
+};
+
+class CacheFileAutoLock {
+ public:
+ explicit CacheFileAutoLock(CacheFile* aFile) : mFile(aFile), mLocked(true) {
+ mFile->Lock();
+ }
+ ~CacheFileAutoLock() {
+ if (mLocked) mFile->Unlock();
+ }
+ void Lock() {
+ MOZ_ASSERT(!mLocked);
+ mFile->Lock();
+ mLocked = true;
+ }
+ void Unlock() {
+ MOZ_ASSERT(mLocked);
+ mFile->Unlock();
+ mLocked = false;
+ }
+
+ private:
+ RefPtr<CacheFile> mFile;
+ bool mLocked;
+};
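
CacheFileAutoLock is a scope-based guard over CacheFile::Lock()/Unlock() that can
also temporarily drop and re-take the lock mid-scope. A minimal self-contained
sketch of the same pattern, using std::mutex and a hypothetical GuardedFile
stand-in rather than the classes in this file, might look like this:

#include <cassert>
#include <mutex>

// Hypothetical stand-in for the guarded object; not the upstream CacheFile.
struct GuardedFile {
  std::mutex mLock;
  void Lock() { mLock.lock(); }
  void Unlock() { mLock.unlock(); }
};

class AutoLockSketch {
 public:
  explicit AutoLockSketch(GuardedFile* aFile) : mFile(aFile), mLocked(true) {
    mFile->Lock();
  }
  ~AutoLockSketch() {
    if (mLocked) mFile->Unlock();
  }
  void Unlock() {  // drop the lock for a blocking call
    assert(mLocked);
    mFile->Unlock();
    mLocked = false;
  }
  void Lock() {  // re-acquire before touching shared state again
    assert(!mLocked);
    mFile->Lock();
    mLocked = true;
  }

 private:
  GuardedFile* mFile;
  bool mLocked;
};

As in the class above, the destructor only unlocks when the guard still holds
the lock, mirroring the mLocked flag.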
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFileChunk.cpp b/netwerk/cache2/CacheFileChunk.cpp
new file mode 100644
index 0000000000..faf8b9c78c
--- /dev/null
+++ b/netwerk/cache2/CacheFileChunk.cpp
@@ -0,0 +1,840 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileChunk.h"
+
+#include "CacheFile.h"
+#include "nsThreadUtils.h"
+
+#include "mozilla/IntegerPrintfMacros.h"
+
+namespace mozilla {
+namespace net {
+
+#define kMinBufSize 512
+
+CacheFileChunkBuffer::CacheFileChunkBuffer(CacheFileChunk* aChunk)
+ : mChunk(aChunk),
+ mBuf(nullptr),
+ mBufSize(0),
+ mDataSize(0),
+ mReadHandlesCount(0),
+ mWriteHandleExists(false) {}
+
+CacheFileChunkBuffer::~CacheFileChunkBuffer() {
+ if (mBuf) {
+ CacheFileUtils::FreeBuffer(mBuf);
+ mBuf = nullptr;
+ mChunk->BuffersAllocationChanged(mBufSize, 0);
+ mBufSize = 0;
+ }
+}
+
+void CacheFileChunkBuffer::CopyFrom(CacheFileChunkBuffer* aOther) {
+ MOZ_RELEASE_ASSERT(mBufSize >= aOther->mDataSize);
+ mDataSize = aOther->mDataSize;
+ memcpy(mBuf, aOther->mBuf, mDataSize);
+}
+
+nsresult CacheFileChunkBuffer::FillInvalidRanges(
+ CacheFileChunkBuffer* aOther, CacheFileUtils::ValidityMap* aMap) {
+ nsresult rv;
+
+ rv = EnsureBufSize(aOther->mDataSize);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ uint32_t invalidOffset = 0;
+ uint32_t invalidLength;
+
+ for (uint32_t i = 0; i < aMap->Length(); ++i) {
+ uint32_t validOffset = (*aMap)[i].Offset();
+ uint32_t validLength = (*aMap)[i].Len();
+
+ MOZ_RELEASE_ASSERT(invalidOffset <= validOffset);
+ invalidLength = validOffset - invalidOffset;
+ if (invalidLength > 0) {
+ MOZ_RELEASE_ASSERT(invalidOffset + invalidLength <= aOther->mDataSize);
+ memcpy(mBuf + invalidOffset, aOther->mBuf + invalidOffset, invalidLength);
+ }
+ invalidOffset = validOffset + validLength;
+ }
+
+ if (invalidOffset < aOther->mDataSize) {
+ invalidLength = aOther->mDataSize - invalidOffset;
+ memcpy(mBuf + invalidOffset, aOther->mBuf + invalidOffset, invalidLength);
+ }
+
+ return NS_OK;
+}
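
FillInvalidRanges() above merges freshly written ranges with data that arrives
later from disk: only the gaps between the recorded valid ranges (and the tail
after the last one) are copied from the disk buffer. A small self-contained
sketch of that gap-filling step, using a plain sorted std::vector of ranges
instead of CacheFileUtils::ValidityMap, might be:

#include <cstdint>
#include <cstring>
#include <vector>

struct Range {
  uint32_t offset;
  uint32_t len;
};  // a range written while the disk read was in flight

// Copy from 'fromDisk' into 'dest' everywhere NOT covered by a valid range,
// assuming the ranges are sorted and disjoint, as the validity map guarantees.
void FillGaps(char* dest, const char* fromDisk, uint32_t diskSize,
              const std::vector<Range>& valid) {
  uint32_t invalidOffset = 0;
  for (const Range& r : valid) {
    if (r.offset > invalidOffset) {
      std::memcpy(dest + invalidOffset, fromDisk + invalidOffset,
                  r.offset - invalidOffset);
    }
    invalidOffset = r.offset + r.len;
  }
  if (invalidOffset < diskSize) {
    std::memcpy(dest + invalidOffset, fromDisk + invalidOffset,
                diskSize - invalidOffset);
  }
}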
+
+[[nodiscard]] nsresult CacheFileChunkBuffer::EnsureBufSize(uint32_t aBufSize) {
+ AssertOwnsLock();
+
+ if (mBufSize >= aBufSize) {
+ return NS_OK;
+ }
+
+ // find smallest power of 2 greater than or equal to aBufSize
+ aBufSize--;
+ aBufSize |= aBufSize >> 1;
+ aBufSize |= aBufSize >> 2;
+ aBufSize |= aBufSize >> 4;
+ aBufSize |= aBufSize >> 8;
+ aBufSize |= aBufSize >> 16;
+ aBufSize++;
+
+ const uint32_t minBufSize = kMinBufSize;
+ const uint32_t maxBufSize = kChunkSize;
+ aBufSize = clamped(aBufSize, minBufSize, maxBufSize);
+
+ if (!mChunk->CanAllocate(aBufSize - mBufSize)) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ char* newBuf = static_cast<char*>(realloc(mBuf, aBufSize));
+ if (!newBuf) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ mChunk->BuffersAllocationChanged(mBufSize, aBufSize);
+ mBuf = newBuf;
+ mBufSize = aBufSize;
+
+ return NS_OK;
+}
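
The bit-twiddling at the start of EnsureBufSize() rounds the requested size up
to the next power of two before clamping it to [kMinBufSize, kChunkSize]. As an
isolated helper, the rounding step is:

#include <cstdint>

// Round n up to the nearest power of two (valid for 0 < n <= 2^31).
uint32_t RoundUpToPowerOfTwo(uint32_t n) {
  n--;           // so that exact powers of two map to themselves
  n |= n >> 1;   // smear the highest set bit into every lower bit...
  n |= n >> 2;
  n |= n >> 4;
  n |= n >> 8;
  n |= n >> 16;  // ...so n is now of the form 2^k - 1
  return n + 1;  // e.g. 513 -> 1024, 1024 -> 1024
}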
+
+void CacheFileChunkBuffer::SetDataSize(uint32_t aDataSize) {
+ MOZ_RELEASE_ASSERT(
+ // EnsureBufSize must be called before SetDataSize, so the new data size
+ // is guaranteed to be smaller than or equal to mBufSize.
+ aDataSize <= mBufSize ||
+ // The only exception is an optimization when we read the data from the
+ // disk. The data is read to a separate buffer and CacheFileChunk::mBuf is
+ // empty (see CacheFileChunk::Read). We need to set mBuf::mDataSize
+      // accordingly so that the DataSize() methods return the correct value,
+      // but we don't want to allocate the buffer since it wouldn't be used in
+      // most cases.
+ (mBufSize == 0 && mChunk->mState == CacheFileChunk::READING));
+
+ mDataSize = aDataSize;
+}
+
+void CacheFileChunkBuffer::AssertOwnsLock() const { mChunk->AssertOwnsLock(); }
+
+void CacheFileChunkBuffer::RemoveReadHandle() {
+ AssertOwnsLock();
+ MOZ_RELEASE_ASSERT(mReadHandlesCount);
+ MOZ_RELEASE_ASSERT(!mWriteHandleExists);
+ mReadHandlesCount--;
+
+ if (mReadHandlesCount == 0 && mChunk->mBuf != this) {
+ DebugOnly<bool> removed = mChunk->mOldBufs.RemoveElement(this);
+ MOZ_ASSERT(removed);
+ }
+}
+
+void CacheFileChunkBuffer::RemoveWriteHandle() {
+ AssertOwnsLock();
+ MOZ_RELEASE_ASSERT(mReadHandlesCount == 0);
+ MOZ_RELEASE_ASSERT(mWriteHandleExists);
+ mWriteHandleExists = false;
+}
+
+size_t CacheFileChunkBuffer::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = mallocSizeOf(this);
+
+ if (mBuf) {
+ n += mallocSizeOf(mBuf);
+ }
+
+ return n;
+}
+
+uint32_t CacheFileChunkHandle::DataSize() {
+ MOZ_ASSERT(mBuf, "Unexpected call on dummy handle");
+ mBuf->AssertOwnsLock();
+ return mBuf->mDataSize;
+}
+
+uint32_t CacheFileChunkHandle::Offset() {
+ MOZ_ASSERT(mBuf, "Unexpected call on dummy handle");
+ mBuf->AssertOwnsLock();
+ return mBuf->mChunk->Index() * kChunkSize;
+}
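
Offset() reflects the fixed chunked layout of the cache file: chunk i covers at
most the bytes [i * kChunkSize, (i + 1) * kChunkSize). Conversely, a byte
position maps back to a chunk index and an in-chunk offset by division, roughly
like this (kChunkSize is hard-coded here to the 256 KiB value defined in
CacheFileChunk.h):

#include <cstdint>

constexpr int64_t kSketchChunkSize = 256 * 1024;  // mirrors kChunkSize

struct ChunkPos {
  uint32_t index;   // which chunk the byte falls into
  uint32_t offset;  // position within that chunk
};

ChunkPos LocateByte(int64_t filePos) {
  return ChunkPos{static_cast<uint32_t>(filePos / kSketchChunkSize),
                  static_cast<uint32_t>(filePos % kSketchChunkSize)};
}
// Example: byte 300000 lands in chunk 1 at offset 37856.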
+
+CacheFileChunkReadHandle::CacheFileChunkReadHandle(CacheFileChunkBuffer* aBuf) {
+ mBuf = aBuf;
+ mBuf->mReadHandlesCount++;
+}
+
+CacheFileChunkReadHandle::~CacheFileChunkReadHandle() {
+ mBuf->RemoveReadHandle();
+}
+
+const char* CacheFileChunkReadHandle::Buf() { return mBuf->mBuf; }
+
+CacheFileChunkWriteHandle::CacheFileChunkWriteHandle(
+ CacheFileChunkBuffer* aBuf) {
+ mBuf = aBuf;
+ if (mBuf) {
+ MOZ_ASSERT(!mBuf->mWriteHandleExists);
+ mBuf->mWriteHandleExists = true;
+ }
+}
+
+CacheFileChunkWriteHandle::~CacheFileChunkWriteHandle() {
+ if (mBuf) {
+ mBuf->RemoveWriteHandle();
+ }
+}
+
+char* CacheFileChunkWriteHandle::Buf() { return mBuf ? mBuf->mBuf : nullptr; }
+
+void CacheFileChunkWriteHandle::UpdateDataSize(uint32_t aOffset,
+ uint32_t aLen) {
+ MOZ_ASSERT(mBuf, "Write performed on dummy handle?");
+ MOZ_ASSERT(aOffset <= mBuf->mDataSize);
+ MOZ_ASSERT(aOffset + aLen <= mBuf->mBufSize);
+
+ if (aOffset + aLen > mBuf->mDataSize) {
+ mBuf->mDataSize = aOffset + aLen;
+ }
+
+ mBuf->mChunk->UpdateDataSize(aOffset, aLen);
+}
+
+class NotifyUpdateListenerEvent : public Runnable {
+ public:
+ NotifyUpdateListenerEvent(CacheFileChunkListener* aCallback,
+ CacheFileChunk* aChunk)
+ : Runnable("net::NotifyUpdateListenerEvent"),
+ mCallback(aCallback),
+ mChunk(aChunk) {
+ LOG(("NotifyUpdateListenerEvent::NotifyUpdateListenerEvent() [this=%p]",
+ this));
+ }
+
+ protected:
+ ~NotifyUpdateListenerEvent() {
+ LOG(("NotifyUpdateListenerEvent::~NotifyUpdateListenerEvent() [this=%p]",
+ this));
+ }
+
+ public:
+ NS_IMETHOD Run() override {
+ LOG(("NotifyUpdateListenerEvent::Run() [this=%p]", this));
+
+ mCallback->OnChunkUpdated(mChunk);
+ return NS_OK;
+ }
+
+ protected:
+ nsCOMPtr<CacheFileChunkListener> mCallback;
+ RefPtr<CacheFileChunk> mChunk;
+};
+
+bool CacheFileChunk::DispatchRelease() {
+ if (NS_IsMainThread()) {
+ return false;
+ }
+
+ NS_DispatchToMainThread(NewNonOwningRunnableMethod(
+ "net::CacheFileChunk::Release", this, &CacheFileChunk::Release));
+
+ return true;
+}
+
+NS_IMPL_ADDREF(CacheFileChunk)
+NS_IMETHODIMP_(MozExternalRefCountType)
+CacheFileChunk::Release() {
+ nsrefcnt count = mRefCnt - 1;
+ if (DispatchRelease()) {
+ // Redispatched to the main thread.
+ return count;
+ }
+
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "CacheFileChunk");
+
+ if (0 == count) {
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ // We can safely access this chunk after decreasing mRefCnt since we re-post
+ // all calls to Release() happening off the main thread to the main thread.
+ // I.e. no other Release() that would delete the object could be run before
+ // we call CacheFile::DeactivateChunk().
+ //
+ // NOTE: we don't grab the CacheFile's lock, so the chunk might be addrefed
+ // on another thread before CacheFile::DeactivateChunk() grabs the lock on
+  // this thread. To make sure we won't deactivate a chunk that was just
+  // returned to a new consumer, we check mRefCnt once again in
+  // CacheFile::DeactivateChunk() after we grab the lock.
+ if (mActiveChunk && count == 1) {
+ mFile->DeactivateChunk(this);
+ }
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(CacheFileChunk)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+CacheFileChunk::CacheFileChunk(CacheFile* aFile, uint32_t aIndex,
+ bool aInitByWriter)
+ : CacheMemoryConsumer(aFile->mOpenAsMemoryOnly ? MEMORY_ONLY : DONT_REPORT),
+ mIndex(aIndex),
+ mState(INITIAL),
+ mStatus(NS_OK),
+ mActiveChunk(false),
+ mIsDirty(false),
+ mDiscardedChunk(false),
+ mBuffersSize(0),
+ mLimitAllocation(!aFile->mOpenAsMemoryOnly && aInitByWriter),
+ mIsPriority(aFile->mPriority),
+ mExpectedHash(0),
+ mFile(aFile) {
+ LOG(("CacheFileChunk::CacheFileChunk() [this=%p, index=%u, initByWriter=%d]",
+ this, aIndex, aInitByWriter));
+ mBuf = new CacheFileChunkBuffer(this);
+}
+
+CacheFileChunk::~CacheFileChunk() {
+ LOG(("CacheFileChunk::~CacheFileChunk() [this=%p]", this));
+}
+
+void CacheFileChunk::AssertOwnsLock() const { mFile->AssertOwnsLock(); }
+
+void CacheFileChunk::InitNew() {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::InitNew() [this=%p]", this));
+
+ MOZ_ASSERT(mState == INITIAL);
+ MOZ_ASSERT(NS_SUCCEEDED(mStatus));
+ MOZ_ASSERT(!mBuf->Buf());
+ MOZ_ASSERT(!mWritingStateHandle);
+ MOZ_ASSERT(!mReadingStateBuf);
+ MOZ_ASSERT(!mIsDirty);
+
+ mBuf = new CacheFileChunkBuffer(this);
+ mState = READY;
+}
+
+nsresult CacheFileChunk::Read(CacheFileHandle* aHandle, uint32_t aLen,
+ CacheHash::Hash16_t aHash,
+ CacheFileChunkListener* aCallback) {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::Read() [this=%p, handle=%p, len=%d, listener=%p]", this,
+ aHandle, aLen, aCallback));
+
+ MOZ_ASSERT(mState == INITIAL);
+ MOZ_ASSERT(NS_SUCCEEDED(mStatus));
+ MOZ_ASSERT(!mBuf->Buf());
+ MOZ_ASSERT(!mWritingStateHandle);
+ MOZ_ASSERT(!mReadingStateBuf);
+ MOZ_ASSERT(aLen);
+
+ nsresult rv;
+
+ mState = READING;
+
+ RefPtr<CacheFileChunkBuffer> tmpBuf = new CacheFileChunkBuffer(this);
+ rv = tmpBuf->EnsureBufSize(aLen);
+ if (NS_FAILED(rv)) {
+ SetError(rv);
+ return mStatus;
+ }
+ tmpBuf->SetDataSize(aLen);
+
+ rv = CacheFileIOManager::Read(aHandle, mIndex * kChunkSize, tmpBuf->Buf(),
+ aLen, this);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ rv = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
+ SetError(rv);
+ } else {
+ mReadingStateBuf.swap(tmpBuf);
+ mListener = aCallback;
+    // mBuf contains no data, but we set the data size to the size of the data
+    // that will be read from the disk. No handle is allowed to access the
+    // non-existent data until reading finishes, but data can be appended or
+    // overwritten. These pieces are tracked in mValidityMap and will be merged
+    // with the data read from disk in OnDataRead().
+ mBuf->SetDataSize(aLen);
+ mExpectedHash = aHash;
+ }
+
+ return rv;
+}
+
+nsresult CacheFileChunk::Write(CacheFileHandle* aHandle,
+ CacheFileChunkListener* aCallback) {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::Write() [this=%p, handle=%p, listener=%p]", this,
+ aHandle, aCallback));
+
+ MOZ_ASSERT(mState == READY);
+ MOZ_ASSERT(NS_SUCCEEDED(mStatus));
+ MOZ_ASSERT(!mWritingStateHandle);
+ MOZ_ASSERT(mBuf->DataSize()); // Don't write chunk when it is empty
+ MOZ_ASSERT(mBuf->ReadHandlesCount() == 0);
+ MOZ_ASSERT(!mBuf->WriteHandleExists());
+
+ nsresult rv;
+
+ mState = WRITING;
+ mWritingStateHandle = MakeUnique<CacheFileChunkReadHandle>(mBuf);
+
+ rv = CacheFileIOManager::Write(
+ aHandle, mIndex * kChunkSize, mWritingStateHandle->Buf(),
+ mWritingStateHandle->DataSize(), false, false, this);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ mWritingStateHandle = nullptr;
+ SetError(rv);
+ } else {
+ mListener = aCallback;
+ mIsDirty = false;
+ }
+
+ return rv;
+}
+
+void CacheFileChunk::WaitForUpdate(CacheFileChunkListener* aCallback) {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::WaitForUpdate() [this=%p, listener=%p]", this,
+ aCallback));
+
+ MOZ_ASSERT(mFile->mOutput);
+ MOZ_ASSERT(IsReady());
+
+#ifdef DEBUG
+ for (uint32_t i = 0; i < mUpdateListeners.Length(); i++) {
+ MOZ_ASSERT(mUpdateListeners[i]->mCallback != aCallback);
+ }
+#endif
+
+ ChunkListenerItem* item = new ChunkListenerItem();
+ item->mTarget = CacheFileIOManager::IOTarget();
+ if (!item->mTarget) {
+ LOG(
+ ("CacheFileChunk::WaitForUpdate() - Cannot get Cache I/O thread! Using "
+ "main thread for callback."));
+ item->mTarget = GetMainThreadEventTarget();
+ }
+  MOZ_ASSERT(item->mTarget);
+  item->mCallback = aCallback;
+
+ mUpdateListeners.AppendElement(item);
+}
+
+void CacheFileChunk::CancelWait(CacheFileChunkListener* aCallback) {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::CancelWait() [this=%p, listener=%p]", this, aCallback));
+
+ MOZ_ASSERT(IsReady());
+
+ uint32_t i;
+ for (i = 0; i < mUpdateListeners.Length(); i++) {
+ ChunkListenerItem* item = mUpdateListeners[i];
+
+ if (item->mCallback == aCallback) {
+ mUpdateListeners.RemoveElementAt(i);
+ delete item;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (; i < mUpdateListeners.Length(); i++) {
+ MOZ_ASSERT(mUpdateListeners[i]->mCallback != aCallback);
+ }
+#endif
+}
+
+nsresult CacheFileChunk::NotifyUpdateListeners() {
+ AssertOwnsLock();
+
+ LOG(("CacheFileChunk::NotifyUpdateListeners() [this=%p]", this));
+
+ MOZ_ASSERT(IsReady());
+
+ nsresult rv, rv2;
+
+ rv = NS_OK;
+ for (uint32_t i = 0; i < mUpdateListeners.Length(); i++) {
+ ChunkListenerItem* item = mUpdateListeners[i];
+
+ LOG(
+ ("CacheFileChunk::NotifyUpdateListeners() - Notifying listener %p "
+ "[this=%p]",
+ item->mCallback.get(), this));
+
+ RefPtr<NotifyUpdateListenerEvent> ev;
+ ev = new NotifyUpdateListenerEvent(item->mCallback, this);
+ rv2 = item->mTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
+ delete item;
+ }
+
+ mUpdateListeners.Clear();
+
+ return rv;
+}
+
+uint32_t CacheFileChunk::Index() const { return mIndex; }
+
+CacheHash::Hash16_t CacheFileChunk::Hash() const {
+ MOZ_ASSERT(IsReady());
+
+ return CacheHash::Hash16(mBuf->Buf(), mBuf->DataSize());
+}
+
+uint32_t CacheFileChunk::DataSize() const { return mBuf->DataSize(); }
+
+void CacheFileChunk::UpdateDataSize(uint32_t aOffset, uint32_t aLen) {
+ AssertOwnsLock();
+
+ // UpdateDataSize() is called only when we've written some data to the chunk
+ // and we never write data anymore once some error occurs.
+ MOZ_ASSERT(NS_SUCCEEDED(mStatus));
+
+ LOG(("CacheFileChunk::UpdateDataSize() [this=%p, offset=%d, len=%d]", this,
+ aOffset, aLen));
+
+ mIsDirty = true;
+
+ int64_t fileSize = static_cast<int64_t>(kChunkSize) * mIndex + aOffset + aLen;
+ bool notify = false;
+
+ if (fileSize > mFile->mDataSize) {
+ mFile->mDataSize = fileSize;
+ notify = true;
+ }
+
+ if (mState == READY || mState == WRITING) {
+ MOZ_ASSERT(mValidityMap.Length() == 0);
+
+ if (notify) {
+ NotifyUpdateListeners();
+ }
+
+ return;
+ }
+
+  // We're still waiting for data from the disk. This chunk cannot be used by
+  // an input stream, so there must be no update listener. We also need to keep
+ // track of where the data is written so that we can correctly merge the new
+ // data with the old one.
+
+ MOZ_ASSERT(mUpdateListeners.Length() == 0);
+ MOZ_ASSERT(mState == READING);
+
+ mValidityMap.AddPair(aOffset, aLen);
+ mValidityMap.Log();
+}
+
+void CacheFileChunk::Truncate(uint32_t aOffset) {
+ MOZ_RELEASE_ASSERT(mState == READY || mState == WRITING || mState == READING);
+
+ if (mState == READING) {
+ mIsDirty = true;
+ }
+
+ mBuf->SetDataSize(aOffset);
+}
+
+nsresult CacheFileChunk::OnFileOpened(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileChunk::OnFileOpened should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileChunk::OnDataWritten(CacheFileHandle* aHandle,
+ const char* aBuf, nsresult aResult) {
+ LOG((
+ "CacheFileChunk::OnDataWritten() [this=%p, handle=%p, result=0x%08" PRIx32
+ "]",
+ this, aHandle, static_cast<uint32_t>(aResult)));
+
+ nsCOMPtr<CacheFileChunkListener> listener;
+
+ {
+ CacheFileAutoLock lock(mFile);
+
+ MOZ_ASSERT(mState == WRITING);
+ MOZ_ASSERT(mListener);
+
+ mWritingStateHandle = nullptr;
+
+ if (NS_WARN_IF(NS_FAILED(aResult))) {
+ SetError(aResult);
+ }
+
+ mState = READY;
+ mListener.swap(listener);
+ }
+
+ listener->OnChunkWritten(aResult, this);
+
+ return NS_OK;
+}
+
+nsresult CacheFileChunk::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) {
+ LOG(("CacheFileChunk::OnDataRead() [this=%p, handle=%p, result=0x%08" PRIx32
+ "]",
+ this, aHandle, static_cast<uint32_t>(aResult)));
+
+ nsCOMPtr<CacheFileChunkListener> listener;
+
+ {
+ CacheFileAutoLock lock(mFile);
+
+ MOZ_ASSERT(mState == READING);
+ MOZ_ASSERT(mListener);
+ MOZ_ASSERT(mReadingStateBuf);
+ MOZ_RELEASE_ASSERT(mBuf->ReadHandlesCount() == 0);
+ MOZ_RELEASE_ASSERT(!mBuf->WriteHandleExists());
+
+ RefPtr<CacheFileChunkBuffer> tmpBuf;
+ tmpBuf.swap(mReadingStateBuf);
+
+ if (NS_SUCCEEDED(aResult)) {
+ CacheHash::Hash16_t hash =
+ CacheHash::Hash16(tmpBuf->Buf(), tmpBuf->DataSize());
+ if (hash != mExpectedHash) {
+ LOG(
+ ("CacheFileChunk::OnDataRead() - Hash mismatch! Hash of the data is"
+ " %hx, hash in metadata is %hx. [this=%p, idx=%d]",
+ hash, mExpectedHash, this, mIndex));
+ aResult = NS_ERROR_FILE_CORRUPTED;
+ } else {
+ if (mBuf->DataSize() < tmpBuf->DataSize()) {
+ // Truncate() was called while the data was being read.
+ tmpBuf->SetDataSize(mBuf->DataSize());
+ }
+
+ if (!mBuf->Buf()) {
+ // Just swap the buffers if mBuf is still empty
+ mBuf.swap(tmpBuf);
+ } else {
+ LOG(("CacheFileChunk::OnDataRead() - Merging buffers. [this=%p]",
+ this));
+
+ mValidityMap.Log();
+ aResult = mBuf->FillInvalidRanges(tmpBuf, &mValidityMap);
+ mValidityMap.Clear();
+ }
+ }
+ }
+
+ if (NS_FAILED(aResult)) {
+ aResult = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
+ SetError(aResult);
+ mBuf->SetDataSize(0);
+ }
+
+ mState = READY;
+ mListener.swap(listener);
+ }
+
+ listener->OnChunkRead(aResult, this);
+
+ return NS_OK;
+}
+
+nsresult CacheFileChunk::OnFileDoomed(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileChunk::OnFileDoomed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileChunk::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheFileChunk::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileChunk::OnFileRenamed(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileChunk::OnFileRenamed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+bool CacheFileChunk::IsKilled() { return mFile->IsKilled(); }
+
+bool CacheFileChunk::IsReady() const {
+ return (NS_SUCCEEDED(mStatus) && (mState == READY || mState == WRITING));
+}
+
+bool CacheFileChunk::IsDirty() const {
+ AssertOwnsLock();
+
+ return mIsDirty;
+}
+
+nsresult CacheFileChunk::GetStatus() { return mStatus; }
+
+void CacheFileChunk::SetError(nsresult aStatus) {
+ LOG(("CacheFileChunk::SetError() [this=%p, status=0x%08" PRIx32 "]", this,
+ static_cast<uint32_t>(aStatus)));
+
+ MOZ_ASSERT(NS_FAILED(aStatus));
+
+ if (NS_FAILED(mStatus)) {
+ // Remember only the first error code.
+ return;
+ }
+
+ mStatus = aStatus;
+}
+
+CacheFileChunkReadHandle CacheFileChunk::GetReadHandle() {
+ LOG(("CacheFileChunk::GetReadHandle() [this=%p]", this));
+
+ AssertOwnsLock();
+
+ MOZ_RELEASE_ASSERT(mState == READY || mState == WRITING);
+ // We don't release the lock when writing the data and CacheFileOutputStream
+ // doesn't get the read handle, so there cannot be a write handle when read
+ // handle is obtained.
+ MOZ_RELEASE_ASSERT(!mBuf->WriteHandleExists());
+
+ return CacheFileChunkReadHandle(mBuf);
+}
+
+CacheFileChunkWriteHandle CacheFileChunk::GetWriteHandle(
+ uint32_t aEnsuredBufSize) {
+ LOG(("CacheFileChunk::GetWriteHandle() [this=%p, ensuredBufSize=%u]", this,
+ aEnsuredBufSize));
+
+ AssertOwnsLock();
+
+ if (NS_FAILED(mStatus)) {
+ return CacheFileChunkWriteHandle(nullptr); // dummy handle
+ }
+
+ nsresult rv;
+
+ // We don't support multiple write handles
+ MOZ_RELEASE_ASSERT(!mBuf->WriteHandleExists());
+
+ if (mBuf->ReadHandlesCount()) {
+ LOG(
+ ("CacheFileChunk::GetWriteHandle() - cloning buffer because of existing"
+ " read handle"));
+
+ MOZ_RELEASE_ASSERT(mState != READING);
+ RefPtr<CacheFileChunkBuffer> newBuf = new CacheFileChunkBuffer(this);
+ rv = newBuf->EnsureBufSize(std::max(aEnsuredBufSize, mBuf->DataSize()));
+ if (NS_SUCCEEDED(rv)) {
+ newBuf->CopyFrom(mBuf);
+ mOldBufs.AppendElement(mBuf);
+ mBuf = newBuf;
+ }
+ } else {
+ rv = mBuf->EnsureBufSize(aEnsuredBufSize);
+ }
+
+ if (NS_FAILED(rv)) {
+ SetError(NS_ERROR_OUT_OF_MEMORY);
+ return CacheFileChunkWriteHandle(nullptr); // dummy handle
+ }
+
+ return CacheFileChunkWriteHandle(mBuf);
+}
+
+// Memory reporting
+
+size_t CacheFileChunk::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = mBuf->SizeOfIncludingThis(mallocSizeOf);
+
+ if (mReadingStateBuf) {
+ n += mReadingStateBuf->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ for (uint32_t i = 0; i < mOldBufs.Length(); ++i) {
+ n += mOldBufs[i]->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ n += mValidityMap.SizeOfExcludingThis(mallocSizeOf);
+
+ return n;
+}
+
+size_t CacheFileChunk::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+bool CacheFileChunk::CanAllocate(uint32_t aSize) const {
+ if (!mLimitAllocation) {
+ return true;
+ }
+
+ LOG(("CacheFileChunk::CanAllocate() [this=%p, size=%u]", this, aSize));
+
+ int64_t limit = CacheObserver::MaxDiskChunksMemoryUsage(mIsPriority);
+ if (limit == 0) {
+ return true;
+ }
+
+ limit <<= 10;
+ if (limit > UINT32_MAX) {
+ limit = UINT32_MAX;
+ }
+
+ int64_t usage = ChunksMemoryUsage();
+ if (usage + aSize > limit) {
+ LOG(("CacheFileChunk::CanAllocate() - Returning false. [this=%p]", this));
+ return false;
+ }
+
+ return true;
+}
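
CanAllocate() takes a limit expressed in KiB, converts it to bytes, caps it at
UINT32_MAX (the usage counters are 32-bit), and then checks whether the new
allocation would exceed it. The same check written as a standalone function,
with placeholder inputs instead of the CacheObserver preference:

#include <cstdint>

// Returns true when allocating 'requested' more bytes keeps 'currentUsage'
// within a limit given in KiB; a limit of 0 means "no limit".
bool WithinChunkMemoryLimit(uint32_t requested, uint32_t currentUsage,
                            int64_t limitKiB) {
  if (limitKiB == 0) {
    return true;
  }
  int64_t limitBytes = limitKiB << 10;  // KiB -> bytes
  if (limitBytes > UINT32_MAX) {
    limitBytes = UINT32_MAX;  // the usage counter is only 32-bit
  }
  return static_cast<int64_t>(currentUsage) + requested <= limitBytes;
}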
+
+void CacheFileChunk::BuffersAllocationChanged(uint32_t aFreed,
+ uint32_t aAllocated) {
+ uint32_t oldBuffersSize = mBuffersSize;
+ mBuffersSize += aAllocated;
+ mBuffersSize -= aFreed;
+
+ DoMemoryReport(sizeof(CacheFileChunk) + mBuffersSize);
+
+ if (!mLimitAllocation) {
+ return;
+ }
+
+ ChunksMemoryUsage() -= oldBuffersSize;
+ ChunksMemoryUsage() += mBuffersSize;
+ LOG(
+ ("CacheFileChunk::BuffersAllocationChanged() - %s chunks usage %u "
+ "[this=%p]",
+ mIsPriority ? "Priority" : "Normal",
+ static_cast<uint32_t>(ChunksMemoryUsage()), this));
+}
+
+mozilla::Atomic<uint32_t, ReleaseAcquire>& CacheFileChunk::ChunksMemoryUsage()
+ const {
+ static mozilla::Atomic<uint32_t, ReleaseAcquire> chunksMemoryUsage(0);
+ static mozilla::Atomic<uint32_t, ReleaseAcquire> prioChunksMemoryUsage(0);
+ return mIsPriority ? prioChunksMemoryUsage : chunksMemoryUsage;
+}
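
ChunksMemoryUsage() keeps two process-wide counters, one per priority class, as
function-local statics, which gives lazy and thread-safe initialization.
Stripped of the Mozilla types, the bookkeeping pattern is roughly:

#include <atomic>
#include <cstdint>

// One shared counter per priority class, selected by a flag.
std::atomic<uint32_t>& ChunkUsageCounter(bool isPriority) {
  static std::atomic<uint32_t> normalUsage{0};
  static std::atomic<uint32_t> priorityUsage{0};
  return isPriority ? priorityUsage : normalUsage;
}

// Mirrors BuffersAllocationChanged(): apply the delta of an allocation change.
void RecordAllocationChange(bool isPriority, uint32_t freed,
                            uint32_t allocated) {
  ChunkUsageCounter(isPriority) += allocated;
  ChunkUsageCounter(isPriority) -= freed;
}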
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheFileChunk.h b/netwerk/cache2/CacheFileChunk.h
new file mode 100644
index 0000000000..ea82fb3773
--- /dev/null
+++ b/netwerk/cache2/CacheFileChunk.h
@@ -0,0 +1,238 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileChunk__h__
+#define CacheFileChunk__h__
+
+#include "CacheFileIOManager.h"
+#include "CacheStorageService.h"
+#include "CacheHashUtils.h"
+#include "CacheFileUtils.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+namespace net {
+
+constexpr int32_t kChunkSize = 256 * 1024;
+constexpr size_t kEmptyChunkHash = 0x1826;
+
+class CacheFileChunk;
+class CacheFile;
+
+class CacheFileChunkBuffer {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CacheFileChunkBuffer)
+
+ explicit CacheFileChunkBuffer(CacheFileChunk* aChunk);
+
+ nsresult EnsureBufSize(uint32_t aSize);
+ void CopyFrom(CacheFileChunkBuffer* aOther);
+ nsresult FillInvalidRanges(CacheFileChunkBuffer* aOther,
+ CacheFileUtils::ValidityMap* aMap);
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ char* Buf() const { return mBuf; }
+ void SetDataSize(uint32_t aDataSize);
+ uint32_t DataSize() const { return mDataSize; }
+ uint32_t ReadHandlesCount() const { return mReadHandlesCount; }
+ bool WriteHandleExists() const { return mWriteHandleExists; }
+
+ private:
+ friend class CacheFileChunkHandle;
+ friend class CacheFileChunkReadHandle;
+ friend class CacheFileChunkWriteHandle;
+
+ ~CacheFileChunkBuffer();
+
+ void AssertOwnsLock() const;
+
+ void RemoveReadHandle();
+ void RemoveWriteHandle();
+
+  // We keep a weak reference to the chunk to avoid creating a reference cycle.
+  // The buffer is referenced only by the chunk and by handles. Handles are
+  // always destroyed before the chunk, so it is guaranteed that mChunk is a
+  // valid pointer for the whole buffer's lifetime.
+ CacheFileChunk* mChunk;
+ char* mBuf;
+ uint32_t mBufSize;
+ uint32_t mDataSize;
+ uint32_t mReadHandlesCount;
+ bool mWriteHandleExists;
+};
+
+class CacheFileChunkHandle {
+ public:
+ uint32_t DataSize();
+ uint32_t Offset();
+
+ protected:
+ RefPtr<CacheFileChunkBuffer> mBuf;
+};
+
+class CacheFileChunkReadHandle : public CacheFileChunkHandle {
+ public:
+ explicit CacheFileChunkReadHandle(CacheFileChunkBuffer* aBuf);
+ ~CacheFileChunkReadHandle();
+
+ const char* Buf();
+};
+
+class CacheFileChunkWriteHandle : public CacheFileChunkHandle {
+ public:
+ explicit CacheFileChunkWriteHandle(CacheFileChunkBuffer* aBuf);
+ ~CacheFileChunkWriteHandle();
+
+ char* Buf();
+ void UpdateDataSize(uint32_t aOffset, uint32_t aLen);
+};
+
+#define CACHEFILECHUNKLISTENER_IID \
+ { /* baf16149-2ab5-499c-a9c2-5904eb95c288 */ \
+ 0xbaf16149, 0x2ab5, 0x499c, { \
+ 0xa9, 0xc2, 0x59, 0x04, 0xeb, 0x95, 0xc2, 0x88 \
+ } \
+ }
+
+class CacheFileChunkListener : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(CACHEFILECHUNKLISTENER_IID)
+
+ NS_IMETHOD OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) = 0;
+ NS_IMETHOD OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) = 0;
+ NS_IMETHOD OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) = 0;
+ NS_IMETHOD OnChunkUpdated(CacheFileChunk* aChunk) = 0;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(CacheFileChunkListener,
+ CACHEFILECHUNKLISTENER_IID)
+
+class ChunkListenerItem {
+ public:
+ MOZ_COUNTED_DEFAULT_CTOR(ChunkListenerItem)
+ MOZ_COUNTED_DTOR(ChunkListenerItem)
+
+ nsCOMPtr<nsIEventTarget> mTarget;
+ nsCOMPtr<CacheFileChunkListener> mCallback;
+};
+
+class ChunkListeners {
+ public:
+ MOZ_COUNTED_DEFAULT_CTOR(ChunkListeners)
+ MOZ_COUNTED_DTOR(ChunkListeners)
+
+ nsTArray<ChunkListenerItem*> mItems;
+};
+
+class CacheFileChunk final : public CacheFileIOListener,
+ public CacheMemoryConsumer {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ bool DispatchRelease();
+
+ CacheFileChunk(CacheFile* aFile, uint32_t aIndex, bool aInitByWriter);
+
+ void InitNew();
+ nsresult Read(CacheFileHandle* aHandle, uint32_t aLen,
+ CacheHash::Hash16_t aHash, CacheFileChunkListener* aCallback);
+ nsresult Write(CacheFileHandle* aHandle, CacheFileChunkListener* aCallback);
+ void WaitForUpdate(CacheFileChunkListener* aCallback);
+ void CancelWait(CacheFileChunkListener* aCallback);
+ nsresult NotifyUpdateListeners();
+
+ uint32_t Index() const;
+ CacheHash::Hash16_t Hash() const;
+ uint32_t DataSize() const;
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) override;
+ virtual bool IsKilled() override;
+
+ bool IsReady() const;
+ bool IsDirty() const;
+
+ nsresult GetStatus();
+ void SetError(nsresult aStatus);
+
+ CacheFileChunkReadHandle GetReadHandle();
+ CacheFileChunkWriteHandle GetWriteHandle(uint32_t aEnsuredBufSize);
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ friend class CacheFileChunkBuffer;
+ friend class CacheFileChunkWriteHandle;
+ friend class CacheFileInputStream;
+ friend class CacheFileOutputStream;
+ friend class CacheFile;
+
+ virtual ~CacheFileChunk();
+
+ void AssertOwnsLock() const;
+
+ void UpdateDataSize(uint32_t aOffset, uint32_t aLen);
+ void Truncate(uint32_t aOffset);
+
+ bool CanAllocate(uint32_t aSize) const;
+ void BuffersAllocationChanged(uint32_t aFreed, uint32_t aAllocated);
+
+ mozilla::Atomic<uint32_t, ReleaseAcquire>& ChunksMemoryUsage() const;
+
+ enum EState { INITIAL = 0, READING = 1, WRITING = 2, READY = 3 };
+
+ uint32_t mIndex;
+ EState mState;
+ nsresult mStatus;
+
+ Atomic<bool> mActiveChunk; // Is true iff the chunk is in CacheFile::mChunks.
+ // Adding/removing chunk to/from mChunks as well
+ // as changing this member happens under the
+ // CacheFile's lock.
+ bool mIsDirty : 1;
+ bool mDiscardedChunk : 1;
+
+ uint32_t mBuffersSize;
+ bool const mLimitAllocation : 1; // Whether this chunk respects limit for
+ // disk chunks memory usage.
+ bool const mIsPriority : 1;
+
+ // Buffer containing the chunk data. Multiple read handles can access the same
+  // buffer. When a write handle is created while some read handle exists, a
+  // new copy of the buffer is created. This prevents invalidating the buffer
+  // when CacheFileInputStream::ReadSegments calls the handler outside the lock.
+ RefPtr<CacheFileChunkBuffer> mBuf;
+
+ // We need to keep pointers of the old buffers for memory reporting.
+ nsTArray<RefPtr<CacheFileChunkBuffer>> mOldBufs;
+
+ // Read handle that is used during writing the chunk to the disk.
+ UniquePtr<CacheFileChunkReadHandle> mWritingStateHandle;
+
+  // Buffer that is used to read the chunk from the disk. It is allowed to
+  // write new data to the chunk while we wait for the data from the disk. In
+  // this case this buffer is merged with mBuf in OnDataRead().
+ RefPtr<CacheFileChunkBuffer> mReadingStateBuf;
+ CacheHash::Hash16_t mExpectedHash;
+
+  RefPtr<CacheFile> mFile;  // is null when the chunk is cached, in order to
+                            // prevent reference cycles
+ nsCOMPtr<CacheFileChunkListener> mListener;
+ nsTArray<ChunkListenerItem*> mUpdateListeners;
+ CacheFileUtils::ValidityMap mValidityMap;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFileContextEvictor.cpp b/netwerk/cache2/CacheFileContextEvictor.cpp
new file mode 100644
index 0000000000..1dc2af5848
--- /dev/null
+++ b/netwerk/cache2/CacheFileContextEvictor.cpp
@@ -0,0 +1,741 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileContextEvictor.h"
+#include "CacheFileIOManager.h"
+#include "CacheIndex.h"
+#include "CacheIndexIterator.h"
+#include "CacheFileUtils.h"
+#include "nsIFile.h"
+#include "LoadContextInfo.h"
+#include "nsThreadUtils.h"
+#include "nsString.h"
+#include "nsIDirectoryEnumerator.h"
+#include "mozilla/Base64.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "nsContentUtils.h"
+#include "nsNetUtil.h"
+
+namespace mozilla {
+namespace net {
+
+#define CONTEXT_EVICTION_PREFIX "ce_"
+const uint32_t kContextEvictionPrefixLength =
+ sizeof(CONTEXT_EVICTION_PREFIX) - 1;
+
+bool CacheFileContextEvictor::sDiskAlreadySearched = false;
+
+CacheFileContextEvictor::CacheFileContextEvictor()
+ : mEvicting(false), mIndexIsUpToDate(false) {
+ LOG(("CacheFileContextEvictor::CacheFileContextEvictor() [this=%p]", this));
+}
+
+CacheFileContextEvictor::~CacheFileContextEvictor() {
+ LOG(("CacheFileContextEvictor::~CacheFileContextEvictor() [this=%p]", this));
+}
+
+nsresult CacheFileContextEvictor::Init(nsIFile* aCacheDirectory) {
+ LOG(("CacheFileContextEvictor::Init()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ CacheIndex::IsUpToDate(&mIndexIsUpToDate);
+
+ mCacheDirectory = aCacheDirectory;
+
+ rv = aCacheDirectory->Clone(getter_AddRefs(mEntriesDir));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = mEntriesDir->AppendNative(nsLiteralCString(ENTRIES_DIR));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ if (!sDiskAlreadySearched) {
+ LoadEvictInfoFromDisk();
+ if ((mEntries.Length() != 0) && mIndexIsUpToDate) {
+ CreateIterators();
+ StartEvicting();
+ }
+ }
+
+ return NS_OK;
+}
+
+void CacheFileContextEvictor::Shutdown() {
+ LOG(("CacheFileContextEvictor::Shutdown()"));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ CloseIterators();
+}
+
+uint32_t CacheFileContextEvictor::ContextsCount() {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ return mEntries.Length();
+}
+
+nsresult CacheFileContextEvictor::AddContext(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin) {
+ LOG(
+ ("CacheFileContextEvictor::AddContext() [this=%p, loadContextInfo=%p, "
+ "pinned=%d]",
+ this, aLoadContextInfo, aPinned));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ CacheFileContextEvictorEntry* entry = nullptr;
+ if (aLoadContextInfo) {
+ for (uint32_t i = 0; i < mEntries.Length(); ++i) {
+ if (mEntries[i]->mInfo && mEntries[i]->mInfo->Equals(aLoadContextInfo) &&
+ mEntries[i]->mPinned == aPinned &&
+ mEntries[i]->mOrigin.Equals(aOrigin)) {
+ entry = mEntries[i].get();
+ break;
+ }
+ }
+ } else {
+ // Not providing load context info means we want to delete everything,
+ // so let's not bother with any currently running context cleanups
+ // for the same pinning state.
+ for (uint32_t i = mEntries.Length(); i > 0;) {
+ --i;
+ if (mEntries[i]->mInfo && mEntries[i]->mPinned == aPinned) {
+ RemoveEvictInfoFromDisk(mEntries[i]->mInfo, mEntries[i]->mPinned,
+ mEntries[i]->mOrigin);
+ mEntries.RemoveElementAt(i);
+ }
+ }
+ }
+
+ if (!entry) {
+ entry = new CacheFileContextEvictorEntry();
+ entry->mInfo = aLoadContextInfo;
+ entry->mPinned = aPinned;
+ entry->mOrigin = aOrigin;
+ mEntries.AppendElement(WrapUnique(entry));
+ }
+
+ entry->mTimeStamp = PR_Now() / PR_USEC_PER_MSEC;
+
+ PersistEvictionInfoToDisk(aLoadContextInfo, aPinned, aOrigin);
+
+ if (mIndexIsUpToDate) {
+    // An already existing context could be added again; in this case the
+    // iterator is recreated. Close the old iterator explicitly.
+ if (entry->mIterator) {
+ entry->mIterator->Close();
+ entry->mIterator = nullptr;
+ }
+
+ rv = CacheIndex::GetIterator(aLoadContextInfo, false,
+ getter_AddRefs(entry->mIterator));
+ if (NS_FAILED(rv)) {
+ // This could probably happen during shutdown. Remove the entry from
+ // the array, but leave the info on the disk. No entry can be opened
+ // during shutdown and we'll load the eviction info on next start.
+ LOG(
+ ("CacheFileContextEvictor::AddContext() - Cannot get an iterator. "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ mEntries.RemoveElement(entry);
+ return rv;
+ }
+
+ StartEvicting();
+ }
+
+ return NS_OK;
+}
+
+void CacheFileContextEvictor::CacheIndexStateChanged() {
+ LOG(("CacheFileContextEvictor::CacheIndexStateChanged() [this=%p]", this));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ bool isUpToDate = false;
+ CacheIndex::IsUpToDate(&isUpToDate);
+ if (mEntries.Length() == 0) {
+ // Just save the state and exit, since there is nothing to do
+ mIndexIsUpToDate = isUpToDate;
+ return;
+ }
+
+ if (!isUpToDate && !mIndexIsUpToDate) {
+ // Index is outdated and status has not changed, nothing to do.
+ return;
+ }
+
+ if (isUpToDate && mIndexIsUpToDate) {
+ // Status has not changed, but make sure the eviction is running.
+ if (mEvicting) {
+ return;
+ }
+
+ // We're not evicting, but we should be evicting?!
+ LOG(
+ ("CacheFileContextEvictor::CacheIndexStateChanged() - Index is up to "
+ "date, we have some context to evict but eviction is not running! "
+ "Starting now."));
+ }
+
+ mIndexIsUpToDate = isUpToDate;
+
+ if (mIndexIsUpToDate) {
+ CreateIterators();
+ StartEvicting();
+ } else {
+ CloseIterators();
+ }
+}
+
+void CacheFileContextEvictor::WasEvicted(const nsACString& aKey, nsIFile* aFile,
+ bool* aEvictedAsPinned,
+ bool* aEvictedAsNonPinned) {
+ LOG(("CacheFileContextEvictor::WasEvicted() [key=%s]",
+ PromiseFlatCString(aKey).get()));
+
+ *aEvictedAsPinned = false;
+ *aEvictedAsNonPinned = false;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(aKey);
+ MOZ_ASSERT(info);
+ if (!info) {
+ LOG(("CacheFileContextEvictor::WasEvicted() - Cannot parse key!"));
+ return;
+ }
+
+ for (uint32_t i = 0; i < mEntries.Length(); ++i) {
+ const auto& entry = mEntries[i];
+
+ if (entry->mInfo && !info->Equals(entry->mInfo)) {
+ continue;
+ }
+
+ PRTime lastModifiedTime;
+ if (NS_FAILED(aFile->GetLastModifiedTime(&lastModifiedTime))) {
+ LOG(
+ ("CacheFileContextEvictor::WasEvicted() - Cannot get last modified "
+ "time, returning."));
+ return;
+ }
+
+ if (lastModifiedTime > entry->mTimeStamp) {
+ // File has been modified since context eviction.
+ continue;
+ }
+
+ LOG(
+ ("CacheFileContextEvictor::WasEvicted() - evicted [pinning=%d, "
+ "mTimeStamp=%" PRId64 ", lastModifiedTime=%" PRId64 "]",
+ entry->mPinned, entry->mTimeStamp, lastModifiedTime));
+
+ if (entry->mPinned) {
+ *aEvictedAsPinned = true;
+ } else {
+ *aEvictedAsNonPinned = true;
+ }
+ }
+}
+
+nsresult CacheFileContextEvictor::PersistEvictionInfoToDisk(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin) {
+ LOG(
+ ("CacheFileContextEvictor::PersistEvictionInfoToDisk() [this=%p, "
+ "loadContextInfo=%p]",
+ this, aLoadContextInfo));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetContextFile(aLoadContextInfo, aPinned, aOrigin, getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsCString path = file->HumanReadablePath();
+
+ PRFileDesc* fd;
+ rv =
+ file->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 0600, &fd);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileContextEvictor::PersistEvictionInfoToDisk() - Creating file "
+ "failed! [path=%s, rv=0x%08" PRIx32 "]",
+ path.get(), static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ PR_Close(fd);
+
+ LOG(
+ ("CacheFileContextEvictor::PersistEvictionInfoToDisk() - Successfully "
+ "created file. [path=%s]",
+ path.get()));
+
+ return NS_OK;
+}
+
+nsresult CacheFileContextEvictor::RemoveEvictInfoFromDisk(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin) {
+ LOG(
+ ("CacheFileContextEvictor::RemoveEvictInfoFromDisk() [this=%p, "
+ "loadContextInfo=%p]",
+ this, aLoadContextInfo));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetContextFile(aLoadContextInfo, aPinned, aOrigin, getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsCString path = file->HumanReadablePath();
+
+ rv = file->Remove(false);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileContextEvictor::RemoveEvictionInfoFromDisk() - Removing file"
+ " failed! [path=%s, rv=0x%08" PRIx32 "]",
+ path.get(), static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ LOG(
+ ("CacheFileContextEvictor::RemoveEvictionInfoFromDisk() - Successfully "
+ "removed file. [path=%s]",
+ path.get()));
+
+ return NS_OK;
+}
+
+nsresult CacheFileContextEvictor::LoadEvictInfoFromDisk() {
+ LOG(("CacheFileContextEvictor::LoadEvictInfoFromDisk() [this=%p]", this));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ sDiskAlreadySearched = true;
+
+ nsCOMPtr<nsIDirectoryEnumerator> dirEnum;
+ rv = mCacheDirectory->GetDirectoryEntries(getter_AddRefs(dirEnum));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ while (true) {
+ nsCOMPtr<nsIFile> file;
+ rv = dirEnum->GetNextFile(getter_AddRefs(file));
+ if (!file) {
+ break;
+ }
+
+ bool isDir = false;
+ file->IsDirectory(&isDir);
+ if (isDir) {
+ continue;
+ }
+
+ nsAutoCString leaf;
+ rv = file->GetNativeLeafName(leaf);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::LoadEvictInfoFromDisk() - "
+ "GetNativeLeafName() failed! Skipping file."));
+ continue;
+ }
+
+ if (leaf.Length() < kContextEvictionPrefixLength) {
+ continue;
+ }
+
+ if (!StringBeginsWith(leaf, nsLiteralCString(CONTEXT_EVICTION_PREFIX))) {
+ continue;
+ }
+
+ nsAutoCString encoded;
+ encoded = Substring(leaf, kContextEvictionPrefixLength);
+ encoded.ReplaceChar('-', '/');
+
+ nsAutoCString decoded;
+ rv = Base64Decode(encoded, decoded);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::LoadEvictInfoFromDisk() - Base64 decoding "
+ "failed. Removing the file. [file=%s]",
+ leaf.get()));
+ file->Remove(false);
+ continue;
+ }
+
+ bool pinned = decoded[0] == '\t';
+ if (pinned) {
+ decoded = Substring(decoded, 1);
+ }
+
+ // Let's see if we have an origin.
+ nsAutoCString origin;
+ if (decoded.Contains('\t')) {
+ auto split = decoded.Split('\t');
+ MOZ_ASSERT(decoded.CountChar('\t') == 1);
+
+ auto splitIt = split.begin();
+ origin = *splitIt;
+ ++splitIt;
+ decoded = *splitIt;
+ }
+
+ nsCOMPtr<nsILoadContextInfo> info;
+ if (!"*"_ns.Equals(decoded)) {
+ // "*" is indication of 'delete all', info left null will pass
+ // to CacheFileContextEvictor::AddContext and clear all the cache data.
+ info = CacheFileUtils::ParseKey(decoded);
+ if (!info) {
+ LOG(
+ ("CacheFileContextEvictor::LoadEvictInfoFromDisk() - Cannot parse "
+ "context key, removing file. [contextKey=%s, file=%s]",
+ decoded.get(), leaf.get()));
+ file->Remove(false);
+ continue;
+ }
+ }
+
+ PRTime lastModifiedTime;
+ rv = file->GetLastModifiedTime(&lastModifiedTime);
+ if (NS_FAILED(rv)) {
+ continue;
+ }
+
+ CacheFileContextEvictorEntry* entry = new CacheFileContextEvictorEntry();
+ entry->mInfo = info;
+ entry->mPinned = pinned;
+ CopyUTF8toUTF16(origin, entry->mOrigin);
+ entry->mTimeStamp = lastModifiedTime;
+ mEntries.AppendElement(entry);
+ }
+
+ return NS_OK;
+}
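
After the Base64 decoding above, the payload follows a small convention: a
leading TAB marks a pinned eviction and a bare "*" means the whole cache rather
than a specific context. A minimal sketch of interpreting such a decoded
payload, leaving out the origin handling and the real key parsing done by
CacheFileUtils::ParseKey:

#include <string>

struct EvictInfoSketch {
  bool pinned = false;
  bool allContexts = false;
  std::string contextKey;  // empty when allContexts is true
};

// Interpret an already base64-decoded "ce_" payload per the convention above.
EvictInfoSketch ParseEvictPayload(std::string payload) {
  EvictInfoSketch info;
  if (!payload.empty() && payload.front() == '\t') {
    info.pinned = true;  // pinned evictions are tagged with a leading TAB
    payload.erase(0, 1);
  }
  if (payload == "*") {
    info.allContexts = true;  // "*" stands for "evict everything"
  } else {
    info.contextKey = payload;
  }
  return info;
}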
+
+nsresult CacheFileContextEvictor::GetContextFile(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin, nsIFile** _retval) {
+ nsresult rv;
+
+ nsAutoCString keyPrefix;
+ if (aPinned) {
+ // Mark pinned context files with a tab char at the start.
+ // Tab is chosen because it can never be used as a context key tag.
+ keyPrefix.Append('\t');
+ }
+ if (aLoadContextInfo) {
+ CacheFileUtils::AppendKeyPrefix(aLoadContextInfo, keyPrefix);
+ } else {
+ keyPrefix.Append('*');
+ }
+ if (!aOrigin.IsEmpty()) {
+ keyPrefix.Append('\t');
+ keyPrefix.Append(NS_ConvertUTF16toUTF8(aOrigin));
+ }
+
+ nsAutoCString leafName;
+ leafName.AssignLiteral(CONTEXT_EVICTION_PREFIX);
+
+ rv = Base64EncodeAppend(keyPrefix, leafName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ // Replace '/' with '-' since '/' cannot be part of the filename.
+ leafName.ReplaceChar('/', '-');
+
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = file->AppendNative(leafName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ file.swap(*_retval);
+ return NS_OK;
+}
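
GetContextFile() derives a filesystem-safe leaf name from the context key: the
key is Base64-encoded, prefixed with "ce_", and '/' is replaced with '-' so the
encoded key cannot introduce a path separator (LoadEvictInfoFromDisk() reverses
the replacement before decoding). A self-contained sketch of that naming
scheme, with a small local Base64 encoder used only for the illustration:

#include <cstdint>
#include <string>

// Minimal Base64 encoder, sufficient for this illustration only.
static std::string Base64(const std::string& in) {
  static const char tbl[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  std::string out;
  size_t i = 0;
  for (; i + 2 < in.size(); i += 3) {
    uint32_t v = (uint8_t(in[i]) << 16) | (uint8_t(in[i + 1]) << 8) |
                 uint8_t(in[i + 2]);
    out += tbl[(v >> 18) & 63];
    out += tbl[(v >> 12) & 63];
    out += tbl[(v >> 6) & 63];
    out += tbl[v & 63];
  }
  if (i + 1 == in.size()) {  // one trailing byte
    uint32_t v = uint8_t(in[i]) << 16;
    out += tbl[(v >> 18) & 63];
    out += tbl[(v >> 12) & 63];
    out += "==";
  } else if (i + 2 == in.size()) {  // two trailing bytes
    uint32_t v = (uint8_t(in[i]) << 16) | (uint8_t(in[i + 1]) << 8);
    out += tbl[(v >> 18) & 63];
    out += tbl[(v >> 12) & 63];
    out += tbl[(v >> 6) & 63];
    out += '=';
  }
  return out;
}

// Build the leaf name the same way as above: prefix + Base64(key), with '/'
// mapped to '-' so the name cannot contain a path separator.
std::string ContextFileName(const std::string& keyPrefix) {
  std::string leaf = "ce_" + Base64(keyPrefix);
  for (char& c : leaf) {
    if (c == '/') c = '-';
  }
  return leaf;
}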
+
+void CacheFileContextEvictor::CreateIterators() {
+ LOG(("CacheFileContextEvictor::CreateIterators() [this=%p]", this));
+
+ CloseIterators();
+
+ nsresult rv;
+
+ for (uint32_t i = 0; i < mEntries.Length();) {
+ rv = CacheIndex::GetIterator(mEntries[i]->mInfo, false,
+ getter_AddRefs(mEntries[i]->mIterator));
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::CreateIterators() - Cannot get an iterator"
+ ". [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ mEntries.RemoveElementAt(i);
+ continue;
+ }
+
+ ++i;
+ }
+}
+
+void CacheFileContextEvictor::CloseIterators() {
+ LOG(("CacheFileContextEvictor::CloseIterators() [this=%p]", this));
+
+ for (uint32_t i = 0; i < mEntries.Length(); ++i) {
+ if (mEntries[i]->mIterator) {
+ mEntries[i]->mIterator->Close();
+ mEntries[i]->mIterator = nullptr;
+ }
+ }
+}
+
+void CacheFileContextEvictor::StartEvicting() {
+ LOG(("CacheFileContextEvictor::StartEvicting() [this=%p]", this));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ if (mEvicting) {
+ LOG(("CacheFileContextEvictor::StartEvicting() - already evicting."));
+ return;
+ }
+
+ if (mEntries.Length() == 0) {
+ LOG(("CacheFileContextEvictor::StartEvicting() - no context to evict."));
+ return;
+ }
+
+ nsCOMPtr<nsIRunnable> ev =
+ NewRunnableMethod("net::CacheFileContextEvictor::EvictEntries", this,
+ &CacheFileContextEvictor::EvictEntries);
+
+ RefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
+
+ nsresult rv = ioThread->Dispatch(ev, CacheIOThread::EVICT);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::StartEvicting() - Cannot dispatch event to "
+ "IO thread. [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+
+ mEvicting = true;
+}
+
+void CacheFileContextEvictor::EvictEntries() {
+ LOG(("CacheFileContextEvictor::EvictEntries()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ mEvicting = false;
+
+ if (!mIndexIsUpToDate) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Stopping evicting due to "
+ "outdated index."));
+ return;
+ }
+
+ while (true) {
+ if (CacheObserver::ShuttingDown()) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Stopping evicting due to "
+ "shutdown."));
+      // We don't want to start eviction again during the shutdown process;
+      // setting this flag to true ensures it.
+      mEvicting = true;
+ return;
+ }
+
+ if (CacheIOThread::YieldAndRerun()) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Breaking loop for higher "
+ "level events."));
+ mEvicting = true;
+ return;
+ }
+
+ if (mEntries.Length() == 0) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Stopping evicting, there "
+ "is no context to evict."));
+
+      // Allow the index to notify AsyncGetDiskConsumption callbacks. The
+      // reported size is accurate again.
+ CacheIndex::OnAsyncEviction(false);
+ return;
+ }
+
+ SHA1Sum::Hash hash;
+ rv = mEntries[0]->mIterator->GetNextHash(&hash);
+ if (rv == NS_ERROR_NOT_AVAILABLE) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - No more entries left in "
+ "iterator. [iterator=%p, info=%p]",
+ mEntries[0]->mIterator.get(), mEntries[0]->mInfo.get()));
+ RemoveEvictInfoFromDisk(mEntries[0]->mInfo, mEntries[0]->mPinned,
+ mEntries[0]->mOrigin);
+ mEntries.RemoveElementAt(0);
+ continue;
+ } else if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Iterator failed to "
+ "provide next hash (shutdown?), keeping eviction info on disk."
+ " [iterator=%p, info=%p]",
+ mEntries[0]->mIterator.get(), mEntries[0]->mInfo.get()));
+ mEntries.RemoveElementAt(0);
+ continue;
+ }
+
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Processing hash. "
+ "[hash=%08x%08x%08x%08x%08x, iterator=%p, info=%p]",
+ LOGSHA1(&hash), mEntries[0]->mIterator.get(),
+ mEntries[0]->mInfo.get()));
+
+ RefPtr<CacheFileHandle> handle;
+ CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
+ getter_AddRefs(handle));
+ if (handle) {
+ // We doom any active handle in CacheFileIOManager::EvictByContext(), so
+ // this must be a new one. Skip it.
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping entry since we "
+ "found an active handle. [handle=%p]",
+ handle.get()));
+ continue;
+ }
+
+ CacheIndex::EntryStatus status;
+ bool pinned = false;
+ auto callback = [&pinned](const CacheIndexEntry* aEntry) {
+ pinned = aEntry->IsPinned();
+ };
+ rv = CacheIndex::HasEntry(hash, &status, callback);
+ // This must never fail, since eviction (this code) happens only when the
+    // index is up-to-date and thus the information is known.
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+ if (pinned != mEntries[0]->mPinned) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping entry since "
+ "pinning "
+ "doesn't match [evicting pinned=%d, entry pinned=%d]",
+ mEntries[0]->mPinned, pinned));
+ continue;
+ }
+
+ if (!mEntries[0]->mOrigin.IsEmpty()) {
+ nsCOMPtr<nsIFile> file;
+ CacheFileIOManager::gInstance->GetFile(&hash, getter_AddRefs(file));
+
+ // Read metadata from the file synchronously
+ RefPtr<CacheFileMetadata> metadata = new CacheFileMetadata();
+ rv = metadata->SyncReadMetadata(file);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ continue;
+ }
+
+ // Now get the context + enhance id + URL from the key.
+ nsAutoCString uriSpec;
+ RefPtr<nsILoadContextInfo> info =
+ CacheFileUtils::ParseKey(metadata->GetKey(), nullptr, &uriSpec);
+ MOZ_ASSERT(info);
+ if (!info) {
+ continue;
+ }
+
+ nsCOMPtr<nsIURI> uri;
+ rv = NS_NewURI(getter_AddRefs(uri), uriSpec);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping entry since "
+ "NS_NewURI failed to parse the uriSpec"));
+ continue;
+ }
+
+ nsAutoString urlOrigin;
+ rv = nsContentUtils::GetUTFOrigin(uri, urlOrigin);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping entry since "
+ "We failed to extract an origin"));
+ continue;
+ }
+
+ if (!urlOrigin.Equals(mEntries[0]->mOrigin)) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping entry since "
+ "origin "
+ "doesn't match"));
+ continue;
+ }
+ }
+
+ nsAutoCString leafName;
+ CacheFileIOManager::HashToStr(&hash, leafName);
+
+ PRTime lastModifiedTime;
+ nsCOMPtr<nsIFile> file;
+ rv = mEntriesDir->Clone(getter_AddRefs(file));
+ if (NS_SUCCEEDED(rv)) {
+ rv = file->AppendNative(leafName);
+ }
+ if (NS_SUCCEEDED(rv)) {
+ rv = file->GetLastModifiedTime(&lastModifiedTime);
+ }
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Cannot get last modified "
+ "time, skipping entry."));
+ continue;
+ }
+
+ if (lastModifiedTime > mEntries[0]->mTimeStamp) {
+ LOG(
+ ("CacheFileContextEvictor::EvictEntries() - Skipping newer entry. "
+ "[mTimeStamp=%" PRId64 ", lastModifiedTime=%" PRId64 "]",
+ mEntries[0]->mTimeStamp, lastModifiedTime));
+ continue;
+ }
+
+ LOG(("CacheFileContextEvictor::EvictEntries - Removing entry."));
+ file->Remove(false);
+ CacheIndex::RemoveEntry(&hash);
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+}
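
EvictEntries() runs as a cooperative job on the cache IO thread: it handles one
entry per loop iteration and, whenever CacheIOThread::YieldAndRerun() reports
more urgent work, it records that eviction is still pending and returns so it
can be re-dispatched later. The shape of that pattern, with hypothetical
ShouldYield/Reschedule callbacks standing in for the thread machinery:

#include <deque>
#include <functional>

// Sketch of the "yield and rerun" loop: do one unit of work per iteration and
// step aside when something more urgent is waiting, instead of blocking the
// thread until the whole backlog is drained.
struct CooperativeWorker {
  std::deque<int> items;              // work still to process
  std::function<bool()> ShouldYield;  // "is higher-priority work pending?"
  std::function<void()> Reschedule;   // "run me again later"

  void Run() {
    while (!items.empty()) {
      if (ShouldYield()) {
        Reschedule();  // give way; the next Run() continues where we stopped
        return;
      }
      ProcessOne(items.front());
      items.pop_front();
    }
  }

  void ProcessOne(int /*item*/) { /* evict one entry here */ }
};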
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheFileContextEvictor.h b/netwerk/cache2/CacheFileContextEvictor.h
new file mode 100644
index 0000000000..3637cc37e6
--- /dev/null
+++ b/netwerk/cache2/CacheFileContextEvictor.h
@@ -0,0 +1,99 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileContextEvictor__h__
+#define CacheFileContextEvictor__h__
+
+#include "mozilla/UniquePtr.h"
+#include "nsTArray.h"
+#include "nsCOMPtr.h"
+
+class nsIFile;
+class nsILoadContextInfo;
+
+namespace mozilla {
+namespace net {
+
+class CacheIndexIterator;
+
+struct CacheFileContextEvictorEntry {
+ nsCOMPtr<nsILoadContextInfo> mInfo;
+ bool mPinned;
+ nsString mOrigin; // it can be empty
+ PRTime mTimeStamp; // in milliseconds
+ RefPtr<CacheIndexIterator> mIterator;
+};
+
+class CacheFileContextEvictor {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CacheFileContextEvictor)
+
+ CacheFileContextEvictor();
+
+ private:
+ virtual ~CacheFileContextEvictor();
+
+ public:
+ nsresult Init(nsIFile* aCacheDirectory);
+ void Shutdown();
+
+ // Returns number of contexts that are being evicted.
+ uint32_t ContextsCount();
+ // Start evicting given context and an origin, if not empty.
+ nsresult AddContext(nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin);
+ // CacheFileIOManager calls this method when CacheIndex's state changes. We
+ // check whether the index is up to date and start or stop evicting according
+ // to index's state.
+ void CacheIndexStateChanged();
+ // CacheFileIOManager calls this method to check whether an entry file should
+  // be considered as evicted. It sets aEvictedAsPinned or aEvictedAsNonPinned
+  // (based on the pinning state of the matching context) to true when there is
+  // a context info matching the given key and the last modified time of the
+  // entry file is not later than the time stamp of when the context was added
+  // to the evictor.
+ void WasEvicted(const nsACString& aKey, nsIFile* aFile,
+ bool* aEvictedAsPinned, bool* aEvictedAsNonPinned);
+
+ private:
+  // Writes information about the eviction of the given context to the disk.
+  // This is done for every context added to the evictor so that the eviction
+  // can be recovered after a shutdown or crash. When the context file is found
+  // after startup, we restore mTimeStamp from the last modified time of the
+  // file.
+ nsresult PersistEvictionInfoToDisk(nsILoadContextInfo* aLoadContextInfo,
+ bool aPinned, const nsAString& aOrigin);
+ // Once we are done with eviction for the given context, the eviction info is
+ // removed from the disk.
+ nsresult RemoveEvictInfoFromDisk(nsILoadContextInfo* aLoadContextInfo,
+ bool aPinned, const nsAString& aOrigin);
+ // Tries to load all contexts from the disk. This method is called just once
+ // after startup.
+ nsresult LoadEvictInfoFromDisk();
+ nsresult GetContextFile(nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin, nsIFile** _retval);
+
+ void CreateIterators();
+ void CloseIterators();
+ void StartEvicting();
+ void EvictEntries();
+
+ // Whether eviction is in progress
+ bool mEvicting;
+  // Whether the index is up to date. When it is outdated, we postpone eviction
+  // until the index finishes its update process. NOTE: We also stop an eviction
+  // in progress when the index is found outdated; the eviction is restarted
+  // once the update process finishes.
+ bool mIndexIsUpToDate;
+  // Whether we already tried to restore unfinished jobs from a previous run
+  // after startup.
+ static bool sDiskAlreadySearched;
+ // Array of contexts being evicted.
+ nsTArray<UniquePtr<CacheFileContextEvictorEntry>> mEntries;
+ nsCOMPtr<nsIFile> mCacheDirectory;
+ nsCOMPtr<nsIFile> mEntriesDir;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFileIOManager.cpp b/netwerk/cache2/CacheFileIOManager.cpp
new file mode 100644
index 0000000000..5ffd12a9c0
--- /dev/null
+++ b/netwerk/cache2/CacheFileIOManager.cpp
@@ -0,0 +1,4225 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileIOManager.h"
+
+#include "../cache/nsCacheUtils.h"
+#include "CacheHashUtils.h"
+#include "CacheStorageService.h"
+#include "CacheIndex.h"
+#include "CacheFileUtils.h"
+#include "nsThreadUtils.h"
+#include "CacheFile.h"
+#include "CacheObserver.h"
+#include "nsIFile.h"
+#include "CacheFileContextEvictor.h"
+#include "nsITimer.h"
+#include "nsIDirectoryEnumerator.h"
+#include "nsIObserverService.h"
+#include "nsISizeOf.h"
+#include "mozilla/net/MozURL.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Services.h"
+#include "nsDirectoryServiceUtils.h"
+#include "nsAppDirectoryServiceDefs.h"
+#include "private/pprio.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Preferences.h"
+#include "nsNetUtil.h"
+
+// include files for ftruncate (or equivalent)
+#if defined(XP_UNIX)
+# include <unistd.h>
+#elif defined(XP_WIN)
+# include <windows.h>
+# undef CreateFile
+# undef CREATE_NEW
+#else
+// XXX add necessary include file for ftruncate (or equivalent)
+#endif
+
+namespace mozilla {
+namespace net {
+
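+// Tunable constants for the IO manager. kOpenHandlesLimit caps the number of
+// NSPR file descriptors kept open at a time; kMetadataWriteDelay is the delay
+// of the metadata write-back timer, in milliseconds.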
+#define kOpenHandlesLimit 128
+#define kMetadataWriteDelay 5000
+#define kRemoveTrashStartDelay 60000 // in milliseconds
+#define kSmartSizeUpdateInterval 60000 // in milliseconds
+
+#ifdef ANDROID
+const uint32_t kMaxCacheSizeKB = 512 * 1024; // 512 MB
+#else
+const uint32_t kMaxCacheSizeKB = 1024 * 1024; // 1 GB
+#endif
+const uint32_t kMaxClearOnShutdownCacheSizeKB = 150 * 1024; // 150 MB
+
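+// Re-dispatches the final Release() to the cache IO thread when called on a
+// different thread. Returns false when we are already on the IO thread (or the
+// thread is gone), in which case the release proceeds on the current thread.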
+bool CacheFileHandle::DispatchRelease() {
+ if (CacheFileIOManager::IsOnIOThreadOrCeased()) {
+ return false;
+ }
+
+ nsCOMPtr<nsIEventTarget> ioTarget = CacheFileIOManager::IOTarget();
+ if (!ioTarget) {
+ return false;
+ }
+
+ nsresult rv = ioTarget->Dispatch(
+ NewNonOwningRunnableMethod("net::CacheFileHandle::Release", this,
+ &CacheFileHandle::Release),
+ nsIEventTarget::DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ return false;
+ }
+
+ return true;
+}
+
+NS_IMPL_ADDREF(CacheFileHandle)
+NS_IMETHODIMP_(MozExternalRefCountType)
+CacheFileHandle::Release() {
+ nsrefcnt count = mRefCnt - 1;
+ if (DispatchRelease()) {
+ // Redispatched to the IO thread.
+ return count;
+ }
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ LOG(("CacheFileHandle::Release() [this=%p, refcnt=%" PRIuPTR "]", this,
+ mRefCnt.get()));
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "CacheFileHandle");
+
+ if (0 == count) {
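+    // Stabilize the refcount before deleting so that any AddRef/Release pairs
+    // executed from the destructor cannot trigger a second destruction.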
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(CacheFileHandle)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+CacheFileHandle::CacheFileHandle(const SHA1Sum::Hash* aHash, bool aPriority,
+ PinningStatus aPinning)
+ : mHash(aHash),
+ mIsDoomed(false),
+ mClosed(false),
+ mPriority(aPriority),
+ mSpecialFile(false),
+ mInvalid(false),
+ mFileExists(false),
+ mDoomWhenFoundPinned(false),
+ mDoomWhenFoundNonPinned(false),
+ mKilled(false),
+ mPinning(aPinning),
+ mFileSize(-1),
+ mFD(nullptr) {
+  // If we initialized mIsDoomed in the initialization list, that
+  // initialization would not be guaranteed to be atomic, whereas this
+  // assignment here is. TSan will see this (atomic) assignment and be
+  // satisfied that cross-thread accesses to mIsDoomed are properly
+  // synchronized.
+ mIsDoomed = false;
+ LOG((
+ "CacheFileHandle::CacheFileHandle() [this=%p, hash=%08x%08x%08x%08x%08x]",
+ this, LOGSHA1(aHash)));
+}
+
+CacheFileHandle::CacheFileHandle(const nsACString& aKey, bool aPriority,
+ PinningStatus aPinning)
+ : mHash(nullptr),
+ mIsDoomed(false),
+ mClosed(false),
+ mPriority(aPriority),
+ mSpecialFile(true),
+ mInvalid(false),
+ mFileExists(false),
+ mDoomWhenFoundPinned(false),
+ mDoomWhenFoundNonPinned(false),
+ mKilled(false),
+ mPinning(aPinning),
+ mFileSize(-1),
+ mFD(nullptr),
+ mKey(aKey) {
+ // See comment above about the initialization of mIsDoomed.
+ mIsDoomed = false;
+ LOG(("CacheFileHandle::CacheFileHandle() [this=%p, key=%s]", this,
+ PromiseFlatCString(aKey).get()));
+}
+
+CacheFileHandle::~CacheFileHandle() {
+ LOG(("CacheFileHandle::~CacheFileHandle() [this=%p]", this));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ RefPtr<CacheFileIOManager> ioMan = CacheFileIOManager::gInstance;
+ if (!IsClosed() && ioMan) {
+ ioMan->CloseHandleInternal(this);
+ }
+}
+
+void CacheFileHandle::Log() {
+ nsAutoCString leafName;
+ if (mFile) {
+ mFile->GetNativeLeafName(leafName);
+ }
+
+ if (mSpecialFile) {
+ LOG(
+ ("CacheFileHandle::Log() - special file [this=%p, "
+ "isDoomed=%d, priority=%d, closed=%d, invalid=%d, "
+ "pinning=%" PRIu32 ", fileExists=%d, fileSize=%" PRId64
+ ", leafName=%s, key=%s]",
+ this, bool(mIsDoomed), bool(mPriority), bool(mClosed), bool(mInvalid),
+ static_cast<uint32_t>(mPinning), bool(mFileExists), int64_t(mFileSize),
+ leafName.get(), mKey.get()));
+ } else {
+ LOG(
+ ("CacheFileHandle::Log() - entry file [this=%p, "
+ "hash=%08x%08x%08x%08x%08x, "
+ "isDoomed=%d, priority=%d, closed=%d, invalid=%d, "
+ "pinning=%" PRIu32 ", fileExists=%d, fileSize=%" PRId64
+ ", leafName=%s, key=%s]",
+ this, LOGSHA1(mHash), bool(mIsDoomed), bool(mPriority), bool(mClosed),
+ bool(mInvalid), static_cast<uint32_t>(mPinning), bool(mFileExists),
+ int64_t(mFileSize), leafName.get(), mKey.get()));
+ }
+}
+
+uint32_t CacheFileHandle::FileSizeInK() const {
+ MOZ_ASSERT(mFileSize != -1);
+ uint64_t size64 = mFileSize;
+
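+  // Round up to whole kilobytes: add 1023 (0x3FF) and shift right by 10, so
+  // e.g. mFileSize = 2500 gives (2500 + 1023) >> 10 = 3 kB.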
+ size64 += 0x3FF;
+ size64 >>= 10;
+
+ uint32_t size;
+ if (size64 >> 32) {
+ NS_WARNING(
+ "CacheFileHandle::FileSizeInK() - FileSize is too large, "
+ "truncating to PR_UINT32_MAX");
+ size = PR_UINT32_MAX;
+ } else {
+ size = static_cast<uint32_t>(size64);
+ }
+
+ return size;
+}
+
+bool CacheFileHandle::SetPinned(bool aPinned) {
+ LOG(("CacheFileHandle::SetPinned [this=%p, pinned=%d]", this, aPinned));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ mPinning = aPinned ? PinningStatus::PINNED : PinningStatus::NON_PINNED;
+
+ if ((MOZ_UNLIKELY(mDoomWhenFoundPinned) && aPinned) ||
+ (MOZ_UNLIKELY(mDoomWhenFoundNonPinned) && !aPinned)) {
+ LOG((" dooming, when: pinned=%d, non-pinned=%d, found: pinned=%d",
+ bool(mDoomWhenFoundPinned), bool(mDoomWhenFoundNonPinned), aPinned));
+
+ mDoomWhenFoundPinned = false;
+ mDoomWhenFoundNonPinned = false;
+
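+    // A deferred doom request (set by DoomFileInternal while the pinning
+    // status was still unknown) matches the now-known status; returning false
+    // signals that the caller should doom this handle now.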
+ return false;
+ }
+
+ return true;
+}
+
+// Memory reporting
+
+size_t CacheFileHandle::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = 0;
+ nsCOMPtr<nsISizeOf> sizeOf;
+
+ sizeOf = do_QueryInterface(mFile);
+ if (sizeOf) {
+ n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ n += mallocSizeOf(mFD);
+ n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ return n;
+}
+
+size_t CacheFileHandle::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+/******************************************************************************
+ * CacheFileHandles::HandleHashKey
+ *****************************************************************************/
+
+void CacheFileHandles::HandleHashKey::AddHandle(CacheFileHandle* aHandle) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ mHandles.InsertElementAt(0, aHandle);
+}
+
+void CacheFileHandles::HandleHashKey::RemoveHandle(CacheFileHandle* aHandle) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ DebugOnly<bool> found;
+ found = mHandles.RemoveElement(aHandle);
+ MOZ_ASSERT(found);
+}
+
+already_AddRefed<CacheFileHandle>
+CacheFileHandles::HandleHashKey::GetNewestHandle() {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ RefPtr<CacheFileHandle> handle;
+ if (mHandles.Length()) {
+ handle = mHandles[0];
+ }
+
+ return handle.forget();
+}
+
+void CacheFileHandles::HandleHashKey::GetHandles(
+ nsTArray<RefPtr<CacheFileHandle> >& aResult) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ for (uint32_t i = 0; i < mHandles.Length(); ++i) {
+ CacheFileHandle* handle = mHandles[i];
+ aResult.AppendElement(handle);
+ }
+}
+
+#ifdef DEBUG
+
+void CacheFileHandles::HandleHashKey::AssertHandlesState() {
+ for (uint32_t i = 0; i < mHandles.Length(); ++i) {
+ CacheFileHandle* handle = mHandles[i];
+ MOZ_ASSERT(handle->IsDoomed());
+ }
+}
+
+#endif
+
+size_t CacheFileHandles::HandleHashKey::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ size_t n = 0;
+ n += mallocSizeOf(mHash.get());
+ for (uint32_t i = 0; i < mHandles.Length(); ++i) {
+ n += mHandles[i]->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ return n;
+}
+
+/******************************************************************************
+ * CacheFileHandles
+ *****************************************************************************/
+
+CacheFileHandles::CacheFileHandles() {
+ LOG(("CacheFileHandles::CacheFileHandles() [this=%p]", this));
+ MOZ_COUNT_CTOR(CacheFileHandles);
+}
+
+CacheFileHandles::~CacheFileHandles() {
+ LOG(("CacheFileHandles::~CacheFileHandles() [this=%p]", this));
+ MOZ_COUNT_DTOR(CacheFileHandles);
+}
+
+nsresult CacheFileHandles::GetHandle(const SHA1Sum::Hash* aHash,
+ CacheFileHandle** _retval) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ MOZ_ASSERT(aHash);
+
+#ifdef DEBUG_HANDLES
+ LOG(("CacheFileHandles::GetHandle() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHash)));
+#endif
+
+ // find hash entry for key
+ HandleHashKey* entry = mTable.GetEntry(*aHash);
+ if (!entry) {
+ LOG(
+ ("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
+ "no handle entries found",
+ LOGSHA1(aHash)));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+#ifdef DEBUG_HANDLES
+ Log(entry);
+#endif
+
+ // Check if the entry is doomed
+ RefPtr<CacheFileHandle> handle = entry->GetNewestHandle();
+ if (!handle) {
+ LOG(
+ ("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
+ "no handle found %p, entry %p",
+ LOGSHA1(aHash), handle.get(), entry));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (handle->IsDoomed()) {
+ LOG(
+ ("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
+ "found doomed handle %p, entry %p",
+ LOGSHA1(aHash), handle.get(), entry));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ LOG(
+ ("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
+ "found handle %p, entry %p",
+ LOGSHA1(aHash), handle.get(), entry));
+
+ handle.forget(_retval);
+ return NS_OK;
+}
+
+already_AddRefed<CacheFileHandle> CacheFileHandles::NewHandle(
+ const SHA1Sum::Hash* aHash, bool aPriority,
+ CacheFileHandle::PinningStatus aPinning) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ MOZ_ASSERT(aHash);
+
+#ifdef DEBUG_HANDLES
+ LOG(("CacheFileHandles::NewHandle() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHash)));
+#endif
+
+ // find hash entry for key
+ HandleHashKey* entry = mTable.PutEntry(*aHash);
+
+#ifdef DEBUG_HANDLES
+ Log(entry);
+#endif
+
+#ifdef DEBUG
+ entry->AssertHandlesState();
+#endif
+
+ RefPtr<CacheFileHandle> handle =
+ new CacheFileHandle(entry->Hash(), aPriority, aPinning);
+ entry->AddHandle(handle);
+
+ LOG(
+ ("CacheFileHandles::NewHandle() hash=%08x%08x%08x%08x%08x "
+ "created new handle %p, entry=%p",
+ LOGSHA1(aHash), handle.get(), entry));
+ return handle.forget();
+}
+
+void CacheFileHandles::RemoveHandle(CacheFileHandle* aHandle) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ MOZ_ASSERT(aHandle);
+
+ if (!aHandle) {
+ return;
+ }
+
+#ifdef DEBUG_HANDLES
+ LOG((
+ "CacheFileHandles::RemoveHandle() [handle=%p, hash=%08x%08x%08x%08x%08x]",
+ aHandle, LOGSHA1(aHandle->Hash())));
+#endif
+
+ // find hash entry for key
+ HandleHashKey* entry = mTable.GetEntry(*aHandle->Hash());
+ if (!entry) {
+ MOZ_ASSERT(CacheFileIOManager::IsShutdown(),
+ "Should find entry when removing a handle before shutdown");
+
+ LOG(
+ ("CacheFileHandles::RemoveHandle() hash=%08x%08x%08x%08x%08x "
+ "no entries found",
+ LOGSHA1(aHandle->Hash())));
+ return;
+ }
+
+#ifdef DEBUG_HANDLES
+ Log(entry);
+#endif
+
+ LOG(
+ ("CacheFileHandles::RemoveHandle() hash=%08x%08x%08x%08x%08x "
+ "removing handle %p",
+ LOGSHA1(entry->Hash()), aHandle));
+ entry->RemoveHandle(aHandle);
+
+ if (entry->IsEmpty()) {
+ LOG(
+ ("CacheFileHandles::RemoveHandle() hash=%08x%08x%08x%08x%08x "
+ "list is empty, removing entry %p",
+ LOGSHA1(entry->Hash()), entry));
+ mTable.RemoveEntry(entry);
+ }
+}
+
+void CacheFileHandles::GetAllHandles(
+ nsTArray<RefPtr<CacheFileHandle> >* _retval) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ for (auto iter = mTable.Iter(); !iter.Done(); iter.Next()) {
+ iter.Get()->GetHandles(*_retval);
+ }
+}
+
+void CacheFileHandles::GetActiveHandles(
+ nsTArray<RefPtr<CacheFileHandle> >* _retval) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ for (auto iter = mTable.Iter(); !iter.Done(); iter.Next()) {
+ RefPtr<CacheFileHandle> handle = iter.Get()->GetNewestHandle();
+ MOZ_ASSERT(handle);
+
+ if (!handle->IsDoomed()) {
+ _retval->AppendElement(handle);
+ }
+ }
+}
+
+void CacheFileHandles::ClearAll() {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ mTable.Clear();
+}
+
+uint32_t CacheFileHandles::HandleCount() { return mTable.Count(); }
+
+#ifdef DEBUG_HANDLES
+void CacheFileHandles::Log(CacheFileHandlesEntry* entry) {
+ LOG(("CacheFileHandles::Log() BEGIN [entry=%p]", entry));
+
+ nsTArray<RefPtr<CacheFileHandle> > array;
+  entry->GetHandles(array);
+
+ for (uint32_t i = 0; i < array.Length(); ++i) {
+ CacheFileHandle* handle = array[i];
+ handle->Log();
+ }
+
+ LOG(("CacheFileHandles::Log() END [entry=%p]", entry));
+}
+#endif
+
+// Memory reporting
+
+size_t CacheFileHandles::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ return mTable.SizeOfExcludingThis(mallocSizeOf);
+}
+
+// Events
+
+class ShutdownEvent : public Runnable {
+ public:
+ ShutdownEvent()
+ : Runnable("net::ShutdownEvent"),
+ mMonitor("ShutdownEvent.mMonitor"),
+ mNotified(false) {}
+
+ protected:
+ ~ShutdownEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ MonitorAutoLock mon(mMonitor);
+
+ CacheFileIOManager::gInstance->ShutdownInternal();
+
+ mNotified = true;
+ mon.Notify();
+
+ return NS_OK;
+ }
+
+ void PostAndWait() {
+ MonitorAutoLock mon(mMonitor);
+
+ nsresult rv = CacheFileIOManager::gInstance->mIOThread->Dispatch(
+ this,
+ CacheIOThread::WRITE); // When writes and closing of handles is done
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+    // If we failed to post the event, there's no reason to go into the loop
+    // because mNotified will never be set to true.
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Posting ShutdownEvent task failed");
+ return;
+ }
+
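+    // Wait in one-second slices; if the IO thread has not finished within a
+    // slice, ask it to cancel any blocking IO and keep waiting.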
+ TimeDuration waitTime = TimeDuration::FromSeconds(1);
+ while (!mNotified) {
+ mon.Wait(waitTime);
+ if (!mNotified) {
+ // If there is any IO blocking on the IO thread, this will
+ // try to cancel it. Returns no later than after two seconds.
+ MonitorAutoUnlock unmon(mMonitor); // Prevent delays
+ CacheFileIOManager::gInstance->mIOThread->CancelBlockingIO();
+ }
+ }
+ }
+
+ protected:
+ mozilla::Monitor mMonitor;
+ bool mNotified;
+};
+
+// Class responsible for reporting IO performance stats
+class IOPerfReportEvent {
+ public:
+ explicit IOPerfReportEvent(CacheFileUtils::CachePerfStats::EDataType aType)
+ : mType(aType), mEventCounter(0) {}
+
+ void Start(CacheIOThread* aIOThread) {
+ mStartTime = TimeStamp::Now();
+ mEventCounter = aIOThread->EventCounter();
+ }
+
+ void Report(CacheIOThread* aIOThread) {
+ if (mStartTime.IsNull()) {
+ return;
+ }
+
+    // A single IO operation can take less than 1 ms, so we use microseconds
+    // to keep a good resolution of the data.
+ uint32_t duration = (TimeStamp::Now() - mStartTime).ToMicroseconds();
+
+ // This is a simple prefiltering of values that might differ a lot from the
+ // average value. Do not add the value to the filtered stats when the event
+ // had to wait in a long queue.
+ uint32_t eventCounter = aIOThread->EventCounter();
+    bool shortOnly = (eventCounter - mEventCounter) >= 5;
+
+ CacheFileUtils::CachePerfStats::AddValue(mType, duration, shortOnly);
+ }
+
+ protected:
+ CacheFileUtils::CachePerfStats::EDataType mType;
+ TimeStamp mStartTime;
+ uint32_t mEventCounter;
+};
+
+class OpenFileEvent : public Runnable, public IOPerfReportEvent {
+ public:
+ OpenFileEvent(const nsACString& aKey, uint32_t aFlags,
+ CacheFileIOListener* aCallback)
+ : Runnable("net::OpenFileEvent"),
+ IOPerfReportEvent(CacheFileUtils::CachePerfStats::IO_OPEN),
+ mFlags(aFlags),
+ mCallback(aCallback),
+ mKey(aKey) {
+ mIOMan = CacheFileIOManager::gInstance;
+ if (!(mFlags & CacheFileIOManager::SPECIAL_FILE)) {
+ Start(mIOMan->mIOThread);
+ }
+ }
+
+ protected:
+ ~OpenFileEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv = NS_OK;
+
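+    // Regular entry files are addressed by the SHA1 hash of the cache key;
+    // special files are looked up directly by their key.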
+ if (!(mFlags & CacheFileIOManager::SPECIAL_FILE)) {
+ SHA1Sum sum;
+ sum.update(mKey.BeginReading(), mKey.Length());
+ sum.finish(mHash);
+ }
+
+ if (!mIOMan) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ if (mFlags & CacheFileIOManager::SPECIAL_FILE) {
+ rv = mIOMan->OpenSpecialFileInternal(mKey, mFlags,
+ getter_AddRefs(mHandle));
+ } else {
+ rv = mIOMan->OpenFileInternal(&mHash, mKey, mFlags,
+ getter_AddRefs(mHandle));
+ if (NS_SUCCEEDED(rv)) {
+ Report(mIOMan->mIOThread);
+ }
+ }
+ mIOMan = nullptr;
+ if (mHandle) {
+ if (mHandle->Key().IsEmpty()) {
+ mHandle->Key() = mKey;
+ }
+ }
+ }
+
+ mCallback->OnFileOpened(mHandle, rv);
+ return NS_OK;
+ }
+
+ protected:
+ SHA1Sum::Hash mHash;
+ uint32_t mFlags;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+ RefPtr<CacheFileIOManager> mIOMan;
+ RefPtr<CacheFileHandle> mHandle;
+ nsCString mKey;
+};
+
+class ReadEvent : public Runnable, public IOPerfReportEvent {
+ public:
+ ReadEvent(CacheFileHandle* aHandle, int64_t aOffset, char* aBuf,
+ int32_t aCount, CacheFileIOListener* aCallback)
+ : Runnable("net::ReadEvent"),
+ IOPerfReportEvent(CacheFileUtils::CachePerfStats::IO_READ),
+ mHandle(aHandle),
+ mOffset(aOffset),
+ mBuf(aBuf),
+ mCount(aCount),
+ mCallback(aCallback) {
+ if (!mHandle->IsSpecialFile()) {
+ Start(CacheFileIOManager::gInstance->mIOThread);
+ }
+ }
+
+ protected:
+ ~ReadEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (mHandle->IsClosed() || (mCallback && mCallback->IsKilled())) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = CacheFileIOManager::gInstance->ReadInternal(mHandle, mOffset, mBuf,
+ mCount);
+ if (NS_SUCCEEDED(rv)) {
+ Report(CacheFileIOManager::gInstance->mIOThread);
+ }
+ }
+
+ mCallback->OnDataRead(mHandle, mBuf, rv);
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+ int64_t mOffset;
+ char* mBuf;
+ int32_t mCount;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+};
+
+class WriteEvent : public Runnable, public IOPerfReportEvent {
+ public:
+ WriteEvent(CacheFileHandle* aHandle, int64_t aOffset, const char* aBuf,
+ int32_t aCount, bool aValidate, bool aTruncate,
+ CacheFileIOListener* aCallback)
+ : Runnable("net::WriteEvent"),
+ IOPerfReportEvent(CacheFileUtils::CachePerfStats::IO_WRITE),
+ mHandle(aHandle),
+ mOffset(aOffset),
+ mBuf(aBuf),
+ mCount(aCount),
+ mValidate(aValidate),
+ mTruncate(aTruncate),
+ mCallback(aCallback) {
+ if (!mHandle->IsSpecialFile()) {
+ Start(CacheFileIOManager::gInstance->mIOThread);
+ }
+ }
+
+ protected:
+ ~WriteEvent() {
+ if (!mCallback && mBuf) {
+ free(const_cast<char*>(mBuf));
+ }
+ }
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (mHandle->IsClosed() || (mCallback && mCallback->IsKilled())) {
+      // We usually get here only after the internal shutdown
+      // (i.e. mShuttingDown == true). Pretend the write has succeeded
+      // to avoid any past-shutdown file dooming.
+ rv = (CacheObserver::IsPastShutdownIOLag() ||
+ CacheFileIOManager::gInstance->mShuttingDown)
+ ? NS_OK
+ : NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = CacheFileIOManager::gInstance->WriteInternal(
+ mHandle, mOffset, mBuf, mCount, mValidate, mTruncate);
+ if (NS_SUCCEEDED(rv)) {
+ Report(CacheFileIOManager::gInstance->mIOThread);
+ }
+ if (NS_FAILED(rv) && !mCallback) {
+ // No listener is going to handle the error, doom the file
+ CacheFileIOManager::gInstance->DoomFileInternal(mHandle);
+ }
+ }
+ if (mCallback) {
+ mCallback->OnDataWritten(mHandle, mBuf, rv);
+ } else {
+ free(const_cast<char*>(mBuf));
+ mBuf = nullptr;
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+ int64_t mOffset;
+ const char* mBuf;
+ int32_t mCount;
+ bool mValidate : 1;
+ bool mTruncate : 1;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+};
+
+class DoomFileEvent : public Runnable {
+ public:
+ DoomFileEvent(CacheFileHandle* aHandle, CacheFileIOListener* aCallback)
+ : Runnable("net::DoomFileEvent"),
+ mCallback(aCallback),
+ mHandle(aHandle) {}
+
+ protected:
+ ~DoomFileEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (mHandle->IsClosed()) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = CacheFileIOManager::gInstance->DoomFileInternal(mHandle);
+ }
+
+ if (mCallback) {
+ mCallback->OnFileDoomed(mHandle, rv);
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ nsCOMPtr<CacheFileIOListener> mCallback;
+ nsCOMPtr<nsIEventTarget> mTarget;
+ RefPtr<CacheFileHandle> mHandle;
+};
+
+class DoomFileByKeyEvent : public Runnable {
+ public:
+ DoomFileByKeyEvent(const nsACString& aKey, CacheFileIOListener* aCallback)
+ : Runnable("net::DoomFileByKeyEvent"), mCallback(aCallback) {
+ SHA1Sum sum;
+ sum.update(aKey.BeginReading(), aKey.Length());
+ sum.finish(mHash);
+
+ mIOMan = CacheFileIOManager::gInstance;
+ }
+
+ protected:
+ ~DoomFileByKeyEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (!mIOMan) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = mIOMan->DoomFileByKeyInternal(&mHash);
+ mIOMan = nullptr;
+ }
+
+ if (mCallback) {
+ mCallback->OnFileDoomed(nullptr, rv);
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ SHA1Sum::Hash mHash;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+ RefPtr<CacheFileIOManager> mIOMan;
+};
+
+class ReleaseNSPRHandleEvent : public Runnable {
+ public:
+ explicit ReleaseNSPRHandleEvent(CacheFileHandle* aHandle)
+ : Runnable("net::ReleaseNSPRHandleEvent"), mHandle(aHandle) {}
+
+ protected:
+ ~ReleaseNSPRHandleEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ if (!mHandle->IsClosed()) {
+ CacheFileIOManager::gInstance->MaybeReleaseNSPRHandleInternal(mHandle);
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+};
+
+class TruncateSeekSetEOFEvent : public Runnable {
+ public:
+ TruncateSeekSetEOFEvent(CacheFileHandle* aHandle, int64_t aTruncatePos,
+ int64_t aEOFPos, CacheFileIOListener* aCallback)
+ : Runnable("net::TruncateSeekSetEOFEvent"),
+ mHandle(aHandle),
+ mTruncatePos(aTruncatePos),
+ mEOFPos(aEOFPos),
+ mCallback(aCallback) {}
+
+ protected:
+ ~TruncateSeekSetEOFEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (mHandle->IsClosed() || (mCallback && mCallback->IsKilled())) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = CacheFileIOManager::gInstance->TruncateSeekSetEOFInternal(
+ mHandle, mTruncatePos, mEOFPos);
+ }
+
+ if (mCallback) {
+ mCallback->OnEOFSet(mHandle, rv);
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+ int64_t mTruncatePos;
+ int64_t mEOFPos;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+};
+
+class RenameFileEvent : public Runnable {
+ public:
+ RenameFileEvent(CacheFileHandle* aHandle, const nsACString& aNewName,
+ CacheFileIOListener* aCallback)
+ : Runnable("net::RenameFileEvent"),
+ mHandle(aHandle),
+ mNewName(aNewName),
+ mCallback(aCallback) {}
+
+ protected:
+ ~RenameFileEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ nsresult rv;
+
+ if (mHandle->IsClosed()) {
+ rv = NS_ERROR_NOT_INITIALIZED;
+ } else {
+ rv = CacheFileIOManager::gInstance->RenameFileInternal(mHandle, mNewName);
+ }
+
+ if (mCallback) {
+ mCallback->OnFileRenamed(mHandle, rv);
+ }
+
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+ nsCString mNewName;
+ nsCOMPtr<CacheFileIOListener> mCallback;
+};
+
+class InitIndexEntryEvent : public Runnable {
+ public:
+ InitIndexEntryEvent(CacheFileHandle* aHandle,
+ OriginAttrsHash aOriginAttrsHash, bool aAnonymous,
+ bool aPinning)
+ : Runnable("net::InitIndexEntryEvent"),
+ mHandle(aHandle),
+ mOriginAttrsHash(aOriginAttrsHash),
+ mAnonymous(aAnonymous),
+ mPinning(aPinning) {}
+
+ protected:
+ ~InitIndexEntryEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ if (mHandle->IsClosed() || mHandle->IsDoomed()) {
+ return NS_OK;
+ }
+
+ CacheIndex::InitEntry(mHandle->Hash(), mOriginAttrsHash, mAnonymous,
+ mPinning);
+
+ // We cannot set the filesize before we init the entry. If we're opening
+ // an existing entry file, frecency will be set after parsing the entry
+ // file, but we must set the filesize here since nobody is going to set it
+ // if there is no write to the file.
+ uint32_t sizeInK = mHandle->FileSizeInK();
+ CacheIndex::UpdateEntry(mHandle->Hash(), nullptr, nullptr, nullptr, nullptr,
+ nullptr, &sizeInK);
+
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+ OriginAttrsHash mOriginAttrsHash;
+ bool mAnonymous;
+ bool mPinning;
+};
+
+class UpdateIndexEntryEvent : public Runnable {
+ public:
+ UpdateIndexEntryEvent(CacheFileHandle* aHandle, const uint32_t* aFrecency,
+ const bool* aHasAltData, const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType)
+ : Runnable("net::UpdateIndexEntryEvent"),
+ mHandle(aHandle),
+ mHasFrecency(false),
+ mHasHasAltData(false),
+ mHasOnStartTime(false),
+ mHasOnStopTime(false),
+ mHasContentType(false),
+ mFrecency(0),
+ mHasAltData(false),
+ mOnStartTime(0),
+ mOnStopTime(0),
+ mContentType(nsICacheEntry::CONTENT_TYPE_UNKNOWN) {
+ if (aFrecency) {
+ mHasFrecency = true;
+ mFrecency = *aFrecency;
+ }
+ if (aHasAltData) {
+ mHasHasAltData = true;
+ mHasAltData = *aHasAltData;
+ }
+ if (aOnStartTime) {
+ mHasOnStartTime = true;
+ mOnStartTime = *aOnStartTime;
+ }
+ if (aOnStopTime) {
+ mHasOnStopTime = true;
+ mOnStopTime = *aOnStopTime;
+ }
+ if (aContentType) {
+ mHasContentType = true;
+ mContentType = *aContentType;
+ }
+ }
+
+ protected:
+ ~UpdateIndexEntryEvent() = default;
+
+ public:
+ NS_IMETHOD Run() override {
+ if (mHandle->IsClosed() || mHandle->IsDoomed()) {
+ return NS_OK;
+ }
+
+ CacheIndex::UpdateEntry(mHandle->Hash(),
+ mHasFrecency ? &mFrecency : nullptr,
+ mHasHasAltData ? &mHasAltData : nullptr,
+ mHasOnStartTime ? &mOnStartTime : nullptr,
+ mHasOnStopTime ? &mOnStopTime : nullptr,
+ mHasContentType ? &mContentType : nullptr, nullptr);
+ return NS_OK;
+ }
+
+ protected:
+ RefPtr<CacheFileHandle> mHandle;
+
+ bool mHasFrecency;
+ bool mHasHasAltData;
+ bool mHasOnStartTime;
+ bool mHasOnStopTime;
+ bool mHasContentType;
+
+ uint32_t mFrecency;
+ bool mHasAltData;
+ uint16_t mOnStartTime;
+ uint16_t mOnStopTime;
+ uint8_t mContentType;
+};
+
+class MetadataWriteScheduleEvent : public Runnable {
+ public:
+ enum EMode { SCHEDULE, UNSCHEDULE, SHUTDOWN } mMode;
+
+ RefPtr<CacheFile> mFile;
+ RefPtr<CacheFileIOManager> mIOMan;
+
+ MetadataWriteScheduleEvent(CacheFileIOManager* aManager, CacheFile* aFile,
+ EMode aMode)
+ : Runnable("net::MetadataWriteScheduleEvent"),
+ mMode(aMode),
+ mFile(aFile),
+ mIOMan(aManager) {}
+
+ virtual ~MetadataWriteScheduleEvent() = default;
+
+ NS_IMETHOD Run() override {
+ RefPtr<CacheFileIOManager> ioMan = CacheFileIOManager::gInstance;
+ if (!ioMan) {
+ NS_WARNING(
+ "CacheFileIOManager already gone in "
+ "MetadataWriteScheduleEvent::Run()");
+ return NS_OK;
+ }
+
+ switch (mMode) {
+ case SCHEDULE:
+ ioMan->ScheduleMetadataWriteInternal(mFile);
+ break;
+ case UNSCHEDULE:
+ ioMan->UnscheduleMetadataWriteInternal(mFile);
+ break;
+ case SHUTDOWN:
+ ioMan->ShutdownMetadataWriteSchedulingInternal();
+ break;
+ }
+ return NS_OK;
+ }
+};
+
+StaticRefPtr<CacheFileIOManager> CacheFileIOManager::gInstance;
+
+NS_IMPL_ISUPPORTS(CacheFileIOManager, nsITimerCallback, nsINamed)
+
+CacheFileIOManager::CacheFileIOManager()
+ : mShuttingDown(false),
+ mTreeCreated(false),
+ mTreeCreationFailed(false),
+ mOverLimitEvicting(false),
+ mCacheSizeOnHardLimit(false),
+ mRemovingTrashDirs(false) {
+ LOG(("CacheFileIOManager::CacheFileIOManager [this=%p]", this));
+ MOZ_ASSERT(!gInstance, "multiple CacheFileIOManager instances!");
+}
+
+CacheFileIOManager::~CacheFileIOManager() {
+ LOG(("CacheFileIOManager::~CacheFileIOManager [this=%p]", this));
+}
+
+// static
+nsresult CacheFileIOManager::Init() {
+ LOG(("CacheFileIOManager::Init()"));
+
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if (gInstance) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ RefPtr<CacheFileIOManager> ioMan = new CacheFileIOManager();
+
+ nsresult rv = ioMan->InitInternal();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ gInstance = std::move(ioMan);
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::InitInternal() {
+ nsresult rv;
+
+ mIOThread = new CacheIOThread();
+
+ rv = mIOThread->Init();
+ MOZ_ASSERT(NS_SUCCEEDED(rv), "Can't create background thread");
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mStartTime = TimeStamp::NowLoRes();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::Shutdown() {
+ LOG(("CacheFileIOManager::Shutdown() [gInstance=%p]", gInstance.get()));
+
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if (!gInstance) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_SHUTDOWN_V2> shutdownTimer;
+
+ CacheIndex::PreShutdown();
+
+ ShutdownMetadataWriteScheduling();
+
+ RefPtr<ShutdownEvent> ev = new ShutdownEvent();
+ ev->PostAndWait();
+
+ MOZ_ASSERT(gInstance->mHandles.HandleCount() == 0);
+ MOZ_ASSERT(gInstance->mHandlesByLastUsed.Length() == 0);
+
+ if (gInstance->mIOThread) {
+ gInstance->mIOThread->Shutdown();
+ }
+
+ CacheIndex::Shutdown();
+
+ if (CacheObserver::ClearCacheOnShutdown()) {
+ Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE2_SHUTDOWN_CLEAR_PRIVATE>
+ totalTimer;
+ gInstance->SyncRemoveAllCacheFiles();
+ }
+
+ gInstance = nullptr;
+
+ return NS_OK;
+}
+
+void CacheFileIOManager::ShutdownInternal() {
+ LOG(("CacheFileIOManager::ShutdownInternal() [this=%p]", this));
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ // No new handles can be created after this flag is set
+ mShuttingDown = true;
+
+ if (mTrashTimer) {
+ mTrashTimer->Cancel();
+ mTrashTimer = nullptr;
+ }
+
+ // close all handles and delete all associated files
+ nsTArray<RefPtr<CacheFileHandle> > handles;
+ mHandles.GetAllHandles(&handles);
+ handles.AppendElements(mSpecialHandles);
+
+ for (uint32_t i = 0; i < handles.Length(); i++) {
+ CacheFileHandle* h = handles[i];
+ h->mClosed = true;
+
+ h->Log();
+
+ // Close completely written files.
+ MaybeReleaseNSPRHandleInternal(h);
+    // Don't bother removing invalid and/or doomed files to improve
+    // shutdown performance.
+    // Doomed files are already in the doomed directory, from which we never
+    // reuse files and which is deleted on the next session startup.
+    // Invalid files don't have metadata and thus won't load anyway
+    // (hashes won't match).
+
+ if (!h->IsSpecialFile() && !h->mIsDoomed && !h->mFileExists) {
+ CacheIndex::RemoveEntry(h->Hash());
+ }
+
+ // Remove the handle from mHandles/mSpecialHandles
+ if (h->IsSpecialFile()) {
+ mSpecialHandles.RemoveElement(h);
+ } else {
+ mHandles.RemoveHandle(h);
+ }
+
+ // Pointer to the hash is no longer valid once the last handle with the
+ // given hash is released. Null out the pointer so that we crash if there
+ // is a bug in this code and we dereference the pointer after this point.
+ if (!h->IsSpecialFile()) {
+ h->mHash = nullptr;
+ }
+ }
+
+  // Assert the table is empty. At this point no new handles can be added,
+  // handles no longer remove themselves from this table, and we don't want to
+  // keep invalid handles here. Also, no lookup is going to happen after this
+  // point.
+ MOZ_ASSERT(mHandles.HandleCount() == 0);
+
+ // Release trash directory enumerator
+ if (mTrashDirEnumerator) {
+ mTrashDirEnumerator->Close();
+ mTrashDirEnumerator = nullptr;
+ }
+
+ if (mContextEvictor) {
+ mContextEvictor->Shutdown();
+ mContextEvictor = nullptr;
+ }
+}
+
+// static
+nsresult CacheFileIOManager::OnProfile() {
+ LOG(("CacheFileIOManager::OnProfile() [gInstance=%p]", gInstance.get()));
+
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (!ioMan) {
+ // CacheFileIOManager::Init() failed, probably could not create the IO
+ // thread, just go with it...
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsresult rv;
+
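+  // Pick the cache parent directory: an explicit override first, then (on
+  // Android) the CACHE_DIRECTORY environment variable, then the application
+  // cache parent dir, and finally the local profile dir; "cache2" is appended
+  // to whichever directory wins.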
+ nsCOMPtr<nsIFile> directory;
+
+ CacheObserver::ParentDirOverride(getter_AddRefs(directory));
+
+#if defined(MOZ_WIDGET_ANDROID)
+ nsCOMPtr<nsIFile> profilelessDirectory;
+ char* cachePath = getenv("CACHE_DIRECTORY");
+ if (!directory && cachePath && *cachePath) {
+ rv = NS_NewNativeLocalFile(nsDependentCString(cachePath), true,
+ getter_AddRefs(directory));
+ if (NS_SUCCEEDED(rv)) {
+ // Save this directory as the profileless path.
+ rv = directory->Clone(getter_AddRefs(profilelessDirectory));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+      // Add the profile leaf name to the directory name to distinguish
+      // between the multiple profiles Fennec supports.
+ nsCOMPtr<nsIFile> profD;
+ rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
+ getter_AddRefs(profD));
+
+ nsAutoCString leafName;
+ if (NS_SUCCEEDED(rv)) {
+ rv = profD->GetNativeLeafName(leafName);
+ }
+ if (NS_SUCCEEDED(rv)) {
+ rv = directory->AppendNative(leafName);
+ }
+ if (NS_FAILED(rv)) {
+ directory = nullptr;
+ }
+ }
+ }
+#endif
+
+ if (!directory) {
+ rv = NS_GetSpecialDirectory(NS_APP_CACHE_PARENT_DIR,
+ getter_AddRefs(directory));
+ }
+
+ if (!directory) {
+ rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_LOCAL_50_DIR,
+ getter_AddRefs(directory));
+ }
+
+ if (directory) {
+ rv = directory->Append(u"cache2"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ // All functions return a clone.
+ ioMan->mCacheDirectory.swap(directory);
+
+#if defined(MOZ_WIDGET_ANDROID)
+ if (profilelessDirectory) {
+ rv = profilelessDirectory->Append(u"cache2"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ ioMan->mCacheProfilelessDirectory.swap(profilelessDirectory);
+#endif
+
+ if (ioMan->mCacheDirectory) {
+ CacheIndex::Init(ioMan->mCacheDirectory);
+ }
+
+ return NS_OK;
+}
+
+// static
+already_AddRefed<nsIEventTarget> CacheFileIOManager::IOTarget() {
+ nsCOMPtr<nsIEventTarget> target;
+ if (gInstance && gInstance->mIOThread) {
+ target = gInstance->mIOThread->Target();
+ }
+
+ return target.forget();
+}
+
+// static
+already_AddRefed<CacheIOThread> CacheFileIOManager::IOThread() {
+ RefPtr<CacheIOThread> thread;
+ if (gInstance) {
+ thread = gInstance->mIOThread;
+ }
+
+ return thread.forget();
+}
+
+// static
+bool CacheFileIOManager::IsOnIOThread() {
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (ioMan && ioMan->mIOThread) {
+ return ioMan->mIOThread->IsCurrentThread();
+ }
+
+ return false;
+}
+
+// static
+bool CacheFileIOManager::IsOnIOThreadOrCeased() {
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (ioMan && ioMan->mIOThread) {
+ return ioMan->mIOThread->IsCurrentThread();
+ }
+
+ // Ceased...
+ return true;
+}
+
+// static
+bool CacheFileIOManager::IsShutdown() {
+ if (!gInstance) {
+ return true;
+ }
+ return gInstance->mShuttingDown;
+}
+
+// static
+nsresult CacheFileIOManager::ScheduleMetadataWrite(CacheFile* aFile) {
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ NS_ENSURE_TRUE(ioMan, NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_TRUE(!ioMan->mShuttingDown, NS_ERROR_NOT_INITIALIZED);
+
+ RefPtr<MetadataWriteScheduleEvent> event = new MetadataWriteScheduleEvent(
+ ioMan, aFile, MetadataWriteScheduleEvent::SCHEDULE);
+ nsCOMPtr<nsIEventTarget> target = ioMan->IOTarget();
+ NS_ENSURE_TRUE(target, NS_ERROR_UNEXPECTED);
+ return target->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+}
+
+nsresult CacheFileIOManager::ScheduleMetadataWriteInternal(CacheFile* aFile) {
+ MOZ_ASSERT(IsOnIOThreadOrCeased());
+
+ nsresult rv;
+
+ if (!mMetadataWritesTimer) {
+ rv = NS_NewTimerWithCallback(getter_AddRefs(mMetadataWritesTimer), this,
+ kMetadataWriteDelay, nsITimer::TYPE_ONE_SHOT);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (mScheduledMetadataWrites.IndexOf(aFile) !=
+ mScheduledMetadataWrites.NoIndex) {
+ return NS_OK;
+ }
+
+ mScheduledMetadataWrites.AppendElement(aFile);
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::UnscheduleMetadataWrite(CacheFile* aFile) {
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ NS_ENSURE_TRUE(ioMan, NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_TRUE(!ioMan->mShuttingDown, NS_ERROR_NOT_INITIALIZED);
+
+ RefPtr<MetadataWriteScheduleEvent> event = new MetadataWriteScheduleEvent(
+ ioMan, aFile, MetadataWriteScheduleEvent::UNSCHEDULE);
+ nsCOMPtr<nsIEventTarget> target = ioMan->IOTarget();
+ NS_ENSURE_TRUE(target, NS_ERROR_UNEXPECTED);
+ return target->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+}
+
+void CacheFileIOManager::UnscheduleMetadataWriteInternal(CacheFile* aFile) {
+ MOZ_ASSERT(IsOnIOThreadOrCeased());
+
+ mScheduledMetadataWrites.RemoveElement(aFile);
+
+ if (mScheduledMetadataWrites.Length() == 0 && mMetadataWritesTimer) {
+ mMetadataWritesTimer->Cancel();
+ mMetadataWritesTimer = nullptr;
+ }
+}
+
+// static
+nsresult CacheFileIOManager::ShutdownMetadataWriteScheduling() {
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ NS_ENSURE_TRUE(ioMan, NS_ERROR_NOT_INITIALIZED);
+
+ RefPtr<MetadataWriteScheduleEvent> event = new MetadataWriteScheduleEvent(
+ ioMan, nullptr, MetadataWriteScheduleEvent::SHUTDOWN);
+ nsCOMPtr<nsIEventTarget> target = ioMan->IOTarget();
+ NS_ENSURE_TRUE(target, NS_ERROR_UNEXPECTED);
+ return target->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+}
+
+void CacheFileIOManager::ShutdownMetadataWriteSchedulingInternal() {
+ MOZ_ASSERT(IsOnIOThreadOrCeased());
+
+ nsTArray<RefPtr<CacheFile> > files = std::move(mScheduledMetadataWrites);
+ for (uint32_t i = 0; i < files.Length(); ++i) {
+ CacheFile* file = files[i];
+ file->WriteMetadataIfNeeded();
+ }
+
+ if (mMetadataWritesTimer) {
+ mMetadataWritesTimer->Cancel();
+ mMetadataWritesTimer = nullptr;
+ }
+}
+
+NS_IMETHODIMP
+CacheFileIOManager::Notify(nsITimer* aTimer) {
+ MOZ_ASSERT(IsOnIOThreadOrCeased());
+ MOZ_ASSERT(mMetadataWritesTimer == aTimer);
+
+ mMetadataWritesTimer = nullptr;
+
+ nsTArray<RefPtr<CacheFile> > files = std::move(mScheduledMetadataWrites);
+ for (uint32_t i = 0; i < files.Length(); ++i) {
+ CacheFile* file = files[i];
+ file->WriteMetadataIfNeeded();
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileIOManager::GetName(nsACString& aName) {
+ aName.AssignLiteral("CacheFileIOManager");
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::OpenFile(const nsACString& aKey, uint32_t aFlags,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::OpenFile() [key=%s, flags=%d, listener=%p]",
+ PromiseFlatCString(aKey).get(), aFlags, aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ bool priority = aFlags & CacheFileIOManager::PRIORITY;
+ RefPtr<OpenFileEvent> ev = new OpenFileEvent(aKey, aFlags, aCallback);
+ rv = ioMan->mIOThread->Dispatch(
+ ev, priority ? CacheIOThread::OPEN_PRIORITY : CacheIOThread::OPEN);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::OpenFileInternal(const SHA1Sum::Hash* aHash,
+ const nsACString& aKey,
+ uint32_t aFlags,
+ CacheFileHandle** _retval) {
+ LOG(
+ ("CacheFileIOManager::OpenFileInternal() [hash=%08x%08x%08x%08x%08x, "
+ "key=%s, flags=%d]",
+ LOGSHA1(aHash), PromiseFlatCString(aKey).get(), aFlags));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsresult rv;
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ CacheIOThread::Cancelable cancelable(
+ true /* never called for special handles */);
+
+ if (!mTreeCreated) {
+ rv = CreateCacheTree();
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ CacheFileHandle::PinningStatus pinning =
+ aFlags & PINNED ? CacheFileHandle::PinningStatus::PINNED
+ : CacheFileHandle::PinningStatus::NON_PINNED;
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetFile(aHash, getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<CacheFileHandle> handle;
+ mHandles.GetHandle(aHash, getter_AddRefs(handle));
+
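+  // CREATE_NEW alone (without OPEN or CREATE) means "start over": doom any
+  // existing handle, remove any existing file from disk and register a brand
+  // new index entry.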
+ if ((aFlags & (OPEN | CREATE | CREATE_NEW)) == CREATE_NEW) {
+ if (handle) {
+ rv = DoomFileInternal(handle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ handle = nullptr;
+ }
+
+ handle = mHandles.NewHandle(aHash, aFlags & PRIORITY, pinning);
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (exists) {
+ CacheIndex::RemoveEntry(aHash);
+
+ LOG(
+ ("CacheFileIOManager::OpenFileInternal() - Removing old file from "
+ "disk"));
+ rv = file->Remove(false);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Cannot remove old entry from the disk");
+ LOG(
+ ("CacheFileIOManager::OpenFileInternal() - Removing old file failed"
+ ". [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+ }
+
+ CacheIndex::AddEntry(aHash);
+ handle->mFile.swap(file);
+ handle->mFileSize = 0;
+ }
+
+ if (handle) {
+ handle.swap(*_retval);
+ return NS_OK;
+ }
+
+ bool exists, evictedAsPinned = false, evictedAsNonPinned = false;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (exists && mContextEvictor) {
+ if (mContextEvictor->ContextsCount() == 0) {
+ mContextEvictor = nullptr;
+ } else {
+ mContextEvictor->WasEvicted(aKey, file, &evictedAsPinned,
+ &evictedAsNonPinned);
+ }
+ }
+
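+  // Plain OPEN (without CREATE or CREATE_NEW) only succeeds for entries that
+  // already exist on disk.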
+ if (!exists && (aFlags & (OPEN | CREATE | CREATE_NEW)) == OPEN) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (exists) {
+ // For existing files we determine the pinning status later, after the
+ // metadata gets parsed.
+ pinning = CacheFileHandle::PinningStatus::UNKNOWN;
+ }
+
+ handle = mHandles.NewHandle(aHash, aFlags & PRIORITY, pinning);
+ if (exists) {
+    // If the context file evictor above reported this file as evicted for
+    // either the pinned or the non-pinned state, these calls ensure we doom
+    // the handle as soon as the real pinning state is known, i.e. after the
+    // metadata has been parsed. DoomFileInternal on the |handle| doesn't doom
+    // right now, since the pinning state is unknown and we pass down a pinning
+    // restriction.
+ if (evictedAsPinned) {
+ rv = DoomFileInternal(handle, DOOM_WHEN_PINNED);
+ MOZ_ASSERT(!handle->IsDoomed() && NS_SUCCEEDED(rv));
+ }
+ if (evictedAsNonPinned) {
+ rv = DoomFileInternal(handle, DOOM_WHEN_NON_PINNED);
+ MOZ_ASSERT(!handle->IsDoomed() && NS_SUCCEEDED(rv));
+ }
+
+ int64_t fileSize = -1;
+ rv = file->GetFileSize(&fileSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ handle->mFileSize = fileSize;
+ handle->mFileExists = true;
+
+ CacheIndex::EnsureEntryExists(aHash);
+ } else {
+ handle->mFileSize = 0;
+
+ CacheIndex::AddEntry(aHash);
+ }
+
+ handle->mFile.swap(file);
+ handle.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::OpenSpecialFileInternal(
+ const nsACString& aKey, uint32_t aFlags, CacheFileHandle** _retval) {
+ LOG(("CacheFileIOManager::OpenSpecialFileInternal() [key=%s, flags=%d]",
+ PromiseFlatCString(aKey).get(), aFlags));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsresult rv;
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!mTreeCreated) {
+ rv = CreateCacheTree();
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetSpecialFile(aKey, getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<CacheFileHandle> handle;
+ for (uint32_t i = 0; i < mSpecialHandles.Length(); i++) {
+ if (!mSpecialHandles[i]->IsDoomed() && mSpecialHandles[i]->Key() == aKey) {
+ handle = mSpecialHandles[i];
+ break;
+ }
+ }
+
+ if ((aFlags & (OPEN | CREATE | CREATE_NEW)) == CREATE_NEW) {
+ if (handle) {
+ rv = DoomFileInternal(handle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ handle = nullptr;
+ }
+
+ handle = new CacheFileHandle(aKey, aFlags & PRIORITY,
+ CacheFileHandle::PinningStatus::NON_PINNED);
+ mSpecialHandles.AppendElement(handle);
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (exists) {
+ LOG(
+ ("CacheFileIOManager::OpenSpecialFileInternal() - Removing file from "
+ "disk"));
+ rv = file->Remove(false);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Cannot remove old entry from the disk");
+ LOG(
+ ("CacheFileIOManager::OpenSpecialFileInternal() - Removing file "
+ "failed. [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+ }
+
+ handle->mFile.swap(file);
+ handle->mFileSize = 0;
+ }
+
+ if (handle) {
+ handle.swap(*_retval);
+ return NS_OK;
+ }
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!exists && (aFlags & (OPEN | CREATE | CREATE_NEW)) == OPEN) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ handle = new CacheFileHandle(aKey, aFlags & PRIORITY,
+ CacheFileHandle::PinningStatus::NON_PINNED);
+ mSpecialHandles.AppendElement(handle);
+
+ if (exists) {
+ int64_t fileSize = -1;
+ rv = file->GetFileSize(&fileSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ handle->mFileSize = fileSize;
+ handle->mFileExists = true;
+ } else {
+ handle->mFileSize = 0;
+ }
+
+ handle->mFile.swap(file);
+ handle.swap(*_retval);
+ return NS_OK;
+}
+
+void CacheFileIOManager::CloseHandleInternal(CacheFileHandle* aHandle) {
+ nsresult rv;
+ LOG(("CacheFileIOManager::CloseHandleInternal() [handle=%p]", aHandle));
+
+ MOZ_ASSERT(!aHandle->IsClosed());
+
+ aHandle->Log();
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ // Maybe close file handle (can be legally bypassed after shutdown)
+ rv = MaybeReleaseNSPRHandleInternal(aHandle);
+
+  // Delete the file if the entry was doomed or invalid and the file
+  // descriptor was properly closed
+ if ((aHandle->mIsDoomed || aHandle->mInvalid) && aHandle->mFileExists &&
+ NS_SUCCEEDED(rv)) {
+ LOG(
+ ("CacheFileIOManager::CloseHandleInternal() - Removing file from "
+ "disk"));
+
+ rv = aHandle->mFile->Remove(false);
+ if (NS_SUCCEEDED(rv)) {
+ aHandle->mFileExists = false;
+ } else {
+ LOG((" failed to remove the file [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+ }
+
+ if (!aHandle->IsSpecialFile() && !aHandle->mIsDoomed &&
+ (aHandle->mInvalid || !aHandle->mFileExists)) {
+ CacheIndex::RemoveEntry(aHandle->Hash());
+ }
+
+ // Don't remove handles after shutdown
+ if (!mShuttingDown) {
+ if (aHandle->IsSpecialFile()) {
+ mSpecialHandles.RemoveElement(aHandle);
+ } else {
+ mHandles.RemoveHandle(aHandle);
+ }
+ }
+}
+
+// static
+nsresult CacheFileIOManager::Read(CacheFileHandle* aHandle, int64_t aOffset,
+ char* aBuf, int32_t aCount,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::Read() [handle=%p, offset=%" PRId64 ", count=%d, "
+ "listener=%p]",
+ aHandle, aOffset, aCount, aCallback));
+
+ if (CacheObserver::ShuttingDown()) {
+ LOG((" no reads after shutdown"));
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<ReadEvent> ev =
+ new ReadEvent(aHandle, aOffset, aBuf, aCount, aCallback);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->IsPriority()
+ ? CacheIOThread::READ_PRIORITY
+ : CacheIOThread::READ);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::ReadInternal(CacheFileHandle* aHandle,
+ int64_t aOffset, char* aBuf,
+ int32_t aCount) {
+ LOG(("CacheFileIOManager::ReadInternal() [handle=%p, offset=%" PRId64
+ ", count=%d]",
+ aHandle, aOffset, aCount));
+
+ nsresult rv;
+
+ if (CacheObserver::ShuttingDown()) {
+ LOG((" no reads after shutdown"));
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!aHandle->mFileExists) {
+ NS_WARNING("Trying to read from non-existent file");
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ if (!aHandle->mFD) {
+ rv = OpenNSPRHandle(aHandle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ NSPRHandleUsed(aHandle);
+ }
+
+  // Check again; OpenNSPRHandle could have figured out that the file was gone.
+ if (!aHandle->mFileExists) {
+ NS_WARNING("Trying to read from non-existent file");
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ int64_t offset = PR_Seek64(aHandle->mFD, aOffset, PR_SEEK_SET);
+ if (offset == -1) {
+ return NS_ERROR_FAILURE;
+ }
+
+ int32_t bytesRead = PR_Read(aHandle->mFD, aBuf, aCount);
+ if (bytesRead != aCount) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::Write(CacheFileHandle* aHandle, int64_t aOffset,
+ const char* aBuf, int32_t aCount,
+ bool aValidate, bool aTruncate,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::Write() [handle=%p, offset=%" PRId64 ", count=%d, "
+ "validate=%d, truncate=%d, listener=%p]",
+ aHandle, aOffset, aCount, aValidate, aTruncate, aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || (aCallback && aCallback->IsKilled()) || !ioMan) {
+ if (!aCallback) {
+ // When no callback is provided, CacheFileIOManager is responsible for
+ // releasing the buffer. We must release it even in case of failure.
+ free(const_cast<char*>(aBuf));
+ }
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<WriteEvent> ev = new WriteEvent(aHandle, aOffset, aBuf, aCount,
+ aValidate, aTruncate, aCallback);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
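+// Sets the size of the file behind aFD to exactly aEOF bytes, using
+// ftruncate() on Unix and a seek followed by SetEndOfFile() on Windows.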
+static nsresult TruncFile(PRFileDesc* aFD, int64_t aEOF) {
+#if defined(XP_UNIX)
+ if (ftruncate(PR_FileDesc2NativeHandle(aFD), aEOF) != 0) {
+ NS_ERROR("ftruncate failed");
+ return NS_ERROR_FAILURE;
+ }
+#elif defined(XP_WIN)
+ int64_t cnt = PR_Seek64(aFD, aEOF, PR_SEEK_SET);
+ if (cnt == -1) {
+ return NS_ERROR_FAILURE;
+ }
+ if (!SetEndOfFile((HANDLE)PR_FileDesc2NativeHandle(aFD))) {
+ NS_ERROR("SetEndOfFile failed");
+ return NS_ERROR_FAILURE;
+ }
+#else
+ MOZ_ASSERT(false, "Not implemented!");
+ return NS_ERROR_NOT_IMPLEMENTED;
+#endif
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::WriteInternal(CacheFileHandle* aHandle,
+ int64_t aOffset, const char* aBuf,
+ int32_t aCount, bool aValidate,
+ bool aTruncate) {
+ LOG(("CacheFileIOManager::WriteInternal() [handle=%p, offset=%" PRId64
+ ", count=%d, "
+ "validate=%d, truncate=%d]",
+ aHandle, aOffset, aCount, aValidate, aTruncate));
+
+ nsresult rv;
+
+ if (aHandle->mKilled) {
+ LOG((" handle already killed, nothing written"));
+ return NS_OK;
+ }
+
+ if (CacheObserver::ShuttingDown() && (!aValidate || !aHandle->mFD)) {
+ aHandle->mKilled = true;
+ LOG((" killing the handle, nothing written"));
+ return NS_OK;
+ }
+
+ if (CacheObserver::IsPastShutdownIOLag()) {
+ LOG((" past the shutdown I/O lag, nothing written"));
+ // Pretend the write has succeeded, otherwise upper layers will doom
+ // the file and we end up with I/O anyway.
+ return NS_OK;
+ }
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ if (!aHandle->mFileExists) {
+ rv = CreateFile(aHandle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (!aHandle->mFD) {
+ rv = OpenNSPRHandle(aHandle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ NSPRHandleUsed(aHandle);
+ }
+
+  // Check again; OpenNSPRHandle could have figured out that the file was gone.
+ if (!aHandle->mFileExists) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+  // When this operation would increase the cache size, check whether the
+  // cache size has reached the hard limit and whether the write would cause
+  // critically low disk space.
+ if (aHandle->mFileSize < aOffset + aCount) {
+ if (mOverLimitEvicting && mCacheSizeOnHardLimit) {
+ LOG(
+ ("CacheFileIOManager::WriteInternal() - failing because cache size "
+ "reached hard limit!"));
+ return NS_ERROR_FILE_DISK_FULL;
+ }
+
+ int64_t freeSpace;
+ rv = mCacheDirectory->GetDiskSpaceAvailable(&freeSpace);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ freeSpace = -1;
+ LOG(
+ ("CacheFileIOManager::WriteInternal() - GetDiskSpaceAvailable() "
+ "failed! [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ } else {
+ freeSpace >>= 10; // bytes to kilobytes
+ uint32_t limit = CacheObserver::DiskFreeSpaceHardLimit();
+ if (freeSpace - aOffset - aCount + aHandle->mFileSize < limit) {
+ LOG(
+ ("CacheFileIOManager::WriteInternal() - Low free space, refusing "
+ "to write! [freeSpace=%" PRId64 "kB, limit=%ukB]",
+ freeSpace, limit));
+ return NS_ERROR_FILE_DISK_FULL;
+ }
+ }
+ }
+
+ // Write invalidates the entry by default
+ aHandle->mInvalid = true;
+
+ int64_t offset = PR_Seek64(aHandle->mFD, aOffset, PR_SEEK_SET);
+ if (offset == -1) {
+ return NS_ERROR_FAILURE;
+ }
+
+ int32_t bytesWritten = PR_Write(aHandle->mFD, aBuf, aCount);
+
+ if (bytesWritten != -1) {
+ uint32_t oldSizeInK = aHandle->FileSizeInK();
+ int64_t writeEnd = aOffset + bytesWritten;
+
+ if (aTruncate) {
+ rv = TruncFile(aHandle->mFD, writeEnd);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aHandle->mFileSize = writeEnd;
+ } else {
+ if (aHandle->mFileSize < writeEnd) {
+ aHandle->mFileSize = writeEnd;
+ }
+ }
+
+ uint32_t newSizeInK = aHandle->FileSizeInK();
+
+ if (oldSizeInK != newSizeInK && !aHandle->IsDoomed() &&
+ !aHandle->IsSpecialFile()) {
+ CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr,
+ nullptr, nullptr, &newSizeInK);
+
+ if (oldSizeInK < newSizeInK) {
+ EvictIfOverLimitInternal();
+ }
+ }
+
+ CacheIndex::UpdateTotalBytesWritten(bytesWritten);
+ }
+
+ if (bytesWritten != aCount) {
+ return NS_ERROR_FAILURE;
+ }
+
+ // Write was successful and this write validates the entry (i.e. metadata)
+ if (aValidate) {
+ aHandle->mInvalid = false;
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::DoomFile(CacheFileHandle* aHandle,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::DoomFile() [handle=%p, listener=%p]", aHandle,
+ aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<DoomFileEvent> ev = new DoomFileEvent(aHandle, aCallback);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->IsPriority()
+ ? CacheIOThread::OPEN_PRIORITY
+ : CacheIOThread::OPEN);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::DoomFileInternal(
+ CacheFileHandle* aHandle, PinningDoomRestriction aPinningDoomRestriction) {
+ LOG(("CacheFileIOManager::DoomFileInternal() [handle=%p]", aHandle));
+ aHandle->Log();
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ nsresult rv;
+
+ if (aHandle->IsDoomed()) {
+ return NS_OK;
+ }
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ if (aPinningDoomRestriction > NO_RESTRICTION) {
+ switch (aHandle->mPinning) {
+ case CacheFileHandle::PinningStatus::NON_PINNED:
+ if (MOZ_LIKELY(aPinningDoomRestriction != DOOM_WHEN_NON_PINNED)) {
+ LOG((" not dooming, it's a non-pinned handle"));
+ return NS_OK;
+ }
+ // Doom now
+ break;
+
+ case CacheFileHandle::PinningStatus::PINNED:
+ if (MOZ_UNLIKELY(aPinningDoomRestriction != DOOM_WHEN_PINNED)) {
+ LOG((" not dooming, it's a pinned handle"));
+ return NS_OK;
+ }
+ // Doom now
+ break;
+
+ case CacheFileHandle::PinningStatus::UNKNOWN:
+ if (MOZ_LIKELY(aPinningDoomRestriction == DOOM_WHEN_NON_PINNED)) {
+ LOG((" doom when non-pinned set"));
+ aHandle->mDoomWhenFoundNonPinned = true;
+ } else if (MOZ_UNLIKELY(aPinningDoomRestriction == DOOM_WHEN_PINNED)) {
+ LOG((" doom when pinned set"));
+ aHandle->mDoomWhenFoundPinned = true;
+ }
+
+ LOG((" pinning status not known, deferring doom decision"));
+ return NS_OK;
+ }
+ }
+
+ if (aHandle->mFileExists) {
+ // we need to move the current file to the doomed directory
+ rv = MaybeReleaseNSPRHandleInternal(aHandle, true);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // find unused filename
+ nsCOMPtr<nsIFile> file;
+ rv = GetDoomedFile(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> parentDir;
+ rv = file->GetParent(getter_AddRefs(parentDir));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString leafName;
+ rv = file->GetNativeLeafName(leafName);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = aHandle->mFile->MoveToNative(parentDir, leafName);
+ if (NS_ERROR_FILE_NOT_FOUND == rv ||
+ NS_ERROR_FILE_TARGET_DOES_NOT_EXIST == rv) {
+ LOG((" file already removed under our hands"));
+ aHandle->mFileExists = false;
+ rv = NS_OK;
+ } else {
+ NS_ENSURE_SUCCESS(rv, rv);
+ aHandle->mFile.swap(file);
+ }
+ }
+
+ if (!aHandle->IsSpecialFile()) {
+ CacheIndex::RemoveEntry(aHandle->Hash());
+ }
+
+ aHandle->mIsDoomed = true;
+
+ if (!aHandle->IsSpecialFile()) {
+ RefPtr<CacheStorageService> storageService = CacheStorageService::Self();
+ if (storageService) {
+ nsAutoCString idExtension, url;
+ nsCOMPtr<nsILoadContextInfo> info =
+ CacheFileUtils::ParseKey(aHandle->Key(), &idExtension, &url);
+ MOZ_ASSERT(info);
+ if (info) {
+ storageService->CacheFileDoomed(info, idExtension, url);
+ }
+ }
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::DoomFileByKey(const nsACString& aKey,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::DoomFileByKey() [key=%s, listener=%p]",
+ PromiseFlatCString(aKey).get(), aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<DoomFileByKeyEvent> ev = new DoomFileByKeyEvent(aKey, aCallback);
+ rv = ioMan->mIOThread->DispatchAfterPendingOpens(ev);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::DoomFileByKeyInternal(const SHA1Sum::Hash* aHash) {
+ LOG((
+ "CacheFileIOManager::DoomFileByKeyInternal() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHash)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ nsresult rv;
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!mCacheDirectory) {
+ return NS_ERROR_FILE_INVALID_PATH;
+ }
+
+ // Find active handle
+ RefPtr<CacheFileHandle> handle;
+ mHandles.GetHandle(aHash, getter_AddRefs(handle));
+
+ if (handle) {
+ handle->Log();
+
+ return DoomFileInternal(handle);
+ }
+
+ CacheIOThread::Cancelable cancelable(true);
+
+ // There is no handle for this file, delete the file if exists
+ nsCOMPtr<nsIFile> file;
+ rv = GetFile(aHash, getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!exists) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ LOG(
+ ("CacheFileIOManager::DoomFileByKeyInternal() - Removing file from "
+ "disk"));
+ rv = file->Remove(false);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Cannot remove old entry from the disk");
+ LOG(
+ ("CacheFileIOManager::DoomFileByKeyInternal() - Removing file failed. "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+
+ CacheIndex::RemoveEntry(aHash);
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::ReleaseNSPRHandle(CacheFileHandle* aHandle) {
+ LOG(("CacheFileIOManager::ReleaseNSPRHandle() [handle=%p]", aHandle));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<ReleaseNSPRHandleEvent> ev = new ReleaseNSPRHandleEvent(aHandle);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::MaybeReleaseNSPRHandleInternal(
+ CacheFileHandle* aHandle, bool aIgnoreShutdownLag) {
+ LOG(
+ ("CacheFileIOManager::MaybeReleaseNSPRHandleInternal() [handle=%p, "
+ "ignore shutdown=%d]",
+ aHandle, aIgnoreShutdownLag));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+
+ if (aHandle->mFD) {
+ DebugOnly<bool> found;
+ found = mHandlesByLastUsed.RemoveElement(aHandle);
+ MOZ_ASSERT(found);
+ }
+
+ PRFileDesc* fd = aHandle->mFD;
+ aHandle->mFD = nullptr;
+
+ // Leak invalid (w/o metadata) and doomed handles immediately after shutdown.
+ // Leak other handles when past the shutdown time maximum lag.
+ if (
+#ifndef DEBUG
+ ((aHandle->mInvalid || aHandle->mIsDoomed) &&
+ MOZ_UNLIKELY(CacheObserver::ShuttingDown())) ||
+#endif
+ MOZ_UNLIKELY(!aIgnoreShutdownLag &&
+ CacheObserver::IsPastShutdownIOLag())) {
+    // Don't bother closing this file. Returning a failure code from here
+    // will cause any following IO operation on the file (mainly removal) to
+    // be bypassed, which is what we want.
+ // For mInvalid == true the entry will never be used, since it doesn't
+ // have correct metadata, thus we don't need to worry about removing it.
+ // For mIsDoomed == true the file is already in the doomed sub-dir and
+ // will be removed on next session start.
+ LOG((" past the shutdown I/O lag, leaking file handle"));
+ return NS_ERROR_ILLEGAL_DURING_SHUTDOWN;
+ }
+
+ if (!fd) {
+ // The filedesc has already been closed before, just let go.
+ return NS_OK;
+ }
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ PRStatus status = PR_Close(fd);
+ if (status != PR_SUCCESS) {
+ LOG(
+ ("CacheFileIOManager::MaybeReleaseNSPRHandleInternal() "
+ "failed to close [handle=%p, status=%u]",
+ aHandle, status));
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG(("CacheFileIOManager::MaybeReleaseNSPRHandleInternal() DONE"));
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::TruncateSeekSetEOF(
+ CacheFileHandle* aHandle, int64_t aTruncatePos, int64_t aEOFPos,
+ CacheFileIOListener* aCallback) {
+ LOG(
+ ("CacheFileIOManager::TruncateSeekSetEOF() [handle=%p, "
+ "truncatePos=%" PRId64 ", "
+ "EOFPos=%" PRId64 ", listener=%p]",
+ aHandle, aTruncatePos, aEOFPos, aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || (aCallback && aCallback->IsKilled()) || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<TruncateSeekSetEOFEvent> ev =
+ new TruncateSeekSetEOFEvent(aHandle, aTruncatePos, aEOFPos, aCallback);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+// static
+void CacheFileIOManager::GetCacheDirectory(nsIFile** result) {
+ *result = nullptr;
+
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (!ioMan || !ioMan->mCacheDirectory) {
+ return;
+ }
+
+ ioMan->mCacheDirectory->Clone(result);
+}
+
+#if defined(MOZ_WIDGET_ANDROID)
+
+// static
+void CacheFileIOManager::GetProfilelessCacheDirectory(nsIFile** result) {
+ *result = nullptr;
+
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (!ioMan || !ioMan->mCacheProfilelessDirectory) {
+ return;
+ }
+
+ ioMan->mCacheProfilelessDirectory->Clone(result);
+}
+
+#endif
+
+// static
+nsresult CacheFileIOManager::GetEntryInfo(
+ const SHA1Sum::Hash* aHash,
+ CacheStorageService::EntryInfoCallback* aCallback) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsresult rv;
+
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsAutoCString enhanceId;
+ nsAutoCString uriSpec;
+
+ RefPtr<CacheFileHandle> handle;
+ ioMan->mHandles.GetHandle(aHash, getter_AddRefs(handle));
+ if (handle) {
+ RefPtr<nsILoadContextInfo> info =
+ CacheFileUtils::ParseKey(handle->Key(), &enhanceId, &uriSpec);
+
+ MOZ_ASSERT(info);
+ if (!info) {
+ return NS_OK; // ignore
+ }
+
+ RefPtr<CacheStorageService> service = CacheStorageService::Self();
+ if (!service) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ // Invokes OnCacheEntryInfo when an existing entry is found
+ if (service->GetCacheEntryInfo(info, enhanceId, uriSpec, aCallback)) {
+ return NS_OK;
+ }
+
+ // When we are here, there is no existing entry and we need
+    // to synchronously load metadata from a disk file.
+ }
+
+ // Locate the actual file
+ nsCOMPtr<nsIFile> file;
+ ioMan->GetFile(aHash, getter_AddRefs(file));
+
+ // Read metadata from the file synchronously
+ RefPtr<CacheFileMetadata> metadata = new CacheFileMetadata();
+ rv = metadata->SyncReadMetadata(file);
+ if (NS_FAILED(rv)) {
+ return NS_OK;
+ }
+
+ // Now get the context + enhance id + URL from the key.
+ RefPtr<nsILoadContextInfo> info =
+ CacheFileUtils::ParseKey(metadata->GetKey(), &enhanceId, &uriSpec);
+ MOZ_ASSERT(info);
+ if (!info) {
+ return NS_OK;
+ }
+
+ // Pick all data to pass to the callback.
+ int64_t dataSize = metadata->Offset();
+ uint32_t fetchCount = metadata->GetFetchCount();
+ uint32_t expirationTime = metadata->GetExpirationTime();
+ uint32_t lastModified = metadata->GetLastModified();
+
+ // Call directly on the callback.
+ aCallback->OnEntryInfo(uriSpec, enhanceId, dataSize, fetchCount, lastModified,
+ expirationTime, metadata->Pinned(), info);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::TruncateSeekSetEOFInternal(
+ CacheFileHandle* aHandle, int64_t aTruncatePos, int64_t aEOFPos) {
+ LOG(
+ ("CacheFileIOManager::TruncateSeekSetEOFInternal() [handle=%p, "
+ "truncatePos=%" PRId64 ", EOFPos=%" PRId64 "]",
+ aHandle, aTruncatePos, aEOFPos));
+
+ nsresult rv;
+
+ if (aHandle->mKilled) {
+ LOG((" handle already killed, file not truncated"));
+ return NS_OK;
+ }
+
+ if (CacheObserver::ShuttingDown() && !aHandle->mFD) {
+ aHandle->mKilled = true;
+ LOG((" killing the handle, file not truncated"));
+ return NS_OK;
+ }
+
+ CacheIOThread::Cancelable cancelable(!aHandle->IsSpecialFile());
+
+ if (!aHandle->mFileExists) {
+ rv = CreateFile(aHandle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (!aHandle->mFD) {
+ rv = OpenNSPRHandle(aHandle);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ NSPRHandleUsed(aHandle);
+ }
+
+ // Check again, OpenNSPRHandle could figure out the file was gone.
+ if (!aHandle->mFileExists) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+  // When this operation would increase the cache size, check whether the cache
+  // size has reached the hard limit and whether it would cause critically low
+  // disk space.
+ if (aHandle->mFileSize < aEOFPos) {
+ if (mOverLimitEvicting && mCacheSizeOnHardLimit) {
+ LOG(
+ ("CacheFileIOManager::TruncateSeekSetEOFInternal() - failing because "
+ "cache size reached hard limit!"));
+ return NS_ERROR_FILE_DISK_FULL;
+ }
+
+ int64_t freeSpace;
+ rv = mCacheDirectory->GetDiskSpaceAvailable(&freeSpace);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ freeSpace = -1;
+ LOG(
+ ("CacheFileIOManager::TruncateSeekSetEOFInternal() - "
+ "GetDiskSpaceAvailable() failed! [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ } else {
+ freeSpace >>= 10; // bytes to kilobytes
+ uint32_t limit = CacheObserver::DiskFreeSpaceHardLimit();
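+      // The operation grows the file by (aEOFPos - mFileSize); refuse it when
+      // the free space remaining after that growth would drop below the
+      // configured hard limit.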
+ if (freeSpace - aEOFPos + aHandle->mFileSize < limit) {
+ LOG(
+ ("CacheFileIOManager::TruncateSeekSetEOFInternal() - Low free space"
+ ", refusing to write! [freeSpace=%" PRId64 "kB, limit=%ukB]",
+ freeSpace, limit));
+ return NS_ERROR_FILE_DISK_FULL;
+ }
+ }
+ }
+
+ // This operation always invalidates the entry
+ aHandle->mInvalid = true;
+
+ rv = TruncFile(aHandle->mFD, aTruncatePos);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (aTruncatePos != aEOFPos) {
+ rv = TruncFile(aHandle->mFD, aEOFPos);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ uint32_t oldSizeInK = aHandle->FileSizeInK();
+ aHandle->mFileSize = aEOFPos;
+ uint32_t newSizeInK = aHandle->FileSizeInK();
+
+ if (oldSizeInK != newSizeInK && !aHandle->IsDoomed() &&
+ !aHandle->IsSpecialFile()) {
+ CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr, nullptr,
+ nullptr, &newSizeInK);
+
+ if (oldSizeInK < newSizeInK) {
+ EvictIfOverLimitInternal();
+ }
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::RenameFile(CacheFileHandle* aHandle,
+ const nsACString& aNewName,
+ CacheFileIOListener* aCallback) {
+ LOG(("CacheFileIOManager::RenameFile() [handle=%p, newName=%s, listener=%p]",
+ aHandle, PromiseFlatCString(aNewName).get(), aCallback));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!aHandle->IsSpecialFile()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ RefPtr<RenameFileEvent> ev =
+ new RenameFileEvent(aHandle, aNewName, aCallback);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::RenameFileInternal(CacheFileHandle* aHandle,
+ const nsACString& aNewName) {
+ LOG(("CacheFileIOManager::RenameFileInternal() [handle=%p, newName=%s]",
+ aHandle, PromiseFlatCString(aNewName).get()));
+
+ nsresult rv;
+
+ MOZ_ASSERT(aHandle->IsSpecialFile());
+
+ if (aHandle->IsDoomed()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // Doom old handle if it exists and is not doomed
+ for (uint32_t i = 0; i < mSpecialHandles.Length(); i++) {
+ if (!mSpecialHandles[i]->IsDoomed() &&
+ mSpecialHandles[i]->Key() == aNewName) {
+ MOZ_ASSERT(aHandle != mSpecialHandles[i]);
+ rv = DoomFileInternal(mSpecialHandles[i]);
+ NS_ENSURE_SUCCESS(rv, rv);
+ break;
+ }
+ }
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetSpecialFile(aNewName, getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (exists) {
+ LOG(
+ ("CacheFileIOManager::RenameFileInternal() - Removing old file from "
+ "disk"));
+ rv = file->Remove(false);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Cannot remove file from the disk");
+ LOG(
+ ("CacheFileIOManager::RenameFileInternal() - Removing old file failed"
+ ". [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+ }
+
+ if (!aHandle->FileExists()) {
+ aHandle->mKey = aNewName;
+ return NS_OK;
+ }
+
+ rv = MaybeReleaseNSPRHandleInternal(aHandle, true);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = aHandle->mFile->MoveToNative(nullptr, aNewName);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aHandle->mKey = aNewName;
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::EvictIfOverLimit() {
+ LOG(("CacheFileIOManager::EvictIfOverLimit()"));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsCOMPtr<nsIRunnable> ev;
+ ev = NewRunnableMethod("net::CacheFileIOManager::EvictIfOverLimitInternal",
+ ioMan, &CacheFileIOManager::EvictIfOverLimitInternal);
+
+ rv = ioMan->mIOThread->Dispatch(ev, CacheIOThread::EVICT);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::EvictIfOverLimitInternal() {
+ LOG(("CacheFileIOManager::EvictIfOverLimitInternal()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (mOverLimitEvicting) {
+ LOG(
+ ("CacheFileIOManager::EvictIfOverLimitInternal() - Eviction already "
+ "running."));
+ return NS_OK;
+ }
+
+ CacheIOThread::Cancelable cancelable(true);
+
+ int64_t freeSpace;
+ rv = mCacheDirectory->GetDiskSpaceAvailable(&freeSpace);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ freeSpace = -1;
+
+ // Do not change smart size.
+ LOG(
+ ("CacheFileIOManager::EvictIfOverLimitInternal() - "
+ "GetDiskSpaceAvailable() failed! [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ } else {
+ freeSpace >>= 10; // bytes to kilobytes
+ UpdateSmartCacheSize(freeSpace);
+ }
+
+ uint32_t cacheUsage;
+ rv = CacheIndex::GetCacheSize(&cacheUsage);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t cacheLimit = CacheObserver::DiskCacheCapacity();
+ uint32_t freeSpaceLimit = CacheObserver::DiskFreeSpaceSoftLimit();
+
+ if (cacheUsage <= cacheLimit &&
+ (freeSpace == -1 || freeSpace >= freeSpaceLimit)) {
+ LOG(
+ ("CacheFileIOManager::EvictIfOverLimitInternal() - Cache size and free "
+ "space in limits. [cacheSize=%ukB, cacheSizeLimit=%ukB, "
+ "freeSpace=%" PRId64 "kB, freeSpaceLimit=%ukB]",
+ cacheUsage, cacheLimit, freeSpace, freeSpaceLimit));
+ return NS_OK;
+ }
+
+ LOG(
+ ("CacheFileIOManager::EvictIfOverLimitInternal() - Cache size exceeded "
+ "limit. Starting overlimit eviction. [cacheSize=%ukB, limit=%ukB]",
+ cacheUsage, cacheLimit));
+
+ nsCOMPtr<nsIRunnable> ev;
+ ev = NewRunnableMethod("net::CacheFileIOManager::OverLimitEvictionInternal",
+ this, &CacheFileIOManager::OverLimitEvictionInternal);
+
+ rv = mIOThread->Dispatch(ev, CacheIOThread::EVICT);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mOverLimitEvicting = true;
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::OverLimitEvictionInternal() {
+ LOG(("CacheFileIOManager::OverLimitEvictionInternal()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ // mOverLimitEvicting is accessed only on IO thread, so we can set it to false
+ // here and set it to true again once we dispatch another event that will
+ // continue with the eviction. The reason why we do so is that we can fail
+ // early anywhere in this method and the variable will contain a correct
+ // value. Otherwise we would need to set it to false on every failing place.
+ mOverLimitEvicting = false;
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ while (true) {
+ int64_t freeSpace;
+ rv = mCacheDirectory->GetDiskSpaceAvailable(&freeSpace);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ freeSpace = -1;
+
+ // Do not change smart size.
+ LOG(
+ ("CacheFileIOManager::EvictIfOverLimitInternal() - "
+ "GetDiskSpaceAvailable() failed! [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ } else {
+ freeSpace >>= 10; // bytes to kilobytes
+ UpdateSmartCacheSize(freeSpace);
+ }
+
+ uint32_t cacheUsage;
+ rv = CacheIndex::GetCacheSize(&cacheUsage);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t cacheLimit = CacheObserver::DiskCacheCapacity();
+ uint32_t freeSpaceLimit = CacheObserver::DiskFreeSpaceSoftLimit();
+
+ if (cacheUsage > cacheLimit) {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - Cache size over "
+ "limit. [cacheSize=%ukB, limit=%ukB]",
+ cacheUsage, cacheLimit));
+
+      // We allow the cache size to go over the specified limit. Eviction
+      // should keep the size within the limit in the long run, but in case
+      // eviction is too slow, the cache could go way over the limit. To
+      // prevent this we set the flag mCacheSizeOnHardLimit when the size
+      // reaches 105% of the limit; WriteInternal() and
+      // TruncateSeekSetEOFInternal() then refuse to cache additional data.
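+      // cacheLimit / 20 is 5% of the limit, so e.g. with a 250,000 kB limit
+      // the flag is raised once usage exceeds 262,500 kB.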
+ if ((cacheUsage - cacheLimit) > (cacheLimit / 20)) {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - Cache size "
+ "reached hard limit."));
+ mCacheSizeOnHardLimit = true;
+ } else {
+ mCacheSizeOnHardLimit = false;
+ }
+ } else if (freeSpace != -1 && freeSpace < freeSpaceLimit) {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - Free space under "
+ "limit. [freeSpace=%" PRId64 "kB, freeSpaceLimit=%ukB]",
+ freeSpace, freeSpaceLimit));
+ } else {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - Cache size and "
+ "free space in limits. [cacheSize=%ukB, cacheSizeLimit=%ukB, "
+ "freeSpace=%" PRId64 "kB, freeSpaceLimit=%ukB]",
+ cacheUsage, cacheLimit, freeSpace, freeSpaceLimit));
+
+ mCacheSizeOnHardLimit = false;
+ return NS_OK;
+ }
+
+ if (CacheIOThread::YieldAndRerun()) {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - Breaking loop "
+ "for higher level events."));
+ mOverLimitEvicting = true;
+ return NS_OK;
+ }
+
+ SHA1Sum::Hash hash;
+ uint32_t cnt;
+ static uint32_t consecutiveFailures = 0;
+ rv = CacheIndex::GetEntryForEviction(false, &hash, &cnt);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = DoomFileByKeyInternal(&hash);
+ if (NS_SUCCEEDED(rv)) {
+ consecutiveFailures = 0;
+ } else if (rv == NS_ERROR_NOT_AVAILABLE) {
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - "
+ "DoomFileByKeyInternal() failed. [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ // TODO index is outdated, start update
+
+ // Make sure index won't return the same entry again
+ CacheIndex::RemoveEntry(&hash);
+ consecutiveFailures = 0;
+ } else {
+ // This shouldn't normally happen, but the eviction must not fail
+ // completely if we ever encounter this problem.
+ NS_WARNING(
+ "CacheFileIOManager::OverLimitEvictionInternal() - Unexpected "
+ "failure of DoomFileByKeyInternal()");
+
+ LOG(
+ ("CacheFileIOManager::OverLimitEvictionInternal() - "
+ "DoomFileByKeyInternal() failed. [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+
+ // Normally, CacheIndex::UpdateEntry() is called only to update newly
+ // created/opened entries which are always fresh and UpdateEntry() expects
+ // and checks this flag. The way we use UpdateEntry() here is a kind of
+ // hack and we must make sure the flag is set by calling
+ // EnsureEntryExists().
+ rv = CacheIndex::EnsureEntryExists(&hash);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+      // Move the entry to the end of both lists to make sure we won't end up
+ // failing on one entry forever.
+ uint32_t frecency = 0;
+ rv = CacheIndex::UpdateEntry(&hash, &frecency, nullptr, nullptr, nullptr,
+ nullptr, nullptr);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ consecutiveFailures++;
+ if (consecutiveFailures >= cnt) {
+ // This doesn't necessarily mean that we've tried to doom every entry
+ // but we've reached a sane number of tries. It is likely that another
+ // eviction will start soon. And as said earlier, this normally doesn't
+ // happen at all.
+ return NS_OK;
+ }
+ }
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::EvictAll() {
+ LOG(("CacheFileIOManager::EvictAll()"));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsCOMPtr<nsIRunnable> ev;
+ ev = NewRunnableMethod("net::CacheFileIOManager::EvictAllInternal", ioMan,
+ &CacheFileIOManager::EvictAllInternal);
+
+ rv = ioMan->mIOThread->DispatchAfterPendingOpens(ev);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+namespace {
+
+class EvictionNotifierRunnable : public Runnable {
+ public:
+ EvictionNotifierRunnable() : Runnable("EvictionNotifierRunnable") {}
+ NS_DECL_NSIRUNNABLE
+};
+
+NS_IMETHODIMP
+EvictionNotifierRunnable::Run() {
+ nsCOMPtr<nsIObserverService> obsSvc = mozilla::services::GetObserverService();
+ if (obsSvc) {
+ obsSvc->NotifyObservers(nullptr, "cacheservice:empty-cache", nullptr);
+ }
+ return NS_OK;
+}
+
+} // namespace
+
+nsresult CacheFileIOManager::EvictAllInternal() {
+ LOG(("CacheFileIOManager::EvictAllInternal()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ RefPtr<EvictionNotifierRunnable> r = new EvictionNotifierRunnable();
+
+ if (!mCacheDirectory) {
+ // This is a kind of hack. Somebody called EvictAll() without a profile.
+    // This happens in xpcshell tests that use the cache without a profile. We
+    // need to notify observers in this case since the tests are waiting for it.
+ NS_DispatchToMainThread(r);
+ return NS_ERROR_FILE_INVALID_PATH;
+ }
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!mTreeCreated) {
+ rv = CreateCacheTree();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ // Doom all active handles
+ nsTArray<RefPtr<CacheFileHandle> > handles;
+ mHandles.GetActiveHandles(&handles);
+
+ for (uint32_t i = 0; i < handles.Length(); ++i) {
+ rv = DoomFileInternal(handles[i]);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileIOManager::EvictAllInternal() - Cannot doom handle "
+ "[handle=%p]",
+ handles[i].get()));
+ }
+ }
+
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = file->AppendNative(nsLiteralCString(ENTRIES_DIR));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ // Trash current entries directory
+ rv = TrashDirectory(file);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ // Files are now inaccessible in entries directory, notify observers.
+ NS_DispatchToMainThread(r);
+
+ // Create a new empty entries directory
+ rv = CheckAndCreateDir(mCacheDirectory, ENTRIES_DIR, false);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ CacheIndex::RemoveAll();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::EvictByContext(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin) {
+ LOG(("CacheFileIOManager::EvictByContext() [loadContextInfo=%p]",
+ aLoadContextInfo));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsCOMPtr<nsIRunnable> ev;
+ ev = NewRunnableMethod<nsCOMPtr<nsILoadContextInfo>, bool, nsString>(
+ "net::CacheFileIOManager::EvictByContextInternal", ioMan,
+ &CacheFileIOManager::EvictByContextInternal, aLoadContextInfo, aPinned,
+ aOrigin);
+
+ rv = ioMan->mIOThread->DispatchAfterPendingOpens(ev);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::EvictByContextInternal(
+ nsILoadContextInfo* aLoadContextInfo, bool aPinned,
+ const nsAString& aOrigin) {
+ LOG(
+ ("CacheFileIOManager::EvictByContextInternal() [loadContextInfo=%p, "
+ "pinned=%d]",
+ aLoadContextInfo, aPinned));
+
+ nsresult rv;
+
+ if (aLoadContextInfo) {
+ nsAutoCString suffix;
+ aLoadContextInfo->OriginAttributesPtr()->CreateSuffix(suffix);
+ LOG((" anonymous=%u, suffix=%s]", aLoadContextInfo->IsAnonymous(),
+ suffix.get()));
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ MOZ_ASSERT(!aLoadContextInfo->IsPrivate());
+ if (aLoadContextInfo->IsPrivate()) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ }
+
+ if (!mCacheDirectory) {
+ // This is a kind of hack. Somebody called EvictAll() without a profile.
+    // This happens in xpcshell tests that use the cache without a profile. We
+    // need to notify observers in this case since the tests are waiting for it.
+    // Also notify when aPinned == true; those observers are interested as well.
+ if (!aLoadContextInfo) {
+ RefPtr<EvictionNotifierRunnable> r = new EvictionNotifierRunnable();
+ NS_DispatchToMainThread(r);
+ }
+ return NS_ERROR_FILE_INVALID_PATH;
+ }
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!mTreeCreated) {
+ rv = CreateCacheTree();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ NS_ConvertUTF16toUTF8 origin(aOrigin);
+
+  // Doom all active handles that match the load context
+ nsTArray<RefPtr<CacheFileHandle> > handles;
+ mHandles.GetActiveHandles(&handles);
+
+ for (uint32_t i = 0; i < handles.Length(); ++i) {
+ CacheFileHandle* handle = handles[i];
+
+ nsAutoCString uriSpec;
+ RefPtr<nsILoadContextInfo> info =
+ CacheFileUtils::ParseKey(handle->Key(), nullptr, &uriSpec);
+ if (!info) {
+ LOG(
+ ("CacheFileIOManager::EvictByContextInternal() - Cannot parse key in "
+ "handle! [handle=%p, key=%s]",
+ handle, handle->Key().get()));
+ MOZ_CRASH("Unexpected error!");
+ }
+
+ if (aLoadContextInfo && !info->Equals(aLoadContextInfo)) {
+ continue;
+ }
+
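+    // When an origin filter is supplied, only entries whose URL parses to
+    // exactly that origin are doomed; entries whose URL cannot be parsed are
+    // skipped.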
+ if (!origin.IsEmpty()) {
+ RefPtr<MozURL> url;
+ rv = MozURL::Init(getter_AddRefs(url), uriSpec);
+ if (NS_FAILED(rv)) {
+ continue;
+ }
+
+ nsAutoCString urlOrigin;
+ url->Origin(urlOrigin);
+
+ if (!urlOrigin.Equals(origin)) {
+ continue;
+ }
+ }
+
+    // The handle is doomed only when its pinning status is known and matches;
+    // otherwise the doom decision is deferred until the pinning status is
+    // determined.
+ rv = DoomFileInternal(handle,
+ aPinned ? CacheFileIOManager::DOOM_WHEN_PINNED
+ : CacheFileIOManager::DOOM_WHEN_NON_PINNED);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileIOManager::EvictByContextInternal() - Cannot doom handle"
+ " [handle=%p]",
+ handle));
+ }
+ }
+
+ if (!aLoadContextInfo) {
+ RefPtr<EvictionNotifierRunnable> r = new EvictionNotifierRunnable();
+ NS_DispatchToMainThread(r);
+ }
+
+ if (!mContextEvictor) {
+ mContextEvictor = new CacheFileContextEvictor();
+ mContextEvictor->Init(mCacheDirectory);
+ }
+
+ mContextEvictor->AddContext(aLoadContextInfo, aPinned, aOrigin);
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::CacheIndexStateChanged() {
+ LOG(("CacheFileIOManager::CacheIndexStateChanged()"));
+
+ nsresult rv;
+
+ // CacheFileIOManager lives longer than CacheIndex so gInstance must be
+ // non-null here.
+ MOZ_ASSERT(gInstance);
+
+  // We have to re-dispatch even if we are on the IO thread to prevent
+  // reentering the lock in CacheIndex.
+ nsCOMPtr<nsIRunnable> ev = NewRunnableMethod(
+ "net::CacheFileIOManager::CacheIndexStateChangedInternal",
+ gInstance.get(), &CacheFileIOManager::CacheIndexStateChangedInternal);
+
+ nsCOMPtr<nsIEventTarget> ioTarget = IOTarget();
+ MOZ_ASSERT(ioTarget);
+
+ rv = ioTarget->Dispatch(ev, nsIEventTarget::DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+void CacheFileIOManager::CacheIndexStateChangedInternal() {
+ if (mShuttingDown) {
+ // ignore notification during shutdown
+ return;
+ }
+
+ if (!mContextEvictor) {
+ return;
+ }
+
+ mContextEvictor->CacheIndexStateChanged();
+}
+
+nsresult CacheFileIOManager::TrashDirectory(nsIFile* aFile) {
+ LOG(("CacheFileIOManager::TrashDirectory() [file=%s]",
+ aFile->HumanReadablePath().get()));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+ MOZ_ASSERT(mCacheDirectory);
+
+ // When the directory is empty, it is cheaper to remove it directly instead of
+ // using the trash mechanism.
+ bool isEmpty;
+ rv = IsEmptyDirectory(aFile, &isEmpty);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (isEmpty) {
+ rv = aFile->Remove(false);
+ LOG(
+ ("CacheFileIOManager::TrashDirectory() - Directory removed "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+#ifdef DEBUG
+ nsCOMPtr<nsIFile> dirCheck;
+ rv = aFile->GetParent(getter_AddRefs(dirCheck));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool equals = false;
+ rv = dirCheck->Equals(mCacheDirectory, &equals);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MOZ_ASSERT(equals);
+#endif
+
+ nsCOMPtr<nsIFile> dir, trash;
+ nsAutoCString leaf;
+
+ rv = aFile->Clone(getter_AddRefs(dir));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = aFile->Clone(getter_AddRefs(trash));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ const int32_t kMaxTries = 16;
+ srand(static_cast<unsigned>(PR_Now()));
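+  // Trash directories are named TRASH_DIR ("trash") followed by a random
+  // number, e.g. "trash12345"; we retry with a new random suffix until an
+  // unused name is found or kMaxTries attempts have been made.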
+ for (int32_t triesCount = 0;; ++triesCount) {
+ leaf = TRASH_DIR;
+ leaf.AppendInt(rand());
+ rv = trash->SetNativeLeafName(leaf);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool exists;
+ if (NS_SUCCEEDED(trash->Exists(&exists)) && !exists) {
+ break;
+ }
+
+ LOG(
+ ("CacheFileIOManager::TrashDirectory() - Trash directory already "
+ "exists [leaf=%s]",
+ leaf.get()));
+
+ if (triesCount == kMaxTries) {
+ LOG(
+ ("CacheFileIOManager::TrashDirectory() - Could not find unused trash "
+ "directory in %d tries.",
+ kMaxTries));
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ LOG(("CacheFileIOManager::TrashDirectory() - Renaming directory [leaf=%s]",
+ leaf.get()));
+
+ rv = dir->MoveToNative(nullptr, leaf);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ StartRemovingTrash();
+ return NS_OK;
+}
+
+// static
+void CacheFileIOManager::OnTrashTimer(nsITimer* aTimer, void* aClosure) {
+ LOG(("CacheFileIOManager::OnTrashTimer() [timer=%p, closure=%p]", aTimer,
+ aClosure));
+
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (!ioMan) {
+ return;
+ }
+
+ ioMan->mTrashTimer = nullptr;
+ ioMan->StartRemovingTrash();
+}
+
+nsresult CacheFileIOManager::StartRemovingTrash() {
+ LOG(("CacheFileIOManager::StartRemovingTrash()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!mCacheDirectory) {
+ return NS_ERROR_FILE_INVALID_PATH;
+ }
+
+ if (mTrashTimer) {
+ LOG(("CacheFileIOManager::StartRemovingTrash() - Trash timer exists."));
+ return NS_OK;
+ }
+
+ if (mRemovingTrashDirs) {
+ LOG(
+ ("CacheFileIOManager::StartRemovingTrash() - Trash removing in "
+ "progress."));
+ return NS_OK;
+ }
+
+ uint32_t elapsed = (TimeStamp::NowLoRes() - mStartTime).ToMilliseconds();
+ if (elapsed < kRemoveTrashStartDelay) {
+ nsCOMPtr<nsIEventTarget> ioTarget = IOTarget();
+ MOZ_ASSERT(ioTarget);
+
+ return NS_NewTimerWithFuncCallback(
+ getter_AddRefs(mTrashTimer), CacheFileIOManager::OnTrashTimer, nullptr,
+ kRemoveTrashStartDelay - elapsed, nsITimer::TYPE_ONE_SHOT,
+ "net::CacheFileIOManager::StartRemovingTrash", ioTarget);
+ }
+
+ nsCOMPtr<nsIRunnable> ev;
+ ev = NewRunnableMethod("net::CacheFileIOManager::RemoveTrashInternal", this,
+ &CacheFileIOManager::RemoveTrashInternal);
+
+ rv = mIOThread->Dispatch(ev, CacheIOThread::EVICT);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mRemovingTrashDirs = true;
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::RemoveTrashInternal() {
+ LOG(("CacheFileIOManager::RemoveTrashInternal()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ if (mShuttingDown) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ CacheIOThread::Cancelable cancelable(true);
+
+ MOZ_ASSERT(!mTrashTimer);
+ MOZ_ASSERT(mRemovingTrashDirs);
+
+ if (!mTreeCreated) {
+ rv = CreateCacheTree();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ // mRemovingTrashDirs is accessed only on IO thread, so we can drop the flag
+ // here and set it again once we dispatch a continuation event. By doing so,
+ // we don't have to drop the flag on any possible early return.
+ mRemovingTrashDirs = false;
+
+ while (true) {
+ if (CacheIOThread::YieldAndRerun()) {
+ LOG(
+ ("CacheFileIOManager::RemoveTrashInternal() - Breaking loop for "
+ "higher level events."));
+ mRemovingTrashDirs = true;
+ return NS_OK;
+ }
+
+ // Find some trash directory
+ if (!mTrashDir) {
+ MOZ_ASSERT(!mTrashDirEnumerator);
+
+ rv = FindTrashDirToRemove();
+ if (rv == NS_ERROR_NOT_AVAILABLE) {
+ LOG(
+ ("CacheFileIOManager::RemoveTrashInternal() - No trash directory "
+ "found."));
+ return NS_OK;
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = mTrashDir->GetDirectoryEntries(getter_AddRefs(mTrashDirEnumerator));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ continue; // check elapsed time
+ }
+
+    // We null out mTrashDirEnumerator once we remove all files in the
+    // directory, so remove the trash directory if we don't have an enumerator.
+ if (!mTrashDirEnumerator) {
+ rv = mTrashDir->Remove(false);
+ if (NS_FAILED(rv)) {
+ // There is no reason why removing an empty directory should fail, but
+ // if it does, we should continue and try to remove all other trash
+ // directories.
+ nsAutoCString leafName;
+ mTrashDir->GetNativeLeafName(leafName);
+ mFailedTrashDirs.AppendElement(leafName);
+ LOG(
+ ("CacheFileIOManager::RemoveTrashInternal() - Cannot remove "
+ "trashdir. [name=%s]",
+ leafName.get()));
+ }
+
+ mTrashDir = nullptr;
+ continue; // check elapsed time
+ }
+
+ nsCOMPtr<nsIFile> file;
+ rv = mTrashDirEnumerator->GetNextFile(getter_AddRefs(file));
+ if (!file) {
+ mTrashDirEnumerator->Close();
+ mTrashDirEnumerator = nullptr;
+ continue; // check elapsed time
+ }
+ bool isDir = false;
+ file->IsDirectory(&isDir);
+ if (isDir) {
+ NS_WARNING(
+ "Found a directory in a trash directory! It will be removed "
+ "recursively, but this can block IO thread for a while!");
+ if (LOG_ENABLED()) {
+ LOG(
+ ("CacheFileIOManager::RemoveTrashInternal() - Found a directory in "
+ "a trash "
+ "directory! It will be removed recursively, but this can block IO "
+ "thread for a while! [file=%s]",
+ file->HumanReadablePath().get()));
+ }
+ }
+ file->Remove(isDir);
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::FindTrashDirToRemove() {
+ LOG(("CacheFileIOManager::FindTrashDirToRemove()"));
+
+ nsresult rv;
+
+  // We call this method on the main thread during shutdown when the user wants
+  // to remove all cache files.
+ MOZ_ASSERT(mIOThread->IsCurrentThread() || mShuttingDown);
+
+ nsCOMPtr<nsIDirectoryEnumerator> iter;
+ rv = mCacheDirectory->GetDirectoryEntries(getter_AddRefs(iter));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> file;
+ while (NS_SUCCEEDED(iter->GetNextFile(getter_AddRefs(file))) && file) {
+ bool isDir = false;
+ file->IsDirectory(&isDir);
+ if (!isDir) {
+ continue;
+ }
+
+ nsAutoCString leafName;
+ rv = file->GetNativeLeafName(leafName);
+ if (NS_FAILED(rv)) {
+ continue;
+ }
+
+ if (leafName.Length() < strlen(TRASH_DIR)) {
+ continue;
+ }
+
+ if (!StringBeginsWith(leafName, nsLiteralCString(TRASH_DIR))) {
+ continue;
+ }
+
+ if (mFailedTrashDirs.Contains(leafName)) {
+ continue;
+ }
+
+ LOG(("CacheFileIOManager::FindTrashDirToRemove() - Returning directory %s",
+ leafName.get()));
+
+ mTrashDir = file;
+ return NS_OK;
+ }
+
+ // When we're here we've tried to delete all trash directories. Clear
+ // mFailedTrashDirs so we will try to delete them again when we start removing
+ // trash directories next time.
+ mFailedTrashDirs.Clear();
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+// static
+nsresult CacheFileIOManager::InitIndexEntry(CacheFileHandle* aHandle,
+ OriginAttrsHash aOriginAttrsHash,
+ bool aAnonymous, bool aPinning) {
+ LOG(
+ ("CacheFileIOManager::InitIndexEntry() [handle=%p, "
+ "originAttrsHash=%" PRIx64 ", "
+ "anonymous=%d, pinning=%d]",
+ aHandle, aOriginAttrsHash, aAnonymous, aPinning));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (aHandle->IsSpecialFile()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ RefPtr<InitIndexEntryEvent> ev =
+ new InitIndexEntryEvent(aHandle, aOriginAttrsHash, aAnonymous, aPinning);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheFileIOManager::UpdateIndexEntry(CacheFileHandle* aHandle,
+ const uint32_t* aFrecency,
+ const bool* aHasAltData,
+ const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType) {
+ LOG(
+ ("CacheFileIOManager::UpdateIndexEntry() [handle=%p, frecency=%s, "
+ "hasAltData=%s, onStartTime=%s, onStopTime=%s, contentType=%s]",
+ aHandle, aFrecency ? nsPrintfCString("%u", *aFrecency).get() : "",
+ aHasAltData ? (*aHasAltData ? "true" : "false") : "",
+ aOnStartTime ? nsPrintfCString("%u", *aOnStartTime).get() : "",
+ aOnStopTime ? nsPrintfCString("%u", *aOnStopTime).get() : "",
+ aContentType ? nsPrintfCString("%u", *aContentType).get() : ""));
+
+ nsresult rv;
+ RefPtr<CacheFileIOManager> ioMan = gInstance;
+
+ if (aHandle->IsClosed() || !ioMan) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (aHandle->IsSpecialFile()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ RefPtr<UpdateIndexEntryEvent> ev = new UpdateIndexEntryEvent(
+ aHandle, aFrecency, aHasAltData, aOnStartTime, aOnStopTime, aContentType);
+ rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
+ ? CacheIOThread::WRITE_PRIORITY
+ : CacheIOThread::WRITE);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::CreateFile(CacheFileHandle* aHandle) {
+ MOZ_ASSERT(!aHandle->mFD);
+ MOZ_ASSERT(aHandle->mFile);
+
+ nsresult rv;
+
+ if (aHandle->IsDoomed()) {
+ nsCOMPtr<nsIFile> file;
+
+ rv = GetDoomedFile(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aHandle->mFile.swap(file);
+ } else {
+ bool exists;
+ if (NS_SUCCEEDED(aHandle->mFile->Exists(&exists)) && exists) {
+ NS_WARNING("Found a file that should not exist!");
+ }
+ }
+
+ rv = OpenNSPRHandle(aHandle, true);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aHandle->mFileSize = 0;
+ return NS_OK;
+}
+
+// static
+void CacheFileIOManager::HashToStr(const SHA1Sum::Hash* aHash,
+ nsACString& _retval) {
+ _retval.Truncate();
+ const char hexChars[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
+ for (uint32_t i = 0; i < sizeof(SHA1Sum::Hash); i++) {
+ _retval.Append(hexChars[(*aHash)[i] >> 4]);
+ _retval.Append(hexChars[(*aHash)[i] & 0xF]);
+ }
+}
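+// For example, a hash whose first bytes are 0x0B 0xAD 0xF0 0x0D produces a
+// leaf name starting with "0BADF00D"; StrToHash() below performs the exact
+// inverse mapping.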
+
+// static
+nsresult CacheFileIOManager::StrToHash(const nsACString& aHash,
+ SHA1Sum::Hash* _retval) {
+ if (aHash.Length() != 2 * sizeof(SHA1Sum::Hash)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ for (uint32_t i = 0; i < aHash.Length(); i++) {
+ uint8_t value;
+
+ if (aHash[i] >= '0' && aHash[i] <= '9') {
+ value = aHash[i] - '0';
+ } else if (aHash[i] >= 'A' && aHash[i] <= 'F') {
+ value = aHash[i] - 'A' + 10;
+ } else if (aHash[i] >= 'a' && aHash[i] <= 'f') {
+ value = aHash[i] - 'a' + 10;
+ } else {
+ return NS_ERROR_INVALID_ARG;
+ }
+
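+    // Even-indexed characters fill the high nibble of each output byte,
+    // odd-indexed characters the low nibble.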
+ if (i % 2 == 0) {
+ (reinterpret_cast<uint8_t*>(_retval))[i / 2] = value << 4;
+ } else {
+ (reinterpret_cast<uint8_t*>(_retval))[i / 2] += value;
+ }
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::GetFile(const SHA1Sum::Hash* aHash,
+ nsIFile** _retval) {
+ nsresult rv;
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative(nsLiteralCString(ENTRIES_DIR));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString leafName;
+ HashToStr(aHash, leafName);
+
+ rv = file->AppendNative(leafName);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ file.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::GetSpecialFile(const nsACString& aKey,
+ nsIFile** _retval) {
+ nsresult rv;
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative(aKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ file.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::GetDoomedFile(nsIFile** _retval) {
+ nsresult rv;
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative(nsLiteralCString(DOOMED_DIR));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative("dummyleaf"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ const int32_t kMaxTries = 64;
+ srand(static_cast<unsigned>(PR_Now()));
+ nsAutoCString leafName;
+ for (int32_t triesCount = 0;; ++triesCount) {
+ leafName.AppendInt(rand());
+ rv = file->SetNativeLeafName(leafName);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool exists;
+ if (NS_SUCCEEDED(file->Exists(&exists)) && !exists) {
+ break;
+ }
+
+ if (triesCount == kMaxTries) {
+ LOG(
+ ("CacheFileIOManager::GetDoomedFile() - Could not find unused file "
+ "name in %d tries.",
+ kMaxTries));
+ return NS_ERROR_FAILURE;
+ }
+
+ leafName.Truncate();
+ }
+
+ file.swap(*_retval);
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::IsEmptyDirectory(nsIFile* aFile, bool* _retval) {
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ nsresult rv;
+
+ nsCOMPtr<nsIDirectoryEnumerator> enumerator;
+ rv = aFile->GetDirectoryEntries(getter_AddRefs(enumerator));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool hasMoreElements = false;
+ rv = enumerator->HasMoreElements(&hasMoreElements);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *_retval = !hasMoreElements;
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::CheckAndCreateDir(nsIFile* aFile, const char* aDir,
+ bool aEnsureEmptyDir) {
+ nsresult rv;
+
+ nsCOMPtr<nsIFile> file;
+ if (!aDir) {
+ file = aFile;
+ } else {
+ nsAutoCString dir(aDir);
+ rv = aFile->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = file->AppendNative(dir);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ bool exists = false;
+ rv = file->Exists(&exists);
+ if (NS_SUCCEEDED(rv) && exists) {
+ bool isDirectory = false;
+ rv = file->IsDirectory(&isDirectory);
+ if (NS_FAILED(rv) || !isDirectory) {
+ // Try to remove the file
+ rv = file->Remove(false);
+ if (NS_SUCCEEDED(rv)) {
+ exists = false;
+ }
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (aEnsureEmptyDir && NS_SUCCEEDED(rv) && exists) {
+ bool isEmpty;
+ rv = IsEmptyDirectory(file, &isEmpty);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!isEmpty) {
+      // Don't check the result; if this fails, it's OK. We do this only for
+      // the doomed directory, which doesn't need to be deleted at the cost of
+      // completely disabling the whole browser.
+ TrashDirectory(file);
+ }
+ }
+
+ if (NS_SUCCEEDED(rv) && !exists) {
+ rv = file->Create(nsIFile::DIRECTORY_TYPE, 0700);
+ }
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Cannot create directory");
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::CreateCacheTree() {
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+ MOZ_ASSERT(!mTreeCreated);
+
+ if (!mCacheDirectory || mTreeCreationFailed) {
+ return NS_ERROR_FILE_INVALID_PATH;
+ }
+
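+  // The layout created below is <cacheDirectory>/"entries" (ENTRIES_DIR) and
+  // <cacheDirectory>/"doomed" (DOOMED_DIR); TrashDirectory() later creates
+  // transient "trash<random>" siblings next to them.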
+ nsresult rv;
+
+ // Set the flag here and clear it again below when the tree is created
+ // successfully.
+ mTreeCreationFailed = true;
+
+ // ensure parent directory exists
+ nsCOMPtr<nsIFile> parentDir;
+ rv = mCacheDirectory->GetParent(getter_AddRefs(parentDir));
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = CheckAndCreateDir(parentDir, nullptr, false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // ensure cache directory exists
+ rv = CheckAndCreateDir(mCacheDirectory, nullptr, false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // ensure entries directory exists
+ rv = CheckAndCreateDir(mCacheDirectory, ENTRIES_DIR, false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // ensure doomed directory exists
+ rv = CheckAndCreateDir(mCacheDirectory, DOOMED_DIR, true);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mTreeCreated = true;
+ mTreeCreationFailed = false;
+
+ if (!mContextEvictor) {
+ RefPtr<CacheFileContextEvictor> contextEvictor;
+ contextEvictor = new CacheFileContextEvictor();
+
+ // Init() method will try to load unfinished contexts from the disk. Store
+ // the evictor as a member only when there is some unfinished job.
+ contextEvictor->Init(mCacheDirectory);
+ if (contextEvictor->ContextsCount()) {
+ contextEvictor.swap(mContextEvictor);
+ }
+ }
+
+ StartRemovingTrash();
+
+ return NS_OK;
+}
+
+nsresult CacheFileIOManager::OpenNSPRHandle(CacheFileHandle* aHandle,
+ bool aCreate) {
+ LOG(("CacheFileIOManager::OpenNSPRHandle BEGIN, handle=%p", aHandle));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ MOZ_ASSERT(!aHandle->mFD);
+ MOZ_ASSERT(mHandlesByLastUsed.IndexOf(aHandle) == mHandlesByLastUsed.NoIndex);
+ MOZ_ASSERT(mHandlesByLastUsed.Length() <= kOpenHandlesLimit);
+ MOZ_ASSERT((aCreate && !aHandle->mFileExists) ||
+ (!aCreate && aHandle->mFileExists));
+
+ nsresult rv;
+
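+  // mHandlesByLastUsed is kept in least-recently-used order: NSPRHandleUsed()
+  // moves a handle to the end of the array on every use, so index 0 is the
+  // descriptor that has been idle the longest.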
+ if (mHandlesByLastUsed.Length() == kOpenHandlesLimit) {
+ // close handle that hasn't been used for the longest time
+ rv = MaybeReleaseNSPRHandleInternal(mHandlesByLastUsed[0], true);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (aCreate) {
+ rv = aHandle->mFile->OpenNSPRFileDesc(
+ PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 0600, &aHandle->mFD);
+ if (rv == NS_ERROR_FILE_ALREADY_EXISTS || // error from nsLocalFileWin
+ rv == NS_ERROR_FILE_NO_DEVICE_SPACE) { // error from nsLocalFileUnix
+ LOG(
+ ("CacheFileIOManager::OpenNSPRHandle() - Cannot create a new file, we"
+ " might reached a limit on FAT32. Will evict a single entry and try "
+ "again. [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHandle->Hash())));
+
+ SHA1Sum::Hash hash;
+ uint32_t cnt;
+
+ rv = CacheIndex::GetEntryForEviction(true, &hash, &cnt);
+ if (NS_SUCCEEDED(rv)) {
+ rv = DoomFileByKeyInternal(&hash);
+ }
+ if (NS_SUCCEEDED(rv)) {
+ rv = aHandle->mFile->OpenNSPRFileDesc(
+ PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 0600, &aHandle->mFD);
+ LOG(
+ ("CacheFileIOManager::OpenNSPRHandle() - Successfully evicted entry"
+ " with hash %08x%08x%08x%08x%08x. %s to create the new file.",
+ LOGSHA1(&hash), NS_SUCCEEDED(rv) ? "Succeeded" : "Failed"));
+
+ // Report the full size only once per session
+ static bool sSizeReported = false;
+ if (!sSizeReported) {
+ uint32_t cacheUsage;
+ if (NS_SUCCEEDED(CacheIndex::GetCacheSize(&cacheUsage))) {
+ cacheUsage >>= 10;
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_SIZE_FULL_FAT,
+ cacheUsage);
+ sSizeReported = true;
+ }
+ }
+ } else {
+ LOG(
+ ("CacheFileIOManager::OpenNSPRHandle() - Couldn't evict an existing"
+ " entry."));
+ rv = NS_ERROR_FILE_NO_DEVICE_SPACE;
+ }
+ }
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileIOManager::OpenNSPRHandle() Create failed with "
+ "0x%08" PRIx32,
+ static_cast<uint32_t>(rv)));
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aHandle->mFileExists = true;
+ } else {
+ rv = aHandle->mFile->OpenNSPRFileDesc(PR_RDWR, 0600, &aHandle->mFD);
+ if (NS_ERROR_FILE_NOT_FOUND == rv) {
+ LOG((" file doesn't exists"));
+ aHandle->mFileExists = false;
+ return DoomFileInternal(aHandle);
+ }
+ if (NS_FAILED(rv)) {
+ LOG(("CacheFileIOManager::OpenNSPRHandle() Open failed with 0x%08" PRIx32,
+ static_cast<uint32_t>(rv)));
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ mHandlesByLastUsed.AppendElement(aHandle);
+
+ LOG(("CacheFileIOManager::OpenNSPRHandle END, handle=%p", aHandle));
+
+ return NS_OK;
+}
+
+void CacheFileIOManager::NSPRHandleUsed(CacheFileHandle* aHandle) {
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
+ MOZ_ASSERT(aHandle->mFD);
+
+ DebugOnly<bool> found;
+ found = mHandlesByLastUsed.RemoveElement(aHandle);
+ MOZ_ASSERT(found);
+
+ mHandlesByLastUsed.AppendElement(aHandle);
+}
+
+nsresult CacheFileIOManager::SyncRemoveDir(nsIFile* aFile, const char* aDir) {
+ nsresult rv;
+ nsCOMPtr<nsIFile> file;
+
+ if (!aDir) {
+ file = aFile;
+ } else {
+ rv = aFile->Clone(getter_AddRefs(file));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = file->AppendNative(nsDependentCString(aDir));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ }
+
+ if (LOG_ENABLED()) {
+ LOG(("CacheFileIOManager::SyncRemoveDir() - Removing directory %s",
+ file->HumanReadablePath().get()));
+ }
+
+ rv = file->Remove(true);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileIOManager::SyncRemoveDir() - Removing failed! "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ }
+
+ return rv;
+}
+
+void CacheFileIOManager::SyncRemoveAllCacheFiles() {
+ LOG(("CacheFileIOManager::SyncRemoveAllCacheFiles()"));
+
+ nsresult rv;
+
+ SyncRemoveDir(mCacheDirectory, ENTRIES_DIR);
+ SyncRemoveDir(mCacheDirectory, DOOMED_DIR);
+
+ // Clear any intermediate state of trash dir enumeration.
+ mFailedTrashDirs.Clear();
+ mTrashDir = nullptr;
+
+ while (true) {
+ // FindTrashDirToRemove() fills mTrashDir if there is any trash directory.
+ rv = FindTrashDirToRemove();
+ if (rv == NS_ERROR_NOT_AVAILABLE) {
+ LOG(
+ ("CacheFileIOManager::SyncRemoveAllCacheFiles() - No trash directory "
+ "found."));
+ break;
+ }
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileIOManager::SyncRemoveAllCacheFiles() - "
+ "FindTrashDirToRemove() returned an unexpected error. "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ break;
+ }
+
+ rv = SyncRemoveDir(mTrashDir, nullptr);
+ if (NS_FAILED(rv)) {
+ nsAutoCString leafName;
+ mTrashDir->GetNativeLeafName(leafName);
+ mFailedTrashDirs.AppendElement(leafName);
+ }
+ }
+}
+
+// Returns default ("smart") size (in KB) of cache, given available disk space
+// (also in KB)
+static uint32_t SmartCacheSize(const int64_t availKB) {
+ uint32_t maxSize;
+
+ if (CacheObserver::ClearCacheOnShutdown()) {
+ maxSize = kMaxClearOnShutdownCacheSizeKB;
+ } else {
+ maxSize = kMaxCacheSizeKB;
+ }
+
+ if (availKB > 25 * 1024 * 1024) {
+ return maxSize; // skip computing if we're over 25 GB
+ }
+
+  // Grow/shrink in 10 MB units, deliberately, so that in the common case we
+  // don't shrink the cache and evict items every time we start up (it's
+  // important that we don't slow down startup benchmarks).
+ uint32_t sz10MBs = 0;
+ uint32_t avail10MBs = availKB / (1024 * 10);
+
+ // 2.5% of space above 7GB
+ if (avail10MBs > 700) {
+ sz10MBs += static_cast<uint32_t>((avail10MBs - 700) * .025);
+ avail10MBs = 700;
+ }
+ // 7.5% of space between 500 MB -> 7 GB
+ if (avail10MBs > 50) {
+ sz10MBs += static_cast<uint32_t>((avail10MBs - 50) * .075);
+ avail10MBs = 50;
+ }
+
+#ifdef ANDROID
+ // On Android, smaller/older devices may have very little storage and
+ // device owners may be sensitive to storage footprint: Use a smaller
+ // percentage of available space and a smaller minimum.
+
+ // 16% of space up to 500 MB (10 MB min)
+ sz10MBs += std::max<uint32_t>(1, static_cast<uint32_t>(avail10MBs * .16));
+#else
+ // 30% of space up to 500 MB (50 MB min)
+ sz10MBs += std::max<uint32_t>(5, static_cast<uint32_t>(avail10MBs * .3));
+#endif
+
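+  // Example for a desktop build with 20 GB (20,971,520 kB) available:
+  // avail10MBs = 2048, so the slices are 33 (2.5% tier), 48 (7.5% tier) and
+  // 15 (30% tier) ten-megabyte units, giving 96 * 10 * 1024 = 983,040 kB
+  // (960 MB), subject to the maxSize cap.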
+ return std::min<uint32_t>(maxSize, sz10MBs * 10 * 1024);
+}
+
+nsresult CacheFileIOManager::UpdateSmartCacheSize(int64_t aFreeSpace) {
+ MOZ_ASSERT(mIOThread->IsCurrentThread());
+
+ nsresult rv;
+
+ if (!CacheObserver::SmartCacheSizeEnabled()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // Wait at least kSmartSizeUpdateInterval before recomputing smart size.
+ static const TimeDuration kUpdateLimit =
+ TimeDuration::FromMilliseconds(kSmartSizeUpdateInterval);
+ if (!mLastSmartSizeTime.IsNull() &&
+ (TimeStamp::NowLoRes() - mLastSmartSizeTime) < kUpdateLimit) {
+ return NS_OK;
+ }
+
+ // Do not compute smart size when cache size is not reliable.
+ bool isUpToDate = false;
+ CacheIndex::IsUpToDate(&isUpToDate);
+ if (!isUpToDate) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ uint32_t cacheUsage;
+ rv = CacheIndex::GetCacheSize(&cacheUsage);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOG(
+ ("CacheFileIOManager::UpdateSmartCacheSize() - Cannot get cacheUsage! "
+ "[rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ return rv;
+ }
+
+ mLastSmartSizeTime = TimeStamp::NowLoRes();
+
+ uint32_t smartSize = SmartCacheSize(aFreeSpace + cacheUsage);
+
+ if (smartSize == CacheObserver::DiskCacheCapacity()) {
+ // Smart size has not changed.
+ return NS_OK;
+ }
+
+ CacheObserver::SetSmartDiskCacheCapacity(smartSize);
+
+ return NS_OK;
+}
+
+// Memory reporting
+
+namespace {
+
+// A helper class that dispatches and waits for an event that gets the result
+// of CacheFileIOManager->mHandles.SizeOfExcludingThis() on the I/O thread
+// to safely get the handles memory report.
+// We must do this, since the handle list is only accessed and managed w/o
+// locking on the I/O thread. That is by design.
+class SizeOfHandlesRunnable : public Runnable {
+ public:
+ SizeOfHandlesRunnable(mozilla::MallocSizeOf mallocSizeOf,
+ CacheFileHandles const& handles,
+ nsTArray<CacheFileHandle*> const& specialHandles)
+ : Runnable("net::SizeOfHandlesRunnable"),
+ mMonitor("SizeOfHandlesRunnable.mMonitor"),
+ mMonitorNotified(false),
+ mMallocSizeOf(mallocSizeOf),
+ mHandles(handles),
+ mSpecialHandles(specialHandles),
+ mSize(0) {}
+
+ size_t Get(CacheIOThread* thread) {
+ nsCOMPtr<nsIEventTarget> target = thread->Target();
+ if (!target) {
+ NS_ERROR("If we have the I/O thread we also must have the I/O target");
+ return 0;
+ }
+
+ mozilla::MonitorAutoLock mon(mMonitor);
+ mMonitorNotified = false;
+ nsresult rv = target->Dispatch(this, nsIEventTarget::DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_ERROR("Dispatch failed, cannot do memory report of CacheFileHandles");
+ return 0;
+ }
+
+ while (!mMonitorNotified) {
+ mon.Wait();
+ }
+ return mSize;
+ }
+
+ NS_IMETHOD Run() override {
+ mozilla::MonitorAutoLock mon(mMonitor);
+ // Excluding this since the object itself is a member of CacheFileIOManager
+ // reported in CacheFileIOManager::SizeOfIncludingThis as part of |this|.
+ mSize = mHandles.SizeOfExcludingThis(mMallocSizeOf);
+ for (uint32_t i = 0; i < mSpecialHandles.Length(); ++i) {
+ mSize += mSpecialHandles[i]->SizeOfIncludingThis(mMallocSizeOf);
+ }
+
+ mMonitorNotified = true;
+ mon.Notify();
+ return NS_OK;
+ }
+
+ private:
+ mozilla::Monitor mMonitor;
+ bool mMonitorNotified;
+ mozilla::MallocSizeOf mMallocSizeOf;
+ CacheFileHandles const& mHandles;
+ nsTArray<CacheFileHandle*> const& mSpecialHandles;
+ size_t mSize;
+};
+
+} // namespace
+
+size_t CacheFileIOManager::SizeOfExcludingThisInternal(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = 0;
+ nsCOMPtr<nsISizeOf> sizeOf;
+
+ if (mIOThread) {
+ n += mIOThread->SizeOfIncludingThis(mallocSizeOf);
+
+    // mHandles and mSpecialHandles must be accessed only on the I/O thread,
+    // so we must sync-dispatch.
+ RefPtr<SizeOfHandlesRunnable> sizeOfHandlesRunnable =
+ new SizeOfHandlesRunnable(mallocSizeOf, mHandles, mSpecialHandles);
+ n += sizeOfHandlesRunnable->Get(mIOThread);
+ }
+
+  // mHandlesByLastUsed just refers to handles reported by mHandles.
+
+ sizeOf = do_QueryInterface(mCacheDirectory);
+ if (sizeOf) n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+
+ sizeOf = do_QueryInterface(mMetadataWritesTimer);
+ if (sizeOf) n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+
+ sizeOf = do_QueryInterface(mTrashTimer);
+ if (sizeOf) n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+
+ sizeOf = do_QueryInterface(mTrashDir);
+ if (sizeOf) n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+
+ for (uint32_t i = 0; i < mFailedTrashDirs.Length(); ++i) {
+ n += mFailedTrashDirs[i].SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ }
+
+ return n;
+}
+
+// static
+size_t CacheFileIOManager::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) {
+ if (!gInstance) return 0;
+
+ return gInstance->SizeOfExcludingThisInternal(mallocSizeOf);
+}
+
+// static
+size_t CacheFileIOManager::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(gInstance) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheFileIOManager.h b/netwerk/cache2/CacheFileIOManager.h
new file mode 100644
index 0000000000..579e58e328
--- /dev/null
+++ b/netwerk/cache2/CacheFileIOManager.h
@@ -0,0 +1,479 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileIOManager__h__
+#define CacheFileIOManager__h__
+
+#include "CacheIOThread.h"
+#include "CacheStorageService.h"
+#include "CacheHashUtils.h"
+#include "nsIEventTarget.h"
+#include "nsINamed.h"
+#include "nsITimer.h"
+#include "nsCOMPtr.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/SHA1.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
+#include "nsString.h"
+#include "nsTHashtable.h"
+#include "prio.h"
+
+//#define DEBUG_HANDLES 1
+
+class nsIFile;
+class nsITimer;
+class nsIDirectoryEnumerator;
+class nsILoadContextInfo;
+
+namespace mozilla {
+namespace net {
+
+class CacheFile;
+class CacheFileIOListener;
+
+#ifdef DEBUG_HANDLES
+class CacheFileHandlesEntry;
+#endif
+
+#define ENTRIES_DIR "entries"
+#define DOOMED_DIR "doomed"
+#define TRASH_DIR "trash"
+
+class CacheFileHandle final : public nsISupports {
+ public:
+ enum class PinningStatus : uint32_t { UNKNOWN, NON_PINNED, PINNED };
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ bool DispatchRelease();
+
+ CacheFileHandle(const SHA1Sum::Hash* aHash, bool aPriority,
+ PinningStatus aPinning);
+ CacheFileHandle(const nsACString& aKey, bool aPriority,
+ PinningStatus aPinning);
+ void Log();
+ bool IsDoomed() const { return mIsDoomed; }
+ const SHA1Sum::Hash* Hash() const { return mHash; }
+ int64_t FileSize() const { return mFileSize; }
+ uint32_t FileSizeInK() const;
+ bool IsPriority() const { return mPriority; }
+ bool FileExists() const { return mFileExists; }
+ bool IsClosed() const { return mClosed; }
+ bool IsSpecialFile() const { return mSpecialFile; }
+ nsCString& Key() { return mKey; }
+
+ // Returns false when this handle has been doomed based on the pinning state
+ // update.
+ bool SetPinned(bool aPinned);
+ void SetInvalid() { mInvalid = true; }
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ friend class CacheFileIOManager;
+ friend class CacheFileHandles;
+ friend class ReleaseNSPRHandleEvent;
+
+ virtual ~CacheFileHandle();
+
+ const SHA1Sum::Hash* mHash;
+ mozilla::Atomic<bool, ReleaseAcquire> mIsDoomed;
+ mozilla::Atomic<bool, ReleaseAcquire> mClosed;
+
+ // mPriority and mSpecialFile are plain "bool", not "bool:1", so as to
+ // avoid bitfield races with the byte containing mInvalid et al. See
+ // bug 1278502.
+ bool const mPriority;
+ bool const mSpecialFile;
+
+ mozilla::Atomic<bool, Relaxed> mInvalid;
+
+ // These bit flags are all accessed only on the IO thread
+  bool mFileExists : 1;  // This means that the file should exist,
+                         // but it can still be deleted by the OS/user
+                         // and then a subsequent OpenNSPRFileDesc()
+                         // will fail.
+
+ // Both initially false. Can be raised to true only when this handle is to be
+ // doomed during the period when the pinning status is unknown. After the
+ // pinning status determination we check these flags and possibly doom. These
+ // flags are only accessed on the IO thread.
+ bool mDoomWhenFoundPinned : 1;
+ bool mDoomWhenFoundNonPinned : 1;
+  // Set after shutdown when either:
+  // - writing: we are writing data (not metadata) OR the physical file handle
+  //   is not currently open
+  // - truncating: the physical file handle is not currently open
+  // When set, it prevents any further writes or truncates on such handles
+  // right after shutdown and gives a chance to write the metadata of already
+  // open files as quickly as possible (only that renders them actually usable
+  // by the cache.)
+ bool mKilled : 1;
+ // For existing files this is always pre-set to UNKNOWN. The status is
+  // updated accordingly after the metadata has been parsed. For new files the
+ // flag is set according to which storage kind is opening the cache entry and
+ // remains so for the handle's lifetime. The status can only change from
+ // UNKNOWN (if set so initially) to one of PINNED or NON_PINNED and it stays
+ // unchanged afterwards. This status is only accessed on the IO thread.
+ PinningStatus mPinning;
+
+ nsCOMPtr<nsIFile> mFile;
+
+  // The file size is atomic because it is used on the main thread by
+  // nsHttpChannel::ReportNetVSCacheTelemetry()
+ Atomic<int64_t, Relaxed> mFileSize;
+  PRFileDesc* mFD;  // if null then the file doesn't exist on the disk
+ nsCString mKey;
+};
+
+class CacheFileHandles {
+ public:
+ CacheFileHandles();
+ ~CacheFileHandles();
+
+ nsresult GetHandle(const SHA1Sum::Hash* aHash, CacheFileHandle** _retval);
+ already_AddRefed<CacheFileHandle> NewHandle(const SHA1Sum::Hash*,
+ bool aPriority,
+ CacheFileHandle::PinningStatus);
+  void RemoveHandle(CacheFileHandle* aHandle);
+ void GetAllHandles(nsTArray<RefPtr<CacheFileHandle> >* _retval);
+ void GetActiveHandles(nsTArray<RefPtr<CacheFileHandle> >* _retval);
+ void ClearAll();
+ uint32_t HandleCount();
+
+#ifdef DEBUG_HANDLES
+ void Log(CacheFileHandlesEntry* entry);
+#endif
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ class HandleHashKey : public PLDHashEntryHdr {
+ public:
+ typedef const SHA1Sum::Hash& KeyType;
+ typedef const SHA1Sum::Hash* KeyTypePointer;
+
+ explicit HandleHashKey(KeyTypePointer aKey) {
+ MOZ_COUNT_CTOR(HandleHashKey);
+ mHash = MakeUnique<uint8_t[]>(SHA1Sum::kHashSize);
+ memcpy(mHash.get(), aKey, sizeof(SHA1Sum::Hash));
+ }
+ HandleHashKey(const HandleHashKey& aOther) {
+ MOZ_ASSERT_UNREACHABLE("HandleHashKey copy constructor is forbidden!");
+ }
+ MOZ_COUNTED_DTOR(HandleHashKey)
+
+ bool KeyEquals(KeyTypePointer aKey) const {
+ return memcmp(mHash.get(), aKey, sizeof(SHA1Sum::Hash)) == 0;
+ }
+ static KeyTypePointer KeyToPointer(KeyType aKey) { return &aKey; }
+ static PLDHashNumber HashKey(KeyTypePointer aKey) {
+ return (reinterpret_cast<const uint32_t*>(aKey))[0];
+ }
+
+ void AddHandle(CacheFileHandle* aHandle);
+ void RemoveHandle(CacheFileHandle* aHandle);
+ already_AddRefed<CacheFileHandle> GetNewestHandle();
+ void GetHandles(nsTArray<RefPtr<CacheFileHandle> >& aResult);
+
+ SHA1Sum::Hash* Hash() const {
+ return reinterpret_cast<SHA1Sum::Hash*>(mHash.get());
+ }
+ bool IsEmpty() const { return mHandles.Length() == 0; }
+
+ enum { ALLOW_MEMMOVE = true };
+
+#ifdef DEBUG
+ void AssertHandlesState();
+#endif
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+    // We can't make this UniquePtr<SHA1Sum::Hash>, because you can't have
+    // UniquePtrs with known bounds. So we settle for this representation
+    // and use appropriate casts when we need to access it as a
+    // SHA1Sum::Hash.
+ UniquePtr<uint8_t[]> mHash;
+    // Use weak pointers since the hashtable is accessed on a single thread
+    // only and CacheFileHandle removes itself from this table in its dtor,
+    // which may only run on that same thread because we dispatch the handle's
+    // Release() to it.
+ nsTArray<CacheFileHandle*> mHandles;
+ };
+
+ private:
+ nsTHashtable<HandleHashKey> mTable;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class OpenFileEvent;
+class ReadEvent;
+class WriteEvent;
+class MetadataWriteScheduleEvent;
+class CacheFileContextEvictor;
+
+#define CACHEFILEIOLISTENER_IID \
+ { /* dcaf2ddc-17cf-4242-bca1-8c86936375a5 */ \
+ 0xdcaf2ddc, 0x17cf, 0x4242, { \
+ 0xbc, 0xa1, 0x8c, 0x86, 0x93, 0x63, 0x75, 0xa5 \
+ } \
+ }
+
+class CacheFileIOListener : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(CACHEFILEIOLISTENER_IID)
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) = 0;
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) = 0;
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) = 0;
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) = 0;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) = 0;
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) = 0;
+
+ virtual bool IsKilled() { return false; }
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(CacheFileIOListener, CACHEFILEIOLISTENER_IID)
+
+class CacheFileIOManager final : public nsITimerCallback, public nsINamed {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSITIMERCALLBACK
+ NS_DECL_NSINAMED
+
+ enum {
+ OPEN = 0U,
+ CREATE = 1U,
+ CREATE_NEW = 2U,
+ PRIORITY = 4U,
+ SPECIAL_FILE = 8U,
+ PINNED = 16U
+ };
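+  // These values are bit flags passed as aFlags to OpenFile() and friends;
+  // for illustration, (CREATE | PRIORITY) asks for an entry to be created and
+  // handled with priority.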
+
+ CacheFileIOManager();
+
+ static nsresult Init();
+ static nsresult Shutdown();
+ static nsresult OnProfile();
+ static already_AddRefed<nsIEventTarget> IOTarget();
+ static already_AddRefed<CacheIOThread> IOThread();
+ static bool IsOnIOThread();
+ static bool IsOnIOThreadOrCeased();
+ static bool IsShutdown();
+
+ // Make aFile's WriteMetadataIfNeeded be called automatically after
+ // a short interval.
+ static nsresult ScheduleMetadataWrite(CacheFile* aFile);
+ // Remove aFile from the scheduling registry array.
+ // WriteMetadataIfNeeded will not be automatically called.
+ static nsresult UnscheduleMetadataWrite(CacheFile* aFile);
+ // Shuts the scheduling off and flushes all pending metadata writes.
+ static nsresult ShutdownMetadataWriteScheduling();
+
+ static nsresult OpenFile(const nsACString& aKey, uint32_t aFlags,
+ CacheFileIOListener* aCallback);
+ static nsresult Read(CacheFileHandle* aHandle, int64_t aOffset, char* aBuf,
+ int32_t aCount, CacheFileIOListener* aCallback);
+ static nsresult Write(CacheFileHandle* aHandle, int64_t aOffset,
+ const char* aBuf, int32_t aCount, bool aValidate,
+ bool aTruncate, CacheFileIOListener* aCallback);
+ // PinningDoomRestriction:
+ // NO_RESTRICTION
+ // no restriction is checked, the file is simply always doomed
+ // DOOM_WHEN_(NON)_PINNED, we branch based on the pinning status of the
+ // handle:
+ // UNKNOWN: the handle is marked to be doomed when later found (non)pinned
+ // PINNED/NON_PINNED: doom only when the restriction matches the pin status
+ // and the handle has not yet been required to doom during the UNKNOWN
+ // period
+ enum PinningDoomRestriction {
+ NO_RESTRICTION,
+ DOOM_WHEN_NON_PINNED,
+ DOOM_WHEN_PINNED
+ };
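+  // For illustration (see also mDoomWhenFoundPinned/mDoomWhenFoundNonPinned in
+  // CacheFileHandle): DoomFileInternal(aHandle, DOOM_WHEN_NON_PINNED) dooms
+  // the file right away only when the handle is known to be non-pinned; while
+  // the pinning status is still UNKNOWN the handle is only marked and the doom
+  // is carried out once the status gets resolved.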
+ static nsresult DoomFile(CacheFileHandle* aHandle,
+ CacheFileIOListener* aCallback);
+ static nsresult DoomFileByKey(const nsACString& aKey,
+ CacheFileIOListener* aCallback);
+ static nsresult ReleaseNSPRHandle(CacheFileHandle* aHandle);
+ static nsresult TruncateSeekSetEOF(CacheFileHandle* aHandle,
+ int64_t aTruncatePos, int64_t aEOFPos,
+ CacheFileIOListener* aCallback);
+ static nsresult RenameFile(CacheFileHandle* aHandle,
+ const nsACString& aNewName,
+ CacheFileIOListener* aCallback);
+ static nsresult EvictIfOverLimit();
+ static nsresult EvictAll();
+ static nsresult EvictByContext(nsILoadContextInfo* aLoadContextInfo,
+ bool aPinning, const nsAString& aOrigin);
+
+ static nsresult InitIndexEntry(CacheFileHandle* aHandle,
+ OriginAttrsHash aOriginAttrsHash,
+ bool aAnonymous, bool aPinning);
+ static nsresult UpdateIndexEntry(CacheFileHandle* aHandle,
+ const uint32_t* aFrecency,
+ const bool* aHasAltData,
+ const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType);
+
+ static nsresult UpdateIndexEntry();
+
+ enum EEnumerateMode { ENTRIES, DOOMED };
+
+ static void GetCacheDirectory(nsIFile** result);
+#if defined(MOZ_WIDGET_ANDROID)
+ static void GetProfilelessCacheDirectory(nsIFile** result);
+#endif
+
+  // Synchronously calls OnEntryInfo for the entry with the given hash.
+  // Tries to find an existing entry in the service hashtables first; if not
+  // found, it loads the entry synchronously from the disk file.
+  // Callable on the IO thread only.
+ static nsresult GetEntryInfo(
+ const SHA1Sum::Hash* aHash,
+ CacheStorageService::EntryInfoCallback* aCallback);
+
+ // Memory reporting
+ static size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+ static size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ private:
+ friend class CacheFileHandle;
+ friend class CacheFileChunk;
+ friend class CacheFile;
+ friend class ShutdownEvent;
+ friend class OpenFileEvent;
+ friend class CloseHandleEvent;
+ friend class ReadEvent;
+ friend class WriteEvent;
+ friend class DoomFileEvent;
+ friend class DoomFileByKeyEvent;
+ friend class ReleaseNSPRHandleEvent;
+ friend class TruncateSeekSetEOFEvent;
+ friend class RenameFileEvent;
+ friend class CacheIndex;
+ friend class MetadataWriteScheduleEvent;
+ friend class CacheFileContextEvictor;
+
+ virtual ~CacheFileIOManager();
+
+ nsresult InitInternal();
+ void ShutdownInternal();
+
+ nsresult OpenFileInternal(const SHA1Sum::Hash* aHash, const nsACString& aKey,
+ uint32_t aFlags, CacheFileHandle** _retval);
+ nsresult OpenSpecialFileInternal(const nsACString& aKey, uint32_t aFlags,
+ CacheFileHandle** _retval);
+ void CloseHandleInternal(CacheFileHandle* aHandle);
+ nsresult ReadInternal(CacheFileHandle* aHandle, int64_t aOffset, char* aBuf,
+ int32_t aCount);
+ nsresult WriteInternal(CacheFileHandle* aHandle, int64_t aOffset,
+ const char* aBuf, int32_t aCount, bool aValidate,
+ bool aTruncate);
+ nsresult DoomFileInternal(
+ CacheFileHandle* aHandle,
+ PinningDoomRestriction aPinningStatusRestriction = NO_RESTRICTION);
+ nsresult DoomFileByKeyInternal(const SHA1Sum::Hash* aHash);
+ nsresult MaybeReleaseNSPRHandleInternal(CacheFileHandle* aHandle,
+ bool aIgnoreShutdownLag = false);
+ nsresult TruncateSeekSetEOFInternal(CacheFileHandle* aHandle,
+ int64_t aTruncatePos, int64_t aEOFPos);
+ nsresult RenameFileInternal(CacheFileHandle* aHandle,
+ const nsACString& aNewName);
+ nsresult EvictIfOverLimitInternal();
+ nsresult OverLimitEvictionInternal();
+ nsresult EvictAllInternal();
+ nsresult EvictByContextInternal(nsILoadContextInfo* aLoadContextInfo,
+ bool aPinning, const nsAString& aOrigin);
+
+ nsresult TrashDirectory(nsIFile* aFile);
+ static void OnTrashTimer(nsITimer* aTimer, void* aClosure);
+ nsresult StartRemovingTrash();
+ nsresult RemoveTrashInternal();
+ nsresult FindTrashDirToRemove();
+
+ nsresult CreateFile(CacheFileHandle* aHandle);
+ static void HashToStr(const SHA1Sum::Hash* aHash, nsACString& _retval);
+ static nsresult StrToHash(const nsACString& aHash, SHA1Sum::Hash* _retval);
+ nsresult GetFile(const SHA1Sum::Hash* aHash, nsIFile** _retval);
+ nsresult GetSpecialFile(const nsACString& aKey, nsIFile** _retval);
+ nsresult GetDoomedFile(nsIFile** _retval);
+ nsresult IsEmptyDirectory(nsIFile* aFile, bool* _retval);
+ nsresult CheckAndCreateDir(nsIFile* aFile, const char* aDir,
+ bool aEnsureEmptyDir);
+ nsresult CreateCacheTree();
+ nsresult OpenNSPRHandle(CacheFileHandle* aHandle, bool aCreate = false);
+ void NSPRHandleUsed(CacheFileHandle* aHandle);
+
+ // Removing all cache files during shutdown
+ nsresult SyncRemoveDir(nsIFile* aFile, const char* aDir);
+ void SyncRemoveAllCacheFiles();
+
+ nsresult ScheduleMetadataWriteInternal(CacheFile* aFile);
+ void UnscheduleMetadataWriteInternal(CacheFile* aFile);
+ void ShutdownMetadataWriteSchedulingInternal();
+
+ static nsresult CacheIndexStateChanged();
+ void CacheIndexStateChangedInternal();
+
+ // Smart size calculation. UpdateSmartCacheSize() must be called on IO thread.
+ // It is called in EvictIfOverLimitInternal() just before we decide whether to
+ // start overlimit eviction or not and also in OverLimitEvictionInternal()
+ // before we start an eviction loop.
+ nsresult UpdateSmartCacheSize(int64_t aFreeSpace);
+
+ // Memory reporting (private part)
+ size_t SizeOfExcludingThisInternal(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ static StaticRefPtr<CacheFileIOManager> gInstance;
+
+ TimeStamp mStartTime;
+  // Set to true on the IO thread at the CLOSE level as part of the internal
+  // shutdown procedure.
+ bool mShuttingDown;
+ RefPtr<CacheIOThread> mIOThread;
+ nsCOMPtr<nsIFile> mCacheDirectory;
+#if defined(MOZ_WIDGET_ANDROID)
+  // On Android we add the active profile directory name between the path
+  // and the 'cache2' leaf name. However, to delete any leftover data from
+  // the time before we started doing this, we still need to access the
+  // directory without the profile name in the path. It is stored here.
+ nsCOMPtr<nsIFile> mCacheProfilelessDirectory;
+#endif
+ bool mTreeCreated;
+ bool mTreeCreationFailed;
+ CacheFileHandles mHandles;
+ nsTArray<CacheFileHandle*> mHandlesByLastUsed;
+ nsTArray<CacheFileHandle*> mSpecialHandles;
+ nsTArray<RefPtr<CacheFile> > mScheduledMetadataWrites;
+ nsCOMPtr<nsITimer> mMetadataWritesTimer;
+ bool mOverLimitEvicting;
+  // When overlimit eviction is too slow and the cache size reaches 105% of
+  // the limit, this flag is set and no other content is cached to prevent
+  // uncontrolled cache growth.
+ bool mCacheSizeOnHardLimit;
+ bool mRemovingTrashDirs;
+ nsCOMPtr<nsITimer> mTrashTimer;
+ nsCOMPtr<nsIFile> mTrashDir;
+ nsCOMPtr<nsIDirectoryEnumerator> mTrashDirEnumerator;
+ nsTArray<nsCString> mFailedTrashDirs;
+ RefPtr<CacheFileContextEvictor> mContextEvictor;
+ TimeStamp mLastSmartSizeTime;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFileInputStream.cpp b/netwerk/cache2/CacheFileInputStream.cpp
new file mode 100644
index 0000000000..dc6d0208e6
--- /dev/null
+++ b/netwerk/cache2/CacheFileInputStream.cpp
@@ -0,0 +1,719 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileInputStream.h"
+
+#include "CacheFile.h"
+#include "nsStreamUtils.h"
+#include "nsThreadUtils.h"
+#include <algorithm>
+
+namespace mozilla {
+namespace net {
+
+NS_IMPL_ADDREF(CacheFileInputStream)
+NS_IMETHODIMP_(MozExternalRefCountType)
+CacheFileInputStream::Release() {
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "CacheFileInputStream");
+
+ if (0 == count) {
+ mRefCnt = 1;
+ delete (this);
+ return 0;
+ }
+
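+  // Once only a single reference remains, external consumers are gone, so
+  // remove this input stream from its CacheFile.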
+ if (count == 1) {
+ CacheFileAutoLock lock(mFile);
+ mFile->RemoveInput(this, mStatus);
+ }
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(CacheFileInputStream)
+ NS_INTERFACE_MAP_ENTRY(nsIInputStream)
+ NS_INTERFACE_MAP_ENTRY(nsIAsyncInputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISeekableStream)
+ NS_INTERFACE_MAP_ENTRY(nsITellableStream)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIInputStream)
+NS_INTERFACE_MAP_END
+
+CacheFileInputStream::CacheFileInputStream(CacheFile* aFile,
+ nsISupports* aEntry,
+ bool aAlternativeData)
+ : mFile(aFile),
+ mPos(0),
+ mStatus(NS_OK),
+ mClosed(false),
+ mInReadSegments(false),
+ mWaitingForUpdate(false),
+ mAlternativeData(aAlternativeData),
+ mListeningForChunk(-1),
+ mCallbackFlags(0),
+ mCacheEntryHandle(aEntry) {
+ LOG(("CacheFileInputStream::CacheFileInputStream() [this=%p]", this));
+
+ if (mAlternativeData) {
+ mPos = mFile->mAltDataOffset;
+ }
+}
+
+CacheFileInputStream::~CacheFileInputStream() {
+ LOG(("CacheFileInputStream::~CacheFileInputStream() [this=%p]", this));
+ MOZ_ASSERT(!mInReadSegments);
+}
+
+// nsIInputStream
+NS_IMETHODIMP
+CacheFileInputStream::Close() {
+ LOG(("CacheFileInputStream::Close() [this=%p]", this));
+ return CloseWithStatus(NS_OK);
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::Available(uint64_t* _retval) {
+ CacheFileAutoLock lock(mFile);
+
+ if (mClosed) {
+ LOG(
+ ("CacheFileInputStream::Available() - Stream is closed. [this=%p, "
+ "status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+ return NS_FAILED(mStatus) ? mStatus : NS_BASE_STREAM_CLOSED;
+ }
+
+ EnsureCorrectChunk(false);
+ if (NS_FAILED(mStatus)) {
+ LOG(
+ ("CacheFileInputStream::Available() - EnsureCorrectChunk failed. "
+ "[this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+ return mStatus;
+ }
+
+ nsresult rv = NS_OK;
+ *_retval = 0;
+
+ if (mChunk) {
+ int64_t canRead = mFile->BytesFromChunk(mChunk->Index(), mAlternativeData);
+ canRead -= (mPos % kChunkSize);
+
+ if (canRead > 0) {
+ *_retval = canRead;
+ } else if (canRead == 0 && !mFile->OutputStreamExists(mAlternativeData)) {
+ rv = NS_BASE_STREAM_CLOSED;
+ }
+ }
+
+ LOG(("CacheFileInputStream::Available() [this=%p, retval=%" PRIu64
+ ", rv=0x%08" PRIx32 "]",
+ this, *_retval, static_cast<uint32_t>(rv)));
+
+ return rv;
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::Read(char* aBuf, uint32_t aCount, uint32_t* _retval) {
+ LOG(("CacheFileInputStream::Read() [this=%p, count=%d]", this, aCount));
+ return ReadSegments(NS_CopySegmentToBuffer, aBuf, aCount, _retval);
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::ReadSegments(nsWriteSegmentFun aWriter, void* aClosure,
+ uint32_t aCount, uint32_t* _retval) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileInputStream::ReadSegments() [this=%p, count=%d]", this,
+ aCount));
+
+ nsresult rv = NS_OK;
+
+ *_retval = 0;
+
+ if (mInReadSegments) {
+ LOG(
+ ("CacheFileInputStream::ReadSegments() - Cannot be called while the "
+ "stream is in ReadSegments!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (mClosed) {
+ LOG(
+ ("CacheFileInputStream::ReadSegments() - Stream is closed. [this=%p, "
+ "status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+ if (NS_FAILED(mStatus)) {
+ return mStatus;
+ }
+
+ return NS_OK;
+ }
+
+ if (aCount == 0) {
+ return NS_OK;
+ }
+
+ EnsureCorrectChunk(false);
+
+ while (true) {
+ if (NS_FAILED(mStatus)) return mStatus;
+
+ if (!mChunk) {
+ if (mListeningForChunk == -1) {
+ return NS_OK;
+ }
+ return NS_BASE_STREAM_WOULD_BLOCK;
+ }
+
+ CacheFileChunkReadHandle hnd = mChunk->GetReadHandle();
+ int64_t canRead = CanRead(&hnd);
+ if (NS_FAILED(mStatus)) {
+ return mStatus;
+ }
+
+ if (canRead < 0) {
+ // file was truncated ???
+ MOZ_ASSERT(false, "SetEOF is currenty not implemented?!");
+ rv = NS_OK;
+ } else if (canRead > 0) {
+ uint32_t toRead = std::min(static_cast<uint32_t>(canRead), aCount);
+ uint32_t read;
+ const char* buf = hnd.Buf() + (mPos - hnd.Offset());
+
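+      // Drop the CacheFile lock while calling out to the consumer-provided
+      // writer; mInReadSegments guards against re-entering this stream in the
+      // meantime.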
+ mInReadSegments = true;
+ lock.Unlock();
+
+ rv = aWriter(this, aClosure, buf, *_retval, toRead, &read);
+
+ lock.Lock();
+ mInReadSegments = false;
+
+ if (NS_SUCCEEDED(rv)) {
+ MOZ_ASSERT(read <= toRead,
+ "writer should not write more than we asked it to write");
+
+ *_retval += read;
+ mPos += read;
+ aCount -= read;
+
+ if (!mClosed) {
+ // The last chunk is released after the caller closes this stream.
+ EnsureCorrectChunk(false);
+
+ if (mChunk && aCount) {
+ // Check whether there is more data available to read.
+ continue;
+ }
+ }
+ }
+
+ if (mClosed) {
+      // The stream was closed by aWriter, do the cleanup.
+ CleanUp();
+ }
+
+ rv = NS_OK;
+ } else {
+ if (*_retval == 0 && mFile->OutputStreamExists(mAlternativeData)) {
+ rv = NS_BASE_STREAM_WOULD_BLOCK;
+ } else {
+ rv = NS_OK;
+ }
+ }
+
+ break;
+ }
+
+ LOG(("CacheFileInputStream::ReadSegments() [this=%p, rv=0x%08" PRIx32
+ ", retval=%d]",
+ this, static_cast<uint32_t>(rv), *_retval));
+
+ return rv;
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::IsNonBlocking(bool* _retval) {
+ *_retval = true;
+ return NS_OK;
+}
+
+// nsIAsyncInputStream
+NS_IMETHODIMP
+CacheFileInputStream::CloseWithStatus(nsresult aStatus) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileInputStream::CloseWithStatus() [this=%p, aStatus=0x%08" PRIx32
+ "]",
+ this, static_cast<uint32_t>(aStatus)));
+
+ CloseWithStatusLocked(aStatus);
+ return NS_OK;
+}
+
+void CacheFileInputStream::CloseWithStatusLocked(nsresult aStatus) {
+ LOG(
+ ("CacheFileInputStream::CloseWithStatusLocked() [this=%p, "
+ "aStatus=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(aStatus)));
+
+ if (mClosed) {
+    // We notify the listener and null out mCallback immediately after closing
+    // the stream. If we're in ReadSegments we postpone the notification until
+    // we step out of ReadSegments. So if the stream is already closed the
+    // following assertion must be true.
+ MOZ_ASSERT(!mCallback || mInReadSegments);
+ return;
+ }
+
+ mClosed = true;
+ mStatus = NS_FAILED(aStatus) ? aStatus : NS_BASE_STREAM_CLOSED;
+
+ if (!mInReadSegments) {
+ CleanUp();
+ }
+}
+
+void CacheFileInputStream::CleanUp() {
+ MOZ_ASSERT(!mInReadSegments);
+ MOZ_ASSERT(mClosed);
+
+ if (mChunk) {
+ ReleaseChunk();
+ }
+
+ // TODO propagate error from input stream to other streams ???
+
+ MaybeNotifyListener();
+
+ mFile->ReleaseOutsideLock(std::move(mCacheEntryHandle));
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::AsyncWait(nsIInputStreamCallback* aCallback,
+ uint32_t aFlags, uint32_t aRequestedCount,
+ nsIEventTarget* aEventTarget) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(
+ ("CacheFileInputStream::AsyncWait() [this=%p, callback=%p, flags=%d, "
+ "requestedCount=%d, eventTarget=%p]",
+ this, aCallback, aFlags, aRequestedCount, aEventTarget));
+
+ if (mInReadSegments) {
+ LOG(
+ ("CacheFileInputStream::AsyncWait() - Cannot be called while the stream"
+ " is in ReadSegments!"));
+ MOZ_ASSERT(false,
+ "Unexpected call. If it's a valid usage implement it. "
+ "Otherwise fix the caller.");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ mCallback = aCallback;
+ mCallbackFlags = aFlags;
+ mCallbackTarget = aEventTarget;
+
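+  // Passing a null callback cancels a previously registered AsyncWait().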
+ if (!mCallback) {
+ if (mWaitingForUpdate) {
+ mChunk->CancelWait(this);
+ mWaitingForUpdate = false;
+ }
+ return NS_OK;
+ }
+
+ if (mClosed) {
+ NotifyListener();
+ return NS_OK;
+ }
+
+ EnsureCorrectChunk(false);
+
+ MaybeNotifyListener();
+
+ return NS_OK;
+}
+
+// nsISeekableStream
+NS_IMETHODIMP
+CacheFileInputStream::Seek(int32_t whence, int64_t offset) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileInputStream::Seek() [this=%p, whence=%d, offset=%" PRId64 "]",
+ this, whence, offset));
+
+ if (mInReadSegments) {
+ LOG(
+ ("CacheFileInputStream::Seek() - Cannot be called while the stream is "
+ "in ReadSegments!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (mClosed) {
+ LOG(("CacheFileInputStream::Seek() - Stream is closed. [this=%p]", this));
+ return NS_BASE_STREAM_CLOSED;
+ }
+
+ int64_t newPos = offset;
+ switch (whence) {
+ case NS_SEEK_SET:
+ if (mAlternativeData) {
+ newPos += mFile->mAltDataOffset;
+ }
+ break;
+ case NS_SEEK_CUR:
+ newPos += mPos;
+ break;
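+    // Note: for a regular data stream the end is taken to be mAltDataOffset,
+    // i.e. where any alternative data begins; for an alternative-data stream
+    // it is the total data size.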
+ case NS_SEEK_END:
+ if (mAlternativeData) {
+ newPos += mFile->mDataSize;
+ } else {
+ newPos += mFile->mAltDataOffset;
+ }
+ break;
+ default:
+ NS_ERROR("invalid whence");
+ return NS_ERROR_INVALID_ARG;
+ }
+ mPos = newPos;
+ EnsureCorrectChunk(false);
+
+ LOG(("CacheFileInputStream::Seek() [this=%p, pos=%" PRId64 "]", this, mPos));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileInputStream::SetEOF() {
+ MOZ_ASSERT(false, "Don't call SetEOF on cache input stream");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// nsITellableStream
+NS_IMETHODIMP
+CacheFileInputStream::Tell(int64_t* _retval) {
+ CacheFileAutoLock lock(mFile);
+
+ if (mClosed) {
+ LOG(("CacheFileInputStream::Tell() - Stream is closed. [this=%p]", this));
+ return NS_BASE_STREAM_CLOSED;
+ }
+
+ *_retval = mPos;
+
+ if (mAlternativeData) {
+ *_retval -= mFile->mAltDataOffset;
+ }
+
+ LOG(("CacheFileInputStream::Tell() [this=%p, retval=%" PRId64 "]", this,
+ *_retval));
+ return NS_OK;
+}
+
+// CacheFileChunkListener
+nsresult CacheFileInputStream::OnChunkRead(nsresult aResult,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileInputStream::OnChunkRead should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileInputStream::OnChunkWritten(nsresult aResult,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileInputStream::OnChunkWritten should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileInputStream::OnChunkAvailable(nsresult aResult,
+ uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileInputStream::OnChunkAvailable() [this=%p, result=0x%08" PRIx32
+ ", "
+ "idx=%d, chunk=%p]",
+ this, static_cast<uint32_t>(aResult), aChunkIdx, aChunk));
+
+ MOZ_ASSERT(mListeningForChunk != -1);
+
+ if (mListeningForChunk != static_cast<int64_t>(aChunkIdx)) {
+ // This is not a chunk that we're waiting for
+ LOG(
+ ("CacheFileInputStream::OnChunkAvailable() - Notification is for a "
+ "different chunk. [this=%p, listeningForChunk=%" PRId64 "]",
+ this, mListeningForChunk));
+
+ return NS_OK;
+ }
+
+ MOZ_ASSERT(!mChunk);
+ MOZ_ASSERT(!mWaitingForUpdate);
+ MOZ_ASSERT(!mInReadSegments);
+ mListeningForChunk = -1;
+
+ if (mClosed) {
+ MOZ_ASSERT(!mCallback);
+
+ LOG(
+ ("CacheFileInputStream::OnChunkAvailable() - Stream is closed, "
+ "ignoring notification. [this=%p]",
+ this));
+
+ return NS_OK;
+ }
+
+ if (NS_SUCCEEDED(aResult)) {
+ mChunk = aChunk;
+ } else if (aResult != NS_ERROR_NOT_AVAILABLE) {
+ // Close the stream with error. The consumer will receive this error later
+ // in Read(), Available() etc. We need to handle NS_ERROR_NOT_AVAILABLE
+ // differently since it is returned when the requested chunk is not
+ // available and there is no writer that could create it, i.e. it means that
+ // we've reached the end of the file.
+ CloseWithStatusLocked(aResult);
+
+ return NS_OK;
+ }
+
+ MaybeNotifyListener();
+
+ return NS_OK;
+}
+
+nsresult CacheFileInputStream::OnChunkUpdated(CacheFileChunk* aChunk) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileInputStream::OnChunkUpdated() [this=%p, idx=%d]", this,
+ aChunk->Index()));
+
+ if (!mWaitingForUpdate) {
+ LOG(
+ ("CacheFileInputStream::OnChunkUpdated() - Ignoring notification since "
+ "mWaitingforUpdate == false. [this=%p]",
+ this));
+
+ return NS_OK;
+ }
+
+ mWaitingForUpdate = false;
+
+ MOZ_ASSERT(mChunk == aChunk);
+
+ MaybeNotifyListener();
+
+ return NS_OK;
+}
+
+void CacheFileInputStream::ReleaseChunk() {
+ mFile->AssertOwnsLock();
+
+ LOG(("CacheFileInputStream::ReleaseChunk() [this=%p, idx=%d]", this,
+ mChunk->Index()));
+
+ MOZ_ASSERT(!mInReadSegments);
+
+ if (mWaitingForUpdate) {
+ LOG(
+ ("CacheFileInputStream::ReleaseChunk() - Canceling waiting for update. "
+ "[this=%p]",
+ this));
+
+ mChunk->CancelWait(this);
+ mWaitingForUpdate = false;
+ }
+
+ mFile->ReleaseOutsideLock(std::move(mChunk));
+}
+
+void CacheFileInputStream::EnsureCorrectChunk(bool aReleaseOnly) {
+ mFile->AssertOwnsLock();
+
+ LOG(("CacheFileInputStream::EnsureCorrectChunk() [this=%p, releaseOnly=%d]",
+ this, aReleaseOnly));
+
+ nsresult rv;
+
+ uint32_t chunkIdx = mPos / kChunkSize;
+
+ if (mInReadSegments) {
+    // We must have the correct chunk
+ MOZ_ASSERT(mChunk);
+ MOZ_ASSERT(mChunk->Index() == chunkIdx);
+ return;
+ }
+
+ if (mChunk) {
+ if (mChunk->Index() == chunkIdx) {
+      // we have the correct chunk
+ LOG(
+ ("CacheFileInputStream::EnsureCorrectChunk() - Have correct chunk "
+ "[this=%p, idx=%d]",
+ this, chunkIdx));
+
+ return;
+ }
+ ReleaseChunk();
+ }
+
+ MOZ_ASSERT(!mWaitingForUpdate);
+
+ if (aReleaseOnly) return;
+
+ if (mListeningForChunk == static_cast<int64_t>(chunkIdx)) {
+ // We're already waiting for this chunk
+ LOG(
+ ("CacheFileInputStream::EnsureCorrectChunk() - Already listening for "
+ "chunk %" PRId64 " [this=%p]",
+ mListeningForChunk, this));
+
+ return;
+ }
+
+ rv = mFile->GetChunkLocked(chunkIdx, CacheFile::READER, this,
+ getter_AddRefs(mChunk));
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileInputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
+ "[this=%p, idx=%d, rv=0x%08" PRIx32 "]",
+ this, chunkIdx, static_cast<uint32_t>(rv)));
+ if (rv != NS_ERROR_NOT_AVAILABLE) {
+ // Close the stream with error. The consumer will receive this error later
+ // in Read(), Available() etc. We need to handle NS_ERROR_NOT_AVAILABLE
+ // differently since it is returned when the requested chunk is not
+ // available and there is no writer that could create it, i.e. it means
+ // that we've reached the end of the file.
+ CloseWithStatusLocked(rv);
+
+ return;
+ }
+ } else if (!mChunk) {
+ mListeningForChunk = static_cast<int64_t>(chunkIdx);
+ }
+
+ MaybeNotifyListener();
+}
+
+int64_t CacheFileInputStream::CanRead(CacheFileChunkReadHandle* aHandle) {
+ mFile->AssertOwnsLock();
+
+ MOZ_ASSERT(mChunk);
+ MOZ_ASSERT(mPos / kChunkSize == mChunk->Index());
+
+ int64_t retval = aHandle->Offset() + aHandle->DataSize();
+
+ if (!mAlternativeData && mFile->mAltDataOffset != -1 &&
+ mFile->mAltDataOffset < retval) {
+ retval = mFile->mAltDataOffset;
+ }
+
+ retval -= mPos;
+ if (retval <= 0 && NS_FAILED(mChunk->GetStatus())) {
+ CloseWithStatusLocked(mChunk->GetStatus());
+ }
+
+ LOG(("CacheFileInputStream::CanRead() [this=%p, canRead=%" PRId64 "]", this,
+ retval));
+
+ return retval;
+}
+
+void CacheFileInputStream::NotifyListener() {
+ mFile->AssertOwnsLock();
+
+ LOG(("CacheFileInputStream::NotifyListener() [this=%p]", this));
+
+ MOZ_ASSERT(mCallback);
+ MOZ_ASSERT(!mInReadSegments);
+
+ if (!mCallbackTarget) {
+ mCallbackTarget = CacheFileIOManager::IOTarget();
+ if (!mCallbackTarget) {
+ LOG(
+ ("CacheFileInputStream::NotifyListener() - Cannot get Cache I/O "
+ "thread! Using main thread for callback."));
+ mCallbackTarget = GetMainThreadEventTarget();
+ }
+ }
+
+ nsCOMPtr<nsIInputStreamCallback> asyncCallback = NS_NewInputStreamReadyEvent(
+ "CacheFileInputStream::NotifyListener", mCallback, mCallbackTarget);
+
+ mCallback = nullptr;
+ mCallbackTarget = nullptr;
+
+ asyncCallback->OnInputStreamReady(this);
+}
+
+void CacheFileInputStream::MaybeNotifyListener() {
+ mFile->AssertOwnsLock();
+
+ LOG(
+ ("CacheFileInputStream::MaybeNotifyListener() [this=%p, mCallback=%p, "
+ "mClosed=%d, mStatus=0x%08" PRIx32
+ ", mChunk=%p, mListeningForChunk=%" PRId64 ", "
+ "mWaitingForUpdate=%d]",
+ this, mCallback.get(), mClosed, static_cast<uint32_t>(mStatus),
+ mChunk.get(), mListeningForChunk, mWaitingForUpdate));
+
+ MOZ_ASSERT(!mInReadSegments);
+
+ if (!mCallback) return;
+
+ if (mClosed || NS_FAILED(mStatus)) {
+ NotifyListener();
+ return;
+ }
+
+ if (!mChunk) {
+ if (mListeningForChunk == -1) {
+ // EOF, should we notify even if mCallbackFlags == WAIT_CLOSURE_ONLY ??
+ NotifyListener();
+ }
+ return;
+ }
+
+ MOZ_ASSERT(mPos / kChunkSize == mChunk->Index());
+
+ if (mWaitingForUpdate) return;
+
+ CacheFileChunkReadHandle hnd = mChunk->GetReadHandle();
+ int64_t canRead = CanRead(&hnd);
+ if (NS_FAILED(mStatus)) {
+ // CanRead() called CloseWithStatusLocked() which called
+ // MaybeNotifyListener() so the listener was already notified. Stop here.
+ MOZ_ASSERT(!mCallback);
+ return;
+ }
+
+ if (canRead > 0) {
+ if (!(mCallbackFlags & WAIT_CLOSURE_ONLY)) NotifyListener();
+ } else if (canRead == 0) {
+ if (!mFile->OutputStreamExists(mAlternativeData)) {
+ // EOF
+ NotifyListener();
+ } else {
+ mChunk->WaitForUpdate(this);
+ mWaitingForUpdate = true;
+ }
+ } else {
+    // Has the output stream set EOF before mPos?
+    MOZ_ASSERT(false, "SetEOF is currently not implemented?!");
+ NotifyListener();
+ }
+}
+
+// Memory reporting
+
+size_t CacheFileInputStream::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+  // Everything the stream keeps a reference to is already reported somewhere
+  // else. mFile reports itself. mChunk is reported as part of CacheFile.
+  // mCallback is usually CacheFile or a class that is reported elsewhere.
+ return mallocSizeOf(this);
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheFileInputStream.h b/netwerk/cache2/CacheFileInputStream.h
new file mode 100644
index 0000000000..8280f9d224
--- /dev/null
+++ b/netwerk/cache2/CacheFileInputStream.h
@@ -0,0 +1,80 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileInputStream__h__
+#define CacheFileInputStream__h__
+
+#include "nsIAsyncInputStream.h"
+#include "nsISeekableStream.h"
+#include "nsCOMPtr.h"
+#include "CacheFileChunk.h"
+
+namespace mozilla {
+namespace net {
+
+class CacheFile;
+
+class CacheFileInputStream : public nsIAsyncInputStream,
+ public nsISeekableStream,
+ public CacheFileChunkListener {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIINPUTSTREAM
+ NS_DECL_NSIASYNCINPUTSTREAM
+ NS_DECL_NSISEEKABLESTREAM
+ NS_DECL_NSITELLABLESTREAM
+
+ public:
+ explicit CacheFileInputStream(CacheFile* aFile, nsISupports* aEntry,
+ bool aAlternativeData);
+
+ NS_IMETHOD OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkUpdated(CacheFileChunk* aChunk) override;
+
+ // Memory reporting
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ uint32_t GetPosition() const { return mPos; };
+ bool IsAlternativeData() const { return mAlternativeData; };
+ int64_t GetChunkIdx() const {
+ return mChunk ? static_cast<int64_t>(mChunk->Index()) : -1;
+ };
+
+ private:
+ virtual ~CacheFileInputStream();
+
+ void CloseWithStatusLocked(nsresult aStatus);
+ void CleanUp();
+ void ReleaseChunk();
+ void EnsureCorrectChunk(bool aReleaseOnly);
+
+  // CanRead() returns a negative value when the output stream truncates the
+  // data before the input stream's mPos.
+ int64_t CanRead(CacheFileChunkReadHandle* aHandle);
+ void NotifyListener();
+ void MaybeNotifyListener();
+
+ RefPtr<CacheFile> mFile;
+ RefPtr<CacheFileChunk> mChunk;
+ int64_t mPos;
+ nsresult mStatus;
+ bool mClosed : 1;
+ bool mInReadSegments : 1;
+ bool mWaitingForUpdate : 1;
+ bool const mAlternativeData : 1;
+ int64_t mListeningForChunk;
+
+ nsCOMPtr<nsIInputStreamCallback> mCallback;
+ uint32_t mCallbackFlags;
+ nsCOMPtr<nsIEventTarget> mCallbackTarget;
+ // Held purely for referencing purposes
+ RefPtr<nsISupports> mCacheEntryHandle;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheFileMetadata.cpp b/netwerk/cache2/CacheFileMetadata.cpp
new file mode 100644
index 0000000000..738a1e9f49
--- /dev/null
+++ b/netwerk/cache2/CacheFileMetadata.cpp
@@ -0,0 +1,1049 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileMetadata.h"
+
+#include "CacheFileIOManager.h"
+#include "nsICacheEntry.h"
+#include "CacheHashUtils.h"
+#include "CacheFileChunk.h"
+#include "CacheFileUtils.h"
+#include "nsILoadContextInfo.h"
+#include "nsICacheEntry.h" // for nsICacheEntryMetaDataVisitor
+#include "../cache/nsCacheUtils.h"
+#include "nsIFile.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "prnetdb.h"
+
+namespace mozilla {
+namespace net {
+
+#define kMinMetadataRead 1024 // TODO find optimal value from telemetry
+#define kAlignSize 4096
+
+// Most cache entries fit into one chunk given the current chunk size. Make
+// sure to tweak this value if kChunkSize is going to change.
+#define kInitialHashArraySize 1
+
+// Initial elements buffer size.
+#define kInitialBufSize 64
+
+// Max size of elements in bytes.
+#define kMaxElementsSize 64 * 1024
+
+#define NOW_SECONDS() (uint32_t(PR_Now() / PR_USEC_PER_SEC))
+
+NS_IMPL_ISUPPORTS(CacheFileMetadata, CacheFileIOListener)
+
+CacheFileMetadata::CacheFileMetadata(CacheFileHandle* aHandle,
+ const nsACString& aKey)
+ : CacheMemoryConsumer(NORMAL),
+ mHandle(aHandle),
+ mHashArray(nullptr),
+ mHashArraySize(0),
+ mHashCount(0),
+ mOffset(-1),
+ mBuf(nullptr),
+ mBufSize(0),
+ mWriteBuf(nullptr),
+ mElementsSize(0),
+ mIsDirty(false),
+ mAnonymous(false),
+ mAllocExactSize(false),
+ mFirstRead(true) {
+ LOG(("CacheFileMetadata::CacheFileMetadata() [this=%p, handle=%p, key=%s]",
+ this, aHandle, PromiseFlatCString(aKey).get()));
+
+ memset(&mMetaHdr, 0, sizeof(CacheFileMetadataHeader));
+ mMetaHdr.mVersion = kCacheEntryVersion;
+ mMetaHdr.mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
+ mKey = aKey;
+
+ DebugOnly<nsresult> rv;
+ rv = ParseKey(aKey);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+}
+
+CacheFileMetadata::CacheFileMetadata(bool aMemoryOnly, bool aPinned,
+ const nsACString& aKey)
+ : CacheMemoryConsumer(aMemoryOnly ? MEMORY_ONLY : NORMAL),
+ mHandle(nullptr),
+ mHashArray(nullptr),
+ mHashArraySize(0),
+ mHashCount(0),
+ mOffset(0),
+ mBuf(nullptr),
+ mBufSize(0),
+ mWriteBuf(nullptr),
+ mElementsSize(0),
+ mIsDirty(true),
+ mAnonymous(false),
+ mAllocExactSize(false),
+ mFirstRead(true) {
+ LOG(("CacheFileMetadata::CacheFileMetadata() [this=%p, key=%s]", this,
+ PromiseFlatCString(aKey).get()));
+
+ memset(&mMetaHdr, 0, sizeof(CacheFileMetadataHeader));
+ mMetaHdr.mVersion = kCacheEntryVersion;
+ if (aPinned) {
+ AddFlags(kCacheEntryIsPinned);
+ }
+ mMetaHdr.mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
+ mKey = aKey;
+ mMetaHdr.mKeySize = mKey.Length();
+
+ DebugOnly<nsresult> rv;
+ rv = ParseKey(aKey);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+}
+
+CacheFileMetadata::CacheFileMetadata()
+ : CacheMemoryConsumer(DONT_REPORT /* This is a helper class */),
+ mHandle(nullptr),
+ mHashArray(nullptr),
+ mHashArraySize(0),
+ mHashCount(0),
+ mOffset(0),
+ mBuf(nullptr),
+ mBufSize(0),
+ mWriteBuf(nullptr),
+ mElementsSize(0),
+ mIsDirty(false),
+ mAnonymous(false),
+ mAllocExactSize(false),
+ mFirstRead(true) {
+ LOG(("CacheFileMetadata::CacheFileMetadata() [this=%p]", this));
+
+ memset(&mMetaHdr, 0, sizeof(CacheFileMetadataHeader));
+}
+
+CacheFileMetadata::~CacheFileMetadata() {
+ LOG(("CacheFileMetadata::~CacheFileMetadata() [this=%p]", this));
+
+ MOZ_ASSERT(!mListener);
+
+ if (mHashArray) {
+ CacheFileUtils::FreeBuffer(mHashArray);
+ mHashArray = nullptr;
+ mHashArraySize = 0;
+ }
+
+ if (mBuf) {
+ CacheFileUtils::FreeBuffer(mBuf);
+ mBuf = nullptr;
+ mBufSize = 0;
+ }
+}
+
+void CacheFileMetadata::SetHandle(CacheFileHandle* aHandle) {
+ LOG(("CacheFileMetadata::SetHandle() [this=%p, handle=%p]", this, aHandle));
+
+ MOZ_ASSERT(!mHandle);
+
+ mHandle = aHandle;
+}
+
+void CacheFileMetadata::ReadMetadata(CacheFileMetadataListener* aListener) {
+ LOG(("CacheFileMetadata::ReadMetadata() [this=%p, listener=%p]", this,
+ aListener));
+
+ MOZ_ASSERT(!mListener);
+ MOZ_ASSERT(!mHashArray);
+ MOZ_ASSERT(!mBuf);
+ MOZ_ASSERT(!mWriteBuf);
+
+ nsresult rv;
+
+ int64_t size = mHandle->FileSize();
+ MOZ_ASSERT(size != -1);
+
+ if (size == 0) {
+ // this is a new entry
+ LOG(
+ ("CacheFileMetadata::ReadMetadata() - Filesize == 0, creating empty "
+ "metadata. [this=%p]",
+ this));
+
+ InitEmptyMetadata();
+ aListener->OnMetadataRead(NS_OK);
+ return;
+ }
+
+ if (size < int64_t(sizeof(CacheFileMetadataHeader) + 2 * sizeof(uint32_t))) {
+ // there must be at least checksum, header and offset
+ LOG(
+ ("CacheFileMetadata::ReadMetadata() - File is corrupted, creating "
+ "empty metadata. [this=%p, filesize=%" PRId64 "]",
+ this, size));
+
+ InitEmptyMetadata();
+ aListener->OnMetadataRead(NS_OK);
+ return;
+ }
+
+ // Set offset so that we read at least kMinMetadataRead if the file is big
+ // enough.
+ int64_t offset;
+ if (size < kMinMetadataRead) {
+ offset = 0;
+ } else {
+ offset = size - kMinMetadataRead;
+ }
+
+ // round offset to kAlignSize blocks
+ offset = (offset / kAlignSize) * kAlignSize;
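+  // For illustration: with kMinMetadataRead=1024 and kAlignSize=4096, a file
+  // of 10000 bytes gives offset = ((10000 - 1024) / 4096) * 4096 = 8192, so
+  // the first read covers the trailing 1808 bytes.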
+
+ mBufSize = size - offset;
+ mBuf = static_cast<char*>(moz_xmalloc(mBufSize));
+
+ DoMemoryReport(MemoryUsage());
+
+ LOG(
+ ("CacheFileMetadata::ReadMetadata() - Reading metadata from disk, trying "
+ "offset=%" PRId64 ", filesize=%" PRId64 " [this=%p]",
+ offset, size, this));
+
+ mReadStart = mozilla::TimeStamp::Now();
+ mListener = aListener;
+ rv = CacheFileIOManager::Read(mHandle, offset, mBuf, mBufSize, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileMetadata::ReadMetadata() - CacheFileIOManager::Read() failed"
+ " synchronously, creating empty metadata. [this=%p, rv=0x%08" PRIx32
+ "]",
+ this, static_cast<uint32_t>(rv)));
+
+ mListener = nullptr;
+ InitEmptyMetadata();
+ aListener->OnMetadataRead(NS_OK);
+ }
+}
+
+uint32_t CacheFileMetadata::CalcMetadataSize(uint32_t aElementsSize,
+ uint32_t aHashCount) {
+ return sizeof(uint32_t) + // hash of the metadata
+ aHashCount * sizeof(CacheHash::Hash16_t) + // array of chunk hashes
+ sizeof(CacheFileMetadataHeader) + // metadata header
+ mKey.Length() + 1 + // key with trailing null
+ aElementsSize + // elements
+ sizeof(uint32_t); // offset
+}
+
+nsresult CacheFileMetadata::WriteMetadata(
+ uint32_t aOffset, CacheFileMetadataListener* aListener) {
+ LOG(("CacheFileMetadata::WriteMetadata() [this=%p, offset=%d, listener=%p]",
+ this, aOffset, aListener));
+
+ MOZ_ASSERT(!mListener);
+ MOZ_ASSERT(!mWriteBuf);
+
+ nsresult rv;
+
+ mIsDirty = false;
+
+ mWriteBuf =
+ static_cast<char*>(malloc(CalcMetadataSize(mElementsSize, mHashCount)));
+ if (!mWriteBuf) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ char* p = mWriteBuf + sizeof(uint32_t);
+ if (mHashCount) {
+ memcpy(p, mHashArray, mHashCount * sizeof(CacheHash::Hash16_t));
+ p += mHashCount * sizeof(CacheHash::Hash16_t);
+ }
+ mMetaHdr.WriteToBuf(p);
+ p += sizeof(CacheFileMetadataHeader);
+ memcpy(p, mKey.get(), mKey.Length());
+ p += mKey.Length();
+ *p = 0;
+ p++;
+ if (mElementsSize) {
+ memcpy(p, mBuf, mElementsSize);
+ p += mElementsSize;
+ }
+
+ CacheHash::Hash32_t hash;
+ hash = CacheHash::Hash(mWriteBuf + sizeof(uint32_t),
+ p - mWriteBuf - sizeof(uint32_t));
+ NetworkEndian::writeUint32(mWriteBuf, hash);
+
+ NetworkEndian::writeUint32(p, aOffset);
+ p += sizeof(uint32_t);
+
+ char* writeBuffer = mWriteBuf;
+ if (aListener) {
+ mListener = aListener;
+ } else {
+ // We are not going to pass |this| as a callback so the buffer will be
+ // released by CacheFileIOManager. Just null out mWriteBuf here.
+ mWriteBuf = nullptr;
+ }
+
+ rv = CacheFileIOManager::Write(mHandle, aOffset, writeBuffer, p - writeBuffer,
+ true, true, aListener ? this : nullptr);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileMetadata::WriteMetadata() - CacheFileIOManager::Write() "
+ "failed synchronously. [this=%p, rv=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(rv)));
+
+ mListener = nullptr;
+ if (mWriteBuf) {
+ CacheFileUtils::FreeBuffer(mWriteBuf);
+ mWriteBuf = nullptr;
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ DoMemoryReport(MemoryUsage());
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::SyncReadMetadata(nsIFile* aFile) {
+ LOG(("CacheFileMetadata::SyncReadMetadata() [this=%p]", this));
+
+ MOZ_ASSERT(!mListener);
+ MOZ_ASSERT(!mHandle);
+ MOZ_ASSERT(!mHashArray);
+ MOZ_ASSERT(!mBuf);
+ MOZ_ASSERT(!mWriteBuf);
+ MOZ_ASSERT(mKey.IsEmpty());
+
+ nsresult rv;
+
+ int64_t fileSize;
+ rv = aFile->GetFileSize(&fileSize);
+ if (NS_FAILED(rv)) {
+ // Don't bloat the console
+ return rv;
+ }
+
+ PRFileDesc* fd;
+ rv = aFile->OpenNSPRFileDesc(PR_RDONLY, 0600, &fd);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ int64_t offset = PR_Seek64(fd, fileSize - sizeof(uint32_t), PR_SEEK_SET);
+ if (offset == -1) {
+ PR_Close(fd);
+ return NS_ERROR_FAILURE;
+ }
+
+ uint32_t metaOffset;
+ int32_t bytesRead = PR_Read(fd, &metaOffset, sizeof(uint32_t));
+ if (bytesRead != sizeof(uint32_t)) {
+ PR_Close(fd);
+ return NS_ERROR_FAILURE;
+ }
+
+ metaOffset = NetworkEndian::readUint32(&metaOffset);
+ if (metaOffset > fileSize) {
+ PR_Close(fd);
+ return NS_ERROR_FAILURE;
+ }
+
+ mBuf = static_cast<char*>(malloc(fileSize - metaOffset));
+ if (!mBuf) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ mBufSize = fileSize - metaOffset;
+
+ DoMemoryReport(MemoryUsage());
+
+ offset = PR_Seek64(fd, metaOffset, PR_SEEK_SET);
+ if (offset == -1) {
+ PR_Close(fd);
+ return NS_ERROR_FAILURE;
+ }
+
+ bytesRead = PR_Read(fd, mBuf, mBufSize);
+ PR_Close(fd);
+ if (bytesRead != static_cast<int32_t>(mBufSize)) {
+ return NS_ERROR_FAILURE;
+ }
+
+ rv = ParseMetadata(metaOffset, 0, false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
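+// The elements buffer is a sequence of null-terminated key/value pairs laid
+// out back to back, e.g. "key1\0value1\0key2\0value2\0". GetElement(),
+// SetElement() and Visit() below all walk this layout.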
+const char* CacheFileMetadata::GetElement(const char* aKey) {
+ const char* data = mBuf;
+ const char* limit = mBuf + mElementsSize;
+
+ while (data != limit) {
+ size_t maxLen = limit - data;
+ size_t keyLen = strnlen(data, maxLen);
+ MOZ_RELEASE_ASSERT(keyLen != maxLen,
+ "Metadata elements corrupted. Key "
+ "isn't null terminated!");
+ MOZ_RELEASE_ASSERT(keyLen + 1 != maxLen,
+ "Metadata elements corrupted. "
+ "There is no value for the key!");
+
+ const char* value = data + keyLen + 1;
+ maxLen = limit - value;
+ size_t valueLen = strnlen(value, maxLen);
+ MOZ_RELEASE_ASSERT(valueLen != maxLen,
+ "Metadata elements corrupted. Value "
+ "isn't null terminated!");
+
+ if (strcmp(data, aKey) == 0) {
+ LOG(("CacheFileMetadata::GetElement() - Key found [this=%p, key=%s]",
+ this, aKey));
+ return value;
+ }
+
+ // point to next pair
+ data += keyLen + valueLen + 2;
+ }
+ LOG(("CacheFileMetadata::GetElement() - Key not found [this=%p, key=%s]",
+ this, aKey));
+ return nullptr;
+}
+
+nsresult CacheFileMetadata::SetElement(const char* aKey, const char* aValue) {
+ LOG(("CacheFileMetadata::SetElement() [this=%p, key=%s, value=%p]", this,
+ aKey, aValue));
+
+ MarkDirty();
+
+ nsresult rv;
+
+ const uint32_t keySize = strlen(aKey) + 1;
+ char* pos = const_cast<char*>(GetElement(aKey));
+
+ if (!aValue) {
+ // No value means remove the key/value pair completely, if existing
+ if (pos) {
+ uint32_t oldValueSize = strlen(pos) + 1;
+ uint32_t offset = pos - mBuf;
+ uint32_t remainder = mElementsSize - (offset + oldValueSize);
+
+ memmove(pos - keySize, pos + oldValueSize, remainder);
+ mElementsSize -= keySize + oldValueSize;
+ }
+ return NS_OK;
+ }
+
+ const uint32_t valueSize = strlen(aValue) + 1;
+ uint32_t newSize = mElementsSize + valueSize;
+ if (pos) {
+ const uint32_t oldValueSize = strlen(pos) + 1;
+ const uint32_t offset = pos - mBuf;
+ const uint32_t remainder = mElementsSize - (offset + oldValueSize);
+
+ // Update the value in place
+ newSize -= oldValueSize;
+ rv = EnsureBuffer(newSize);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // Move the remainder to the right place
+ pos = mBuf + offset;
+ memmove(pos + valueSize, pos + oldValueSize, remainder);
+ } else {
+ // allocate new meta data element
+ newSize += keySize;
+ rv = EnsureBuffer(newSize);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // Add after last element
+ pos = mBuf + mElementsSize;
+ memcpy(pos, aKey, keySize);
+ pos += keySize;
+ }
+
+ // Update value
+ memcpy(pos, aValue, valueSize);
+ mElementsSize = newSize;
+
+ return NS_OK;
+}
+
+void CacheFileMetadata::Visit(nsICacheEntryMetaDataVisitor* aVisitor) {
+ const char* data = mBuf;
+ const char* limit = mBuf + mElementsSize;
+
+ while (data < limit) {
+ // Point to the value part
+ const char* value = data + strlen(data) + 1;
+ MOZ_ASSERT(value < limit, "Metadata elements corrupted");
+
+ aVisitor->OnMetaDataElement(data, value);
+
+ // Skip value part
+ data = value + strlen(value) + 1;
+ }
+
+ MOZ_ASSERT(data == limit, "Metadata elements corrupted");
+}
+
+CacheHash::Hash16_t CacheFileMetadata::GetHash(uint32_t aIndex) {
+ MOZ_ASSERT(aIndex < mHashCount);
+ return NetworkEndian::readUint16(&mHashArray[aIndex]);
+}
+
+nsresult CacheFileMetadata::SetHash(uint32_t aIndex,
+ CacheHash::Hash16_t aHash) {
+ LOG(("CacheFileMetadata::SetHash() [this=%p, idx=%d, hash=%x]", this, aIndex,
+ aHash));
+
+ MarkDirty();
+
+ MOZ_ASSERT(aIndex <= mHashCount);
+
+ if (aIndex > mHashCount) {
+ return NS_ERROR_INVALID_ARG;
+ } else if (aIndex == mHashCount) {
+ if ((aIndex + 1) * sizeof(CacheHash::Hash16_t) > mHashArraySize) {
+ // reallocate hash array buffer
+ if (mHashArraySize == 0) {
+ mHashArraySize = kInitialHashArraySize * sizeof(CacheHash::Hash16_t);
+ } else {
+ mHashArraySize *= 2;
+ }
+ mHashArray = static_cast<CacheHash::Hash16_t*>(
+ moz_xrealloc(mHashArray, mHashArraySize));
+ }
+
+ mHashCount++;
+ }
+
+ NetworkEndian::writeUint16(&mHashArray[aIndex], aHash);
+
+ DoMemoryReport(MemoryUsage());
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::RemoveHash(uint32_t aIndex) {
+ LOG(("CacheFileMetadata::RemoveHash() [this=%p, idx=%d]", this, aIndex));
+
+ MarkDirty();
+
+ MOZ_ASSERT((aIndex + 1) == mHashCount, "Can remove only last hash!");
+
+ if (aIndex + 1 != mHashCount) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ mHashCount--;
+ return NS_OK;
+}
+
+void CacheFileMetadata::AddFlags(uint32_t aFlags) {
+ MarkDirty(false);
+ mMetaHdr.mFlags |= aFlags;
+}
+
+void CacheFileMetadata::RemoveFlags(uint32_t aFlags) {
+ MarkDirty(false);
+ mMetaHdr.mFlags &= ~aFlags;
+}
+
+void CacheFileMetadata::SetExpirationTime(uint32_t aExpirationTime) {
+ LOG(("CacheFileMetadata::SetExpirationTime() [this=%p, expirationTime=%d]",
+ this, aExpirationTime));
+
+ MarkDirty(false);
+ mMetaHdr.mExpirationTime = aExpirationTime;
+}
+
+void CacheFileMetadata::SetFrecency(uint32_t aFrecency) {
+ LOG(("CacheFileMetadata::SetFrecency() [this=%p, frecency=%f]", this,
+ (double)aFrecency));
+
+ MarkDirty(false);
+ mMetaHdr.mFrecency = aFrecency;
+}
+
+void CacheFileMetadata::OnFetched() {
+ MarkDirty(false);
+
+ mMetaHdr.mLastFetched = NOW_SECONDS();
+ ++mMetaHdr.mFetchCount;
+}
+
+void CacheFileMetadata::MarkDirty(bool aUpdateLastModified) {
+ mIsDirty = true;
+ if (aUpdateLastModified) {
+ mMetaHdr.mLastModified = NOW_SECONDS();
+ }
+}
+
+nsresult CacheFileMetadata::OnFileOpened(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileMetadata::OnFileOpened should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileMetadata::OnDataWritten(CacheFileHandle* aHandle,
+ const char* aBuf, nsresult aResult) {
+ LOG(
+ ("CacheFileMetadata::OnDataWritten() [this=%p, handle=%p, "
+ "result=0x%08" PRIx32 "]",
+ this, aHandle, static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(mListener);
+ MOZ_ASSERT(mWriteBuf);
+
+ CacheFileUtils::FreeBuffer(mWriteBuf);
+ mWriteBuf = nullptr;
+
+ nsCOMPtr<CacheFileMetadataListener> listener;
+
+ mListener.swap(listener);
+ listener->OnMetadataWritten(aResult);
+
+ DoMemoryReport(MemoryUsage());
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) {
+ LOG((
+ "CacheFileMetadata::OnDataRead() [this=%p, handle=%p, result=0x%08" PRIx32
+ "]",
+ this, aHandle, static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(mListener);
+
+ nsresult rv;
+ nsCOMPtr<CacheFileMetadataListener> listener;
+
+ if (NS_FAILED(aResult)) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() failed"
+ ", creating empty metadata. [this=%p, rv=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(aResult)));
+
+ InitEmptyMetadata();
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+ return NS_OK;
+ }
+
+ if (mFirstRead) {
+ Telemetry::AccumulateTimeDelta(
+ Telemetry::NETWORK_CACHE_METADATA_FIRST_READ_TIME_MS, mReadStart);
+ } else {
+ Telemetry::AccumulateTimeDelta(
+ Telemetry::NETWORK_CACHE_METADATA_SECOND_READ_TIME_MS, mReadStart);
+ }
+
+ // check whether we have read all necessary data
+ uint32_t realOffset =
+ NetworkEndian::readUint32(mBuf + mBufSize - sizeof(uint32_t));
+
+ int64_t size = mHandle->FileSize();
+ MOZ_ASSERT(size != -1);
+
+ if (realOffset >= size) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - Invalid realOffset, creating "
+ "empty metadata. [this=%p, realOffset=%u, size=%" PRId64 "]",
+ this, realOffset, size));
+
+ InitEmptyMetadata();
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+ return NS_OK;
+ }
+
+ uint32_t maxHashCount = size / kChunkSize;
+ uint32_t maxMetadataSize = CalcMetadataSize(kMaxElementsSize, maxHashCount);
+ if (size - realOffset > maxMetadataSize) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - Invalid realOffset, metadata would "
+ "be too big, creating empty metadata. [this=%p, realOffset=%u, "
+ "maxMetadataSize=%u, size=%" PRId64 "]",
+ this, realOffset, maxMetadataSize, size));
+
+ InitEmptyMetadata();
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+ return NS_OK;
+ }
+
+ uint32_t usedOffset = size - mBufSize;
+
+ if (realOffset < usedOffset) {
+ uint32_t missing = usedOffset - realOffset;
+ // we need to read more data
+ char* newBuf = static_cast<char*>(realloc(mBuf, mBufSize + missing));
+ if (!newBuf) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - Error allocating %d more bytes "
+ "for the missing part of the metadata, creating empty metadata. "
+ "[this=%p]",
+ missing, this));
+
+ InitEmptyMetadata();
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+ return NS_OK;
+ }
+
+ mBuf = newBuf;
+ memmove(mBuf + missing, mBuf, mBufSize);
+ mBufSize += missing;
+
+ DoMemoryReport(MemoryUsage());
+
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - We need to read %d more bytes to "
+ "have full metadata. [this=%p]",
+ missing, this));
+
+ mFirstRead = false;
+ mReadStart = mozilla::TimeStamp::Now();
+ rv = CacheFileIOManager::Read(mHandle, realOffset, mBuf, missing, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() "
+ "failed synchronously, creating empty metadata. [this=%p, "
+ "rv=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(rv)));
+
+ InitEmptyMetadata();
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+ return NS_OK;
+ }
+
+ return NS_OK;
+ }
+
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_METADATA_SIZE_2,
+ size - realOffset);
+
+ // We have all data according to offset information at the end of the entry.
+ // Try to parse it.
+ rv = ParseMetadata(realOffset, realOffset - usedOffset, true);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileMetadata::OnDataRead() - Error parsing metadata, creating "
+ "empty metadata. [this=%p]",
+ this));
+ InitEmptyMetadata();
+ } else {
+ // Shrink elements buffer.
+ mBuf = static_cast<char*>(moz_xrealloc(mBuf, mElementsSize));
+ mBufSize = mElementsSize;
+
+ // There is usually no call, or just one call, to SetMetadataElement() when
+ // the metadata is parsed from disk. Avoid allocating a power-of-two sized
+ // buffer, which we do in the case of newly created metadata.
+ mAllocExactSize = true;
+ }
+
+ mListener.swap(listener);
+ listener->OnMetadataRead(NS_OK);
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::OnFileDoomed(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileMetadata::OnFileDoomed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileMetadata::OnEOFSet(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileMetadata::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileMetadata::OnFileRenamed(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ MOZ_CRASH("CacheFileMetadata::OnFileRenamed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+void CacheFileMetadata::InitEmptyMetadata() {
+ if (mBuf) {
+ CacheFileUtils::FreeBuffer(mBuf);
+ mBuf = nullptr;
+ mBufSize = 0;
+ }
+ mAllocExactSize = false;
+ mOffset = 0;
+ mMetaHdr.mVersion = kCacheEntryVersion;
+ mMetaHdr.mFetchCount = 0;
+ mMetaHdr.mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
+ mMetaHdr.mKeySize = mKey.Length();
+
+ // Deliberately not touching the "kCacheEntryIsPinned" flag.
+
+ DoMemoryReport(MemoryUsage());
+
+ // We're creating a new entry. If there is any old data truncate it.
+ if (mHandle) {
+ mHandle->SetPinned(Pinned());
+ // We can mark the handle as invalid now, because it simply
+ // doesn't have the correct metadata. This will cause IO operations
+ // to be bypassed during shutdown (mainly dooming it, when a channel
+ // is canceled by closing the window).
+ mHandle->SetInvalid();
+ if (mHandle->FileExists() && mHandle->FileSize()) {
+ CacheFileIOManager::TruncateSeekSetEOF(mHandle, 0, 0, nullptr);
+ }
+ }
+}
+
+nsresult CacheFileMetadata::ParseMetadata(uint32_t aMetaOffset,
+ uint32_t aBufOffset, bool aHaveKey) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() [this=%p, metaOffset=%d, "
+ "bufOffset=%d, haveKey=%u]",
+ this, aMetaOffset, aBufOffset, aHaveKey));
+
+ nsresult rv;
+
+ uint32_t metaposOffset = mBufSize - sizeof(uint32_t);
+ uint32_t hashesOffset = aBufOffset + sizeof(uint32_t);
+ uint32_t hashCount = aMetaOffset / kChunkSize;
+ if (aMetaOffset % kChunkSize) hashCount++;
+ uint32_t hashesLen = hashCount * sizeof(CacheHash::Hash16_t);
+ uint32_t hdrOffset = hashesOffset + hashesLen;
+ uint32_t keyOffset = hdrOffset + sizeof(CacheFileMetadataHeader);
+
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() [this=%p]\n metaposOffset=%d\n "
+ "hashesOffset=%d\n hashCount=%d\n hashesLen=%d\n hdfOffset=%d\n "
+ "keyOffset=%d\n",
+ this, metaposOffset, hashesOffset, hashCount, hashesLen, hdrOffset,
+ keyOffset));
+
+ if (keyOffset > metaposOffset) {
+ LOG(("CacheFileMetadata::ParseMetadata() - Wrong keyOffset! [this=%p]",
+ this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+
+ mMetaHdr.ReadFromBuf(mBuf + hdrOffset);
+
+ if (mMetaHdr.mVersion == 1) {
+ // Backward compatibility before we've added flags to the header
+ keyOffset -= sizeof(uint32_t);
+ } else if (mMetaHdr.mVersion == 2) {
+ // Version 2 just lacks the ability to store alternative data. Nothing to do
+ // here.
+ } else if (mMetaHdr.mVersion != kCacheEntryVersion) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Not a version we understand to. "
+ "[version=0x%x, this=%p]",
+ mMetaHdr.mVersion, this));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // Update the version stored in the header to make writes
+ // store the header in the current version form.
+ mMetaHdr.mVersion = kCacheEntryVersion;
+
+ uint32_t elementsOffset = mMetaHdr.mKeySize + keyOffset + 1;
+
+ if (elementsOffset > metaposOffset) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Wrong elementsOffset %d "
+ "[this=%p]",
+ elementsOffset, this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+
+ // check that key ends with \0
+ if (mBuf[elementsOffset - 1] != 0) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Elements not null terminated. "
+ "[this=%p]",
+ this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+
+ if (!aHaveKey) {
+ // get the key from metadata
+ mKey.Assign(mBuf + keyOffset, mMetaHdr.mKeySize);
+
+ rv = ParseKey(mKey);
+ if (NS_FAILED(rv)) return rv;
+ } else {
+ if (mMetaHdr.mKeySize != mKey.Length()) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Key collision (1), key=%s "
+ "[this=%p]",
+ nsCString(mBuf + keyOffset, mMetaHdr.mKeySize).get(), this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+
+ if (memcmp(mKey.get(), mBuf + keyOffset, mKey.Length()) != 0) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Key collision (2), key=%s "
+ "[this=%p]",
+ nsCString(mBuf + keyOffset, mMetaHdr.mKeySize).get(), this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+ }
+
+ // check metadata hash (data from hashesOffset to metaposOffset)
+ CacheHash::Hash32_t hashComputed, hashExpected;
+ hashComputed =
+ CacheHash::Hash(mBuf + hashesOffset, metaposOffset - hashesOffset);
+ hashExpected = NetworkEndian::readUint32(mBuf + aBufOffset);
+
+ if (hashComputed != hashExpected) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - Metadata hash mismatch! Hash of "
+ "the metadata is %x, hash in file is %x [this=%p]",
+ hashComputed, hashExpected, this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+
+ // check elements
+ rv = CheckElements(mBuf + elementsOffset, metaposOffset - elementsOffset);
+ if (NS_FAILED(rv)) return rv;
+
+ if (mHandle) {
+ if (!mHandle->SetPinned(Pinned())) {
+ LOG(
+ ("CacheFileMetadata::ParseMetadata() - handle was doomed for this "
+ "pinning state, truncate the file [this=%p, pinned=%d]",
+ this, Pinned()));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+ }
+
+ mHashArraySize = hashesLen;
+ mHashCount = hashCount;
+ if (mHashArraySize) {
+ mHashArray = static_cast<CacheHash::Hash16_t*>(moz_xmalloc(mHashArraySize));
+ memcpy(mHashArray, mBuf + hashesOffset, mHashArraySize);
+ }
+
+ MarkDirty();
+
+ mElementsSize = metaposOffset - elementsOffset;
+ memmove(mBuf, mBuf + elementsOffset, mElementsSize);
+ mOffset = aMetaOffset;
+
+ DoMemoryReport(MemoryUsage());
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::CheckElements(const char* aBuf, uint32_t aSize) {
+ if (aSize) {
+ // Check if the metadata ends with a zero byte.
+ if (aBuf[aSize - 1] != 0) {
+ NS_ERROR("Metadata elements are not null terminated");
+ LOG(
+ ("CacheFileMetadata::CheckElements() - Elements are not null "
+ "terminated. [this=%p]",
+ this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+ // Check that there are an even number of zero bytes
+ // to match the pattern { key \0 value \0 }
+ bool odd = false;
+ for (uint32_t i = 0; i < aSize; i++) {
+ if (aBuf[i] == 0) odd = !odd;
+ }
+ if (odd) {
+ NS_ERROR("Metadata elements are malformed");
+ LOG(
+ ("CacheFileMetadata::CheckElements() - Elements are malformed. "
+ "[this=%p]",
+ this));
+ return NS_ERROR_FILE_CORRUPTED;
+ }
+ }
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::EnsureBuffer(uint32_t aSize) {
+ if (aSize > kMaxElementsSize) {
+ return NS_ERROR_FAILURE;
+ }
+
+ if (mBufSize < aSize) {
+ if (mAllocExactSize) {
+ // If this is not the only allocation, use power of two for following
+ // allocations.
+ mAllocExactSize = false;
+ } else {
+ // find smallest power of 2 greater than or equal to aSize
+ --aSize;
+ aSize |= aSize >> 1;
+ aSize |= aSize >> 2;
+ aSize |= aSize >> 4;
+ aSize |= aSize >> 8;
+ aSize |= aSize >> 16;
+ ++aSize;
+ }
+
+ if (aSize < kInitialBufSize) {
+ aSize = kInitialBufSize;
+ }
+
+ char* newBuf = static_cast<char*>(realloc(mBuf, aSize));
+ if (!newBuf) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ mBufSize = aSize;
+ mBuf = newBuf;
+
+ DoMemoryReport(MemoryUsage());
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheFileMetadata::ParseKey(const nsACString& aKey) {
+ nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(aKey);
+ NS_ENSURE_TRUE(info, NS_ERROR_FAILURE);
+
+ mAnonymous = info->IsAnonymous();
+ mOriginAttributes = *info->OriginAttributesPtr();
+
+ return NS_OK;
+}
+
+// Memory reporting
+
+size_t CacheFileMetadata::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = 0;
+ // mHandle reported via CacheFileIOManager.
+ n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
+ n += mallocSizeOf(mHashArray);
+ n += mallocSizeOf(mBuf);
+ // Ignore mWriteBuf, it's not safe to access it when metadata is being
+ // written and it's null otherwise.
+ // mListener is usually the owning CacheFile.
+
+ return n;
+}
+
+size_t CacheFileMetadata::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+} // namespace net
+} // namespace mozilla
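
A note for readers of OnDataRead()/ParseMetadata() above: the on-disk entry ends with a 32-bit big-endian offset pointing at the start of the metadata block, which itself consists of a 32-bit hash of the metadata, one 16-bit hash per data chunk, the header, the null-terminated key, and the key/value elements. The following standalone sketch reproduces only that offset arithmetic; the chunk size and header size are assumptions mirroring constants defined elsewhere in this patch, and all validation (versions, hashes, key comparison) is omitted.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

namespace sketch {

// Assumed constants: kChunkSize mirrors CacheFileChunk.h, kHeaderSize is the
// eight uint32_t fields of CacheFileMetadataHeader (version >= 2).
constexpr uint32_t kChunkSize = 256 * 1024;
constexpr uint32_t kHeaderSize = 8 * sizeof(uint32_t);

inline uint32_t ReadBigEndian32(const uint8_t* p) {
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
         (uint32_t(p[2]) << 8) | uint32_t(p[3]);
}

struct Layout {
  uint32_t dataSize;       // value of the trailing offset field
  uint32_t hashesOffset;   // first 16-bit chunk hash
  uint32_t hashCount;      // one hash per (possibly partial) chunk
  uint32_t headerOffset;   // CacheFileMetadataHeader
  uint32_t keyOffset;      // null-terminated key, elements follow
  uint32_t metaposOffset;  // where the trailing offset field starts
};

// Mirrors the offset computation in CacheFileMetadata::ParseMetadata().
std::optional<Layout> Probe(const std::vector<uint8_t>& file) {
  if (file.size() < sizeof(uint32_t)) return std::nullopt;

  Layout l{};
  l.metaposOffset = static_cast<uint32_t>(file.size()) - sizeof(uint32_t);
  l.dataSize = ReadBigEndian32(file.data() + l.metaposOffset);
  if (l.dataSize >= file.size()) return std::nullopt;  // corrupted offset

  // The metadata block starts with a 32-bit hash of everything after it.
  l.hashesOffset = l.dataSize + sizeof(uint32_t);
  l.hashCount = l.dataSize / kChunkSize + (l.dataSize % kChunkSize ? 1 : 0);
  l.headerOffset = l.hashesOffset + l.hashCount * sizeof(uint16_t);
  l.keyOffset = l.headerOffset + kHeaderSize;
  if (l.keyOffset > l.metaposOffset) return std::nullopt;  // corrupted
  return l;
}

}  // namespace sketch
```

Note that version 1 entries lack the flags field, so the real parser adjusts keyOffset by four bytes in that case; the sketch ignores that.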
diff --git a/netwerk/cache2/CacheFileMetadata.h b/netwerk/cache2/CacheFileMetadata.h
new file mode 100644
index 0000000000..32ac9ef0f9
--- /dev/null
+++ b/netwerk/cache2/CacheFileMetadata.h
@@ -0,0 +1,237 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileMetadata__h__
+#define CacheFileMetadata__h__
+
+#include "CacheFileIOManager.h"
+#include "CacheStorageService.h"
+#include "CacheHashUtils.h"
+#include "CacheObserver.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/BasePrincipal.h"
+#include "nsString.h"
+
+class nsICacheEntryMetaDataVisitor;
+
+namespace mozilla {
+namespace net {
+
+// Flags stored in CacheFileMetadataHeader.mFlags
+
+// Whether an entry is a pinned entry (created with
+// nsICacheStorageService.pinningCacheStorage.)
+static const uint32_t kCacheEntryIsPinned = 1 << 0;
+
+// By multiplying with the current half-life we convert the frecency
+// to a time value independent of the half-life. The range fits into 32 bits.
+// When the decay time changes on the next run of the browser, we convert
+// the frecency value to the correct internal representation again.
+// It might not be 100% accurate, but it suffices for this purpose.
+#define FRECENCY2INT(aFrecency) \
+ ((uint32_t)((aFrecency)*CacheObserver::HalfLifeSeconds()))
+#define INT2FRECENCY(aInt) \
+ ((double)(aInt) / (double)CacheObserver::HalfLifeSeconds())
+
+#define kCacheEntryVersion 3
+
+#pragma pack(push)
+#pragma pack(1)
+
+class CacheFileMetadataHeader {
+ public:
+ uint32_t mVersion;
+ uint32_t mFetchCount;
+ uint32_t mLastFetched;
+ uint32_t mLastModified;
+ uint32_t mFrecency;
+ uint32_t mExpirationTime;
+ uint32_t mKeySize;
+ uint32_t mFlags;
+
+ void WriteToBuf(void* aBuf) {
+ EnsureCorrectClassSize();
+
+ uint8_t* ptr = static_cast<uint8_t*>(aBuf);
+ MOZ_ASSERT(mVersion == kCacheEntryVersion);
+ NetworkEndian::writeUint32(ptr, mVersion);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mFetchCount);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mLastFetched);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mLastModified);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mFrecency);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mExpirationTime);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mKeySize);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint32(ptr, mFlags);
+ }
+
+ void ReadFromBuf(const void* aBuf) {
+ EnsureCorrectClassSize();
+
+ const uint8_t* ptr = static_cast<const uint8_t*>(aBuf);
+ mVersion = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mFetchCount = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mLastFetched = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mLastModified = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mFrecency = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mExpirationTime = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mKeySize = BigEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ if (mVersion >= 2) {
+ mFlags = BigEndian::readUint32(ptr);
+ } else {
+ mFlags = 0;
+ }
+ }
+
+ inline void EnsureCorrectClassSize() {
+ static_assert(
+ (sizeof(mVersion) + sizeof(mFetchCount) + sizeof(mLastFetched) +
+ sizeof(mLastModified) + sizeof(mFrecency) + sizeof(mExpirationTime) +
+ sizeof(mKeySize)) +
+ sizeof(mFlags) ==
+ sizeof(CacheFileMetadataHeader),
+ "Unexpected sizeof(CacheFileMetadataHeader)!");
+ }
+};
+
+#pragma pack(pop)
+
+#define CACHEFILEMETADATALISTENER_IID \
+ { /* a9e36125-3f01-4020-9540-9dafa8d31ba7 */ \
+ 0xa9e36125, 0x3f01, 0x4020, { \
+ 0x95, 0x40, 0x9d, 0xaf, 0xa8, 0xd3, 0x1b, 0xa7 \
+ } \
+ }
+
+class CacheFileMetadataListener : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(CACHEFILEMETADATALISTENER_IID)
+
+ NS_IMETHOD OnMetadataRead(nsresult aResult) = 0;
+ NS_IMETHOD OnMetadataWritten(nsresult aResult) = 0;
+ virtual bool IsKilled() = 0;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(CacheFileMetadataListener,
+ CACHEFILEMETADATALISTENER_IID)
+
+class CacheFileMetadata final : public CacheFileIOListener,
+ public CacheMemoryConsumer {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ CacheFileMetadata(CacheFileHandle* aHandle, const nsACString& aKey);
+ CacheFileMetadata(bool aMemoryOnly, bool aPinned, const nsACString& aKey);
+ CacheFileMetadata();
+
+ void SetHandle(CacheFileHandle* aHandle);
+
+ const nsACString& GetKey() const { return mKey; }
+
+ void ReadMetadata(CacheFileMetadataListener* aListener);
+ uint32_t CalcMetadataSize(uint32_t aElementsSize, uint32_t aHashCount);
+ nsresult WriteMetadata(uint32_t aOffset,
+ CacheFileMetadataListener* aListener);
+ nsresult SyncReadMetadata(nsIFile* aFile);
+
+ bool IsAnonymous() const { return mAnonymous; }
+ mozilla::OriginAttributes const& OriginAttributes() const {
+ return mOriginAttributes;
+ }
+ bool Pinned() const { return !!(mMetaHdr.mFlags & kCacheEntryIsPinned); }
+
+ const char* GetElement(const char* aKey);
+ nsresult SetElement(const char* aKey, const char* aValue);
+ void Visit(nsICacheEntryMetaDataVisitor* aVisitor);
+
+ CacheHash::Hash16_t GetHash(uint32_t aIndex);
+ nsresult SetHash(uint32_t aIndex, CacheHash::Hash16_t aHash);
+ nsresult RemoveHash(uint32_t aIndex);
+
+ void AddFlags(uint32_t aFlags);
+ void RemoveFlags(uint32_t aFlags);
+ uint32_t GetFlags() const { return mMetaHdr.mFlags; }
+ void SetExpirationTime(uint32_t aExpirationTime);
+ uint32_t GetExpirationTime() const { return mMetaHdr.mExpirationTime; }
+ void SetFrecency(uint32_t aFrecency);
+ uint32_t GetFrecency() const { return mMetaHdr.mFrecency; }
+ uint32_t GetLastModified() const { return mMetaHdr.mLastModified; }
+ uint32_t GetLastFetched() const { return mMetaHdr.mLastFetched; }
+ uint32_t GetFetchCount() const { return mMetaHdr.mFetchCount; }
+ // Called by upper layers to indicate that the entry this metadata belongs
+ // to has been fetched, i.e. delivered to the consumer.
+ void OnFetched();
+
+ int64_t Offset() { return mOffset; }
+ uint32_t ElementsSize() { return mElementsSize; }
+ void MarkDirty(bool aUpdateLastModified = true);
+ bool IsDirty() { return mIsDirty; }
+ uint32_t MemoryUsage() {
+ return sizeof(CacheFileMetadata) + mHashArraySize + mBufSize;
+ }
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) override;
+ virtual bool IsKilled() override {
+ return mListener && mListener->IsKilled();
+ }
+ void InitEmptyMetadata();
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ virtual ~CacheFileMetadata();
+
+ nsresult ParseMetadata(uint32_t aMetaOffset, uint32_t aBufOffset,
+ bool aHaveKey);
+ nsresult CheckElements(const char* aBuf, uint32_t aSize);
+ nsresult EnsureBuffer(uint32_t aSize);
+ nsresult ParseKey(const nsACString& aKey);
+
+ RefPtr<CacheFileHandle> mHandle;
+ nsCString mKey;
+ CacheHash::Hash16_t* mHashArray;
+ uint32_t mHashArraySize;
+ uint32_t mHashCount;
+ int64_t mOffset;
+ char* mBuf; // used for parsing, then points
+ // to elements
+ uint32_t mBufSize;
+ char* mWriteBuf;
+ CacheFileMetadataHeader mMetaHdr;
+ uint32_t mElementsSize;
+ bool mIsDirty : 1;
+ bool mAnonymous : 1;
+ bool mAllocExactSize : 1;
+ bool mFirstRead : 1;
+ mozilla::OriginAttributes mOriginAttributes;
+ mozilla::TimeStamp mReadStart;
+ nsCOMPtr<CacheFileMetadataListener> mListener;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
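
CacheFileMetadataHeader above is written field by field in network (big-endian) byte order, and ReadFromBuf() only consumes the mFlags word when the stored version is at least 2. The sketch below shows the same version-gated, explicit-endian pattern on a reduced two-field header; it is an illustration under those assumptions, not the Gecko code, and the field set is hypothetical.

```cpp
#include <cstddef>
#include <cstdint>

// Minimal sketch of version-gated, big-endian field decoding, in the spirit
// of CacheFileMetadataHeader::ReadFromBuf(). Field set reduced to two
// uint32_t values for illustration only.
struct MiniHeader {
  uint32_t mVersion = 0;
  uint32_t mFlags = 0;
};

static uint32_t ReadBE32(const uint8_t* p) {
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
         (uint32_t(p[2]) << 8) | uint32_t(p[3]);
}

static void WriteBE32(uint8_t* p, uint32_t v) {
  p[0] = uint8_t(v >> 24);
  p[1] = uint8_t(v >> 16);
  p[2] = uint8_t(v >> 8);
  p[3] = uint8_t(v);
}

// Older entries (version 1) have no flags field; newer ones append it.
size_t Serialize(const MiniHeader& h, uint8_t* buf) {
  WriteBE32(buf, h.mVersion);
  size_t len = 4;
  if (h.mVersion >= 2) {
    WriteBE32(buf + len, h.mFlags);
    len += 4;
  }
  return len;
}

MiniHeader Deserialize(const uint8_t* buf) {
  MiniHeader h;
  h.mVersion = ReadBE32(buf);
  // The flags field exists only from version 2 on; default it otherwise,
  // just as ReadFromBuf() does for old entries.
  h.mFlags = h.mVersion >= 2 ? ReadBE32(buf + 4) : 0;
  return h;
}
```

Decoding fields explicitly instead of memcpy-ing the packed struct keeps entries portable between little- and big-endian machines, which is why the real header goes through the NetworkEndian/BigEndian helpers.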
diff --git a/netwerk/cache2/CacheFileOutputStream.cpp b/netwerk/cache2/CacheFileOutputStream.cpp
new file mode 100644
index 0000000000..9351765038
--- /dev/null
+++ b/netwerk/cache2/CacheFileOutputStream.cpp
@@ -0,0 +1,466 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheFileOutputStream.h"
+
+#include "CacheFile.h"
+#include "CacheEntry.h"
+#include "nsStreamUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/DebugOnly.h"
+#include <algorithm>
+
+namespace mozilla {
+namespace net {
+
+NS_IMPL_ADDREF(CacheFileOutputStream)
+NS_IMETHODIMP_(MozExternalRefCountType)
+CacheFileOutputStream::Release() {
+ MOZ_ASSERT(0 != mRefCnt, "dup release");
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "CacheFileOutputStream");
+
+ if (0 == count) {
+ mRefCnt = 1;
+ {
+ CacheFileAutoLock lock(mFile);
+ mFile->RemoveOutput(this, mStatus);
+ }
+ delete (this);
+ return 0;
+ }
+
+ return count;
+}
+
+NS_INTERFACE_MAP_BEGIN(CacheFileOutputStream)
+ NS_INTERFACE_MAP_ENTRY(nsIOutputStream)
+ NS_INTERFACE_MAP_ENTRY(nsIAsyncOutputStream)
+ NS_INTERFACE_MAP_ENTRY(nsISeekableStream)
+ NS_INTERFACE_MAP_ENTRY(nsITellableStream)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIOutputStream)
+NS_INTERFACE_MAP_END
+
+CacheFileOutputStream::CacheFileOutputStream(
+ CacheFile* aFile, CacheOutputCloseListener* aCloseListener,
+ bool aAlternativeData)
+ : mFile(aFile),
+ mCloseListener(aCloseListener),
+ mPos(0),
+ mClosed(false),
+ mAlternativeData(aAlternativeData),
+ mStatus(NS_OK),
+ mCallbackFlags(0) {
+ LOG(("CacheFileOutputStream::CacheFileOutputStream() [this=%p]", this));
+
+ if (mAlternativeData) {
+ mPos = mFile->mAltDataOffset;
+ }
+}
+
+CacheFileOutputStream::~CacheFileOutputStream() {
+ LOG(("CacheFileOutputStream::~CacheFileOutputStream() [this=%p]", this));
+}
+
+// nsIOutputStream
+NS_IMETHODIMP
+CacheFileOutputStream::Close() {
+ LOG(("CacheFileOutputStream::Close() [this=%p]", this));
+ return CloseWithStatus(NS_OK);
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::Flush() {
+ // TODO do we need to implement flush ???
+ LOG(("CacheFileOutputStream::Flush() [this=%p]", this));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::Write(const char* aBuf, uint32_t aCount,
+ uint32_t* _retval) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileOutputStream::Write() [this=%p, count=%d]", this, aCount));
+
+ if (mClosed) {
+ LOG(
+ ("CacheFileOutputStream::Write() - Stream is closed. [this=%p, "
+ "status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(mStatus)));
+
+ return NS_FAILED(mStatus) ? mStatus : NS_BASE_STREAM_CLOSED;
+ }
+
+ if (!mFile->mSkipSizeCheck &&
+ CacheObserver::EntryIsTooBig(mPos + aCount, !mFile->mMemoryOnly)) {
+ LOG(("CacheFileOutputStream::Write() - Entry is too big. [this=%p]", this));
+
+ CloseWithStatusLocked(NS_ERROR_FILE_TOO_BIG);
+ return NS_ERROR_FILE_TOO_BIG;
+ }
+
+ // We use a 64-bit offset when accessing the file; unfortunately the metadata
+ // offset is only 32-bit, so we cannot handle data bigger than 4GB.
+ if (mPos + aCount > PR_UINT32_MAX) {
+ LOG(("CacheFileOutputStream::Write() - Entry's size exceeds 4GB. [this=%p]",
+ this));
+
+ CloseWithStatusLocked(NS_ERROR_FILE_TOO_BIG);
+ return NS_ERROR_FILE_TOO_BIG;
+ }
+
+ *_retval = aCount;
+
+ while (aCount) {
+ EnsureCorrectChunk(false);
+ if (NS_FAILED(mStatus)) {
+ return mStatus;
+ }
+
+ FillHole();
+ if (NS_FAILED(mStatus)) {
+ return mStatus;
+ }
+
+ uint32_t chunkOffset = mPos - (mPos / kChunkSize) * kChunkSize;
+ uint32_t canWrite = kChunkSize - chunkOffset;
+ uint32_t thisWrite = std::min(static_cast<uint32_t>(canWrite), aCount);
+
+ CacheFileChunkWriteHandle hnd =
+ mChunk->GetWriteHandle(chunkOffset + thisWrite);
+ if (!hnd.Buf()) {
+ CloseWithStatusLocked(NS_ERROR_OUT_OF_MEMORY);
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ memcpy(hnd.Buf() + chunkOffset, aBuf, thisWrite);
+ hnd.UpdateDataSize(chunkOffset, thisWrite);
+
+ mPos += thisWrite;
+ aBuf += thisWrite;
+ aCount -= thisWrite;
+ }
+
+ EnsureCorrectChunk(true);
+
+ LOG(("CacheFileOutputStream::Write() - Wrote %d bytes [this=%p]", *_retval,
+ this));
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::WriteFrom(nsIInputStream* aFromStream, uint32_t aCount,
+ uint32_t* _retval) {
+ LOG(
+ ("CacheFileOutputStream::WriteFrom() - NOT_IMPLEMENTED [this=%p, from=%p"
+ ", count=%d]",
+ this, aFromStream, aCount));
+
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::WriteSegments(nsReadSegmentFun aReader, void* aClosure,
+ uint32_t aCount, uint32_t* _retval) {
+ LOG(
+ ("CacheFileOutputStream::WriteSegments() - NOT_IMPLEMENTED [this=%p, "
+ "count=%d]",
+ this, aCount));
+
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::IsNonBlocking(bool* _retval) {
+ *_retval = false;
+ return NS_OK;
+}
+
+// nsIAsyncOutputStream
+NS_IMETHODIMP
+CacheFileOutputStream::CloseWithStatus(nsresult aStatus) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileOutputStream::CloseWithStatus() [this=%p, aStatus=0x%08" PRIx32
+ "]",
+ this, static_cast<uint32_t>(aStatus)));
+
+ return CloseWithStatusLocked(aStatus);
+}
+
+nsresult CacheFileOutputStream::CloseWithStatusLocked(nsresult aStatus) {
+ LOG(
+ ("CacheFileOutputStream::CloseWithStatusLocked() [this=%p, "
+ "aStatus=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(aStatus)));
+
+ if (mClosed) {
+ MOZ_ASSERT(!mCallback);
+ return NS_OK;
+ }
+
+ mClosed = true;
+ mStatus = NS_FAILED(aStatus) ? aStatus : NS_BASE_STREAM_CLOSED;
+
+ if (mChunk) {
+ ReleaseChunk();
+ }
+
+ if (mCallback) {
+ NotifyListener();
+ }
+
+ mFile->RemoveOutput(this, mStatus);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::AsyncWait(nsIOutputStreamCallback* aCallback,
+ uint32_t aFlags, uint32_t aRequestedCount,
+ nsIEventTarget* aEventTarget) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(
+ ("CacheFileOutputStream::AsyncWait() [this=%p, callback=%p, flags=%d, "
+ "requestedCount=%d, eventTarget=%p]",
+ this, aCallback, aFlags, aRequestedCount, aEventTarget));
+
+ mCallback = aCallback;
+ mCallbackFlags = aFlags;
+ mCallbackTarget = aEventTarget;
+
+ if (!mCallback) return NS_OK;
+
+ // The stream is blocking so it is writable at any time
+ if (mClosed || !(aFlags & WAIT_CLOSURE_ONLY)) NotifyListener();
+
+ return NS_OK;
+}
+
+// nsISeekableStream
+NS_IMETHODIMP
+CacheFileOutputStream::Seek(int32_t whence, int64_t offset) {
+ CacheFileAutoLock lock(mFile);
+
+ LOG(("CacheFileOutputStream::Seek() [this=%p, whence=%d, offset=%" PRId64 "]",
+ this, whence, offset));
+
+ if (mClosed) {
+ LOG(("CacheFileOutputStream::Seek() - Stream is closed. [this=%p]", this));
+ return NS_BASE_STREAM_CLOSED;
+ }
+
+ int64_t newPos = offset;
+ switch (whence) {
+ case NS_SEEK_SET:
+ if (mAlternativeData) {
+ newPos += mFile->mAltDataOffset;
+ }
+ break;
+ case NS_SEEK_CUR:
+ newPos += mPos;
+ break;
+ case NS_SEEK_END:
+ if (mAlternativeData) {
+ newPos += mFile->mDataSize;
+ } else {
+ newPos += mFile->mAltDataOffset;
+ }
+ break;
+ default:
+ NS_ERROR("invalid whence");
+ return NS_ERROR_INVALID_ARG;
+ }
+ mPos = newPos;
+ EnsureCorrectChunk(true);
+
+ LOG(("CacheFileOutputStream::Seek() [this=%p, pos=%" PRId64 "]", this, mPos));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheFileOutputStream::SetEOF() {
+ MOZ_ASSERT(false, "CacheFileOutputStream::SetEOF() not implemented");
+ // Right now we don't use SetEOF(). If we ever need this method, we need
+ // to think about what to do with input streams that already point beyond
+ // the new EOF.
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// nsITellableStream
+NS_IMETHODIMP
+CacheFileOutputStream::Tell(int64_t* _retval) {
+ CacheFileAutoLock lock(mFile);
+
+ if (mClosed) {
+ LOG(("CacheFileOutputStream::Tell() - Stream is closed. [this=%p]", this));
+ return NS_BASE_STREAM_CLOSED;
+ }
+
+ *_retval = mPos;
+
+ if (mAlternativeData) {
+ *_retval -= mFile->mAltDataOffset;
+ }
+
+ LOG(("CacheFileOutputStream::Tell() [this=%p, retval=%" PRId64 "]", this,
+ *_retval));
+ return NS_OK;
+}
+
+// CacheFileChunkListener
+nsresult CacheFileOutputStream::OnChunkRead(nsresult aResult,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileOutputStream::OnChunkRead should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileOutputStream::OnChunkWritten(nsresult aResult,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileOutputStream::OnChunkWritten should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileOutputStream::OnChunkAvailable(nsresult aResult,
+ uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileOutputStream::OnChunkAvailable should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheFileOutputStream::OnChunkUpdated(CacheFileChunk* aChunk) {
+ MOZ_CRASH("CacheFileOutputStream::OnChunkUpdated should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+void CacheFileOutputStream::NotifyCloseListener() {
+ RefPtr<CacheOutputCloseListener> listener;
+ listener.swap(mCloseListener);
+ if (!listener) return;
+
+ listener->OnOutputClosed();
+}
+
+void CacheFileOutputStream::ReleaseChunk() {
+ LOG(("CacheFileOutputStream::ReleaseChunk() [this=%p, idx=%d]", this,
+ mChunk->Index()));
+
+ // If the chunk didn't write any data, we need to remove the hash for this
+ // chunk that was added when the chunk was created in CacheFile::GetChunkLocked.
+ if (mChunk->DataSize() == 0) {
+ // It must be due to a failure; we don't create a new chunk when we don't
+ // have data to write.
+ MOZ_ASSERT(NS_FAILED(mChunk->GetStatus()));
+ mFile->mMetadata->RemoveHash(mChunk->Index());
+ }
+
+ mFile->ReleaseOutsideLock(std::move(mChunk));
+}
+
+void CacheFileOutputStream::EnsureCorrectChunk(bool aReleaseOnly) {
+ mFile->AssertOwnsLock();
+
+ LOG(("CacheFileOutputStream::EnsureCorrectChunk() [this=%p, releaseOnly=%d]",
+ this, aReleaseOnly));
+
+ uint32_t chunkIdx = mPos / kChunkSize;
+
+ if (mChunk) {
+ if (mChunk->Index() == chunkIdx) {
+ // we have a correct chunk
+ LOG(
+ ("CacheFileOutputStream::EnsureCorrectChunk() - Have correct chunk "
+ "[this=%p, idx=%d]",
+ this, chunkIdx));
+
+ return;
+ }
+ ReleaseChunk();
+ }
+
+ if (aReleaseOnly) return;
+
+ nsresult rv;
+ rv = mFile->GetChunkLocked(chunkIdx, CacheFile::WRITER, nullptr,
+ getter_AddRefs(mChunk));
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheFileOutputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
+ "[this=%p, idx=%d, rv=0x%08" PRIx32 "]",
+ this, chunkIdx, static_cast<uint32_t>(rv)));
+ CloseWithStatusLocked(rv);
+ }
+}
+
+void CacheFileOutputStream::FillHole() {
+ mFile->AssertOwnsLock();
+
+ MOZ_ASSERT(mChunk);
+ MOZ_ASSERT(mPos / kChunkSize == mChunk->Index());
+
+ uint32_t pos = mPos - (mPos / kChunkSize) * kChunkSize;
+ if (mChunk->DataSize() >= pos) return;
+
+ LOG(
+ ("CacheFileOutputStream::FillHole() - Zeroing hole in chunk %d, range "
+ "%d-%d [this=%p]",
+ mChunk->Index(), mChunk->DataSize(), pos - 1, this));
+
+ CacheFileChunkWriteHandle hnd = mChunk->GetWriteHandle(pos);
+ if (!hnd.Buf()) {
+ CloseWithStatusLocked(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ uint32_t offset = hnd.DataSize();
+ memset(hnd.Buf() + offset, 0, pos - offset);
+ hnd.UpdateDataSize(offset, pos - offset);
+}
+
+void CacheFileOutputStream::NotifyListener() {
+ mFile->AssertOwnsLock();
+
+ LOG(("CacheFileOutputStream::NotifyListener() [this=%p]", this));
+
+ MOZ_ASSERT(mCallback);
+
+ if (!mCallbackTarget) {
+ mCallbackTarget = CacheFileIOManager::IOTarget();
+ if (!mCallbackTarget) {
+ LOG(
+ ("CacheFileOutputStream::NotifyListener() - Cannot get Cache I/O "
+ "thread! Using main thread for callback."));
+ mCallbackTarget = GetMainThreadEventTarget();
+ }
+ }
+
+ nsCOMPtr<nsIOutputStreamCallback> asyncCallback =
+ NS_NewOutputStreamReadyEvent(mCallback, mCallbackTarget);
+
+ mCallback = nullptr;
+ mCallbackTarget = nullptr;
+
+ asyncCallback->OnOutputStreamReady(this);
+}
+
+// Memory reporting
+
+size_t CacheFileOutputStream::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ // Everything the stream keeps a reference to is already reported somewhere
+ // else.
+ // mFile reports itself.
+ // mChunk reported as part of CacheFile.
+ // mCloseListener is CacheEntry, already reported.
+ // mCallback is usually CacheFile or a class that is reported elsewhere.
+ return mallocSizeOf(this);
+}
+
+} // namespace net
+} // namespace mozilla
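
The Write() loop above never crosses a chunk boundary in a single memcpy: it writes min(kChunkSize - (pos % kChunkSize), remaining) bytes, advances, and switches chunks via EnsureCorrectChunk(). Below is a standalone sketch of that split; the 256 KiB chunk size is an assumption mirroring kChunkSize from CacheFileChunk.h.

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// One piece of a write after splitting on chunk boundaries.
struct WritePiece {
  uint32_t chunkIdx;     // which chunk receives the bytes
  uint32_t chunkOffset;  // offset inside that chunk
  uint32_t length;       // number of bytes written there
};

// Mirrors the arithmetic in CacheFileOutputStream::Write():
//   chunkOffset = pos % kChunkSize
//   thisWrite   = min(kChunkSize - chunkOffset, remaining)
std::vector<WritePiece> SplitWrite(int64_t pos, uint32_t count,
                                   uint32_t kChunkSize = 256 * 1024) {
  std::vector<WritePiece> pieces;
  while (count) {
    uint32_t chunkOffset = static_cast<uint32_t>(pos % kChunkSize);
    uint32_t thisWrite = std::min(kChunkSize - chunkOffset, count);
    pieces.push_back({static_cast<uint32_t>(pos / kChunkSize),
                      chunkOffset, thisWrite});
    pos += thisWrite;
    count -= thisWrite;
  }
  return pieces;
}
```

For example, SplitWrite(262100, 200) yields 44 bytes at the end of chunk 0 followed by 156 bytes at the start of chunk 1.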
diff --git a/netwerk/cache2/CacheFileOutputStream.h b/netwerk/cache2/CacheFileOutputStream.h
new file mode 100644
index 0000000000..4d1fc5e4b7
--- /dev/null
+++ b/netwerk/cache2/CacheFileOutputStream.h
@@ -0,0 +1,70 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileOutputStream__h__
+#define CacheFileOutputStream__h__
+
+#include "nsIAsyncOutputStream.h"
+#include "nsISeekableStream.h"
+#include "nsCOMPtr.h"
+#include "CacheFileChunk.h"
+
+namespace mozilla {
+namespace net {
+
+class CacheFile;
+class CacheOutputCloseListener;
+
+class CacheFileOutputStream : public nsIAsyncOutputStream,
+ public nsISeekableStream,
+ public CacheFileChunkListener {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIOUTPUTSTREAM
+ NS_DECL_NSIASYNCOUTPUTSTREAM
+ NS_DECL_NSISEEKABLESTREAM
+ NS_DECL_NSITELLABLESTREAM
+
+ public:
+ CacheFileOutputStream(CacheFile* aFile,
+ CacheOutputCloseListener* aCloseListener,
+ bool aAlternativeData);
+
+ NS_IMETHOD OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
+ CacheFileChunk* aChunk) override;
+ NS_IMETHOD OnChunkUpdated(CacheFileChunk* aChunk) override;
+
+ void NotifyCloseListener();
+ bool IsAlternativeData() const { return mAlternativeData; };
+
+ // Memory reporting
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ virtual ~CacheFileOutputStream();
+
+ nsresult CloseWithStatusLocked(nsresult aStatus);
+ void ReleaseChunk();
+ void EnsureCorrectChunk(bool aReleaseOnly);
+ void FillHole();
+ void NotifyListener();
+
+ RefPtr<CacheFile> mFile;
+ RefPtr<CacheFileChunk> mChunk;
+ RefPtr<CacheOutputCloseListener> mCloseListener;
+ int64_t mPos;
+ bool mClosed : 1;
+ bool const mAlternativeData : 1;
+ nsresult mStatus;
+
+ nsCOMPtr<nsIOutputStreamCallback> mCallback;
+ uint32_t mCallbackFlags;
+ nsCOMPtr<nsIEventTarget> mCallbackTarget;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
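
Seek() and Tell() in the accompanying .cpp translate between the caller's stream position (which starts at 0) and the position in the underlying file, which is shifted by the alternative-data offset when the stream carries alt-data. A tiny hedged sketch of that translation, with the offset passed in as a plain parameter rather than read from the CacheFile:

```cpp
#include <cstdint>

// For an alternative-data output stream, position 0 as seen by the caller
// maps to altDataOffset in the underlying cache file (cf. Seek()/Tell()).
inline int64_t StreamPosToFilePos(int64_t streamPos, int64_t altDataOffset,
                                  bool alternativeData) {
  return alternativeData ? streamPos + altDataOffset : streamPos;
}

inline int64_t FilePosToStreamPos(int64_t filePos, int64_t altDataOffset,
                                  bool alternativeData) {
  return alternativeData ? filePos - altDataOffset : filePos;
}
```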
diff --git a/netwerk/cache2/CacheFileUtils.cpp b/netwerk/cache2/CacheFileUtils.cpp
new file mode 100644
index 0000000000..7d208094bf
--- /dev/null
+++ b/netwerk/cache2/CacheFileUtils.cpp
@@ -0,0 +1,669 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheIndex.h"
+#include "CacheLog.h"
+#include "CacheFileUtils.h"
+#include "LoadContextInfo.h"
+#include "mozilla/Tokenizer.h"
+#include "mozilla/Telemetry.h"
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include <algorithm>
+#include "mozilla/Unused.h"
+
+namespace mozilla {
+namespace net {
+namespace CacheFileUtils {
+
+// This designates the format for the "alt-data" metadata.
+// When the format changes we need to update the version.
+static uint32_t const kAltDataVersion = 1;
+const char* kAltDataKey = "alt-data";
+
+namespace {
+
+/**
+ * A simple recursive descent parser for the mapping key.
+ */
+class KeyParser : protected Tokenizer {
+ public:
+ explicit KeyParser(nsACString const& aInput)
+ : Tokenizer(aInput),
+ isAnonymous(false)
+ // Initialize the cache key to a zero length by default
+ ,
+ lastTag(0) {}
+
+ private:
+ // Results
+ OriginAttributes originAttribs;
+ bool isAnonymous;
+ nsCString idEnhance;
+ nsDependentCSubstring cacheKey;
+
+ // Keeps the last tag name, used for alphabetical sort checking
+ char lastTag;
+
+ // Classifier for the 'tag' character valid range.
+ // Explicitly using unsigned char as 127 is -1 when signed and it would only
+ // produce a warning.
+ static bool TagChar(const char aChar) {
+ unsigned char c = static_cast<unsigned char>(aChar);
+ return c >= ' ' && c <= '\x7f';
+ }
+
+ bool ParseTags() {
+ // Expects to be at the tag name or at the end
+ if (CheckEOF()) {
+ return true;
+ }
+
+ char tag;
+ if (!ReadChar(&TagChar, &tag)) {
+ return false;
+ }
+
+ // Check the alphabetical order, hard-fail on disobedience
+ if (!(lastTag < tag || tag == ':')) {
+ return false;
+ }
+ lastTag = tag;
+
+ switch (tag) {
+ case ':':
+ // The last possible tag; when present, the cacheKey follows,
+ // not terminated with ',' and with no need to unescape.
+ cacheKey.Rebind(mCursor, mEnd - mCursor);
+ return true;
+ case 'O': {
+ nsAutoCString originSuffix;
+ if (!ParseValue(&originSuffix) ||
+ !originAttribs.PopulateFromSuffix(originSuffix)) {
+ return false;
+ }
+ break;
+ }
+ case 'p':
+ originAttribs.SyncAttributesWithPrivateBrowsing(true);
+ break;
+ case 'b':
+ // Kept so we can still read and understand old-format entries
+ originAttribs.mInIsolatedMozBrowser = true;
+ break;
+ case 'a':
+ isAnonymous = true;
+ break;
+ case 'i': {
+ // Kept so we can still read and understand old-format entries
+ uint32_t deprecatedAppId = 0;
+ if (!ReadInteger(&deprecatedAppId)) {
+ return false; // not a valid 32-bit integer
+ }
+ break;
+ }
+ case '~':
+ if (!ParseValue(&idEnhance)) {
+ return false;
+ }
+ break;
+ default:
+ if (!ParseValue()) { // skip any tag values, optional
+ return false;
+ }
+ break;
+ }
+
+ // We expect a comma after every tag
+ if (!CheckChar(',')) {
+ return false;
+ }
+
+ // Recurse to the next tag
+ return ParseTags();
+ }
+
+ bool ParseValue(nsACString* result = nullptr) {
+ // If at the end, fail since we expect a comma; the value may be empty though
+ if (CheckEOF()) {
+ return false;
+ }
+
+ Token t;
+ while (Next(t)) {
+ if (!Token::Char(',').Equals(t)) {
+ if (result) {
+ result->Append(t.Fragment());
+ }
+ continue;
+ }
+
+ if (CheckChar(',')) {
+ // Two commas in a row: an escaped comma
+ if (result) {
+ result->Append(',');
+ }
+ continue;
+ }
+
+ // We must give the comma back since the upper calls expect it
+ Rollback();
+ return true;
+ }
+
+ return false;
+ }
+
+ public:
+ already_AddRefed<LoadContextInfo> Parse() {
+ RefPtr<LoadContextInfo> info;
+ if (ParseTags()) {
+ info = GetLoadContextInfo(isAnonymous, originAttribs);
+ }
+
+ return info.forget();
+ }
+
+ void URISpec(nsACString& result) { result.Assign(cacheKey); }
+
+ void IdEnhance(nsACString& result) { result.Assign(idEnhance); }
+};
+
+} // namespace
+
+already_AddRefed<nsILoadContextInfo> ParseKey(const nsACString& aKey,
+ nsACString* aIdEnhance,
+ nsACString* aURISpec) {
+ KeyParser parser(aKey);
+ RefPtr<LoadContextInfo> info = parser.Parse();
+
+ if (info) {
+ if (aIdEnhance) parser.IdEnhance(*aIdEnhance);
+ if (aURISpec) parser.URISpec(*aURISpec);
+ }
+
+ return info.forget();
+}
+
+void AppendKeyPrefix(nsILoadContextInfo* aInfo, nsACString& _retval) {
+ /**
+ * This key is used to salt file hashes. When the form of the key changes,
+ * existing cache entries can no longer be found on disk.
+ *
+ * IMPORTANT NOTE:
+ * Keep the attributes list sorted according to their ASCII code.
+ */
+
+ if (!aInfo) {
+ return;
+ }
+
+ OriginAttributes const* oa = aInfo->OriginAttributesPtr();
+ nsAutoCString suffix;
+ oa->CreateSuffix(suffix);
+ if (!suffix.IsEmpty()) {
+ AppendTagWithValue(_retval, 'O', suffix);
+ }
+
+ if (aInfo->IsAnonymous()) {
+ _retval.AppendLiteral("a,");
+ }
+
+ if (aInfo->IsPrivate()) {
+ _retval.AppendLiteral("p,");
+ }
+}
+
+void AppendTagWithValue(nsACString& aTarget, char const aTag,
+ const nsACString& aValue) {
+ aTarget.Append(aTag);
+
+ // First check the value string to save some memory copying
+ // for cases we don't need to escape at all (most likely).
+ if (!aValue.IsEmpty()) {
+ if (!aValue.Contains(',')) {
+ // No need to escape
+ aTarget.Append(aValue);
+ } else {
+ nsAutoCString escapedValue(aValue);
+ escapedValue.ReplaceSubstring(","_ns, ",,"_ns);
+ aTarget.Append(escapedValue);
+ }
+ }
+
+ aTarget.Append(',');
+}
+
+nsresult KeyMatchesLoadContextInfo(const nsACString& aKey,
+ nsILoadContextInfo* aInfo, bool* _retval) {
+ nsCOMPtr<nsILoadContextInfo> info = ParseKey(aKey);
+
+ if (!info) {
+ return NS_ERROR_FAILURE;
+ }
+
+ *_retval = info->Equals(aInfo);
+ return NS_OK;
+}
+
+ValidityPair::ValidityPair(uint32_t aOffset, uint32_t aLen)
+ : mOffset(aOffset), mLen(aLen) {}
+
+bool ValidityPair::CanBeMerged(const ValidityPair& aOther) const {
+ // The pairs can be merged into a single one if the start of one of the pairs
+ // lies anywhere in the validity interval of the other pair or exactly after
+ // its end.
+ return IsInOrFollows(aOther.mOffset) || aOther.IsInOrFollows(mOffset);
+}
+
+bool ValidityPair::IsInOrFollows(uint32_t aOffset) const {
+ return mOffset <= aOffset && mOffset + mLen >= aOffset;
+}
+
+bool ValidityPair::LessThan(const ValidityPair& aOther) const {
+ if (mOffset < aOther.mOffset) {
+ return true;
+ }
+
+ if (mOffset == aOther.mOffset && mLen < aOther.mLen) {
+ return true;
+ }
+
+ return false;
+}
+
+void ValidityPair::Merge(const ValidityPair& aOther) {
+ MOZ_ASSERT(CanBeMerged(aOther));
+
+ uint32_t offset = std::min(mOffset, aOther.mOffset);
+ uint32_t end = std::max(mOffset + mLen, aOther.mOffset + aOther.mLen);
+
+ mOffset = offset;
+ mLen = end - offset;
+}
+
+void ValidityMap::Log() const {
+ LOG(("ValidityMap::Log() - number of pairs: %zu", mMap.Length()));
+ for (uint32_t i = 0; i < mMap.Length(); i++) {
+ LOG((" (%u, %u)", mMap[i].Offset() + 0, mMap[i].Len() + 0));
+ }
+}
+
+uint32_t ValidityMap::Length() const { return mMap.Length(); }
+
+void ValidityMap::AddPair(uint32_t aOffset, uint32_t aLen) {
+ ValidityPair pair(aOffset, aLen);
+
+ if (mMap.Length() == 0) {
+ mMap.AppendElement(pair);
+ return;
+ }
+
+ // Find out where to place this pair in the map; it can overlap with at most
+ // one preceding pair and with any number of subsequent pairs.
+ uint32_t pos = 0;
+ for (pos = mMap.Length(); pos > 0;) {
+ --pos;
+
+ if (mMap[pos].LessThan(pair)) {
+ // The new pair should be either inserted after pos or merged with it.
+ if (mMap[pos].CanBeMerged(pair)) {
+ // Merge with the preceding pair
+ mMap[pos].Merge(pair);
+ } else {
+ // They don't overlap; the new pair must be placed after the element at pos
+ ++pos;
+ if (pos == mMap.Length()) {
+ mMap.AppendElement(pair);
+ } else {
+ mMap.InsertElementAt(pos, pair);
+ }
+ }
+
+ break;
+ }
+
+ if (pos == 0) {
+ // The new pair should be placed in front of all existing pairs.
+ mMap.InsertElementAt(0, pair);
+ }
+ }
+
+ // pos now points to the merged or inserted pair; check whether it overlaps
+ // with subsequent pairs.
+ while (pos + 1 < mMap.Length()) {
+ if (mMap[pos].CanBeMerged(mMap[pos + 1])) {
+ mMap[pos].Merge(mMap[pos + 1]);
+ mMap.RemoveElementAt(pos + 1);
+ } else {
+ break;
+ }
+ }
+}
+
+void ValidityMap::Clear() { mMap.Clear(); }
+
+size_t ValidityMap::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mMap.ShallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+ValidityPair& ValidityMap::operator[](uint32_t aIdx) {
+ return mMap.ElementAt(aIdx);
+}
+
+StaticMutex DetailedCacheHitTelemetry::sLock;
+uint32_t DetailedCacheHitTelemetry::sRecordCnt = 0;
+DetailedCacheHitTelemetry::HitRate
+ DetailedCacheHitTelemetry::sHRStats[kNumOfRanges];
+
+DetailedCacheHitTelemetry::HitRate::HitRate() { Reset(); }
+
+void DetailedCacheHitTelemetry::HitRate::AddRecord(ERecType aType) {
+ if (aType == HIT) {
+ ++mHitCnt;
+ } else {
+ ++mMissCnt;
+ }
+}
+
+uint32_t DetailedCacheHitTelemetry::HitRate::GetHitRateBucket(
+ uint32_t aNumOfBuckets) const {
+ uint32_t bucketIdx = (aNumOfBuckets * mHitCnt) / (mHitCnt + mMissCnt);
+ if (bucketIdx ==
+ aNumOfBuckets) { // make sure 100% falls into the last bucket
+ --bucketIdx;
+ }
+
+ return bucketIdx;
+}
+
+uint32_t DetailedCacheHitTelemetry::HitRate::Count() {
+ return mHitCnt + mMissCnt;
+}
+
+void DetailedCacheHitTelemetry::HitRate::Reset() {
+ mHitCnt = 0;
+ mMissCnt = 0;
+}
+
+// static
+void DetailedCacheHitTelemetry::AddRecord(ERecType aType,
+ TimeStamp aLoadStart) {
+ bool isUpToDate = false;
+ CacheIndex::IsUpToDate(&isUpToDate);
+ if (!isUpToDate) {
+ // Ignore the record when the entry file count might be incorrect
+ return;
+ }
+
+ uint32_t entryCount;
+ nsresult rv = CacheIndex::GetEntryFileCount(&entryCount);
+ if (NS_FAILED(rv)) {
+ return;
+ }
+
+ uint32_t rangeIdx = entryCount / kRangeSize;
+ if (rangeIdx >= kNumOfRanges) { // The last range has no upper limit.
+ rangeIdx = kNumOfRanges - 1;
+ }
+
+ uint32_t hitMissValue = 2 * rangeIdx; // 2 values per range
+ if (aType == MISS) { // The order is HIT, MISS
+ ++hitMissValue;
+ }
+
+ StaticMutexAutoLock lock(sLock);
+
+ if (aType == MISS) {
+ mozilla::Telemetry::AccumulateTimeDelta(
+ mozilla::Telemetry::NETWORK_CACHE_V2_MISS_TIME_MS, aLoadStart);
+ } else {
+ mozilla::Telemetry::AccumulateTimeDelta(
+ mozilla::Telemetry::NETWORK_CACHE_V2_HIT_TIME_MS, aLoadStart);
+ }
+
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_HIT_MISS_STAT_PER_CACHE_SIZE,
+ hitMissValue);
+
+ sHRStats[rangeIdx].AddRecord(aType);
+ ++sRecordCnt;
+
+ if (sRecordCnt < kTotalSamplesReportLimit) {
+ return;
+ }
+
+ sRecordCnt = 0;
+
+ for (uint32_t i = 0; i < kNumOfRanges; ++i) {
+ if (sHRStats[i].Count() >= kHitRateSamplesReportLimit) {
+ // The telemetry enums are grouped by buckets as follows:
+ // Telemetry value : 0,1,2,3, ... ,19,20,21,22, ... ,398,399
+ // Hit rate bucket : 0,0,0,0, ... , 0, 1, 1, 1, ... , 19, 19
+ // Cache size range: 0,1,2,3, ... ,19, 0, 1, 2, ... , 18, 19
+ uint32_t bucketOffset =
+ sHRStats[i].GetHitRateBucket(kHitRateBuckets) * kNumOfRanges;
+
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_HIT_RATE_PER_CACHE_SIZE,
+ bucketOffset + i);
+ sHRStats[i].Reset();
+ }
+ }
+}
+
+StaticMutex CachePerfStats::sLock;
+CachePerfStats::PerfData CachePerfStats::sData[CachePerfStats::LAST];
+uint32_t CachePerfStats::sCacheSlowCnt = 0;
+uint32_t CachePerfStats::sCacheNotSlowCnt = 0;
+
+CachePerfStats::MMA::MMA(uint32_t aTotalWeight, bool aFilter)
+ : mSum(0), mSumSq(0), mCnt(0), mWeight(aTotalWeight), mFilter(aFilter) {}
+
+void CachePerfStats::MMA::AddValue(uint32_t aValue) {
+ if (mFilter) {
+ // Filter high spikes
+ uint32_t avg = GetAverage();
+ uint32_t stddev = GetStdDev();
+ uint32_t maxdiff = avg + (3 * stddev);
+ if (avg && aValue > avg + maxdiff) {
+ return;
+ }
+ }
+
+ if (mCnt < mWeight) {
+ // Compute arithmetic average until we have at least mWeight values
+ CheckedInt<uint64_t> newSumSq = CheckedInt<uint64_t>(aValue) * aValue;
+ newSumSq += mSumSq;
+ if (!newSumSq.isValid()) {
+ return; // ignore this value
+ }
+ mSumSq = newSumSq.value();
+ mSum += aValue;
+ ++mCnt;
+ } else {
+ CheckedInt<uint64_t> newSumSq = mSumSq - mSumSq / mCnt;
+ newSumSq += static_cast<uint64_t>(aValue) * aValue;
+ if (!newSumSq.isValid()) {
+ return; // ignore this value
+ }
+ mSumSq = newSumSq.value();
+
+ // Compute modified moving average for more values:
+ // newAvg = ((weight - 1) * oldAvg + newValue) / weight
+ mSum -= GetAverage();
+ mSum += aValue;
+ }
+}
+
+uint32_t CachePerfStats::MMA::GetAverage() {
+ if (mCnt == 0) {
+ return 0;
+ }
+
+ return mSum / mCnt;
+}
+
+uint32_t CachePerfStats::MMA::GetStdDev() {
+ if (mCnt == 0) {
+ return 0;
+ }
+
+ uint32_t avg = GetAverage();
+ uint64_t avgSq = static_cast<uint64_t>(avg) * avg;
+ uint64_t variance = mSumSq / mCnt;
+ if (variance < avgSq) {
+ // Due to rounding errors when using an integer data type, it can happen that
+ // the average of the squares of the values is smaller than the square of the
+ // average of the values. In this case fix mSumSq.
+ variance = avgSq;
+ mSumSq = variance * mCnt;
+ }
+
+ variance -= avgSq;
+ return sqrt(static_cast<double>(variance));
+}
+
+CachePerfStats::PerfData::PerfData()
+ : mFilteredAvg(50, true), mShortAvg(3, false) {}
+
+void CachePerfStats::PerfData::AddValue(uint32_t aValue, bool aShortOnly) {
+ if (!aShortOnly) {
+ mFilteredAvg.AddValue(aValue);
+ }
+ mShortAvg.AddValue(aValue);
+}
+
+uint32_t CachePerfStats::PerfData::GetAverage(bool aFiltered) {
+ return aFiltered ? mFilteredAvg.GetAverage() : mShortAvg.GetAverage();
+}
+
+uint32_t CachePerfStats::PerfData::GetStdDev(bool aFiltered) {
+ return aFiltered ? mFilteredAvg.GetStdDev() : mShortAvg.GetStdDev();
+}
+
+// static
+void CachePerfStats::AddValue(EDataType aType, uint32_t aValue,
+ bool aShortOnly) {
+ StaticMutexAutoLock lock(sLock);
+ sData[aType].AddValue(aValue, aShortOnly);
+}
+
+// static
+uint32_t CachePerfStats::GetAverage(EDataType aType, bool aFiltered) {
+ StaticMutexAutoLock lock(sLock);
+ return sData[aType].GetAverage(aFiltered);
+}
+
+// static
+uint32_t CachePerfStats::GetStdDev(EDataType aType, bool aFiltered) {
+ StaticMutexAutoLock lock(sLock);
+ return sData[aType].GetStdDev(aFiltered);
+}
+
+// static
+bool CachePerfStats::IsCacheSlow() {
+ StaticMutexAutoLock lock(sLock);
+
+ // Compare mShortAvg with mFilteredAvg to find out whether the cache is
+ // getting slower. Use only data about single IO operations because
+ // ENTRY_OPEN can be affected by more factors than a slow disk.
+ for (uint32_t i = 0; i < ENTRY_OPEN; ++i) {
+ if (i == IO_WRITE) {
+ // Skip this data type. IsCacheSlow is used for determining cache slowness
+ // when opening entries. Writes have low priority and it's normal that
+ // they are delayed a lot, but this doesn't necessarily affect opening
+ // cache entries.
+ continue;
+ }
+
+ uint32_t avgLong = sData[i].GetAverage(true);
+ if (avgLong == 0) {
+ // We have no perf data yet, skip this data type.
+ continue;
+ }
+ uint32_t avgShort = sData[i].GetAverage(false);
+ uint32_t stddevLong = sData[i].GetStdDev(true);
+ uint32_t maxdiff = avgLong + (3 * stddevLong);
+
+ if (avgShort > avgLong + maxdiff) {
+ LOG(
+ ("CachePerfStats::IsCacheSlow() - result is slow based on perf "
+ "type %u [avgShort=%u, avgLong=%u, stddevLong=%u]",
+ i, avgShort, avgLong, stddevLong));
+ ++sCacheSlowCnt;
+ return true;
+ }
+ }
+
+ ++sCacheNotSlowCnt;
+ return false;
+}
+
+// static
+void CachePerfStats::GetSlowStats(uint32_t* aSlow, uint32_t* aNotSlow) {
+ *aSlow = sCacheSlowCnt;
+ *aNotSlow = sCacheNotSlowCnt;
+}
+
+void FreeBuffer(void* aBuf) {
+#ifndef NS_FREE_PERMANENT_DATA
+ if (CacheObserver::ShuttingDown()) {
+ return;
+ }
+#endif
+
+ free(aBuf);
+}
+
+nsresult ParseAlternativeDataInfo(const char* aInfo, int64_t* _offset,
+ nsACString* _type) {
+ // The format is: "1;12345,javascript/binary"
+ // <version>;<offset>,<type>
+ mozilla::Tokenizer p(aInfo, nullptr, "/");
+ uint32_t altDataVersion = 0;
+ int64_t altDataOffset = -1;
+
+ // Bail out if the metadata format has an unexpected version number.
+ if (!p.ReadInteger(&altDataVersion) || altDataVersion != kAltDataVersion) {
+ LOG(
+ ("ParseAlternativeDataInfo() - altDataVersion=%u, "
+ "expectedVersion=%u",
+ altDataVersion, kAltDataVersion));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (!p.CheckChar(';') || !p.ReadInteger(&altDataOffset) ||
+ !p.CheckChar(',')) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // The requested alt-data representation is not available
+ if (altDataOffset < 0) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (_offset) {
+ *_offset = altDataOffset;
+ }
+
+ if (_type) {
+ mozilla::Unused << p.ReadUntil(Tokenizer::Token::EndOfFile(), *_type);
+ }
+
+ return NS_OK;
+}
+
+void BuildAlternativeDataInfo(const char* aInfo, int64_t aOffset,
+ nsACString& _retval) {
+ _retval.Truncate();
+ _retval.AppendInt(kAltDataVersion);
+ _retval.Append(';');
+ _retval.AppendInt(aOffset);
+ _retval.Append(',');
+ _retval.Append(aInfo);
+}
+
+} // namespace CacheFileUtils
+} // namespace net
+} // namespace mozilla
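
KeyParser and AppendTagWithValue()/AppendKeyPrefix() above define the context-key syntax: one-character tags in ascending ASCII order, each value terminated by ',', literal commas escaped by doubling, and a final ':' tag after which the raw URI spec follows unescaped. Below is a standalone writer-side sketch; the example key shape in the comment is illustrative, not output captured from the real code paths.

```cpp
#include <string>

// Mirrors AppendTagWithValue(): tag char, value with ',' doubled, trailing ','.
void AppendTag(std::string& out, char tag, const std::string& value) {
  out.push_back(tag);
  for (char c : value) {
    out.push_back(c);
    if (c == ',') out.push_back(',');  // escape a literal comma by doubling it
  }
  out.push_back(',');
}

// Builds something shaped like "O^userContextId=2,a,~js,:https://example.com/"
// (an illustrative key). Tags must stay in ascending ASCII order so that
// KeyParser's ordering check accepts them; ':' is special-cased as the last tag.
std::string BuildKey(const std::string& originSuffix, bool anonymous,
                     const std::string& idEnhance, const std::string& uriSpec) {
  std::string key;
  if (!originSuffix.empty()) AppendTag(key, 'O', originSuffix);
  if (anonymous) key.append("a,");
  if (!idEnhance.empty()) AppendTag(key, '~', idEnhance);
  key.push_back(':');  // the URI spec follows verbatim, unescaped
  key.append(uriSpec);
  return key;
}
```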
diff --git a/netwerk/cache2/CacheFileUtils.h b/netwerk/cache2/CacheFileUtils.h
new file mode 100644
index 0000000000..57e251cf14
--- /dev/null
+++ b/netwerk/cache2/CacheFileUtils.h
@@ -0,0 +1,224 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheFileUtils__h__
+#define CacheFileUtils__h__
+
+#include "nsError.h"
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include "nsTArray.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/TimeStamp.h"
+
+class nsILoadContextInfo;
+
+namespace mozilla {
+namespace net {
+namespace CacheFileUtils {
+
+extern const char* kAltDataKey;
+
+already_AddRefed<nsILoadContextInfo> ParseKey(const nsACString& aKey,
+ nsACString* aIdEnhance = nullptr,
+ nsACString* aURISpec = nullptr);
+
+void AppendKeyPrefix(nsILoadContextInfo* aInfo, nsACString& _retval);
+
+void AppendTagWithValue(nsACString& aTarget, char const aTag,
+ const nsACString& aValue);
+
+nsresult KeyMatchesLoadContextInfo(const nsACString& aKey,
+ nsILoadContextInfo* aInfo, bool* _retval);
+
+class ValidityPair {
+ public:
+ ValidityPair(uint32_t aOffset, uint32_t aLen);
+
+ ValidityPair& operator=(const ValidityPair& aOther) = default;
+
+ // Returns true when two pairs can be merged, i.e. they do overlap or the one
+ // ends exactly where the other begins.
+ bool CanBeMerged(const ValidityPair& aOther) const;
+
+ // Returns true when aOffset is placed anywhere in the validity interval or
+ // exactly after its end.
+ bool IsInOrFollows(uint32_t aOffset) const;
+
+ // Returns true when this pair has lower offset than the other pair. In case
+ // both pairs have the same offset it returns true when this pair has a
+ // shorter length.
+ bool LessThan(const ValidityPair& aOther) const;
+
+ // Merges two pairs into one.
+ void Merge(const ValidityPair& aOther);
+
+ uint32_t Offset() const { return mOffset; }
+ uint32_t Len() const { return mLen; }
+
+ private:
+ uint32_t mOffset;
+ uint32_t mLen;
+};
+
+class ValidityMap {
+ public:
+ // Prints pairs in the map into log.
+ void Log() const;
+
+ // Returns number of pairs in the map.
+ uint32_t Length() const;
+
+ // Adds a new pair to the map. It keeps the pairs ordered and merges pairs
+ // when possible.
+ void AddPair(uint32_t aOffset, uint32_t aLen);
+
+ // Removes all pairs from the map.
+ void Clear();
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ ValidityPair& operator[](uint32_t aIdx);
+
+ private:
+ nsTArray<ValidityPair> mMap;
+};
+
+class DetailedCacheHitTelemetry {
+ public:
+ enum ERecType { HIT = 0, MISS = 1 };
+
+ static void AddRecord(ERecType aType, TimeStamp aLoadStart);
+
+ private:
+ class HitRate {
+ public:
+ HitRate();
+
+ void AddRecord(ERecType aType);
+ // Returns the bucket index that the current hit rate falls into according
+ // to the given aNumOfBuckets.
+ uint32_t GetHitRateBucket(uint32_t aNumOfBuckets) const;
+ uint32_t Count();
+ void Reset();
+
+ private:
+ uint32_t mHitCnt;
+ uint32_t mMissCnt;
+ };
+
+ // Group the hit and miss statistics by cache file count ranges (0-5000,
+ // 5001-10000, ... , 95001- )
+ static const uint32_t kRangeSize = 5000;
+ static const uint32_t kNumOfRanges = 20;
+
+ // Use the same ranges to report an average hit rate. Report the hit rates
+ // (and reset the counters) every kTotalSamplesReportLimit samples.
+ static const uint32_t kTotalSamplesReportLimit = 1000;
+
+ // Report hit rate for a given cache size range only if it contains
+ // kHitRateSamplesReportLimit or more samples. This limit should avoid
+ // reporting biased statistics.
+ static const uint32_t kHitRateSamplesReportLimit = 500;
+
+ // All hit rates are accumulated in a single telemetry probe, so to use
+ // a sane number of enumerated values the hit rate is divided into buckets
+ // instead of using a percent value. This constant defines number of buckets
+ // that we divide the hit rates into. I.e. we'll report ranges 0%-5%, 5%-10%,
+ // 10%-15%, ...
+ static const uint32_t kHitRateBuckets = 20;
+
+ // Protects sRecordCnt, sHRStats and Telemetry::Accumulate() calls.
+ static StaticMutex sLock;
+
+ // Counter of samples that is compared against kTotalSamplesReportLimit.
+ static uint32_t sRecordCnt;
+
+ // Hit rate statistics for every cache size range.
+ static HitRate sHRStats[kNumOfRanges];
+};
+
+class CachePerfStats {
+ public:
+ // perfStatTypes in displayRcwnStats() in toolkit/content/aboutNetworking.js
+ // must match EDataType
+ enum EDataType {
+ IO_OPEN = 0,
+ IO_READ = 1,
+ IO_WRITE = 2,
+ ENTRY_OPEN = 3,
+ LAST = 4
+ };
+
+ static void AddValue(EDataType aType, uint32_t aValue, bool aShortOnly);
+ static uint32_t GetAverage(EDataType aType, bool aFiltered);
+ static uint32_t GetStdDev(EDataType aType, bool aFiltered);
+ static bool IsCacheSlow();
+ static void GetSlowStats(uint32_t* aSlow, uint32_t* aNotSlow);
+
+ private:
+ // This class computes an average and standard deviation; it returns the
+ // arithmetic avg and stddev until the total number of values reaches mWeight.
+ // Then it returns a modified moving average computed as follows:
+ //
+ // avg = (1-a)*avg + a*value
+ // avgsq = (1-a)*avgsq + a*value^2
+ // stddev = sqrt(avgsq - avg^2)
+ //
+ // where
+ // avgsq is an average of the square of the values
+ // a = 1 / weight
+ class MMA {
+ public:
+ MMA(uint32_t aTotalWeight, bool aFilter);
+
+ void AddValue(uint32_t aValue);
+ uint32_t GetAverage();
+ uint32_t GetStdDev();
+
+ private:
+ uint64_t mSum;
+ uint64_t mSumSq;
+ uint32_t mCnt;
+ uint32_t mWeight;
+ bool mFilter;
+ };
+
+ class PerfData {
+ public:
+ PerfData();
+
+ void AddValue(uint32_t aValue, bool aShortOnly);
+ uint32_t GetAverage(bool aFiltered);
+ uint32_t GetStdDev(bool aFiltered);
+
+ private:
+ // Contains filtered data (i.e. times when we think the cache and disk were
+ // not busy) over a longer period.
+ MMA mFilteredAvg;
+
+ // Contains an unfiltered average of a few recent values.
+ MMA mShortAvg;
+ };
+
+ static StaticMutex sLock;
+
+ static PerfData sData[LAST];
+ static uint32_t sCacheSlowCnt;
+ static uint32_t sCacheNotSlowCnt;
+};
+
+void FreeBuffer(void* aBuf);
+
+nsresult ParseAlternativeDataInfo(const char* aInfo, int64_t* _offset,
+ nsACString* _type);
+
+void BuildAlternativeDataInfo(const char* aInfo, int64_t aOffset,
+ nsACString& _retval);
+
+} // namespace CacheFileUtils
+} // namespace net
+} // namespace mozilla
+
+#endif
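
The comment block above CachePerfStats::MMA describes a modified moving average: a plain arithmetic mean until `weight` samples have arrived, then avg <- ((weight - 1) * avg + value) / weight, with a matching average of squares for the standard deviation. The following is a self-contained sketch of that recurrence, without the CheckedInt overflow handling and spike filtering of the real class.

```cpp
#include <cmath>
#include <cstdint>

// Simplified modified moving average, cf. CachePerfStats::MMA.
class MovingAverage {
 public:
  explicit MovingAverage(uint32_t weight) : mWeight(weight) {}

  void AddValue(uint32_t value) {
    if (mCnt < mWeight) {
      // Arithmetic average until we have `weight` samples.
      mSum += value;
      mSumSq += uint64_t(value) * value;
      ++mCnt;
    } else {
      // avg   <- ((weight - 1) * avg   + value)   / weight
      // avgsq <- ((weight - 1) * avgsq + value^2) / weight
      // implemented, as in the real class, by keeping sums of size `weight`.
      mSum = mSum - Average() + value;
      mSumSq = mSumSq - mSumSq / mCnt + uint64_t(value) * value;
    }
  }

  uint32_t Average() const { return mCnt ? uint32_t(mSum / mCnt) : 0; }

  uint32_t StdDev() const {
    if (!mCnt) return 0;
    uint64_t avg = Average();
    uint64_t avgSq = mSumSq / mCnt;
    uint64_t variance = avgSq > avg * avg ? avgSq - avg * avg : 0;
    return uint32_t(std::sqrt(double(variance)));
  }

 private:
  uint64_t mSum = 0;
  uint64_t mSumSq = 0;
  uint32_t mCnt = 0;
  uint32_t mWeight;
};
```

With weight 3 (as used for mShortAvg) the average tracks recent samples closely; with weight 50 (mFilteredAvg) it changes slowly, which is the contrast IsCacheSlow() relies on.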
diff --git a/netwerk/cache2/CacheHashUtils.cpp b/netwerk/cache2/CacheHashUtils.cpp
new file mode 100644
index 0000000000..1a33b00603
--- /dev/null
+++ b/netwerk/cache2/CacheHashUtils.cpp
@@ -0,0 +1,235 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheHashUtils.h"
+
+#include "mozilla/BasePrincipal.h"
+#include "plstr.h"
+
+namespace mozilla {
+namespace net {
+
+/**
+ * CacheHash::Hash(const char * key, uint32_t initval)
+ *
+ * See http://burtleburtle.net/bob/hash/evahash.html for more information
+ * about this hash function.
+ *
+ * This algorithm is used to check the data integrity.
+ */
+
+static inline void hashmix(uint32_t& a, uint32_t& b, uint32_t& c) {
+ a -= b;
+ a -= c;
+ a ^= (c >> 13);
+ b -= c;
+ b -= a;
+ b ^= (a << 8);
+ c -= a;
+ c -= b;
+ c ^= (b >> 13);
+ a -= b;
+ a -= c;
+ a ^= (c >> 12);
+ b -= c;
+ b -= a;
+ b ^= (a << 16);
+ c -= a;
+ c -= b;
+ c ^= (b >> 5);
+ a -= b;
+ a -= c;
+ a ^= (c >> 3);
+ b -= c;
+ b -= a;
+ b ^= (a << 10);
+ c -= a;
+ c -= b;
+ c ^= (b >> 15);
+}
+
+CacheHash::Hash32_t CacheHash::Hash(const char* aData, uint32_t aSize,
+ uint32_t aInitval) {
+ const uint8_t* k = reinterpret_cast<const uint8_t*>(aData);
+ uint32_t a, b, c, len;
+
+ /* Set up the internal state */
+ len = aSize;
+ a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
+ c = aInitval; /* variable initialization of internal state */
+
+ /*---------------------------------------- handle most of the key */
+ while (len >= 12) {
+ a += k[0] + (uint32_t(k[1]) << 8) + (uint32_t(k[2]) << 16) +
+ (uint32_t(k[3]) << 24);
+ b += k[4] + (uint32_t(k[5]) << 8) + (uint32_t(k[6]) << 16) +
+ (uint32_t(k[7]) << 24);
+ c += k[8] + (uint32_t(k[9]) << 8) + (uint32_t(k[10]) << 16) +
+ (uint32_t(k[11]) << 24);
+ hashmix(a, b, c);
+ k += 12;
+ len -= 12;
+ }
+
+ /*------------------------------------- handle the last 11 bytes */
+ c += aSize;
+ switch (len) { /* all the case statements fall through */
+ case 11:
+ c += (uint32_t(k[10]) << 24);
+ [[fallthrough]];
+ case 10:
+ c += (uint32_t(k[9]) << 16);
+ [[fallthrough]];
+ case 9:
+ c += (uint32_t(k[8]) << 8);
+ [[fallthrough]];
+ /* the low-order byte of c is reserved for the length */
+ case 8:
+ b += (uint32_t(k[7]) << 24);
+ [[fallthrough]];
+ case 7:
+ b += (uint32_t(k[6]) << 16);
+ [[fallthrough]];
+ case 6:
+ b += (uint32_t(k[5]) << 8);
+ [[fallthrough]];
+ case 5:
+ b += k[4];
+ [[fallthrough]];
+ case 4:
+ a += (uint32_t(k[3]) << 24);
+ [[fallthrough]];
+ case 3:
+ a += (uint32_t(k[2]) << 16);
+ [[fallthrough]];
+ case 2:
+ a += (uint32_t(k[1]) << 8);
+ [[fallthrough]];
+ case 1:
+ a += k[0];
+ /* case 0: nothing left to add */
+ }
+ hashmix(a, b, c);
+
+ return c;
+}
+
+CacheHash::Hash16_t CacheHash::Hash16(const char* aData, uint32_t aSize,
+ uint32_t aInitval) {
+ Hash32_t hash = Hash(aData, aSize, aInitval);
+ return (hash & 0xFFFF);
+}
+
+NS_IMPL_ISUPPORTS0(CacheHash)
+
+CacheHash::CacheHash(uint32_t aInitval)
+ : mA(0x9e3779b9),
+ mB(0x9e3779b9),
+ mC(aInitval),
+ mPos(0),
+ mBuf(0),
+ mBufPos(0),
+ mLength(0),
+ mFinalized(false) {}
+
+void CacheHash::Feed(uint32_t aVal, uint8_t aLen) {
+ switch (mPos) {
+ case 0:
+ mA += aVal;
+ mPos++;
+ break;
+
+ case 1:
+ mB += aVal;
+ mPos++;
+ break;
+
+ case 2:
+ mPos = 0;
+ if (aLen == 4) {
+ mC += aVal;
+ hashmix(mA, mB, mC);
+ } else {
+ mC += aVal << 8;
+ }
+ }
+
+ mLength += aLen;
+}
+
+void CacheHash::Update(const char* aData, uint32_t aLen) {
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(aData);
+
+ MOZ_ASSERT(!mFinalized);
+
+ if (mBufPos) {
+ while (mBufPos != 4 && aLen) {
+ mBuf += uint32_t(*data) << 8 * mBufPos;
+ data++;
+ mBufPos++;
+ aLen--;
+ }
+
+ if (mBufPos == 4) {
+ mBufPos = 0;
+ Feed(mBuf);
+ mBuf = 0;
+ }
+ }
+
+ if (!aLen) return;
+
+ while (aLen >= 4) {
+ Feed(data[0] + (uint32_t(data[1]) << 8) + (uint32_t(data[2]) << 16) +
+ (uint32_t(data[3]) << 24));
+ data += 4;
+ aLen -= 4;
+ }
+
+ switch (aLen) {
+ case 3:
+ mBuf += data[2] << 16;
+ [[fallthrough]];
+ case 2:
+ mBuf += data[1] << 8;
+ [[fallthrough]];
+ case 1:
+ mBuf += data[0];
+ }
+
+ mBufPos = aLen;
+}
+
+CacheHash::Hash32_t CacheHash::GetHash() {
+ if (!mFinalized) {
+ if (mBufPos) {
+ Feed(mBuf, mBufPos);
+ }
+ mC += mLength;
+ hashmix(mA, mB, mC);
+ mFinalized = true;
+ }
+
+ return mC;
+}
+
+CacheHash::Hash16_t CacheHash::GetHash16() {
+ Hash32_t hash = GetHash();
+ return (hash & 0xFFFF);
+}
+
+OriginAttrsHash GetOriginAttrsHash(const mozilla::OriginAttributes& aOA) {
+ nsAutoCString suffix;
+ aOA.CreateSuffix(suffix);
+
+ SHA1Sum sum;
+ SHA1Sum::Hash hash;
+ sum.update(suffix.BeginReading(), suffix.Length());
+ sum.finish(hash);
+
+ return BigEndian::readUint64(&hash);
+}
+
+} // namespace net
+} // namespace mozilla
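
As a usage sketch (illustrative only, not part of the patch; the function name is hypothetical and the surrounding cache2 headers, mozilla/Assertions.h, and the mozilla::net namespace are assumed), the incremental CacheHash object is designed to produce the same 32-bit value as the one-shot CacheHash::Hash() when fed the same bytes in pieces:

void HashUsageSketch() {
  const char data[] = "netwerk/cache2 example key";
  uint32_t len = sizeof(data) - 1;

  // One-shot hashing of the whole buffer.
  CacheHash::Hash32_t oneShot = CacheHash::Hash(data, len);

  // Incremental hashing of the same bytes in two arbitrary chunks.
  RefPtr<CacheHash> hasher = new CacheHash();
  hasher->Update(data, 10);
  hasher->Update(data + 10, len - 10);
  CacheHash::Hash32_t incremental = hasher->GetHash();

  MOZ_ASSERT(oneShot == incremental);
}
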
diff --git a/netwerk/cache2/CacheHashUtils.h b/netwerk/cache2/CacheHashUtils.h
new file mode 100644
index 0000000000..574e0aff0a
--- /dev/null
+++ b/netwerk/cache2/CacheHashUtils.h
@@ -0,0 +1,67 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheHashUtils__h__
+#define CacheHashUtils__h__
+
+#include "nsISupports.h"
+#include "mozilla/Types.h"
+#include "prnetdb.h"
+#include "nsPrintfCString.h"
+
+#define LOGSHA1(x) \
+ PR_htonl((reinterpret_cast<const uint32_t*>(x))[0]), \
+ PR_htonl((reinterpret_cast<const uint32_t*>(x))[1]), \
+ PR_htonl((reinterpret_cast<const uint32_t*>(x))[2]), \
+ PR_htonl((reinterpret_cast<const uint32_t*>(x))[3]), \
+ PR_htonl((reinterpret_cast<const uint32_t*>(x))[4])
+
+#define SHA1STRING(x) \
+ (nsPrintfCString("%08x%08x%08x%08x%08x", LOGSHA1(x)).get())
+
+namespace mozilla {
+
+class OriginAttributes;
+
+namespace net {
+
+class CacheHash : public nsISupports {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ typedef uint16_t Hash16_t;
+ typedef uint32_t Hash32_t;
+
+ static Hash32_t Hash(const char* aData, uint32_t aSize,
+ uint32_t aInitval = 0);
+ static Hash16_t Hash16(const char* aData, uint32_t aSize,
+ uint32_t aInitval = 0);
+
+ explicit CacheHash(uint32_t aInitval = 0);
+
+ void Update(const char* aData, uint32_t aLen);
+ Hash32_t GetHash();
+ Hash16_t GetHash16();
+
+ private:
+ virtual ~CacheHash() = default;
+
+ void Feed(uint32_t aVal, uint8_t aLen = 4);
+
+ uint32_t mA, mB, mC;
+ uint8_t mPos;
+ uint32_t mBuf;
+ uint8_t mBufPos;
+ uint32_t mLength;
+ bool mFinalized;
+};
+
+typedef uint64_t OriginAttrsHash;
+
+OriginAttrsHash GetOriginAttrsHash(const mozilla::OriginAttributes& aOA);
+
+} // namespace net
+} // namespace mozilla
+
+#endif
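
A small usage sketch for the logging macros above (illustrative only, not part of the patch; the function name is hypothetical, and mozilla/SHA1.h for SHA1Sum::Hash and <cstdio> for printf are assumed to be included):

static void LogEntryHash(const SHA1Sum::Hash* aHash) {
  // SHA1STRING() formats the 20-byte hash as 40 hex characters, the same
  // "%08x%08x%08x%08x%08x" pattern used together with LOGSHA1() in cache2 logs.
  printf("entry hash: %s\n", SHA1STRING(aHash));
}
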
diff --git a/netwerk/cache2/CacheIOThread.cpp b/netwerk/cache2/CacheIOThread.cpp
new file mode 100644
index 0000000000..66f4300c72
--- /dev/null
+++ b/netwerk/cache2/CacheIOThread.cpp
@@ -0,0 +1,641 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheIOThread.h"
+#include "CacheFileIOManager.h"
+
+#include "nsIRunnable.h"
+#include "nsISupportsImpl.h"
+#include "nsPrintfCString.h"
+#include "nsThread.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/IOInterposer.h"
+#include "mozilla/ThreadEventQueue.h"
+#include "GeckoProfiler.h"
+
+#ifdef XP_WIN
+# include <windows.h>
+#endif
+
+#ifdef MOZ_TASK_TRACER
+# include "GeckoTaskTracer.h"
+# include "TracedTaskCommon.h"
+#endif
+
+namespace mozilla {
+namespace net {
+
+namespace { // anon
+
+class CacheIOTelemetry {
+ public:
+ typedef CacheIOThread::EventQueue::size_type size_type;
+ static size_type mMinLengthToReport[CacheIOThread::LAST_LEVEL];
+ static void Report(uint32_t aLevel, size_type aLength);
+};
+
+static CacheIOTelemetry::size_type const kGranularity = 30;
+
+CacheIOTelemetry::size_type
+ CacheIOTelemetry::mMinLengthToReport[CacheIOThread::LAST_LEVEL] = {
+ kGranularity, kGranularity, kGranularity, kGranularity,
+ kGranularity, kGranularity, kGranularity, kGranularity};
+
+// static
+void CacheIOTelemetry::Report(uint32_t aLevel,
+ CacheIOTelemetry::size_type aLength) {
+ if (mMinLengthToReport[aLevel] > aLength) {
+ return;
+ }
+
+ static Telemetry::HistogramID telemetryID[] = {
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN_PRIORITY,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_READ_PRIORITY,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_MANAGEMENT,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_READ,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE_PRIORITY,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_INDEX,
+ Telemetry::HTTP_CACHE_IO_QUEUE_2_EVICT};
+
+  // Each bucket is a multiple of kGranularity (30, 60, 90..., 300+)
+ aLength = (aLength / kGranularity);
+  // Next time, report only when the queue length reaches the next multiple of kGranularity
+ mMinLengthToReport[aLevel] = (aLength + 1) * kGranularity;
+
+  // 10 is the number of buckets we have in each probe
+ aLength = std::min<size_type>(aLength, 10);
+
+ Telemetry::Accumulate(telemetryID[aLevel], aLength - 1); // counted from 0
+}
+
+} // namespace
+
+namespace detail {
+
+/**
+ * Helper class encapsulating platform-specific code to cancel
+ * any pending IO operation taking too long. Solely used during
+ * shutdown to prevent any IO shutdown hangs.
+ * Mainly designed around the Win32 CancelSynchronousIo function.
+ */
+class BlockingIOWatcher {
+#ifdef XP_WIN
+ // The native handle to the thread
+ HANDLE mThread;
+ // Event signaling back to the main thread, see NotifyOperationDone.
+ HANDLE mEvent;
+#endif
+
+ public:
+ // Created and destroyed on the main thread only
+ BlockingIOWatcher();
+ ~BlockingIOWatcher();
+
+ // Called on the IO thread to grab the platform specific
+ // reference to it.
+ void InitThread();
+ // If there is a blocking operation being handled on the IO
+ // thread, this is called on the main thread during shutdown.
+ // Waits for notification from the IO thread for up to two seconds.
+ // If that times out, it attempts to cancel the IO operation.
+ void WatchAndCancel(Monitor& aMonitor);
+ // Called by the IO thread after each operation has been
+ // finished (after each Run() call). This wakes the main
+ // thread up and makes WatchAndCancel() early exit and become
+ // a no-op.
+ void NotifyOperationDone();
+};
+
+#ifdef XP_WIN
+
+BlockingIOWatcher::BlockingIOWatcher() : mThread(NULL), mEvent(NULL) {
+ HMODULE kernel32_dll = GetModuleHandleW(L"kernel32.dll");
+ if (!kernel32_dll) {
+ return;
+ }
+
+ mEvent = ::CreateEventW(NULL, TRUE, FALSE, NULL);
+}
+
+BlockingIOWatcher::~BlockingIOWatcher() {
+ if (mEvent) {
+ CloseHandle(mEvent);
+ }
+ if (mThread) {
+ CloseHandle(mThread);
+ }
+}
+
+void BlockingIOWatcher::InitThread() {
+ // GetCurrentThread() only returns a pseudo handle, hence DuplicateHandle
+ ::DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+ GetCurrentProcess(), &mThread, 0, FALSE,
+ DUPLICATE_SAME_ACCESS);
+}
+
+void BlockingIOWatcher::WatchAndCancel(Monitor& aMonitor) {
+ if (!mEvent) {
+ return;
+ }
+
+ // Reset before we enter the monitor to raise the chance we catch
+ // the currently pending IO op completion.
+ ::ResetEvent(mEvent);
+
+ HANDLE thread;
+ {
+ MonitorAutoLock lock(aMonitor);
+ thread = mThread;
+
+ if (!thread) {
+ return;
+ }
+ }
+
+ LOG(("Blocking IO operation pending on IO thread, waiting..."));
+
+ // It seems wise to use the I/O lag time as a maximum time to wait
+ // for an operation to finish. When that times out and cancelation
+  // succeeds, no further IO operations will be permitted. By default
+ // this is two seconds.
+ uint32_t maxLag =
+ std::min<uint32_t>(5, CacheObserver::MaxShutdownIOLag()) * 1000;
+
+ DWORD result = ::WaitForSingleObject(mEvent, maxLag);
+ if (result == WAIT_TIMEOUT) {
+ LOG(("CacheIOThread: Attempting to cancel a long blocking IO operation"));
+ BOOL result = ::CancelSynchronousIo(thread);
+ if (result) {
+ LOG((" cancelation signal succeeded"));
+ } else {
+ DWORD error = GetLastError();
+ LOG((" cancelation signal failed with GetLastError=%u", error));
+ }
+ }
+}
+
+void BlockingIOWatcher::NotifyOperationDone() {
+ if (mEvent) {
+ ::SetEvent(mEvent);
+ }
+}
+
+#else // WIN
+
+// Stub code only (we don't implement IO cancelation for this platform)
+
+BlockingIOWatcher::BlockingIOWatcher() = default;
+BlockingIOWatcher::~BlockingIOWatcher() = default;
+void BlockingIOWatcher::InitThread() {}
+void BlockingIOWatcher::WatchAndCancel(Monitor&) {}
+void BlockingIOWatcher::NotifyOperationDone() {}
+
+#endif
+
+} // namespace detail
+
+CacheIOThread* CacheIOThread::sSelf = nullptr;
+
+NS_IMPL_ISUPPORTS(CacheIOThread, nsIThreadObserver)
+
+CacheIOThread::CacheIOThread()
+ : mMonitor("CacheIOThread"),
+ mThread(nullptr),
+ mXPCOMThread(nullptr),
+ mLowestLevelWaiting(LAST_LEVEL),
+ mCurrentlyExecutingLevel(0),
+ mHasXPCOMEvents(false),
+ mRerunCurrentEvent(false),
+ mShutdown(false),
+ mIOCancelableEvents(0),
+ mEventCounter(0)
+#ifdef DEBUG
+ ,
+ mInsideLoop(true)
+#endif
+{
+ for (auto& item : mQueueLength) {
+ item = 0;
+ }
+
+ sSelf = this;
+}
+
+CacheIOThread::~CacheIOThread() {
+ if (mXPCOMThread) {
+ nsIThread* thread = mXPCOMThread;
+ thread->Release();
+ }
+
+ sSelf = nullptr;
+#ifdef DEBUG
+ for (auto& event : mEventQueue) {
+ MOZ_ASSERT(!event.Length());
+ }
+#endif
+}
+
+nsresult CacheIOThread::Init() {
+ {
+ MonitorAutoLock lock(mMonitor);
+    // There is no thread yet, but we want to make sure
+ // the sequencing is correct.
+ mBlockingIOWatcher = MakeUnique<detail::BlockingIOWatcher>();
+ }
+
+ mThread =
+ PR_CreateThread(PR_USER_THREAD, ThreadFunc, this, PR_PRIORITY_NORMAL,
+ PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 128 * 1024);
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheIOThread::Dispatch(nsIRunnable* aRunnable, uint32_t aLevel) {
+ return Dispatch(do_AddRef(aRunnable), aLevel);
+}
+
+nsresult CacheIOThread::Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aLevel) {
+ NS_ENSURE_ARG(aLevel < LAST_LEVEL);
+
+ nsCOMPtr<nsIRunnable> runnable(aRunnable);
+
+  // Runnable is always expected to be non-null, hard null-check below.
+ MOZ_ASSERT(runnable);
+
+ MonitorAutoLock lock(mMonitor);
+
+ if (mShutdown && (PR_GetCurrentThread() != mThread))
+ return NS_ERROR_UNEXPECTED;
+
+ return DispatchInternal(runnable.forget(), aLevel);
+}
+
+nsresult CacheIOThread::DispatchAfterPendingOpens(nsIRunnable* aRunnable) {
+  // Runnable is always expected to be non-null, hard null-check below.
+ MOZ_ASSERT(aRunnable);
+
+ MonitorAutoLock lock(mMonitor);
+
+ if (mShutdown && (PR_GetCurrentThread() != mThread))
+ return NS_ERROR_UNEXPECTED;
+
+ // Move everything from later executed OPEN level to the OPEN_PRIORITY level
+  // Move everything from the later-executed OPEN level to the OPEN_PRIORITY
+  // level where we post the (eviction) runnable.
+ mQueueLength[OPEN] -= mEventQueue[OPEN].Length();
+ mEventQueue[OPEN_PRIORITY].AppendElements(mEventQueue[OPEN]);
+ mEventQueue[OPEN].Clear();
+
+ return DispatchInternal(do_AddRef(aRunnable), OPEN_PRIORITY);
+}
+
+nsresult CacheIOThread::DispatchInternal(
+ already_AddRefed<nsIRunnable> aRunnable, uint32_t aLevel) {
+ nsCOMPtr<nsIRunnable> runnable(aRunnable);
+#ifdef MOZ_TASK_TRACER
+ if (tasktracer::IsStartLogging()) {
+ runnable = tasktracer::CreateTracedRunnable(runnable.forget());
+ (static_cast<tasktracer::TracedRunnable*>(runnable.get()))->DispatchTask();
+ }
+#endif
+
+ LogRunnable::LogDispatch(runnable.get());
+
+ if (NS_WARN_IF(!runnable)) return NS_ERROR_NULL_POINTER;
+
+ mMonitor.AssertCurrentThreadOwns();
+
+ ++mQueueLength[aLevel];
+ mEventQueue[aLevel].AppendElement(runnable.forget());
+ if (mLowestLevelWaiting > aLevel) mLowestLevelWaiting = aLevel;
+
+ mMonitor.NotifyAll();
+
+ return NS_OK;
+}
+
+bool CacheIOThread::IsCurrentThread() {
+ return mThread == PR_GetCurrentThread();
+}
+
+uint32_t CacheIOThread::QueueSize(bool highPriority) {
+ MonitorAutoLock lock(mMonitor);
+ if (highPriority) {
+ return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY];
+ }
+
+ return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY] +
+ mQueueLength[MANAGEMENT] + mQueueLength[OPEN] + mQueueLength[READ];
+}
+
+bool CacheIOThread::YieldInternal() {
+ if (!IsCurrentThread()) {
+ NS_WARNING(
+        "Trying to yield to priority events on a non-cache2 I/O thread? "
+        "You are probably doing something wrong.");
+ return false;
+ }
+
+ if (mCurrentlyExecutingLevel == XPCOM_LEVEL) {
+ // Doesn't make any sense, since this handler is the one
+    // that would be executed next anyway.
+ return false;
+ }
+
+ if (!EventsPending(mCurrentlyExecutingLevel)) return false;
+
+ mRerunCurrentEvent = true;
+ return true;
+}
+
+void CacheIOThread::Shutdown() {
+ if (!mThread) {
+ return;
+ }
+
+ {
+ MonitorAutoLock lock(mMonitor);
+ mShutdown = true;
+ mMonitor.NotifyAll();
+ }
+
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+}
+
+void CacheIOThread::CancelBlockingIO() {
+ // This is an attempt to cancel any blocking I/O operation taking
+  // too long.
+ if (!mBlockingIOWatcher) {
+ return;
+ }
+
+ if (!mIOCancelableEvents) {
+ LOG(("CacheIOThread::CancelBlockingIO, no blocking operation to cancel"));
+ return;
+ }
+
+  // OK, if we get here, we are processing an I/O operation on the thread
+  // that can be cancelled.
+ mBlockingIOWatcher->WatchAndCancel(mMonitor);
+}
+
+already_AddRefed<nsIEventTarget> CacheIOThread::Target() {
+ nsCOMPtr<nsIEventTarget> target;
+
+ target = mXPCOMThread;
+ if (!target && mThread) {
+ MonitorAutoLock lock(mMonitor);
+ while (!mXPCOMThread) {
+ lock.Wait();
+ }
+
+ target = mXPCOMThread;
+ }
+
+ return target.forget();
+}
+
+// static
+void CacheIOThread::ThreadFunc(void* aClosure) {
+ // XXXmstange We'd like to register this thread with the profiler, but doing
+ // so causes leaks, see bug 1323100.
+ NS_SetCurrentThreadName("Cache2 I/O");
+
+ mozilla::IOInterposer::RegisterCurrentThread();
+ CacheIOThread* thread = static_cast<CacheIOThread*>(aClosure);
+ thread->ThreadFunc();
+ mozilla::IOInterposer::UnregisterCurrentThread();
+}
+
+void CacheIOThread::ThreadFunc() {
+ nsCOMPtr<nsIThreadInternal> threadInternal;
+
+ {
+ MonitorAutoLock lock(mMonitor);
+
+ MOZ_ASSERT(mBlockingIOWatcher);
+ mBlockingIOWatcher->InitThread();
+
+ auto queue =
+ MakeRefPtr<ThreadEventQueue>(MakeUnique<mozilla::EventQueue>());
+ nsCOMPtr<nsIThread> xpcomThread =
+ nsThreadManager::get().CreateCurrentThread(queue,
+ nsThread::NOT_MAIN_THREAD);
+
+ threadInternal = do_QueryInterface(xpcomThread);
+ if (threadInternal) threadInternal->SetObserver(this);
+
+ mXPCOMThread = xpcomThread.forget().take();
+
+ lock.NotifyAll();
+
+ do {
+ loopStart:
+      // Reset the lowest level now, so that we can detect that a new event
+      // on a lower level (i.e. higher priority) has been scheduled while
+      // executing any previously scheduled event.
+ mLowestLevelWaiting = LAST_LEVEL;
+
+ // Process xpcom events first
+ while (mHasXPCOMEvents) {
+ mHasXPCOMEvents = false;
+ mCurrentlyExecutingLevel = XPCOM_LEVEL;
+
+ MonitorAutoUnlock unlock(mMonitor);
+
+ bool processedEvent;
+ nsresult rv;
+ do {
+ nsIThread* thread = mXPCOMThread;
+ rv = thread->ProcessNextEvent(false, &processedEvent);
+
+ ++mEventCounter;
+ MOZ_ASSERT(mBlockingIOWatcher);
+ mBlockingIOWatcher->NotifyOperationDone();
+ } while (NS_SUCCEEDED(rv) && processedEvent);
+ }
+
+ uint32_t level;
+ for (level = 0; level < LAST_LEVEL; ++level) {
+ if (!mEventQueue[level].Length()) {
+ // no events on this level, go to the next level
+ continue;
+ }
+
+ LoopOneLevel(level);
+
+ // Go to the first (lowest) level again
+ goto loopStart;
+ }
+
+ if (EventsPending()) {
+ continue;
+ }
+
+ if (mShutdown) {
+ break;
+ }
+
+ AUTO_PROFILER_LABEL("CacheIOThread::ThreadFunc::Wait", IDLE);
+ lock.Wait();
+
+ } while (true);
+
+ MOZ_ASSERT(!EventsPending());
+
+#ifdef DEBUG
+ // This is for correct assertion on XPCOM events dispatch.
+ mInsideLoop = false;
+#endif
+ } // lock
+
+ if (threadInternal) threadInternal->SetObserver(nullptr);
+}
+
+void CacheIOThread::LoopOneLevel(uint32_t aLevel) {
+ EventQueue events = std::move(mEventQueue[aLevel]);
+ EventQueue::size_type length = events.Length();
+
+ mCurrentlyExecutingLevel = aLevel;
+
+ bool returnEvents = false;
+ bool reportTelemetry = true;
+
+ EventQueue::size_type index;
+ {
+ MonitorAutoUnlock unlock(mMonitor);
+
+ for (index = 0; index < length; ++index) {
+ if (EventsPending(aLevel)) {
+        // Somebody scheduled a new event on a lower level; break and hurry
+        // to execute it! Don't forget to return what we haven't executed.
+ returnEvents = true;
+ break;
+ }
+
+ if (reportTelemetry) {
+ reportTelemetry = false;
+ CacheIOTelemetry::Report(aLevel, length);
+ }
+
+      // Drop any previous flagging; only an event on the current level may
+      // set this flag.
+ mRerunCurrentEvent = false;
+
+ LogRunnable::Run log(events[index].get());
+
+ events[index]->Run();
+
+ MOZ_ASSERT(mBlockingIOWatcher);
+ mBlockingIOWatcher->NotifyOperationDone();
+
+ if (mRerunCurrentEvent) {
+ // The event handler yields to higher priority events and wants to
+ // rerun.
+ log.WillRunAgain();
+ returnEvents = true;
+ break;
+ }
+
+ ++mEventCounter;
+ --mQueueLength[aLevel];
+
+ // Release outside the lock.
+ events[index] = nullptr;
+ }
+ }
+
+ if (returnEvents) {
+    // This code must prevent any AddRef/Release calls on the stored COMPtrs,
+    // as that might be expensive and hold the monitor's lock for an excessive
+    // amount of time.
+
+    // 'index' points at the event that was interrupted and asked to be rerun;
+    // all events before it have run, been nullified, and can be removed.
+ events.RemoveElementsAt(0, index);
+ // Move events that might have been scheduled on this queue to the tail to
+ // preserve the expected per-queue FIFO order.
+ // XXX(Bug 1631371) Check if this should use a fallible operation as it
+ // pretended earlier.
+ events.AppendElements(std::move(mEventQueue[aLevel]));
+ // And finally move everything back to the main queue.
+ mEventQueue[aLevel] = std::move(events);
+ }
+}
+
+bool CacheIOThread::EventsPending(uint32_t aLastLevel) {
+ return mLowestLevelWaiting < aLastLevel || mHasXPCOMEvents;
+}
+
+NS_IMETHODIMP CacheIOThread::OnDispatchedEvent() {
+ MonitorAutoLock lock(mMonitor);
+ mHasXPCOMEvents = true;
+ MOZ_ASSERT(mInsideLoop);
+ lock.Notify();
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheIOThread::OnProcessNextEvent(nsIThreadInternal* thread,
+ bool mayWait) {
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheIOThread::AfterProcessNextEvent(nsIThreadInternal* thread,
+ bool eventWasProcessed) {
+ return NS_OK;
+}
+
+// Memory reporting
+
+size_t CacheIOThread::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ MonitorAutoLock lock(const_cast<CacheIOThread*>(this)->mMonitor);
+
+ size_t n = 0;
+ for (const auto& event : mEventQueue) {
+ n += event.ShallowSizeOfExcludingThis(mallocSizeOf);
+    // Events referenced by the queues are arbitrary objects we cannot be sure
+    // are reported elsewhere and that probably do not implement the nsISizeOf
+    // interface. Deliberately omitting them from reporting here.
+ }
+
+ return n;
+}
+
+size_t CacheIOThread::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+CacheIOThread::Cancelable::Cancelable(bool aCancelable)
+ : mCancelable(aCancelable) {
+ // This will only ever be used on the I/O thread,
+ // which is expected to be alive longer than this class.
+ MOZ_ASSERT(CacheIOThread::sSelf);
+ MOZ_ASSERT(CacheIOThread::sSelf->IsCurrentThread());
+
+ if (mCancelable) {
+ ++CacheIOThread::sSelf->mIOCancelableEvents;
+ }
+}
+
+CacheIOThread::Cancelable::~Cancelable() {
+ MOZ_ASSERT(CacheIOThread::sSelf);
+
+ if (mCancelable) {
+ --CacheIOThread::sSelf->mIOCancelableEvents;
+ }
+}
+
+} // namespace net
+} // namespace mozilla
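
For reference, a small sketch of the bucketing arithmetic used by CacheIOTelemetry::Report() above (illustrative only, not part of the patch; the function name is hypothetical and <algorithm> is assumed for std::min):

static uint32_t QueueLengthBucket(uint32_t aLength, uint32_t* aNextReportAt) {
  const uint32_t granularity = 30;              // mirrors kGranularity above
  uint32_t bucket = aLength / granularity;      // e.g. a queue length of 95 -> 3
  *aNextReportAt = (bucket + 1) * granularity;  // next report at length 120
  bucket = std::min<uint32_t>(bucket, 10);      // the probes have 10 buckets
  return bucket - 1;                            // accumulated value, counted from 0
}

Report() is only invoked once the queue length has reached the current threshold, so the bucket index is at least 1 and the final subtraction cannot underflow.
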
diff --git a/netwerk/cache2/CacheIOThread.h b/netwerk/cache2/CacheIOThread.h
new file mode 100644
index 0000000000..3c7b11cbf8
--- /dev/null
+++ b/netwerk/cache2/CacheIOThread.h
@@ -0,0 +1,147 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheIOThread__h__
+#define CacheIOThread__h__
+
+#include "nsIThreadInternal.h"
+#include "nsISupportsImpl.h"
+#include "prthread.h"
+#include "nsTArray.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/UniquePtr.h"
+
+class nsIRunnable;
+
+namespace mozilla {
+namespace net {
+
+namespace detail {
+// A class keeping platform-specific information needed to watch and
+// cancel any long blocking synchronous IO. Must be predeclared here
+// since including windows.h causes a number of macro definition
+// conflicts.
+class BlockingIOWatcher;
+} // namespace detail
+
+class CacheIOThread final : public nsIThreadObserver {
+ virtual ~CacheIOThread();
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSITHREADOBSERVER
+
+ CacheIOThread();
+
+ typedef nsTArray<nsCOMPtr<nsIRunnable>> EventQueue;
+
+ enum ELevel : uint32_t {
+ OPEN_PRIORITY,
+ READ_PRIORITY,
+ MANAGEMENT, // Doesn't do any actual I/O
+ OPEN,
+ READ,
+ WRITE_PRIORITY,
+ WRITE,
+ INDEX,
+ EVICT,
+ LAST_LEVEL,
+
+ // This is actually executed as the first level, but we want this enum
+    // value merely as an indicator while the other values are used as indexes
+    // into the queue array. Hence it is put at the end rather than first.
+ XPCOM_LEVEL
+ };
+
+ nsresult Init();
+ nsresult Dispatch(nsIRunnable* aRunnable, uint32_t aLevel);
+ nsresult Dispatch(already_AddRefed<nsIRunnable>, uint32_t aLevel);
+  // Makes sure that any previously posted events to the OPEN or OPEN_PRIORITY
+  // levels (such as file openings and dooms) are executed before aRunnable,
+  // which is intended to evict entries from the cache.
+ nsresult DispatchAfterPendingOpens(nsIRunnable* aRunnable);
+ bool IsCurrentThread();
+
+ uint32_t QueueSize(bool highPriority);
+
+ uint32_t EventCounter() const { return mEventCounter; }
+
+ /**
+ * Callable only on this thread, checks if there is an event waiting in
+ * the event queue with a higher execution priority. If so, the result
+   * is true and the current event handler should stop its work and return
+   * from its Run() method immediately. The event handler will be rerun
+   * when all higher-priority events are processed. Events pending after this
+   * handler (i.e. the one that called YieldAndRerun()) will not execute until
+   * this handler completes a run without calling YieldAndRerun().
+ */
+ static bool YieldAndRerun() { return sSelf ? sSelf->YieldInternal() : false; }
+
+ void Shutdown();
+ // This method checks if there is a long blocking IO on the
+  // IO thread and tries to cancel it. It waits a maximum of
+ // two seconds.
+ void CancelBlockingIO();
+ already_AddRefed<nsIEventTarget> Target();
+
+  // A stack class used to annotate a running interruptible I/O event.
+ class Cancelable {
+ bool mCancelable;
+
+ public:
+ explicit Cancelable(bool aCancelable);
+ ~Cancelable();
+ };
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ static void ThreadFunc(void* aClosure);
+ void ThreadFunc();
+ void LoopOneLevel(uint32_t aLevel);
+ bool EventsPending(uint32_t aLastLevel = LAST_LEVEL);
+ nsresult DispatchInternal(already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aLevel);
+ bool YieldInternal();
+
+ static CacheIOThread* sSelf;
+
+ mozilla::Monitor mMonitor;
+ PRThread* mThread;
+ UniquePtr<detail::BlockingIOWatcher> mBlockingIOWatcher;
+ Atomic<nsIThread*> mXPCOMThread;
+ Atomic<uint32_t, Relaxed> mLowestLevelWaiting;
+ uint32_t mCurrentlyExecutingLevel;
+
+  // Keeps the length of each event queue, since LoopOneLevel moves all
+ // events into a local array.
+ Atomic<int32_t> mQueueLength[LAST_LEVEL];
+
+ EventQueue mEventQueue[LAST_LEVEL];
+ // Raised when nsIEventTarget.Dispatch() is called on this thread
+ Atomic<bool, Relaxed> mHasXPCOMEvents;
+ // See YieldAndRerun() above
+ bool mRerunCurrentEvent;
+ // Signal to process all pending events and then shutdown
+ // Synchronized by mMonitor
+ bool mShutdown;
+  // If > 0, there is currently an I/O operation on the thread that
+  // can be canceled after shutdown; see the Shutdown() method
+  // for usage. Made a counter to allow nesting of the Cancelable class.
+ Atomic<uint32_t, Relaxed> mIOCancelableEvents;
+ // Event counter that increases with every event processed.
+ Atomic<uint32_t, Relaxed> mEventCounter;
+#ifdef DEBUG
+ bool mInsideLoop;
+#endif
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
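
A sketch of how a long-running event dispatched to this thread can cooperate with YieldAndRerun() (illustrative only, not part of the patch; the class, its members, and the use of mozilla::Runnable from nsThreadUtils.h are assumptions):

class ChunkedWorkEvent final : public Runnable {
 public:
  explicit ChunkedWorkEvent(nsTArray<uint32_t>&& aItems)
      : Runnable("net::ChunkedWorkEvent"), mItems(std::move(aItems)) {}

  NS_IMETHOD Run() override {
    for (; mNextItem < mItems.Length(); ++mNextItem) {
      if (CacheIOThread::YieldAndRerun()) {
        // A higher-priority level has pending events; stop now. The I/O
        // thread keeps this runnable in its queue and reruns it later,
        // resuming at mNextItem.
        return NS_OK;
      }
      DoOneUnitOfWork(mItems[mNextItem]);
    }
    return NS_OK;
  }

 private:
  void DoOneUnitOfWork(uint32_t aItem) { /* one small piece of I/O work */ }

  nsTArray<uint32_t> mItems;
  uint32_t mNextItem = 0;
};

Such an event would typically be dispatched to one of the lower-priority levels via CacheIOThread::Dispatch(), so that OPEN_PRIORITY and READ_PRIORITY work can preempt it.
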
diff --git a/netwerk/cache2/CacheIndex.cpp b/netwerk/cache2/CacheIndex.cpp
new file mode 100644
index 0000000000..4de3698685
--- /dev/null
+++ b/netwerk/cache2/CacheIndex.cpp
@@ -0,0 +1,3903 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheIndex.h"
+
+#include "CacheLog.h"
+#include "CacheFileIOManager.h"
+#include "CacheFileMetadata.h"
+#include "CacheIndexIterator.h"
+#include "CacheIndexContextIterator.h"
+#include "nsThreadUtils.h"
+#include "nsISizeOf.h"
+#include "nsPrintfCString.h"
+#include "mozilla/DebugOnly.h"
+#include "prinrval.h"
+#include "nsIFile.h"
+#include "nsITimer.h"
+#include "mozilla/AutoRestore.h"
+#include <algorithm>
+#include "mozilla/StaticPrefs_network.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Unused.h"
+
+#define kMinUnwrittenChanges 300
+#define kMinDumpInterval 20000 // in milliseconds
+#define kMaxBufSize 16384
+#define kIndexVersion 0x0000000A
+#define kUpdateIndexStartDelay 50000 // in milliseconds
+#define kTelemetryReportBytesLimit (2U * 1024U * 1024U * 1024U) // 2GB
+
+#define INDEX_NAME "index"
+#define TEMP_INDEX_NAME "index.tmp"
+#define JOURNAL_NAME "index.log"
+
+namespace mozilla {
+namespace net {
+
+namespace {
+
+class FrecencyComparator {
+ public:
+ bool Equals(CacheIndexRecord* a, CacheIndexRecord* b) const {
+ if (!a || !b) {
+ return false;
+ }
+
+ return a->mFrecency == b->mFrecency;
+ }
+ bool LessThan(CacheIndexRecord* a, CacheIndexRecord* b) const {
+ // Removed (=null) entries must be at the end of the array.
+ if (!a) {
+ return false;
+ }
+ if (!b) {
+ return true;
+ }
+
+ // Place entries with frecency 0 at the end of the non-removed entries.
+ if (a->mFrecency == 0) {
+ return false;
+ }
+ if (b->mFrecency == 0) {
+ return true;
+ }
+
+ return a->mFrecency < b->mFrecency;
+ }
+};
+
+} // namespace
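
A short sketch of the ordering this comparator produces when used with nsTArray::Sort (illustrative only, not part of the patch; the helper function is hypothetical):

static void SortByFrecencySketch(nsTArray<CacheIndexRecord*>& aRecords) {
  // Ascending frecency, with frecency-0 records after all non-zero ones and
  // removed (null) records last, e.g. {7, 0, nullptr, 3} -> {3, 7, 0, nullptr}.
  aRecords.Sort(FrecencyComparator());
}
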
+
+CacheIndexRecord::~CacheIndexRecord() {
+ if (!StaticPrefs::network_cache_frecency_array_check_enabled()) {
+ return;
+ }
+
+ if (!(mFlags & CacheIndexEntry::kDirtyMask) &&
+ ((mFlags & CacheIndexEntry::kFileSizeMask) == 0)) {
+ return;
+ }
+
+ RefPtr<CacheIndex> index = CacheIndex::gInstance;
+ if (index) {
+ CacheIndex::sLock.AssertCurrentThreadOwns();
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ bool found = index->mFrecencyArray.RecordExisted(this);
+ MOZ_DIAGNOSTIC_ASSERT(!found);
+#endif
+ }
+}
+
+/**
+ * This helper class is responsible for keeping CacheIndex::mIndexStats and
+ * CacheIndex::mFrecencyArray up to date.
+ */
+class CacheIndexEntryAutoManage {
+ public:
+ CacheIndexEntryAutoManage(const SHA1Sum::Hash* aHash, CacheIndex* aIndex)
+ : mIndex(aIndex),
+ mOldRecord(nullptr),
+ mOldFrecency(0),
+ mDoNotSearchInIndex(false),
+ mDoNotSearchInUpdates(false) {
+ CacheIndex::sLock.AssertCurrentThreadOwns();
+
+ mHash = aHash;
+ const CacheIndexEntry* entry = FindEntry();
+ mIndex->mIndexStats.BeforeChange(entry);
+ if (entry && entry->IsInitialized() && !entry->IsRemoved()) {
+ mOldRecord = entry->mRec.get();
+ mOldFrecency = entry->mRec->mFrecency;
+ }
+ }
+
+ ~CacheIndexEntryAutoManage() {
+ CacheIndex::sLock.AssertCurrentThreadOwns();
+
+ const CacheIndexEntry* entry = FindEntry();
+ mIndex->mIndexStats.AfterChange(entry);
+ if (!entry || !entry->IsInitialized() || entry->IsRemoved()) {
+ entry = nullptr;
+ }
+
+ if (entry && !mOldRecord) {
+ mIndex->mFrecencyArray.AppendRecord(entry->mRec.get());
+ mIndex->AddRecordToIterators(entry->mRec.get());
+ } else if (!entry && mOldRecord) {
+ mIndex->mFrecencyArray.RemoveRecord(mOldRecord);
+ mIndex->RemoveRecordFromIterators(mOldRecord);
+ } else if (entry && mOldRecord) {
+ auto rec = entry->mRec.get();
+ if (rec != mOldRecord) {
+ // record has a different address, we have to replace it
+ mIndex->ReplaceRecordInIterators(mOldRecord, rec);
+
+ if (entry->mRec->mFrecency == mOldFrecency) {
+ // If frecency hasn't changed simply replace the pointer
+ mIndex->mFrecencyArray.ReplaceRecord(mOldRecord, rec);
+ } else {
+ // Remove old pointer and insert the new one at the end of the array
+ mIndex->mFrecencyArray.RemoveRecord(mOldRecord);
+ mIndex->mFrecencyArray.AppendRecord(rec);
+ }
+ } else if (entry->mRec->mFrecency != mOldFrecency) {
+        // Move the element to the end of the array
+ mIndex->mFrecencyArray.RemoveRecord(rec);
+ mIndex->mFrecencyArray.AppendRecord(rec);
+ }
+ } else {
+ // both entries were removed or not initialized, do nothing
+ }
+ }
+
+ // We cannot rely on nsTHashtable::GetEntry() in case we are removing entries
+  // while iterating. The destructor is called before the entry is removed. The
+  // caller must call one of the following methods to skip the hashtable lookup.
+ void DoNotSearchInIndex() { mDoNotSearchInIndex = true; }
+ void DoNotSearchInUpdates() { mDoNotSearchInUpdates = true; }
+
+ private:
+ const CacheIndexEntry* FindEntry() {
+ const CacheIndexEntry* entry = nullptr;
+
+ switch (mIndex->mState) {
+ case CacheIndex::READING:
+ case CacheIndex::WRITING:
+ if (!mDoNotSearchInUpdates) {
+ entry = mIndex->mPendingUpdates.GetEntry(*mHash);
+ }
+ [[fallthrough]];
+ case CacheIndex::BUILDING:
+ case CacheIndex::UPDATING:
+ case CacheIndex::READY:
+ if (!entry && !mDoNotSearchInIndex) {
+ entry = mIndex->mIndex.GetEntry(*mHash);
+ }
+ break;
+ case CacheIndex::INITIAL:
+ case CacheIndex::SHUTDOWN:
+ default:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ return entry;
+ }
+
+ const SHA1Sum::Hash* mHash;
+ RefPtr<CacheIndex> mIndex;
+ CacheIndexRecord* mOldRecord;
+ uint32_t mOldFrecency;
+ bool mDoNotSearchInIndex;
+ bool mDoNotSearchInUpdates;
+};
+
+class FileOpenHelper final : public CacheFileIOListener {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ explicit FileOpenHelper(CacheIndex* aIndex)
+ : mIndex(aIndex), mCanceled(false) {}
+
+ void Cancel() {
+ CacheIndex::sLock.AssertCurrentThreadOwns();
+ mCanceled = true;
+ }
+
+ private:
+ virtual ~FileOpenHelper() = default;
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override {
+ MOZ_CRASH("FileOpenHelper::OnDataWritten should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override {
+ MOZ_CRASH("FileOpenHelper::OnDataRead should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
+ MOZ_CRASH("FileOpenHelper::OnFileDoomed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
+ MOZ_CRASH("FileOpenHelper::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
+ nsresult aResult) override {
+ MOZ_CRASH("FileOpenHelper::OnFileRenamed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ RefPtr<CacheIndex> mIndex;
+ bool mCanceled;
+};
+
+NS_IMETHODIMP FileOpenHelper::OnFileOpened(CacheFileHandle* aHandle,
+ nsresult aResult) {
+ StaticMutexAutoLock lock(CacheIndex::sLock);
+
+ if (mCanceled) {
+ if (aHandle) {
+ CacheFileIOManager::DoomFile(aHandle, nullptr);
+ }
+
+ return NS_OK;
+ }
+
+ mIndex->OnFileOpenedInternal(this, aHandle, aResult);
+
+ return NS_OK;
+}
+
+NS_IMPL_ISUPPORTS(FileOpenHelper, CacheFileIOListener);
+
+StaticRefPtr<CacheIndex> CacheIndex::gInstance;
+StaticMutex CacheIndex::sLock;
+
+NS_IMPL_ADDREF(CacheIndex)
+NS_IMPL_RELEASE(CacheIndex)
+
+NS_INTERFACE_MAP_BEGIN(CacheIndex)
+ NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
+ NS_INTERFACE_MAP_ENTRY(nsIRunnable)
+NS_INTERFACE_MAP_END
+
+CacheIndex::CacheIndex()
+ : mState(INITIAL),
+ mShuttingDown(false),
+ mIndexNeedsUpdate(false),
+ mRemovingAll(false),
+ mIndexOnDiskIsValid(false),
+ mDontMarkIndexClean(false),
+ mIndexTimeStamp(0),
+ mUpdateEventPending(false),
+ mSkipEntries(0),
+ mProcessEntries(0),
+ mRWBuf(nullptr),
+ mRWBufSize(0),
+ mRWBufPos(0),
+ mRWPending(false),
+ mJournalReadSuccessfully(false),
+ mAsyncGetDiskConsumptionBlocked(false),
+ mTotalBytesWritten(0) {
+ sLock.AssertCurrentThreadOwns();
+ LOG(("CacheIndex::CacheIndex [this=%p]", this));
+ MOZ_ASSERT(!gInstance, "multiple CacheIndex instances!");
+}
+
+CacheIndex::~CacheIndex() {
+ sLock.AssertCurrentThreadOwns();
+ LOG(("CacheIndex::~CacheIndex [this=%p]", this));
+
+ ReleaseBuffer();
+}
+
+// static
+nsresult CacheIndex::Init(nsIFile* aCacheDirectory) {
+ LOG(("CacheIndex::Init()"));
+
+ MOZ_ASSERT(NS_IsMainThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ if (gInstance) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ RefPtr<CacheIndex> idx = new CacheIndex();
+
+ nsresult rv = idx->InitInternal(aCacheDirectory);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ gInstance = std::move(idx);
+ return NS_OK;
+}
+
+nsresult CacheIndex::InitInternal(nsIFile* aCacheDirectory) {
+ nsresult rv;
+
+ rv = aCacheDirectory->Clone(getter_AddRefs(mCacheDirectory));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mStartTime = TimeStamp::NowLoRes();
+
+ ReadIndexFromDisk();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::PreShutdown() {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ LOG(("CacheIndex::PreShutdown() [gInstance=%p]", gInstance.get()));
+
+ nsresult rv;
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ LOG(
+ ("CacheIndex::PreShutdown() - [state=%d, indexOnDiskIsValid=%d, "
+ "dontMarkIndexClean=%d]",
+ index->mState, index->mIndexOnDiskIsValid, index->mDontMarkIndexClean));
+
+ LOG(("CacheIndex::PreShutdown() - Closing iterators."));
+ for (uint32_t i = 0; i < index->mIterators.Length();) {
+ rv = index->mIterators[i]->CloseInternal(NS_ERROR_FAILURE);
+ if (NS_FAILED(rv)) {
+      // CacheIndexIterator::CloseInternal() removes itself from mIterators iff
+ // it returns success.
+ LOG(
+ ("CacheIndex::PreShutdown() - Failed to remove iterator %p. "
+ "[rv=0x%08" PRIx32 "]",
+ index->mIterators[i], static_cast<uint32_t>(rv)));
+ i++;
+ }
+ }
+
+ index->mShuttingDown = true;
+
+ if (index->mState == READY) {
+ return NS_OK; // nothing to do
+ }
+
+ nsCOMPtr<nsIRunnable> event;
+ event = NewRunnableMethod("net::CacheIndex::PreShutdownInternal", index,
+ &CacheIndex::PreShutdownInternal);
+
+ nsCOMPtr<nsIEventTarget> ioTarget = CacheFileIOManager::IOTarget();
+ MOZ_ASSERT(ioTarget);
+
+  // PreShutdownInternal() will be executed before any queued event on the INDEX
+  // level. That's OK since we don't want to wait for any operation in progress.
+ rv = ioTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("CacheIndex::PreShutdown() - Can't dispatch event");
+ LOG(("CacheIndex::PreShutdown() - Can't dispatch event"));
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+void CacheIndex::PreShutdownInternal() {
+ StaticMutexAutoLock lock(sLock);
+
+ LOG(
+ ("CacheIndex::PreShutdownInternal() - [state=%d, indexOnDiskIsValid=%d, "
+ "dontMarkIndexClean=%d]",
+ mState, mIndexOnDiskIsValid, mDontMarkIndexClean));
+
+ MOZ_ASSERT(mShuttingDown);
+
+ if (mUpdateTimer) {
+ mUpdateTimer->Cancel();
+ mUpdateTimer = nullptr;
+ }
+
+ switch (mState) {
+ case WRITING:
+ FinishWrite(false);
+ break;
+ case READY:
+ // nothing to do, write the journal in Shutdown()
+ break;
+ case READING:
+ FinishRead(false);
+ break;
+ case BUILDING:
+ case UPDATING:
+ FinishUpdate(false);
+ break;
+ default:
+ MOZ_ASSERT(false, "Implement me!");
+ }
+
+ // We should end up in READY state
+ MOZ_ASSERT(mState == READY);
+}
+
+// static
+nsresult CacheIndex::Shutdown() {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ LOG(("CacheIndex::Shutdown() [gInstance=%p]", gInstance.get()));
+
+ RefPtr<CacheIndex> index = gInstance.forget();
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ bool sanitize = CacheObserver::ClearCacheOnShutdown();
+
+ LOG(
+ ("CacheIndex::Shutdown() - [state=%d, indexOnDiskIsValid=%d, "
+ "dontMarkIndexClean=%d, sanitize=%d]",
+ index->mState, index->mIndexOnDiskIsValid, index->mDontMarkIndexClean,
+ sanitize));
+
+ MOZ_ASSERT(index->mShuttingDown);
+
+ EState oldState = index->mState;
+ index->ChangeState(SHUTDOWN);
+
+ if (oldState != READY) {
+ LOG(
+ ("CacheIndex::Shutdown() - Unexpected state. Did posting of "
+ "PreShutdownInternal() fail?"));
+ }
+
+ switch (oldState) {
+ case WRITING:
+ index->FinishWrite(false);
+ [[fallthrough]];
+ case READY:
+ if (index->mIndexOnDiskIsValid && !index->mDontMarkIndexClean) {
+ if (!sanitize && NS_FAILED(index->WriteLogToDisk())) {
+ index->RemoveJournalAndTempFile();
+ }
+ } else {
+ index->RemoveJournalAndTempFile();
+ }
+ break;
+ case READING:
+ index->FinishRead(false);
+ break;
+ case BUILDING:
+ case UPDATING:
+ index->FinishUpdate(false);
+ break;
+ default:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ if (sanitize) {
+ index->RemoveAllIndexFiles();
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::AddEntry(const SHA1Sum::Hash* aHash) {
+ LOG(("CacheIndex::AddEntry() [hash=%08x%08x%08x%08x%08x]", LOGSHA1(aHash)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // Getters in CacheIndexStats assert when mStateLogged is true since the
+ // information is incomplete between calls to BeforeChange() and AfterChange()
+ // (i.e. while CacheIndexEntryAutoManage exists). We need to check whether
+  // non-fresh entries exist outside the scope of CacheIndexEntryAutoManage.
+ bool updateIfNonFreshEntriesExist = false;
+
+ {
+ CacheIndexEntryAutoManage entryMng(aHash, index);
+
+ CacheIndexEntry* entry = index->mIndex.GetEntry(*aHash);
+ bool entryRemoved = entry && entry->IsRemoved();
+ CacheIndexEntryUpdate* updated = nullptr;
+
+ if (index->mState == READY || index->mState == UPDATING ||
+ index->mState == BUILDING) {
+ MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
+
+ if (entry && !entryRemoved) {
+ // Found entry in index that shouldn't exist.
+
+ if (entry->IsFresh()) {
+          // Someone removed the file on disk while FF is running. The update
+          // process can fix only non-fresh entries (i.e. entries that were not
+          // added within this session). Start an update only if we have such
+          // entries.
+ //
+          // TODO: This should be a very rare problem. If it turns out not to
+          // be true, change the update process so that it also iterates all
+          // initialized non-empty entries and checks whether the file exists.
+
+ LOG(
+ ("CacheIndex::AddEntry() - Cache file was removed outside FF "
+ "process!"));
+
+ updateIfNonFreshEntriesExist = true;
+ } else if (index->mState == READY) {
+ // Index is outdated, update it.
+ LOG(
+ ("CacheIndex::AddEntry() - Found entry that shouldn't exist, "
+ "update is needed"));
+ index->mIndexNeedsUpdate = true;
+ } else {
+ // We cannot be here when building index since all entries are fresh
+ // during building.
+ MOZ_ASSERT(index->mState == UPDATING);
+ }
+ }
+
+ if (!entry) {
+ entry = index->mIndex.PutEntry(*aHash);
+ }
+ } else { // WRITING, READING
+ updated = index->mPendingUpdates.GetEntry(*aHash);
+ bool updatedRemoved = updated && updated->IsRemoved();
+
+ if ((updated && !updatedRemoved) ||
+ (!updated && entry && !entryRemoved && entry->IsFresh())) {
+ // Fresh entry found, so the file was removed outside FF
+ LOG(
+ ("CacheIndex::AddEntry() - Cache file was removed outside FF "
+ "process!"));
+
+ updateIfNonFreshEntriesExist = true;
+ } else if (!updated && entry && !entryRemoved) {
+ if (index->mState == WRITING) {
+ LOG(
+ ("CacheIndex::AddEntry() - Found entry that shouldn't exist, "
+ "update is needed"));
+ index->mIndexNeedsUpdate = true;
+ }
+ // Ignore if state is READING since the index information is partial
+ }
+
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ }
+
+ if (updated) {
+ updated->InitNew();
+ updated->MarkDirty();
+ updated->MarkFresh();
+ } else {
+ entry->InitNew();
+ entry->MarkDirty();
+ entry->MarkFresh();
+ }
+ }
+
+ if (updateIfNonFreshEntriesExist &&
+ index->mIndexStats.Count() != index->mIndexStats.Fresh()) {
+ index->mIndexNeedsUpdate = true;
+ }
+
+ index->StartUpdatingIndexIfNeeded();
+ index->WriteIndexToDiskIfNeeded();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::EnsureEntryExists(const SHA1Sum::Hash* aHash) {
+ LOG(("CacheIndex::EnsureEntryExists() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHash)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ {
+ CacheIndexEntryAutoManage entryMng(aHash, index);
+
+ CacheIndexEntry* entry = index->mIndex.GetEntry(*aHash);
+ bool entryRemoved = entry && entry->IsRemoved();
+
+ if (index->mState == READY || index->mState == UPDATING ||
+ index->mState == BUILDING) {
+ MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
+
+ if (!entry || entryRemoved) {
+ if (entryRemoved && entry->IsFresh()) {
+ // This could happen only if somebody copies files to the entries
+ // directory while FF is running.
+ LOG(
+ ("CacheIndex::EnsureEntryExists() - Cache file was added outside "
+ "FF process! Update is needed."));
+ index->mIndexNeedsUpdate = true;
+ } else if (index->mState == READY ||
+ (entryRemoved && !entry->IsFresh())) {
+ // Removed non-fresh entries can be present as a result of
+ // MergeJournal()
+ LOG(
+ ("CacheIndex::EnsureEntryExists() - Didn't find entry that should"
+ " exist, update is needed"));
+ index->mIndexNeedsUpdate = true;
+ }
+
+ if (!entry) {
+ entry = index->mIndex.PutEntry(*aHash);
+ }
+ entry->InitNew();
+ entry->MarkDirty();
+ }
+ entry->MarkFresh();
+ } else { // WRITING, READING
+ CacheIndexEntryUpdate* updated = index->mPendingUpdates.GetEntry(*aHash);
+ bool updatedRemoved = updated && updated->IsRemoved();
+
+ if (updatedRemoved || (!updated && entryRemoved && entry->IsFresh())) {
+ // Fresh information about missing entry found. This could happen only
+ // if somebody copies files to the entries directory while FF is
+ // running.
+ LOG(
+ ("CacheIndex::EnsureEntryExists() - Cache file was added outside "
+ "FF process! Update is needed."));
+ index->mIndexNeedsUpdate = true;
+ } else if (!updated && (!entry || entryRemoved)) {
+ if (index->mState == WRITING) {
+ LOG(
+ ("CacheIndex::EnsureEntryExists() - Didn't find entry that should"
+ " exist, update is needed"));
+ index->mIndexNeedsUpdate = true;
+ }
+ // Ignore if state is READING since the index information is partial
+ }
+
+ // We don't need entryRemoved and updatedRemoved info anymore
+ if (entryRemoved) entry = nullptr;
+ if (updatedRemoved) updated = nullptr;
+
+ if (updated) {
+ updated->MarkFresh();
+ } else {
+ if (!entry) {
+ // Create a new entry
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ updated->InitNew();
+ updated->MarkFresh();
+ updated->MarkDirty();
+ } else {
+ if (!entry->IsFresh()) {
+            // To mark the entry fresh we must make a copy of the index entry
+            // since the index is read-only.
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ *updated = *entry;
+ updated->MarkFresh();
+ }
+ }
+ }
+ }
+ }
+
+ index->StartUpdatingIndexIfNeeded();
+ index->WriteIndexToDiskIfNeeded();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::InitEntry(const SHA1Sum::Hash* aHash,
+ OriginAttrsHash aOriginAttrsHash,
+ bool aAnonymous, bool aPinned) {
+ LOG(
+ ("CacheIndex::InitEntry() [hash=%08x%08x%08x%08x%08x, "
+ "originAttrsHash=%" PRIx64 ", anonymous=%d, pinned=%d]",
+ LOGSHA1(aHash), aOriginAttrsHash, aAnonymous, aPinned));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ {
+ CacheIndexEntryAutoManage entryMng(aHash, index);
+
+ CacheIndexEntry* entry = index->mIndex.GetEntry(*aHash);
+ CacheIndexEntryUpdate* updated = nullptr;
+ bool reinitEntry = false;
+
+ if (entry && entry->IsRemoved()) {
+ entry = nullptr;
+ }
+
+ if (index->mState == READY || index->mState == UPDATING ||
+ index->mState == BUILDING) {
+ MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
+ MOZ_ASSERT(entry);
+ MOZ_ASSERT(entry->IsFresh());
+
+ if (!entry) {
+ LOG(("CacheIndex::InitEntry() - Entry was not found in mIndex!"));
+ NS_WARNING(
+ ("CacheIndex::InitEntry() - Entry was not found in mIndex!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (IsCollision(entry, aOriginAttrsHash, aAnonymous)) {
+ index->mIndexNeedsUpdate =
+ true; // TODO Does this really help in case of collision?
+ reinitEntry = true;
+ } else {
+ if (entry->IsInitialized()) {
+ return NS_OK;
+ }
+ }
+ } else {
+ updated = index->mPendingUpdates.GetEntry(*aHash);
+ DebugOnly<bool> removed = updated && updated->IsRemoved();
+
+ MOZ_ASSERT(updated || !removed);
+ MOZ_ASSERT(updated || entry);
+
+ if (!updated && !entry) {
+ LOG(
+ ("CacheIndex::InitEntry() - Entry was found neither in mIndex nor "
+ "in mPendingUpdates!"));
+ NS_WARNING(
+ ("CacheIndex::InitEntry() - Entry was found neither in "
+ "mIndex nor in mPendingUpdates!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (updated) {
+ MOZ_ASSERT(updated->IsFresh());
+
+ if (IsCollision(updated, aOriginAttrsHash, aAnonymous)) {
+ index->mIndexNeedsUpdate = true;
+ reinitEntry = true;
+ } else {
+ if (updated->IsInitialized()) {
+ return NS_OK;
+ }
+ }
+ } else {
+ MOZ_ASSERT(entry->IsFresh());
+
+ if (IsCollision(entry, aOriginAttrsHash, aAnonymous)) {
+ index->mIndexNeedsUpdate = true;
+ reinitEntry = true;
+ } else {
+ if (entry->IsInitialized()) {
+ return NS_OK;
+ }
+ }
+
+ // make a copy of a read-only entry
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ *updated = *entry;
+ }
+ }
+
+ if (reinitEntry) {
+ // There is a collision and we are going to rewrite this entry. Initialize
+ // it as a new entry.
+ if (updated) {
+ updated->InitNew();
+ updated->MarkFresh();
+ } else {
+ entry->InitNew();
+ entry->MarkFresh();
+ }
+ }
+
+ if (updated) {
+ updated->Init(aOriginAttrsHash, aAnonymous, aPinned);
+ updated->MarkDirty();
+ } else {
+ entry->Init(aOriginAttrsHash, aAnonymous, aPinned);
+ entry->MarkDirty();
+ }
+ }
+
+ index->StartUpdatingIndexIfNeeded();
+ index->WriteIndexToDiskIfNeeded();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::RemoveEntry(const SHA1Sum::Hash* aHash) {
+ LOG(("CacheIndex::RemoveEntry() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(aHash)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ {
+ CacheIndexEntryAutoManage entryMng(aHash, index);
+
+ CacheIndexEntry* entry = index->mIndex.GetEntry(*aHash);
+ bool entryRemoved = entry && entry->IsRemoved();
+
+ if (index->mState == READY || index->mState == UPDATING ||
+ index->mState == BUILDING) {
+ MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
+
+ if (!entry || entryRemoved) {
+ if (entryRemoved && entry->IsFresh()) {
+ // This could happen only if somebody copies files to the entries
+ // directory while FF is running.
+ LOG(
+ ("CacheIndex::RemoveEntry() - Cache file was added outside FF "
+ "process! Update is needed."));
+ index->mIndexNeedsUpdate = true;
+ } else if (index->mState == READY ||
+ (entryRemoved && !entry->IsFresh())) {
+ // Removed non-fresh entries can be present as a result of
+ // MergeJournal()
+ LOG(
+ ("CacheIndex::RemoveEntry() - Didn't find entry that should exist"
+ ", update is needed"));
+ index->mIndexNeedsUpdate = true;
+ }
+ } else {
+ if (entry) {
+ if (!entry->IsDirty() && entry->IsFileEmpty()) {
+ index->mIndex.RemoveEntry(entry);
+ entry = nullptr;
+ } else {
+ entry->MarkRemoved();
+ entry->MarkDirty();
+ entry->MarkFresh();
+ }
+ }
+ }
+ } else { // WRITING, READING
+ CacheIndexEntryUpdate* updated = index->mPendingUpdates.GetEntry(*aHash);
+ bool updatedRemoved = updated && updated->IsRemoved();
+
+ if (updatedRemoved || (!updated && entryRemoved && entry->IsFresh())) {
+ // Fresh information about missing entry found. This could happen only
+ // if somebody copies files to the entries directory while FF is
+ // running.
+ LOG(
+ ("CacheIndex::RemoveEntry() - Cache file was added outside FF "
+ "process! Update is needed."));
+ index->mIndexNeedsUpdate = true;
+ } else if (!updated && (!entry || entryRemoved)) {
+ if (index->mState == WRITING) {
+ LOG(
+ ("CacheIndex::RemoveEntry() - Didn't find entry that should exist"
+ ", update is needed"));
+ index->mIndexNeedsUpdate = true;
+ }
+ // Ignore if state is READING since the index information is partial
+ }
+
+ if (!updated) {
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ updated->InitNew();
+ }
+
+ updated->MarkRemoved();
+ updated->MarkDirty();
+ updated->MarkFresh();
+ }
+ }
+
+ index->StartUpdatingIndexIfNeeded();
+ index->WriteIndexToDiskIfNeeded();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::UpdateEntry(const SHA1Sum::Hash* aHash,
+ const uint32_t* aFrecency,
+ const bool* aHasAltData,
+ const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType,
+ const uint32_t* aSize) {
+ LOG(
+ ("CacheIndex::UpdateEntry() [hash=%08x%08x%08x%08x%08x, "
+ "frecency=%s, hasAltData=%s, onStartTime=%s, onStopTime=%s, "
+ "contentType=%s, size=%s]",
+ LOGSHA1(aHash), aFrecency ? nsPrintfCString("%u", *aFrecency).get() : "",
+ aHasAltData ? (*aHasAltData ? "true" : "false") : "",
+ aOnStartTime ? nsPrintfCString("%u", *aOnStartTime).get() : "",
+ aOnStopTime ? nsPrintfCString("%u", *aOnStopTime).get() : "",
+ aContentType ? nsPrintfCString("%u", *aContentType).get() : "",
+ aSize ? nsPrintfCString("%u", *aSize).get() : ""));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ {
+ CacheIndexEntryAutoManage entryMng(aHash, index);
+
+ CacheIndexEntry* entry = index->mIndex.GetEntry(*aHash);
+
+ if (entry && entry->IsRemoved()) {
+ entry = nullptr;
+ }
+
+ if (index->mState == READY || index->mState == UPDATING ||
+ index->mState == BUILDING) {
+ MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
+ MOZ_ASSERT(entry);
+
+ if (!entry) {
+ LOG(("CacheIndex::UpdateEntry() - Entry was not found in mIndex!"));
+ NS_WARNING(
+ ("CacheIndex::UpdateEntry() - Entry was not found in mIndex!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (!HasEntryChanged(entry, aFrecency, aHasAltData, aOnStartTime,
+ aOnStopTime, aContentType, aSize)) {
+ return NS_OK;
+ }
+
+ MOZ_ASSERT(entry->IsFresh());
+ MOZ_ASSERT(entry->IsInitialized());
+ entry->MarkDirty();
+
+ if (aFrecency) {
+ entry->SetFrecency(*aFrecency);
+ }
+
+ if (aHasAltData) {
+ entry->SetHasAltData(*aHasAltData);
+ }
+
+ if (aOnStartTime) {
+ entry->SetOnStartTime(*aOnStartTime);
+ }
+
+ if (aOnStopTime) {
+ entry->SetOnStopTime(*aOnStopTime);
+ }
+
+ if (aContentType) {
+ entry->SetContentType(*aContentType);
+ }
+
+ if (aSize) {
+ entry->SetFileSize(*aSize);
+ }
+ } else {
+ CacheIndexEntryUpdate* updated = index->mPendingUpdates.GetEntry(*aHash);
+ DebugOnly<bool> removed = updated && updated->IsRemoved();
+
+ MOZ_ASSERT(updated || !removed);
+ MOZ_ASSERT(updated || entry);
+
+ if (!updated) {
+ if (!entry) {
+ LOG(
+ ("CacheIndex::UpdateEntry() - Entry was found neither in mIndex "
+ "nor in mPendingUpdates!"));
+ NS_WARNING(
+ ("CacheIndex::UpdateEntry() - Entry was found neither in "
+ "mIndex nor in mPendingUpdates!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // make a copy of a read-only entry
+ updated = index->mPendingUpdates.PutEntry(*aHash);
+ *updated = *entry;
+ }
+
+ MOZ_ASSERT(updated->IsFresh());
+ MOZ_ASSERT(updated->IsInitialized());
+ updated->MarkDirty();
+
+ if (aFrecency) {
+ updated->SetFrecency(*aFrecency);
+ }
+
+ if (aHasAltData) {
+ updated->SetHasAltData(*aHasAltData);
+ }
+
+ if (aOnStartTime) {
+ updated->SetOnStartTime(*aOnStartTime);
+ }
+
+ if (aOnStopTime) {
+ updated->SetOnStopTime(*aOnStopTime);
+ }
+
+ if (aContentType) {
+ updated->SetContentType(*aContentType);
+ }
+
+ if (aSize) {
+ updated->SetFileSize(*aSize);
+ }
+ }
+ }
+
+ index->WriteIndexToDiskIfNeeded();
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::RemoveAll() {
+ LOG(("CacheIndex::RemoveAll()"));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsCOMPtr<nsIFile> file;
+
+ {
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ MOZ_ASSERT(!index->mRemovingAll);
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ AutoRestore<bool> saveRemovingAll(index->mRemovingAll);
+ index->mRemovingAll = true;
+
+ // Doom index and journal handles but don't null them out since this will be
+ // done in FinishWrite/FinishRead methods.
+ if (index->mIndexHandle) {
+ CacheFileIOManager::DoomFile(index->mIndexHandle, nullptr);
+ } else {
+ // We don't have a handle to the index file, so get the file here, but
+ // delete it outside the lock. Ignore the result since this is not fatal.
+ index->GetFile(nsLiteralCString(INDEX_NAME), getter_AddRefs(file));
+ }
+
+ if (index->mJournalHandle) {
+ CacheFileIOManager::DoomFile(index->mJournalHandle, nullptr);
+ }
+
+ switch (index->mState) {
+ case WRITING:
+ index->FinishWrite(false);
+ break;
+ case READY:
+ // nothing to do
+ break;
+ case READING:
+ index->FinishRead(false);
+ break;
+ case BUILDING:
+ case UPDATING:
+ index->FinishUpdate(false);
+ break;
+ default:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ // We should end up in READY state
+ MOZ_ASSERT(index->mState == READY);
+
+ // There should not be any handle
+ MOZ_ASSERT(!index->mIndexHandle);
+ MOZ_ASSERT(!index->mJournalHandle);
+
+ index->mIndexOnDiskIsValid = false;
+ index->mIndexNeedsUpdate = false;
+
+ index->mIndexStats.Clear();
+ index->mFrecencyArray.Clear();
+ index->mIndex.Clear();
+
+ for (uint32_t i = 0; i < index->mIterators.Length();) {
+ nsresult rv = index->mIterators[i]->CloseInternal(NS_ERROR_NOT_AVAILABLE);
+ if (NS_FAILED(rv)) {
+ // CacheIndexIterator::CloseInternal() removes itself from mIterators
+ // iff it returns success.
+ LOG(
+ ("CacheIndex::RemoveAll() - Failed to remove iterator %p. "
+ "[rv=0x%08" PRIx32 "]",
+ index->mIterators[i], static_cast<uint32_t>(rv)));
+ i++;
+ }
+ }
+ }
+
+ if (file) {
+ // Ignore the result. The file might not exist and the failure is not fatal.
+ file->Remove(false);
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::HasEntry(
+ const nsACString& aKey, EntryStatus* _retval,
+ const std::function<void(const CacheIndexEntry*)>& aCB) {
+ LOG(("CacheIndex::HasEntry() [key=%s]", PromiseFlatCString(aKey).get()));
+
+ SHA1Sum sum;
+ SHA1Sum::Hash hash;
+ sum.update(aKey.BeginReading(), aKey.Length());
+ sum.finish(hash);
+
+ return HasEntry(hash, _retval, aCB);
+}
+
+// static
+nsresult CacheIndex::HasEntry(
+ const SHA1Sum::Hash& hash, EntryStatus* _retval,
+ const std::function<void(const CacheIndexEntry*)>& aCB) {
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ const CacheIndexEntry* entry = nullptr;
+
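+ // While the index file is being read or written, the freshest data lives
+ // in mPendingUpdates, so consult it before falling back to mIndex.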
+ switch (index->mState) {
+ case READING:
+ case WRITING:
+ entry = index->mPendingUpdates.GetEntry(hash);
+ [[fallthrough]];
+ case BUILDING:
+ case UPDATING:
+ case READY:
+ if (!entry) {
+ entry = index->mIndex.GetEntry(hash);
+ }
+ break;
+ case INITIAL:
+ case SHUTDOWN:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ if (!entry) {
+ if (index->mState == READY || index->mState == WRITING) {
+ *_retval = DOES_NOT_EXIST;
+ } else {
+ *_retval = DO_NOT_KNOW;
+ }
+ } else {
+ if (entry->IsRemoved()) {
+ if (entry->IsFresh()) {
+ *_retval = DOES_NOT_EXIST;
+ } else {
+ *_retval = DO_NOT_KNOW;
+ }
+ } else {
+ *_retval = EXISTS;
+ if (aCB) {
+ aCB(entry);
+ }
+ }
+ }
+
+ LOG(("CacheIndex::HasEntry() - result is %u", *_retval));
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::GetEntryForEviction(bool aIgnoreEmptyEntries,
+ SHA1Sum::Hash* aHash, uint32_t* aCnt) {
+ LOG(("CacheIndex::GetEntryForEviction()"));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) return NS_ERROR_NOT_INITIALIZED;
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (index->mIndexStats.Size() == 0) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
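+ // Compute the share (in percent) of the cache occupied by media entries.
+ // If it is over the configured limit, restrict this eviction round to
+ // media entries.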
+ int32_t mediaUsage =
+ round(static_cast<double>(index->mIndexStats.SizeByType(
+ nsICacheEntry::CONTENT_TYPE_MEDIA)) *
+ 100.0 / static_cast<double>(index->mIndexStats.Size()));
+ int32_t mediaUsageLimit =
+ StaticPrefs::browser_cache_disk_content_type_media_limit();
+ bool evictMedia = false;
+ if (mediaUsage > mediaUsageLimit) {
+ LOG(
+ ("CacheIndex::GetEntryForEviction() - media content type is over the "
+ "limit [mediaUsage=%d, mediaUsageLimit=%d]",
+ mediaUsage, mediaUsageLimit));
+ evictMedia = true;
+ }
+
+ SHA1Sum::Hash hash;
+ CacheIndexRecord* foundRecord = nullptr;
+ uint32_t skipped = 0;
+
+ // Find the first non-forced-valid and unpinned entry with the lowest frecency.
+ index->mFrecencyArray.SortIfNeeded();
+
+ for (auto iter = index->mFrecencyArray.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexRecord* rec = iter.Get();
+
+ memcpy(&hash, rec->mHash, sizeof(SHA1Sum::Hash));
+
+ ++skipped;
+
+ if (evictMedia && CacheIndexEntry::GetContentType(rec) !=
+ nsICacheEntry::CONTENT_TYPE_MEDIA) {
+ continue;
+ }
+
+ if (IsForcedValidEntry(&hash)) {
+ continue;
+ }
+
+ if (CacheIndexEntry::IsPinned(rec)) {
+ continue;
+ }
+
+ if (aIgnoreEmptyEntries && !CacheIndexEntry::GetFileSize(*rec)) {
+ continue;
+ }
+
+ --skipped;
+ foundRecord = rec;
+ break;
+ }
+
+ if (!foundRecord) return NS_ERROR_NOT_AVAILABLE;
+
+ *aCnt = skipped;
+
+ LOG(
+ ("CacheIndex::GetEntryForEviction() - returning entry "
+ "[hash=%08x%08x%08x%08x%08x, cnt=%u, frecency=%u, contentType=%u]",
+ LOGSHA1(&hash), *aCnt, foundRecord->mFrecency,
+ CacheIndexEntry::GetContentType(foundRecord)));
+
+ memcpy(aHash, &hash, sizeof(SHA1Sum::Hash));
+
+ return NS_OK;
+}
+
+// static
+bool CacheIndex::IsForcedValidEntry(const SHA1Sum::Hash* aHash) {
+ RefPtr<CacheFileHandle> handle;
+
+ CacheFileIOManager::gInstance->mHandles.GetHandle(aHash,
+ getter_AddRefs(handle));
+
+ if (!handle) return false;
+
+ nsCString hashKey = handle->Key();
+ return CacheStorageService::Self()->IsForcedValidEntry(hashKey);
+}
+
+// static
+nsresult CacheIndex::GetCacheSize(uint32_t* _retval) {
+ LOG(("CacheIndex::GetCacheSize()"));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) return NS_ERROR_NOT_INITIALIZED;
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *_retval = index->mIndexStats.Size();
+ LOG(("CacheIndex::GetCacheSize() - returning %u", *_retval));
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::GetEntryFileCount(uint32_t* _retval) {
+ LOG(("CacheIndex::GetEntryFileCount()"));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *_retval = index->mIndexStats.ActiveEntriesCount();
+ LOG(("CacheIndex::GetEntryFileCount() - returning %u", *_retval));
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::GetCacheStats(nsILoadContextInfo* aInfo, uint32_t* aSize,
+ uint32_t* aCount) {
+ LOG(("CacheIndex::GetCacheStats() [info=%p]", aInfo));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *aSize = 0;
+ *aCount = 0;
+
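+ // Walk the frecency array and sum the sizes (kept in kB in the index) of
+ // entries matching the given load context info.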
+ for (auto iter = index->mFrecencyArray.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexRecord* record = iter.Get();
+ if (aInfo && !CacheIndexEntry::RecordMatchesLoadContextInfo(record, aInfo))
+ continue;
+
+ *aSize += CacheIndexEntry::GetFileSize(*record);
+ ++*aCount;
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::AsyncGetDiskConsumption(
+ nsICacheStorageConsumptionObserver* aObserver) {
+ LOG(("CacheIndex::AsyncGetDiskConsumption()"));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ RefPtr<DiskConsumptionObserver> observer =
+ DiskConsumptionObserver::Init(aObserver);
+
+ NS_ENSURE_ARG(observer);
+
+ if ((index->mState == READY || index->mState == WRITING) &&
+ !index->mAsyncGetDiskConsumptionBlocked) {
+ LOG(("CacheIndex::AsyncGetDiskConsumption - calling immediately"));
+ // Safe to call the callback under the lock; we always post to the main
+ // thread.
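+ // mIndexStats.Size() is in kB, so shift left by 10 to report bytes.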
+ observer->OnDiskConsumption(index->mIndexStats.Size() << 10);
+ return NS_OK;
+ }
+
+ LOG(("CacheIndex::AsyncGetDiskConsumption - remembering callback"));
+ // Will be called when the index gets to the READY state.
+ index->mDiskConsumptionObservers.AppendElement(observer);
+
+ // Move forward with index re/building if it is pending
+ RefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
+ if (ioThread) {
+ ioThread->Dispatch(
+ NS_NewRunnableFunction("net::CacheIndex::AsyncGetDiskConsumption",
+ []() -> void {
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+ if (index && index->mUpdateTimer) {
+ index->mUpdateTimer->Cancel();
+ index->DelayedUpdateLocked();
+ }
+ }),
+ CacheIOThread::INDEX);
+ }
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::GetIterator(nsILoadContextInfo* aInfo, bool aAddNew,
+ CacheIndexIterator** _retval) {
+ LOG(("CacheIndex::GetIterator() [info=%p, addNew=%d]", aInfo, aAddNew));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ RefPtr<CacheIndexIterator> idxIter;
+ if (aInfo) {
+ idxIter = new CacheIndexContextIterator(index, aAddNew, aInfo);
+ } else {
+ idxIter = new CacheIndexIterator(index, aAddNew);
+ }
+
+ index->mFrecencyArray.SortIfNeeded();
+
+ for (auto iter = index->mFrecencyArray.Iter(); !iter.Done(); iter.Next()) {
+ idxIter->AddRecord(iter.Get());
+ }
+
+ index->mIterators.AppendElement(idxIter);
+ idxIter.swap(*_retval);
+ return NS_OK;
+}
+
+// static
+nsresult CacheIndex::IsUpToDate(bool* _retval) {
+ LOG(("CacheIndex::IsUpToDate()"));
+
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ if (!index->IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *_retval = (index->mState == READY || index->mState == WRITING) &&
+ !index->mIndexNeedsUpdate && !index->mShuttingDown;
+
+ LOG(("CacheIndex::IsUpToDate() - returning %d", *_retval));
+ return NS_OK;
+}
+
+bool CacheIndex::IsIndexUsable() {
+ MOZ_ASSERT(mState != INITIAL);
+
+ switch (mState) {
+ case INITIAL:
+ case SHUTDOWN:
+ return false;
+
+ case READING:
+ case WRITING:
+ case BUILDING:
+ case UPDATING:
+ case READY:
+ break;
+ }
+
+ return true;
+}
+
+// static
+bool CacheIndex::IsCollision(CacheIndexEntry* aEntry,
+ OriginAttrsHash aOriginAttrsHash,
+ bool aAnonymous) {
+ if (!aEntry->IsInitialized()) {
+ return false;
+ }
+
+ if (aEntry->Anonymous() != aAnonymous ||
+ aEntry->OriginAttrsHash() != aOriginAttrsHash) {
+ LOG(
+ ("CacheIndex::IsCollision() - Collision detected for entry [hash=%08x"
+ "%08x%08x%08x%08x, expected values: originAttrsHash=%" PRIu64 ", "
+ "anonymous=%d; actual values: originAttrsHash=%" PRIu64
+ ", anonymous=%d]",
+ LOGSHA1(aEntry->Hash()), aOriginAttrsHash, aAnonymous,
+ aEntry->OriginAttrsHash(), aEntry->Anonymous()));
+ return true;
+ }
+
+ return false;
+}
+
+// static
+bool CacheIndex::HasEntryChanged(
+ CacheIndexEntry* aEntry, const uint32_t* aFrecency, const bool* aHasAltData,
+ const uint16_t* aOnStartTime, const uint16_t* aOnStopTime,
+ const uint8_t* aContentType, const uint32_t* aSize) {
+ if (aFrecency && *aFrecency != aEntry->GetFrecency()) {
+ return true;
+ }
+
+ if (aHasAltData && *aHasAltData != aEntry->GetHasAltData()) {
+ return true;
+ }
+
+ if (aOnStartTime && *aOnStartTime != aEntry->GetOnStartTime()) {
+ return true;
+ }
+
+ if (aOnStopTime && *aOnStopTime != aEntry->GetOnStopTime()) {
+ return true;
+ }
+
+ if (aContentType && *aContentType != aEntry->GetContentType()) {
+ return true;
+ }
+
+ if (aSize &&
+ (*aSize & CacheIndexEntry::kFileSizeMask) != aEntry->GetFileSize()) {
+ return true;
+ }
+
+ return false;
+}
+
+void CacheIndex::ProcessPendingOperations() {
+ LOG(("CacheIndex::ProcessPendingOperations()"));
+
+ sLock.AssertCurrentThreadOwns();
+
+ for (auto iter = mPendingUpdates.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntryUpdate* update = iter.Get();
+
+ LOG(("CacheIndex::ProcessPendingOperations() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(update->Hash())));
+
+ MOZ_ASSERT(update->IsFresh());
+
+ CacheIndexEntry* entry = mIndex.GetEntry(*update->Hash());
+
+ {
+ CacheIndexEntryAutoManage emng(update->Hash(), this);
+ emng.DoNotSearchInUpdates();
+
+ if (update->IsRemoved()) {
+ if (entry) {
+ if (entry->IsRemoved()) {
+ MOZ_ASSERT(entry->IsFresh());
+ MOZ_ASSERT(entry->IsDirty());
+ } else if (!entry->IsDirty() && entry->IsFileEmpty()) {
+ // Entries with empty file are not stored in index on disk. Just
+ // remove the entry, but only in case the entry is not dirty, i.e.
+ // the entry file was empty when we wrote the index.
+ mIndex.RemoveEntry(entry);
+ entry = nullptr;
+ } else {
+ entry->MarkRemoved();
+ entry->MarkDirty();
+ entry->MarkFresh();
+ }
+ }
+ } else if (entry) {
+ // Some information in mIndex can be newer than in mPendingUpdates (see
+ // bug 1074832). This will copy just those values that were really
+ // updated.
+ update->ApplyUpdate(entry);
+ } else {
+ // There is no entry in mIndex, copy all information from
+ // mPendingUpdates to mIndex.
+ entry = mIndex.PutEntry(*update->Hash());
+ *entry = *update;
+ }
+ }
+
+ iter.Remove();
+ }
+
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+
+ EnsureCorrectStats();
+}
+
+bool CacheIndex::WriteIndexToDiskIfNeeded() {
+ if (mState != READY || mShuttingDown || mRWPending) {
+ return false;
+ }
+
+ if (!mLastDumpTime.IsNull() &&
+ (TimeStamp::NowLoRes() - mLastDumpTime).ToMilliseconds() <
+ kMinDumpInterval) {
+ return false;
+ }
+
+ if (mIndexStats.Dirty() < kMinUnwrittenChanges) {
+ return false;
+ }
+
+ WriteIndexToDisk();
+ return true;
+}
+
+void CacheIndex::WriteIndexToDisk() {
+ LOG(("CacheIndex::WriteIndexToDisk()"));
+ mIndexStats.Log();
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+ MOZ_ASSERT(mState == READY);
+ MOZ_ASSERT(!mRWBuf);
+ MOZ_ASSERT(!mRWHash);
+ MOZ_ASSERT(!mRWPending);
+
+ ChangeState(WRITING);
+
+ mProcessEntries = mIndexStats.ActiveEntriesCount();
+
+ mIndexFileOpener = new FileOpenHelper(this);
+ rv = CacheFileIOManager::OpenFile(
+ nsLiteralCString(TEMP_INDEX_NAME),
+ CacheFileIOManager::SPECIAL_FILE | CacheFileIOManager::CREATE,
+ mIndexFileOpener);
+ if (NS_FAILED(rv)) {
+ LOG(("CacheIndex::WriteIndexToDisk() - Can't open file [rv=0x%08" PRIx32
+ "]",
+ static_cast<uint32_t>(rv)));
+ FinishWrite(false);
+ return;
+ }
+
+ // Write the index header to a buffer; it will be written to disk together
+ // with the records in WriteRecords() once the file is opened successfully.
+ AllocBuffer();
+ mRWHash = new CacheHash();
+
+ mRWBufPos = 0;
+ // index version
+ NetworkEndian::writeUint32(mRWBuf + mRWBufPos, kIndexVersion);
+ mRWBufPos += sizeof(uint32_t);
+ // timestamp
+ NetworkEndian::writeUint32(mRWBuf + mRWBufPos,
+ static_cast<uint32_t>(PR_Now() / PR_USEC_PER_SEC));
+ mRWBufPos += sizeof(uint32_t);
+ // dirty flag
+ NetworkEndian::writeUint32(mRWBuf + mRWBufPos, 1);
+ mRWBufPos += sizeof(uint32_t);
+ // amount of data written to the cache
+ NetworkEndian::writeUint32(mRWBuf + mRWBufPos,
+ static_cast<uint32_t>(mTotalBytesWritten >> 10));
+ mRWBufPos += sizeof(uint32_t);
+
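+ // mSkipEntries tracks how many records have been serialized so far;
+ // WriteRecords() may need several passes when the index doesn't fit into
+ // the buffer.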
+ mSkipEntries = 0;
+}
+
+void CacheIndex::WriteRecords() {
+ LOG(("CacheIndex::WriteRecords()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+ MOZ_ASSERT(mState == WRITING);
+ MOZ_ASSERT(!mRWPending);
+
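+ // On the first pass the buffer still contains the header written by
+ // WriteIndexToDisk() and we write at offset 0; on later passes the buffer
+ // holds only records, so skip the header and the records written so far.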
+ int64_t fileOffset;
+
+ if (mSkipEntries) {
+ MOZ_ASSERT(mRWBufPos == 0);
+ fileOffset = sizeof(CacheIndexHeader);
+ fileOffset += sizeof(CacheIndexRecord) * mSkipEntries;
+ } else {
+ MOZ_ASSERT(mRWBufPos == sizeof(CacheIndexHeader));
+ fileOffset = 0;
+ }
+ uint32_t hashOffset = mRWBufPos;
+
+ char* buf = mRWBuf + mRWBufPos;
+ uint32_t skip = mSkipEntries;
+ uint32_t processMax = (mRWBufSize - mRWBufPos) / sizeof(CacheIndexRecord);
+ MOZ_ASSERT(processMax != 0 ||
+ mProcessEntries ==
+ 0); // TODO make sure we can write an empty index
+ uint32_t processed = 0;
+#ifdef DEBUG
+ bool hasMore = false;
+#endif
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntry* entry = iter.Get();
+ if (entry->IsRemoved() || !entry->IsInitialized() || entry->IsFileEmpty()) {
+ continue;
+ }
+
+ if (skip) {
+ skip--;
+ continue;
+ }
+
+ if (processed == processMax) {
+#ifdef DEBUG
+ hasMore = true;
+#endif
+ break;
+ }
+
+ entry->WriteToBuf(buf);
+ buf += sizeof(CacheIndexRecord);
+ processed++;
+ }
+
+ MOZ_ASSERT(mRWBufPos != static_cast<uint32_t>(buf - mRWBuf) ||
+ mProcessEntries == 0);
+ mRWBufPos = buf - mRWBuf;
+ mSkipEntries += processed;
+ MOZ_ASSERT(mSkipEntries <= mProcessEntries);
+
+ mRWHash->Update(mRWBuf + hashOffset, mRWBufPos - hashOffset);
+
+ if (mSkipEntries == mProcessEntries) {
+ MOZ_ASSERT(!hasMore);
+
+ // We've processed all records
+ if (mRWBufPos + sizeof(CacheHash::Hash32_t) > mRWBufSize) {
+ // realloc buffer to spare another write cycle
+ mRWBufSize = mRWBufPos + sizeof(CacheHash::Hash32_t);
+ mRWBuf = static_cast<char*>(moz_xrealloc(mRWBuf, mRWBufSize));
+ }
+
+ NetworkEndian::writeUint32(mRWBuf + mRWBufPos, mRWHash->GetHash());
+ mRWBufPos += sizeof(CacheHash::Hash32_t);
+ } else {
+ MOZ_ASSERT(hasMore);
+ }
+
+ rv = CacheFileIOManager::Write(mIndexHandle, fileOffset, mRWBuf, mRWBufPos,
+ mSkipEntries == mProcessEntries, false, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::WriteRecords() - CacheFileIOManager::Write() failed "
+ "synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishWrite(false);
+ } else {
+ mRWPending = true;
+ }
+
+ mRWBufPos = 0;
+}
+
+void CacheIndex::FinishWrite(bool aSucceeded) {
+ LOG(("CacheIndex::FinishWrite() [succeeded=%d]", aSucceeded));
+
+ MOZ_ASSERT((!aSucceeded && mState == SHUTDOWN) || mState == WRITING);
+
+ sLock.AssertCurrentThreadOwns();
+
+ // If there is a write operation pending, we must be cancelling writing of
+ // the index while shutting down or removing the whole index.
+ MOZ_ASSERT(!mRWPending || (!aSucceeded && (mShuttingDown || mRemovingAll)));
+
+ mIndexHandle = nullptr;
+ mRWHash = nullptr;
+ ReleaseBuffer();
+
+ if (aSucceeded) {
+ // Opening of the file must not be in progress if writing succeeded.
+ MOZ_ASSERT(!mIndexFileOpener);
+
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntry* entry = iter.Get();
+
+ bool remove = false;
+ {
+ CacheIndexEntryAutoManage emng(entry->Hash(), this);
+
+ if (entry->IsRemoved()) {
+ emng.DoNotSearchInIndex();
+ remove = true;
+ } else if (entry->IsDirty()) {
+ entry->ClearDirty();
+ }
+ }
+ if (remove) {
+ iter.Remove();
+ }
+ }
+
+ mIndexOnDiskIsValid = true;
+ } else {
+ if (mIndexFileOpener) {
+ // If opening of the file is still in progress (e.g. WRITE process was
+ // canceled by RemoveAll()) then we need to cancel the opener to make sure
+ // that OnFileOpenedInternal() won't be called.
+ mIndexFileOpener->Cancel();
+ mIndexFileOpener = nullptr;
+ }
+ }
+
+ ProcessPendingOperations();
+ mIndexStats.Log();
+
+ if (mState == WRITING) {
+ ChangeState(READY);
+ mLastDumpTime = TimeStamp::NowLoRes();
+ }
+}
+
+nsresult CacheIndex::GetFile(const nsACString& aName, nsIFile** _retval) {
+ nsresult rv;
+
+ nsCOMPtr<nsIFile> file;
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative(aName);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ file.swap(*_retval);
+ return NS_OK;
+}
+
+void CacheIndex::RemoveFile(const nsACString& aName) {
+ MOZ_ASSERT(mState == SHUTDOWN);
+
+ nsresult rv;
+
+ nsCOMPtr<nsIFile> file;
+ rv = GetFile(aName, getter_AddRefs(file));
+ NS_ENSURE_SUCCESS_VOID(rv);
+
+ rv = file->Remove(false);
+ if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
+ LOG(
+ ("CacheIndex::RemoveFile() - Cannot remove old entry file from disk "
+ "[rv=0x%08" PRIx32 ", name=%s]",
+ static_cast<uint32_t>(rv), PromiseFlatCString(aName).get()));
+ }
+}
+
+void CacheIndex::RemoveAllIndexFiles() {
+ LOG(("CacheIndex::RemoveAllIndexFiles()"));
+ RemoveFile(nsLiteralCString(INDEX_NAME));
+ RemoveJournalAndTempFile();
+}
+
+void CacheIndex::RemoveJournalAndTempFile() {
+ LOG(("CacheIndex::RemoveJournalAndTempFile()"));
+ RemoveFile(nsLiteralCString(TEMP_INDEX_NAME));
+ RemoveFile(nsLiteralCString(JOURNAL_NAME));
+}
+
+class WriteLogHelper {
+ public:
+ explicit WriteLogHelper(PRFileDesc* aFD)
+ : mFD(aFD), mBufSize(kMaxBufSize), mBufPos(0) {
+ mHash = new CacheHash();
+ mBuf = static_cast<char*>(moz_xmalloc(mBufSize));
+ }
+
+ ~WriteLogHelper() { free(mBuf); }
+
+ nsresult AddEntry(CacheIndexEntry* aEntry);
+ nsresult Finish();
+
+ private:
+ nsresult FlushBuffer();
+
+ PRFileDesc* mFD;
+ char* mBuf;
+ uint32_t mBufSize;
+ int32_t mBufPos;
+ RefPtr<CacheHash> mHash;
+};
+
+nsresult WriteLogHelper::AddEntry(CacheIndexEntry* aEntry) {
+ nsresult rv;
+
+ if (mBufPos + sizeof(CacheIndexRecord) > mBufSize) {
+ mHash->Update(mBuf, mBufPos);
+
+ rv = FlushBuffer();
+ NS_ENSURE_SUCCESS(rv, rv);
+ MOZ_ASSERT(mBufPos + sizeof(CacheIndexRecord) <= mBufSize);
+ }
+
+ aEntry->WriteToBuf(mBuf + mBufPos);
+ mBufPos += sizeof(CacheIndexRecord);
+
+ return NS_OK;
+}
+
+nsresult WriteLogHelper::Finish() {
+ nsresult rv;
+
+ mHash->Update(mBuf, mBufPos);
+ if (mBufPos + sizeof(CacheHash::Hash32_t) > mBufSize) {
+ rv = FlushBuffer();
+ NS_ENSURE_SUCCESS(rv, rv);
+ MOZ_ASSERT(mBufPos + sizeof(CacheHash::Hash32_t) <= mBufSize);
+ }
+
+ NetworkEndian::writeUint32(mBuf + mBufPos, mHash->GetHash());
+ mBufPos += sizeof(CacheHash::Hash32_t);
+
+ rv = FlushBuffer();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult WriteLogHelper::FlushBuffer() {
+ if (CacheObserver::IsPastShutdownIOLag()) {
+ LOG(("WriteLogHelper::FlushBuffer() - Interrupting writing journal."));
+ return NS_ERROR_FAILURE;
+ }
+
+ int32_t bytesWritten = PR_Write(mFD, mBuf, mBufPos);
+
+ if (bytesWritten != mBufPos) {
+ return NS_ERROR_FAILURE;
+ }
+
+ mBufPos = 0;
+ return NS_OK;
+}
+
+nsresult CacheIndex::WriteLogToDisk() {
+ LOG(("CacheIndex::WriteLogToDisk()"));
+
+ nsresult rv;
+
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+ MOZ_ASSERT(mState == SHUTDOWN);
+
+ if (CacheObserver::IsPastShutdownIOLag()) {
+ LOG(("CacheIndex::WriteLogToDisk() - Skipping writing journal."));
+ return NS_ERROR_FAILURE;
+ }
+
+ RemoveFile(nsLiteralCString(TEMP_INDEX_NAME));
+
+ nsCOMPtr<nsIFile> indexFile;
+ rv = GetFile(nsLiteralCString(INDEX_NAME), getter_AddRefs(indexFile));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> logFile;
+ rv = GetFile(nsLiteralCString(JOURNAL_NAME), getter_AddRefs(logFile));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mIndexStats.Log();
+
+ PRFileDesc* fd = nullptr;
+ rv = logFile->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 0600,
+ &fd);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ WriteLogHelper wlh(fd);
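+ // Only removed or dirty entries need to be written to the journal; all
+ // other entries are already stored correctly in the index file.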
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntry* entry = iter.Get();
+ if (entry->IsRemoved() || entry->IsDirty()) {
+ rv = wlh.AddEntry(entry);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ }
+ }
+
+ rv = wlh.Finish();
+ PR_Close(fd);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = indexFile->OpenNSPRFileDesc(PR_RDWR, 0600, &fd);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Seek to dirty flag in the index header and clear it.
+ static_assert(2 * sizeof(uint32_t) == offsetof(CacheIndexHeader, mIsDirty),
+ "Unexpected offset of CacheIndexHeader::mIsDirty");
+ int64_t offset = PR_Seek64(fd, 2 * sizeof(uint32_t), PR_SEEK_SET);
+ if (offset == -1) {
+ PR_Close(fd);
+ return NS_ERROR_FAILURE;
+ }
+
+ uint32_t isDirty = 0;
+ int32_t bytesWritten = PR_Write(fd, &isDirty, sizeof(isDirty));
+ PR_Close(fd);
+ if (bytesWritten != sizeof(isDirty)) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+void CacheIndex::ReadIndexFromDisk() {
+ LOG(("CacheIndex::ReadIndexFromDisk()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+ MOZ_ASSERT(mState == INITIAL);
+
+ ChangeState(READING);
+
+ mIndexFileOpener = new FileOpenHelper(this);
+ rv = CacheFileIOManager::OpenFile(
+ nsLiteralCString(INDEX_NAME),
+ CacheFileIOManager::SPECIAL_FILE | CacheFileIOManager::OPEN,
+ mIndexFileOpener);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::ReadIndexFromDisk() - CacheFileIOManager::OpenFile() "
+ "failed [rv=0x%08" PRIx32 ", file=%s]",
+ static_cast<uint32_t>(rv), INDEX_NAME));
+ FinishRead(false);
+ return;
+ }
+
+ mJournalFileOpener = new FileOpenHelper(this);
+ rv = CacheFileIOManager::OpenFile(
+ nsLiteralCString(JOURNAL_NAME),
+ CacheFileIOManager::SPECIAL_FILE | CacheFileIOManager::OPEN,
+ mJournalFileOpener);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::ReadIndexFromDisk() - CacheFileIOManager::OpenFile() "
+ "failed [rv=0x%08" PRIx32 ", file=%s]",
+ static_cast<uint32_t>(rv), JOURNAL_NAME));
+ FinishRead(false);
+ }
+
+ mTmpFileOpener = new FileOpenHelper(this);
+ rv = CacheFileIOManager::OpenFile(
+ nsLiteralCString(TEMP_INDEX_NAME),
+ CacheFileIOManager::SPECIAL_FILE | CacheFileIOManager::OPEN,
+ mTmpFileOpener);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::ReadIndexFromDisk() - CacheFileIOManager::OpenFile() "
+ "failed [rv=0x%08" PRIx32 ", file=%s]",
+ static_cast<uint32_t>(rv), TEMP_INDEX_NAME));
+ FinishRead(false);
+ }
+}
+
+void CacheIndex::StartReadingIndex() {
+ LOG(("CacheIndex::StartReadingIndex()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(mIndexHandle);
+ MOZ_ASSERT(mState == READING);
+ MOZ_ASSERT(!mIndexOnDiskIsValid);
+ MOZ_ASSERT(!mDontMarkIndexClean);
+ MOZ_ASSERT(!mJournalReadSuccessfully);
+ MOZ_ASSERT(mIndexHandle->FileSize() >= 0);
+ MOZ_ASSERT(!mRWPending);
+
+ int64_t entriesSize = mIndexHandle->FileSize() - sizeof(CacheIndexHeader) -
+ sizeof(CacheHash::Hash32_t);
+
+ if (entriesSize < 0 || entriesSize % sizeof(CacheIndexRecord)) {
+ LOG(("CacheIndex::StartReadingIndex() - Index is corrupted"));
+ FinishRead(false);
+ return;
+ }
+
+ AllocBuffer();
+ mSkipEntries = 0;
+ mRWHash = new CacheHash();
+
+ mRWBufPos =
+ std::min(mRWBufSize, static_cast<uint32_t>(mIndexHandle->FileSize()));
+
+ rv = CacheFileIOManager::Read(mIndexHandle, 0, mRWBuf, mRWBufPos, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::StartReadingIndex() - CacheFileIOManager::Read() failed "
+ "synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishRead(false);
+ } else {
+ mRWPending = true;
+ }
+}
+
+void CacheIndex::ParseRecords() {
+ LOG(("CacheIndex::ParseRecords()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(!mRWPending);
+
+ uint32_t entryCnt = (mIndexHandle->FileSize() - sizeof(CacheIndexHeader) -
+ sizeof(CacheHash::Hash32_t)) /
+ sizeof(CacheIndexRecord);
+ uint32_t pos = 0;
+
+ if (!mSkipEntries) {
+ if (NetworkEndian::readUint32(mRWBuf + pos) != kIndexVersion) {
+ FinishRead(false);
+ return;
+ }
+ pos += sizeof(uint32_t);
+
+ mIndexTimeStamp = NetworkEndian::readUint32(mRWBuf + pos);
+ pos += sizeof(uint32_t);
+
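+ // The third header field is the dirty flag. A dirty index means the
+ // journal cannot be trusted, so doom it; otherwise mark the on-disk index
+ // as dirty while it is in use.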
+ if (NetworkEndian::readUint32(mRWBuf + pos)) {
+ if (mJournalHandle) {
+ CacheFileIOManager::DoomFile(mJournalHandle, nullptr);
+ mJournalHandle = nullptr;
+ }
+ } else {
+ uint32_t* isDirty =
+ reinterpret_cast<uint32_t*>(moz_xmalloc(sizeof(uint32_t)));
+ NetworkEndian::writeUint32(isDirty, 1);
+
+ // Mark index dirty. The buffer is freed by CacheFileIOManager when
+ // nullptr is passed as the listener and the call doesn't fail
+ // synchronously.
+ rv = CacheFileIOManager::Write(mIndexHandle, 2 * sizeof(uint32_t),
+ reinterpret_cast<char*>(isDirty),
+ sizeof(uint32_t), true, false, nullptr);
+ if (NS_FAILED(rv)) {
+ // This is not fatal, just free the memory
+ free(isDirty);
+ }
+ }
+ pos += sizeof(uint32_t);
+
+ uint64_t dataWritten = NetworkEndian::readUint32(mRWBuf + pos);
+ pos += sizeof(uint32_t);
+ dataWritten <<= 10;
+ mTotalBytesWritten += dataWritten;
+ }
+
+ uint32_t hashOffset = pos;
+
+ while (pos + sizeof(CacheIndexRecord) <= mRWBufPos &&
+ mSkipEntries != entryCnt) {
+ CacheIndexRecord* rec = reinterpret_cast<CacheIndexRecord*>(mRWBuf + pos);
+ CacheIndexEntry tmpEntry(&rec->mHash);
+ tmpEntry.ReadFromBuf(mRWBuf + pos);
+
+ if (tmpEntry.IsDirty() || !tmpEntry.IsInitialized() ||
+ tmpEntry.IsFileEmpty() || tmpEntry.IsFresh() || tmpEntry.IsRemoved()) {
+ LOG(
+ ("CacheIndex::ParseRecords() - Invalid entry found in index, removing"
+ " whole index [dirty=%d, initialized=%d, fileEmpty=%d, fresh=%d, "
+ "removed=%d]",
+ tmpEntry.IsDirty(), tmpEntry.IsInitialized(), tmpEntry.IsFileEmpty(),
+ tmpEntry.IsFresh(), tmpEntry.IsRemoved()));
+ FinishRead(false);
+ return;
+ }
+
+ CacheIndexEntryAutoManage emng(tmpEntry.Hash(), this);
+
+ CacheIndexEntry* entry = mIndex.PutEntry(*tmpEntry.Hash());
+ *entry = tmpEntry;
+
+ pos += sizeof(CacheIndexRecord);
+ mSkipEntries++;
+ }
+
+ mRWHash->Update(mRWBuf + hashOffset, pos - hashOffset);
+
+ if (pos != mRWBufPos) {
+ memmove(mRWBuf, mRWBuf + pos, mRWBufPos - pos);
+ }
+
+ mRWBufPos -= pos;
+ pos = 0;
+
+ int64_t fileOffset = sizeof(CacheIndexHeader) +
+ mSkipEntries * sizeof(CacheIndexRecord) + mRWBufPos;
+
+ MOZ_ASSERT(fileOffset <= mIndexHandle->FileSize());
+ if (fileOffset == mIndexHandle->FileSize()) {
+ uint32_t expectedHash = NetworkEndian::readUint32(mRWBuf);
+ if (mRWHash->GetHash() != expectedHash) {
+ LOG(("CacheIndex::ParseRecords() - Hash mismatch, [is %x, should be %x]",
+ mRWHash->GetHash(), expectedHash));
+ FinishRead(false);
+ return;
+ }
+
+ mIndexOnDiskIsValid = true;
+ mJournalReadSuccessfully = false;
+
+ if (mJournalHandle) {
+ StartReadingJournal();
+ } else {
+ FinishRead(false);
+ }
+
+ return;
+ }
+
+ pos = mRWBufPos;
+ uint32_t toRead =
+ std::min(mRWBufSize - pos,
+ static_cast<uint32_t>(mIndexHandle->FileSize() - fileOffset));
+ mRWBufPos = pos + toRead;
+
+ rv = CacheFileIOManager::Read(mIndexHandle, fileOffset, mRWBuf + pos, toRead,
+ this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::ParseRecords() - CacheFileIOManager::Read() failed "
+ "synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishRead(false);
+ return;
+ }
+ mRWPending = true;
+}
+
+void CacheIndex::StartReadingJournal() {
+ LOG(("CacheIndex::StartReadingJournal()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(mJournalHandle);
+ MOZ_ASSERT(mIndexOnDiskIsValid);
+ MOZ_ASSERT(mTmpJournal.Count() == 0);
+ MOZ_ASSERT(mJournalHandle->FileSize() >= 0);
+ MOZ_ASSERT(!mRWPending);
+
+ int64_t entriesSize =
+ mJournalHandle->FileSize() - sizeof(CacheHash::Hash32_t);
+
+ if (entriesSize < 0 || entriesSize % sizeof(CacheIndexRecord)) {
+ LOG(("CacheIndex::StartReadingJournal() - Journal is corrupted"));
+ FinishRead(false);
+ return;
+ }
+
+ mSkipEntries = 0;
+ mRWHash = new CacheHash();
+
+ mRWBufPos =
+ std::min(mRWBufSize, static_cast<uint32_t>(mJournalHandle->FileSize()));
+
+ rv = CacheFileIOManager::Read(mJournalHandle, 0, mRWBuf, mRWBufPos, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::StartReadingJournal() - CacheFileIOManager::Read() failed"
+ " synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishRead(false);
+ } else {
+ mRWPending = true;
+ }
+}
+
+void CacheIndex::ParseJournal() {
+ LOG(("CacheIndex::ParseJournal()"));
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(!mRWPending);
+
+ uint32_t entryCnt =
+ (mJournalHandle->FileSize() - sizeof(CacheHash::Hash32_t)) /
+ sizeof(CacheIndexRecord);
+
+ uint32_t pos = 0;
+
+ while (pos + sizeof(CacheIndexRecord) <= mRWBufPos &&
+ mSkipEntries != entryCnt) {
+ CacheIndexEntry tmpEntry(reinterpret_cast<SHA1Sum::Hash*>(mRWBuf + pos));
+ tmpEntry.ReadFromBuf(mRWBuf + pos);
+
+ CacheIndexEntry* entry = mTmpJournal.PutEntry(*tmpEntry.Hash());
+ *entry = tmpEntry;
+
+ if (entry->IsDirty() || entry->IsFresh()) {
+ LOG(
+ ("CacheIndex::ParseJournal() - Invalid entry found in journal, "
+ "ignoring whole journal [dirty=%d, fresh=%d]",
+ entry->IsDirty(), entry->IsFresh()));
+ FinishRead(false);
+ return;
+ }
+
+ pos += sizeof(CacheIndexRecord);
+ mSkipEntries++;
+ }
+
+ mRWHash->Update(mRWBuf, pos);
+
+ if (pos != mRWBufPos) {
+ memmove(mRWBuf, mRWBuf + pos, mRWBufPos - pos);
+ }
+
+ mRWBufPos -= pos;
+ pos = 0;
+
+ int64_t fileOffset = mSkipEntries * sizeof(CacheIndexRecord) + mRWBufPos;
+
+ MOZ_ASSERT(fileOffset <= mJournalHandle->FileSize());
+ if (fileOffset == mJournalHandle->FileSize()) {
+ uint32_t expectedHash = NetworkEndian::readUint32(mRWBuf);
+ if (mRWHash->GetHash() != expectedHash) {
+ LOG(("CacheIndex::ParseJournal() - Hash mismatch, [is %x, should be %x]",
+ mRWHash->GetHash(), expectedHash));
+ FinishRead(false);
+ return;
+ }
+
+ mJournalReadSuccessfully = true;
+ FinishRead(true);
+ return;
+ }
+
+ pos = mRWBufPos;
+ uint32_t toRead =
+ std::min(mRWBufSize - pos,
+ static_cast<uint32_t>(mJournalHandle->FileSize() - fileOffset));
+ mRWBufPos = pos + toRead;
+
+ rv = CacheFileIOManager::Read(mJournalHandle, fileOffset, mRWBuf + pos,
+ toRead, this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::ParseJournal() - CacheFileIOManager::Read() failed "
+ "synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishRead(false);
+ return;
+ }
+ mRWPending = true;
+}
+
+void CacheIndex::MergeJournal() {
+ LOG(("CacheIndex::MergeJournal()"));
+
+ sLock.AssertCurrentThreadOwns();
+
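+ // Replay the journal on top of the in-memory index: removed entries mark
+ // the corresponding index entry as removed, all others overwrite it.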
+ for (auto iter = mTmpJournal.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntry* entry = iter.Get();
+
+ LOG(("CacheIndex::MergeJournal() [hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(entry->Hash())));
+
+ CacheIndexEntry* entry2 = mIndex.GetEntry(*entry->Hash());
+ {
+ CacheIndexEntryAutoManage emng(entry->Hash(), this);
+ if (entry->IsRemoved()) {
+ if (entry2) {
+ entry2->MarkRemoved();
+ entry2->MarkDirty();
+ }
+ } else {
+ if (!entry2) {
+ entry2 = mIndex.PutEntry(*entry->Hash());
+ }
+
+ *entry2 = *entry;
+ entry2->MarkDirty();
+ }
+ }
+ iter.Remove();
+ }
+
+ MOZ_ASSERT(mTmpJournal.Count() == 0);
+}
+
+void CacheIndex::EnsureNoFreshEntry() {
+#ifdef DEBUG_STATS
+ CacheIndexStats debugStats;
+ debugStats.DisableLogging();
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ debugStats.BeforeChange(nullptr);
+ debugStats.AfterChange(iter.Get());
+ }
+ MOZ_ASSERT(debugStats.Fresh() == 0);
+#endif
+}
+
+void CacheIndex::EnsureCorrectStats() {
+#ifdef DEBUG_STATS
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+ CacheIndexStats debugStats;
+ debugStats.DisableLogging();
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ debugStats.BeforeChange(nullptr);
+ debugStats.AfterChange(iter.Get());
+ }
+ MOZ_ASSERT(debugStats == mIndexStats);
+#endif
+}
+
+void CacheIndex::FinishRead(bool aSucceeded) {
+ LOG(("CacheIndex::FinishRead() [succeeded=%d]", aSucceeded));
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT((!aSucceeded && mState == SHUTDOWN) || mState == READING);
+
+ MOZ_ASSERT(
+ // -> rebuild
+ (!aSucceeded && !mIndexOnDiskIsValid && !mJournalReadSuccessfully) ||
+ // -> update
+ (!aSucceeded && mIndexOnDiskIsValid && !mJournalReadSuccessfully) ||
+ // -> ready
+ (aSucceeded && mIndexOnDiskIsValid && mJournalReadSuccessfully));
+
+ // If there is a read operation pending, we must be cancelling reading of
+ // the index while shutting down or removing the whole index.
+ MOZ_ASSERT(!mRWPending || (!aSucceeded && (mShuttingDown || mRemovingAll)));
+
+ if (mState == SHUTDOWN) {
+ RemoveFile(nsLiteralCString(TEMP_INDEX_NAME));
+ RemoveFile(nsLiteralCString(JOURNAL_NAME));
+ } else {
+ if (mIndexHandle && !mIndexOnDiskIsValid) {
+ CacheFileIOManager::DoomFile(mIndexHandle, nullptr);
+ }
+
+ if (mJournalHandle) {
+ CacheFileIOManager::DoomFile(mJournalHandle, nullptr);
+ }
+ }
+
+ if (mIndexFileOpener) {
+ mIndexFileOpener->Cancel();
+ mIndexFileOpener = nullptr;
+ }
+ if (mJournalFileOpener) {
+ mJournalFileOpener->Cancel();
+ mJournalFileOpener = nullptr;
+ }
+ if (mTmpFileOpener) {
+ mTmpFileOpener->Cancel();
+ mTmpFileOpener = nullptr;
+ }
+
+ mIndexHandle = nullptr;
+ mJournalHandle = nullptr;
+ mRWHash = nullptr;
+ ReleaseBuffer();
+
+ if (mState == SHUTDOWN) {
+ return;
+ }
+
+ if (!mIndexOnDiskIsValid) {
+ MOZ_ASSERT(mTmpJournal.Count() == 0);
+ EnsureNoFreshEntry();
+ ProcessPendingOperations();
+ // Remove all entries that we haven't seen during this session
+ RemoveNonFreshEntries();
+ StartUpdatingIndex(true);
+ return;
+ }
+
+ if (!mJournalReadSuccessfully) {
+ mTmpJournal.Clear();
+ EnsureNoFreshEntry();
+ ProcessPendingOperations();
+ StartUpdatingIndex(false);
+ return;
+ }
+
+ MergeJournal();
+ EnsureNoFreshEntry();
+ ProcessPendingOperations();
+ mIndexStats.Log();
+
+ ChangeState(READY);
+ mLastDumpTime = TimeStamp::NowLoRes(); // Do not dump new index immediately
+}
+
+// static
+void CacheIndex::DelayedUpdate(nsITimer* aTimer, void* aClosure) {
+ LOG(("CacheIndex::DelayedUpdate()"));
+
+ StaticMutexAutoLock lock(sLock);
+ RefPtr<CacheIndex> index = gInstance;
+
+ if (!index) {
+ return;
+ }
+
+ index->DelayedUpdateLocked();
+}
+
+void CacheIndex::DelayedUpdateLocked() {
+ LOG(("CacheIndex::DelayedUpdateLocked()"));
+
+ sLock.AssertCurrentThreadOwns();
+
+ nsresult rv;
+
+ mUpdateTimer = nullptr;
+
+ if (!IsIndexUsable()) {
+ return;
+ }
+
+ if (mState == READY && mShuttingDown) {
+ return;
+ }
+
+ // mUpdateEventPending must be false here since StartUpdatingIndex() won't
+ // schedule the timer if it is true.
+ MOZ_ASSERT(!mUpdateEventPending);
+ if (mState != BUILDING && mState != UPDATING) {
+ LOG(("CacheIndex::DelayedUpdateLocked() - Update was canceled"));
+ return;
+ }
+
+ // We need to redispatch to run with lower priority
+ RefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
+ MOZ_ASSERT(ioThread);
+
+ mUpdateEventPending = true;
+ rv = ioThread->Dispatch(this, CacheIOThread::INDEX);
+ if (NS_FAILED(rv)) {
+ mUpdateEventPending = false;
+ NS_WARNING("CacheIndex::DelayedUpdateLocked() - Can't dispatch event");
+ LOG(("CacheIndex::DelayedUpdate() - Can't dispatch event"));
+ FinishUpdate(false);
+ }
+}
+
+nsresult CacheIndex::ScheduleUpdateTimer(uint32_t aDelay) {
+ LOG(("CacheIndex::ScheduleUpdateTimer() [delay=%u]", aDelay));
+
+ MOZ_ASSERT(!mUpdateTimer);
+
+ nsCOMPtr<nsIEventTarget> ioTarget = CacheFileIOManager::IOTarget();
+ MOZ_ASSERT(ioTarget);
+
+ return NS_NewTimerWithFuncCallback(
+ getter_AddRefs(mUpdateTimer), CacheIndex::DelayedUpdate, nullptr, aDelay,
+ nsITimer::TYPE_ONE_SHOT, "net::CacheIndex::ScheduleUpdateTimer",
+ ioTarget);
+}
+
+nsresult CacheIndex::SetupDirectoryEnumerator() {
+ MOZ_ASSERT(!NS_IsMainThread());
+ MOZ_ASSERT(!mDirEnumerator);
+
+ nsresult rv;
+ nsCOMPtr<nsIFile> file;
+
+ rv = mCacheDirectory->Clone(getter_AddRefs(file));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = file->AppendNative(nsLiteralCString(ENTRIES_DIR));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool exists;
+ rv = file->Exists(&exists);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!exists) {
+ NS_WARNING(
+ "CacheIndex::SetupDirectoryEnumerator() - Entries directory "
+ "doesn't exist!");
+ LOG(
+ ("CacheIndex::SetupDirectoryEnumerator() - Entries directory doesn't "
+ "exist!"));
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ rv = file->GetDirectoryEntries(getter_AddRefs(mDirEnumerator));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult CacheIndex::InitEntryFromDiskData(CacheIndexEntry* aEntry,
+ CacheFileMetadata* aMetaData,
+ int64_t aFileSize) {
+ nsresult rv;
+
+ aEntry->InitNew();
+ aEntry->MarkDirty();
+ aEntry->MarkFresh();
+
+ aEntry->Init(GetOriginAttrsHash(aMetaData->OriginAttributes()),
+ aMetaData->IsAnonymous(), aMetaData->Pinned());
+
+ aEntry->SetFrecency(aMetaData->GetFrecency());
+
+ const char* altData = aMetaData->GetElement(CacheFileUtils::kAltDataKey);
+ bool hasAltData = altData ? true : false;
+ if (hasAltData && NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
+ altData, nullptr, nullptr))) {
+ return NS_ERROR_FAILURE;
+ }
+ aEntry->SetHasAltData(hasAltData);
+
+ static auto toUint16 = [](const char* aUint16String) -> uint16_t {
+ if (!aUint16String) {
+ return kIndexTimeNotAvailable;
+ }
+ nsresult rv;
+ uint64_t n64 = nsDependentCString(aUint16String).ToInteger64(&rv);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
+ };
+
+ aEntry->SetOnStartTime(
+ toUint16(aMetaData->GetElement("net-response-time-onstart")));
+ aEntry->SetOnStopTime(
+ toUint16(aMetaData->GetElement("net-response-time-onstop")));
+
+ const char* contentTypeStr = aMetaData->GetElement("ctid");
+ uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ if (contentTypeStr) {
+ int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
+ if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
+ n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
+ n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ }
+ contentType = n64;
+ }
+ aEntry->SetContentType(contentType);
+
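+ // File sizes are kept in kB in the index; round up to the next kB and
+ // clamp the value so it fits into 32 bits.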
+ aEntry->SetFileSize(static_cast<uint32_t>(std::min(
+ static_cast<int64_t>(PR_UINT32_MAX), (aFileSize + 0x3FF) >> 10)));
+ return NS_OK;
+}
+
+bool CacheIndex::IsUpdatePending() {
+ sLock.AssertCurrentThreadOwns();
+
+ if (mUpdateTimer || mUpdateEventPending) {
+ return true;
+ }
+
+ return false;
+}
+
+void CacheIndex::BuildIndex() {
+ LOG(("CacheIndex::BuildIndex()"));
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+
+ nsresult rv;
+
+ if (!mDirEnumerator) {
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = SetupDirectoryEnumerator();
+ }
+ if (mState == SHUTDOWN) {
+ // The index was shut down while we released the lock. FinishUpdate() was
+ // already called from Shutdown(), so simply return here.
+ return;
+ }
+
+ if (NS_FAILED(rv)) {
+ FinishUpdate(false);
+ return;
+ }
+ }
+
+ while (true) {
+ if (CacheIOThread::YieldAndRerun()) {
+ LOG((
+ "CacheIndex::BuildIndex() - Breaking loop for higher level events."));
+ mUpdateEventPending = true;
+ return;
+ }
+
+ bool fileExists = false;
+ nsCOMPtr<nsIFile> file;
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = mDirEnumerator->GetNextFile(getter_AddRefs(file));
+
+ if (file) {
+ file->Exists(&fileExists);
+ }
+ }
+ if (mState == SHUTDOWN) {
+ return;
+ }
+ if (!file) {
+ FinishUpdate(NS_SUCCEEDED(rv));
+ return;
+ }
+
+ nsAutoCString leaf;
+ rv = file->GetNativeLeafName(leaf);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::BuildIndex() - GetNativeLeafName() failed! Skipping "
+ "file."));
+ mDontMarkIndexClean = true;
+ continue;
+ }
+
+ if (!fileExists) {
+ LOG(
+ ("CacheIndex::BuildIndex() - File returned by the iterator was "
+ "removed in the meantime [name=%s]",
+ leaf.get()));
+ continue;
+ }
+
+ SHA1Sum::Hash hash;
+ rv = CacheFileIOManager::StrToHash(leaf, &hash);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::BuildIndex() - Filename is not a hash, removing file. "
+ "[name=%s]",
+ leaf.get()));
+ file->Remove(false);
+ continue;
+ }
+
+ CacheIndexEntry* entry = mIndex.GetEntry(hash);
+ if (entry && entry->IsRemoved()) {
+ LOG(
+ ("CacheIndex::BuildIndex() - Found file that should not exist. "
+ "[name=%s]",
+ leaf.get()));
+ entry->Log();
+ MOZ_ASSERT(entry->IsFresh());
+ entry = nullptr;
+ }
+
+#ifdef DEBUG
+ RefPtr<CacheFileHandle> handle;
+ CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
+ getter_AddRefs(handle));
+#endif
+
+ if (entry) {
+ // the entry is up to date
+ LOG(
+ ("CacheIndex::BuildIndex() - Skipping file because the entry is up to"
+ " date. [name=%s]",
+ leaf.get()));
+ entry->Log();
+ MOZ_ASSERT(entry->IsFresh()); // The entry must be from this session
+ // there must be an active CacheFile if the entry is not initialized
+ MOZ_ASSERT(entry->IsInitialized() || handle);
+ continue;
+ }
+
+ MOZ_ASSERT(!handle);
+
+ RefPtr<CacheFileMetadata> meta = new CacheFileMetadata();
+ int64_t size = 0;
+
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = meta->SyncReadMetadata(file);
+
+ if (NS_SUCCEEDED(rv)) {
+ rv = file->GetFileSize(&size);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::BuildIndex() - Cannot get filesize of file that was"
+ " successfully parsed. [name=%s]",
+ leaf.get()));
+ }
+ }
+ }
+ if (mState == SHUTDOWN) {
+ return;
+ }
+
+ // Nobody could add the entry while the lock was released since we modify
+ // the index only on the IO thread and this loop also runs on the IO thread.
+ entry = mIndex.GetEntry(hash);
+ MOZ_ASSERT(!entry || entry->IsRemoved());
+
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::BuildIndex() - CacheFileMetadata::SyncReadMetadata() "
+ "failed, removing file. [name=%s]",
+ leaf.get()));
+ file->Remove(false);
+ } else {
+ CacheIndexEntryAutoManage entryMng(&hash, this);
+ entry = mIndex.PutEntry(hash);
+ if (NS_FAILED(InitEntryFromDiskData(entry, meta, size))) {
+ LOG(
+ ("CacheIndex::BuildIndex() - CacheFile::InitEntryFromDiskData() "
+ "failed, removing file. [name=%s]",
+ leaf.get()));
+ file->Remove(false);
+ entry->MarkRemoved();
+ } else {
+ LOG(("CacheIndex::BuildIndex() - Added entry to index. [name=%s]",
+ leaf.get()));
+ entry->Log();
+ }
+ }
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+}
+
+bool CacheIndex::StartUpdatingIndexIfNeeded(bool aSwitchingToReadyState) {
+ // Start updating process when we are in or we are switching to READY state
+ // and index needs update, but not during shutdown or when removing all
+ // entries.
+ if ((mState == READY || aSwitchingToReadyState) && mIndexNeedsUpdate &&
+ !mShuttingDown && !mRemovingAll) {
+ LOG(("CacheIndex::StartUpdatingIndexIfNeeded() - starting update process"));
+ mIndexNeedsUpdate = false;
+ StartUpdatingIndex(false);
+ return true;
+ }
+
+ return false;
+}
+
+void CacheIndex::StartUpdatingIndex(bool aRebuild) {
+ LOG(("CacheIndex::StartUpdatingIndex() [rebuild=%d]", aRebuild));
+
+ sLock.AssertCurrentThreadOwns();
+
+ nsresult rv;
+
+ mIndexStats.Log();
+
+ ChangeState(aRebuild ? BUILDING : UPDATING);
+ mDontMarkIndexClean = false;
+
+ if (mShuttingDown || mRemovingAll) {
+ FinishUpdate(false);
+ return;
+ }
+
+ if (IsUpdatePending()) {
+ LOG(("CacheIndex::StartUpdatingIndex() - Update is already pending"));
+ return;
+ }
+
+ uint32_t elapsed = (TimeStamp::NowLoRes() - mStartTime).ToMilliseconds();
+ if (elapsed < kUpdateIndexStartDelay) {
+ LOG(
+ ("CacheIndex::StartUpdatingIndex() - %u ms elapsed since startup, "
+ "scheduling timer to fire in %u ms.",
+ elapsed, kUpdateIndexStartDelay - elapsed));
+ rv = ScheduleUpdateTimer(kUpdateIndexStartDelay - elapsed);
+ if (NS_SUCCEEDED(rv)) {
+ return;
+ }
+
+ LOG(
+ ("CacheIndex::StartUpdatingIndex() - ScheduleUpdateTimer() failed. "
+ "Starting update immediately."));
+ } else {
+ LOG(
+ ("CacheIndex::StartUpdatingIndex() - %u ms elapsed since startup, "
+ "starting update now.",
+ elapsed));
+ }
+
+ RefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
+ MOZ_ASSERT(ioThread);
+
+ // We need to dispatch an event even if we are on IO thread since we need to
+ // update the index with the correct priority.
+ mUpdateEventPending = true;
+ rv = ioThread->Dispatch(this, CacheIOThread::INDEX);
+ if (NS_FAILED(rv)) {
+ mUpdateEventPending = false;
+ NS_WARNING("CacheIndex::StartUpdatingIndex() - Can't dispatch event");
+ LOG(("CacheIndex::StartUpdatingIndex() - Can't dispatch event"));
+ FinishUpdate(false);
+ }
+}
+
+void CacheIndex::UpdateIndex() {
+ LOG(("CacheIndex::UpdateIndex()"));
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+
+ nsresult rv;
+
+ if (!mDirEnumerator) {
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = SetupDirectoryEnumerator();
+ }
+ if (mState == SHUTDOWN) {
+ // The index was shut down while we released the lock. FinishUpdate() was
+ // already called from Shutdown(), so simply return here.
+ return;
+ }
+
+ if (NS_FAILED(rv)) {
+ FinishUpdate(false);
+ return;
+ }
+ }
+
+ while (true) {
+ if (CacheIOThread::YieldAndRerun()) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Breaking loop for higher level "
+ "events."));
+ mUpdateEventPending = true;
+ return;
+ }
+
+ bool fileExists = false;
+ nsCOMPtr<nsIFile> file;
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = mDirEnumerator->GetNextFile(getter_AddRefs(file));
+
+ if (file) {
+ file->Exists(&fileExists);
+ }
+ }
+ if (mState == SHUTDOWN) {
+ return;
+ }
+ if (!file) {
+ FinishUpdate(NS_SUCCEEDED(rv));
+ return;
+ }
+
+ nsAutoCString leaf;
+ rv = file->GetNativeLeafName(leaf);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - GetNativeLeafName() failed! Skipping "
+ "file."));
+ mDontMarkIndexClean = true;
+ continue;
+ }
+
+ if (!fileExists) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - File returned by the iterator was "
+ "removed in the meantime [name=%s]",
+ leaf.get()));
+ continue;
+ }
+
+ SHA1Sum::Hash hash;
+ rv = CacheFileIOManager::StrToHash(leaf, &hash);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Filename is not a hash, removing file. "
+ "[name=%s]",
+ leaf.get()));
+ file->Remove(false);
+ continue;
+ }
+
+ CacheIndexEntry* entry = mIndex.GetEntry(hash);
+ if (entry && entry->IsRemoved()) {
+ if (entry->IsFresh()) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Found file that should not exist. "
+ "[name=%s]",
+ leaf.get()));
+ entry->Log();
+ }
+ entry = nullptr;
+ }
+
+#ifdef DEBUG
+ RefPtr<CacheFileHandle> handle;
+ CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
+ getter_AddRefs(handle));
+#endif
+
+ if (entry && entry->IsFresh()) {
+ // the entry is up to date
+ LOG(
+ ("CacheIndex::UpdateIndex() - Skipping file because the entry is up "
+ "to date. [name=%s]",
+ leaf.get()));
+ entry->Log();
+ // there must be an active CacheFile if the entry is not initialized
+ MOZ_ASSERT(entry->IsInitialized() || handle);
+ continue;
+ }
+
+ MOZ_ASSERT(!handle);
+
+ if (entry) {
+ PRTime lastModifiedTime;
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = file->GetLastModifiedTime(&lastModifiedTime);
+ }
+ if (mState == SHUTDOWN) {
+ return;
+ }
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Cannot get lastModifiedTime. "
+ "[name=%s]",
+ leaf.get()));
+ // Assume the file is newer than the index
+ } else {
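+ // mIndexTimeStamp is stored in seconds while lastModifiedTime is in
+ // milliseconds. A file that was not modified since the index was written
+ // still matches the stored data and only needs to be marked fresh.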
+ if (mIndexTimeStamp > (lastModifiedTime / PR_MSEC_PER_SEC)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Skipping file because of last "
+ "modified time. [name=%s, indexTimeStamp=%" PRIu32 ", "
+ "lastModifiedTime=%" PRId64 "]",
+ leaf.get(), mIndexTimeStamp,
+ lastModifiedTime / PR_MSEC_PER_SEC));
+
+ CacheIndexEntryAutoManage entryMng(&hash, this);
+ entry->MarkFresh();
+ continue;
+ }
+ }
+ }
+
+ RefPtr<CacheFileMetadata> meta = new CacheFileMetadata();
+ int64_t size = 0;
+
+ {
+ // Do not do IO under the lock.
+ StaticMutexAutoUnlock unlock(sLock);
+ rv = meta->SyncReadMetadata(file);
+
+ if (NS_SUCCEEDED(rv)) {
+ rv = file->GetFileSize(&size);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Cannot get filesize of file that "
+ "was successfully parsed. [name=%s]",
+ leaf.get()));
+ }
+ }
+ }
+ if (mState == SHUTDOWN) {
+ return;
+ }
+
+ // Nobody could add the entry while the lock was released since we modify
+ // the index only on the IO thread and this loop also runs on the IO thread.
+ entry = mIndex.GetEntry(hash);
+ MOZ_ASSERT(!entry || !entry->IsFresh());
+
+ CacheIndexEntryAutoManage entryMng(&hash, this);
+
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - CacheFileMetadata::SyncReadMetadata() "
+ "failed, removing file. [name=%s]",
+ leaf.get()));
+ } else {
+ entry = mIndex.PutEntry(hash);
+ rv = InitEntryFromDiskData(entry, meta, size);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::UpdateIndex() - CacheIndex::InitEntryFromDiskData "
+ "failed, removing file. [name=%s]",
+ leaf.get()));
+ }
+ }
+
+ if (NS_FAILED(rv)) {
+ file->Remove(false);
+ if (entry) {
+ entry->MarkRemoved();
+ entry->MarkFresh();
+ entry->MarkDirty();
+ }
+ } else {
+ LOG(
+ ("CacheIndex::UpdateIndex() - Added/updated entry to/in index. "
+ "[name=%s]",
+ leaf.get()));
+ entry->Log();
+ }
+ }
+
+ MOZ_ASSERT_UNREACHABLE("We should never get here");
+}
+
+void CacheIndex::FinishUpdate(bool aSucceeded) {
+ LOG(("CacheIndex::FinishUpdate() [succeeded=%d]", aSucceeded));
+
+ MOZ_ASSERT(mState == UPDATING || mState == BUILDING ||
+ (!aSucceeded && mState == SHUTDOWN));
+
+ sLock.AssertCurrentThreadOwns();
+
+ if (mDirEnumerator) {
+ if (NS_IsMainThread()) {
+ LOG(
+ ("CacheIndex::FinishUpdate() - posting of PreShutdownInternal failed?"
+ " Cannot safely release mDirEnumerator, leaking it!"));
+ NS_WARNING(("CacheIndex::FinishUpdate() - Leaking mDirEnumerator!"));
+ // This can happen only in case dispatching event to IO thread failed in
+ // CacheIndex::PreShutdown().
+ Unused << mDirEnumerator.forget(); // Leak it since dir enumerator is not
+ // thread-safe
+ } else {
+ mDirEnumerator->Close();
+ mDirEnumerator = nullptr;
+ }
+ }
+
+ if (!aSucceeded) {
+ mDontMarkIndexClean = true;
+ }
+
+ if (mState == SHUTDOWN) {
+ return;
+ }
+
+ if (mState == UPDATING && aSucceeded) {
+ // If we've iterated over all entries successfully then all entries that
+ // really exist on the disk are now marked as fresh. All non-fresh entries
+ // don't exist anymore and must be removed from the index.
+ RemoveNonFreshEntries();
+ }
+
+ // Make sure we won't start an update. If the build or update failed, there
+ // is no reason to believe that it will succeed next time.
+ mIndexNeedsUpdate = false;
+
+ ChangeState(READY);
+ mLastDumpTime = TimeStamp::NowLoRes(); // Do not dump new index immediately
+}
+
+void CacheIndex::RemoveNonFreshEntries() {
+ for (auto iter = mIndex.Iter(); !iter.Done(); iter.Next()) {
+ CacheIndexEntry* entry = iter.Get();
+ if (entry->IsFresh()) {
+ continue;
+ }
+
+ LOG(
+ ("CacheIndex::RemoveNonFreshEntries() - Removing entry. "
+ "[hash=%08x%08x%08x%08x%08x]",
+ LOGSHA1(entry->Hash())));
+
+ {
+ CacheIndexEntryAutoManage emng(entry->Hash(), this);
+ emng.DoNotSearchInIndex();
+ }
+
+ iter.Remove();
+ }
+}
+
+// static
+char const* CacheIndex::StateString(EState aState) {
+ switch (aState) {
+ case INITIAL:
+ return "INITIAL";
+ case READING:
+ return "READING";
+ case WRITING:
+ return "WRITING";
+ case BUILDING:
+ return "BUILDING";
+ case UPDATING:
+ return "UPDATING";
+ case READY:
+ return "READY";
+ case SHUTDOWN:
+ return "SHUTDOWN";
+ }
+
+ MOZ_ASSERT(false, "Unexpected state!");
+ return "?";
+}
+
+void CacheIndex::ChangeState(EState aNewState) {
+ LOG(("CacheIndex::ChangeState() changing state %s -> %s", StateString(mState),
+ StateString(aNewState)));
+
+ // All pending updates should be processed before changing state
+ MOZ_ASSERT(mPendingUpdates.Count() == 0);
+
+ // PreShutdownInternal() should change the state to READY from every state.
+ // It may go through different states, but once we are in the READY state
+ // the only possible transition is to the SHUTDOWN state.
+ MOZ_ASSERT(!mShuttingDown || mState != READY || aNewState == SHUTDOWN);
+
+ // Start updating process when switching to READY state if needed
+ if (aNewState == READY && StartUpdatingIndexIfNeeded(true)) {
+ return;
+ }
+
+ // Try to evict entries over the limit every time we're leaving the READING,
+ // BUILDING or UPDATING state, but not during shutdown or when removing all
+ // entries.
+ if (!mShuttingDown && !mRemovingAll && aNewState != SHUTDOWN &&
+ (mState == READING || mState == BUILDING || mState == UPDATING)) {
+ CacheFileIOManager::EvictIfOverLimit();
+ }
+
+ mState = aNewState;
+
+ if (mState != SHUTDOWN) {
+ CacheFileIOManager::CacheIndexStateChanged();
+ }
+
+ NotifyAsyncGetDiskConsumptionCallbacks();
+}
+
+void CacheIndex::NotifyAsyncGetDiskConsumptionCallbacks() {
+ if ((mState == READY || mState == WRITING) &&
+ !mAsyncGetDiskConsumptionBlocked && mDiskConsumptionObservers.Length()) {
+ for (uint32_t i = 0; i < mDiskConsumptionObservers.Length(); ++i) {
+ DiskConsumptionObserver* o = mDiskConsumptionObservers[i];
+ // Safe to call under the lock. We always post to the main thread.
+ o->OnDiskConsumption(mIndexStats.Size() << 10);
+ }
+
+ mDiskConsumptionObservers.Clear();
+ }
+}
+
+void CacheIndex::AllocBuffer() {
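+ // When writing, size the buffer for the whole index (header + hash +
+ // records) but cap it at kMaxBufSize; WriteRecords() then writes the data
+ // in multiple passes.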
+ switch (mState) {
+ case WRITING:
+ mRWBufSize = sizeof(CacheIndexHeader) + sizeof(CacheHash::Hash32_t) +
+ mProcessEntries * sizeof(CacheIndexRecord);
+ if (mRWBufSize > kMaxBufSize) {
+ mRWBufSize = kMaxBufSize;
+ }
+ break;
+ case READING:
+ mRWBufSize = kMaxBufSize;
+ break;
+ default:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ mRWBuf = static_cast<char*>(moz_xmalloc(mRWBufSize));
+}
+
+void CacheIndex::ReleaseBuffer() {
+ sLock.AssertCurrentThreadOwns();
+
+ if (!mRWBuf || mRWPending) {
+ return;
+ }
+
+ LOG(("CacheIndex::ReleaseBuffer() releasing buffer"));
+
+ free(mRWBuf);
+ mRWBuf = nullptr;
+ mRWBufSize = 0;
+ mRWBufPos = 0;
+}
+
+void CacheIndex::FrecencyArray::AppendRecord(CacheIndexRecord* aRecord) {
+ LOG(
+ ("CacheIndex::FrecencyArray::AppendRecord() [record=%p, hash=%08x%08x%08x"
+ "%08x%08x]",
+ aRecord, LOGSHA1(aRecord->mHash)));
+
+ MOZ_ASSERT(!mRecs.Contains(aRecord));
+ mRecs.AppendElement(aRecord);
+
+ // If the new frecency is 0, the element should be at the end of the array,
+  // i.e. this change doesn't affect the order of the array.
+ if (aRecord->mFrecency != 0) {
+ ++mUnsortedElements;
+ }
+}
+
+void CacheIndex::FrecencyArray::RemoveRecord(CacheIndexRecord* aRecord) {
+ LOG(("CacheIndex::FrecencyArray::RemoveRecord() [record=%p]", aRecord));
+
+ decltype(mRecs)::index_type idx;
+ idx = mRecs.IndexOf(aRecord);
+ MOZ_RELEASE_ASSERT(idx != mRecs.NoIndex);
+ mRecs[idx] = nullptr;
+ ++mRemovedElements;
+
+ // Calling SortIfNeeded ensures that we get rid of removed elements in the
+ // array once we hit the limit.
+ SortIfNeeded();
+}
+
+void CacheIndex::FrecencyArray::ReplaceRecord(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord) {
+ LOG(
+ ("CacheIndex::FrecencyArray::ReplaceRecord() [oldRecord=%p, "
+ "newRecord=%p]",
+ aOldRecord, aNewRecord));
+
+ decltype(mRecs)::index_type idx;
+ idx = mRecs.IndexOf(aOldRecord);
+ MOZ_RELEASE_ASSERT(idx != mRecs.NoIndex);
+ mRecs[idx] = aNewRecord;
+}
+
+bool CacheIndex::FrecencyArray::RecordExisted(CacheIndexRecord* aRecord) {
+ return mRecs.Contains(aRecord);
+}
+
+void CacheIndex::FrecencyArray::SortIfNeeded() {
+ const uint32_t kMaxUnsortedCount = 512;
+ const uint32_t kMaxUnsortedPercent = 10;
+ const uint32_t kMaxRemovedCount = 512;
+
+ uint32_t unsortedLimit = std::min<uint32_t>(
+ kMaxUnsortedCount, Length() * kMaxUnsortedPercent / 100);
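+  // E.g. with 10000 records the limit is min(512, 1000) = 512, while with
+  // 1000 records it is min(512, 100) = 100.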
+
+ if (mUnsortedElements > unsortedLimit ||
+ mRemovedElements > kMaxRemovedCount) {
+ LOG(
+ ("CacheIndex::FrecencyArray::SortIfNeeded() - Sorting array "
+ "[unsortedElements=%u, unsortedLimit=%u, removedElements=%u, "
+ "maxRemovedCount=%u]",
+ mUnsortedElements, unsortedLimit, mRemovedElements, kMaxRemovedCount));
+
+ mRecs.Sort(FrecencyComparator());
+ mUnsortedElements = 0;
+ if (mRemovedElements) {
+#ifdef DEBUG
+ for (uint32_t i = Length(); i < mRecs.Length(); ++i) {
+ MOZ_ASSERT(!mRecs[i]);
+ }
+#endif
+ // Removed elements are at the end after sorting.
+ mRecs.RemoveElementsAt(Length(), mRemovedElements);
+ mRemovedElements = 0;
+ }
+ }
+}
+
+void CacheIndex::AddRecordToIterators(CacheIndexRecord* aRecord) {
+ sLock.AssertCurrentThreadOwns();
+
+ for (uint32_t i = 0; i < mIterators.Length(); ++i) {
+    // Add a new record only when the iterator is supposed to be updated.
+ if (mIterators[i]->ShouldBeNewAdded()) {
+ mIterators[i]->AddRecord(aRecord);
+ }
+ }
+}
+
+void CacheIndex::RemoveRecordFromIterators(CacheIndexRecord* aRecord) {
+ sLock.AssertCurrentThreadOwns();
+
+ for (uint32_t i = 0; i < mIterators.Length(); ++i) {
+    // Always remove the record from the iterator; it makes no sense to return
+    // non-existing entries. Also, the pointer to the record is no longer valid
+    // once the entry is removed from the index.
+ mIterators[i]->RemoveRecord(aRecord);
+ }
+}
+
+void CacheIndex::ReplaceRecordInIterators(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord) {
+ sLock.AssertCurrentThreadOwns();
+
+ for (uint32_t i = 0; i < mIterators.Length(); ++i) {
+    // We always have to replace the record since the pointer is no longer
+    // valid after this point. NOTE: Replacing the record doesn't mean that
+    // a new entry was added, it just means that the data in the entry was
+    // changed (e.g. the file size) and we had to track this change in
+    // mPendingUpdates since mIndex was read-only.
+ mIterators[i]->ReplaceRecord(aOldRecord, aNewRecord);
+ }
+}
+
+nsresult CacheIndex::Run() {
+ LOG(("CacheIndex::Run()"));
+
+ StaticMutexAutoLock lock(sLock);
+
+ if (!IsIndexUsable()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mState == READY && mShuttingDown) {
+ return NS_OK;
+ }
+
+ mUpdateEventPending = false;
+
+ switch (mState) {
+ case BUILDING:
+ BuildIndex();
+ break;
+ case UPDATING:
+ UpdateIndex();
+ break;
+ default:
+ LOG(("CacheIndex::Run() - Update/Build was canceled"));
+ }
+
+ return NS_OK;
+}
+
+void CacheIndex::OnFileOpenedInternal(FileOpenHelper* aOpener,
+ CacheFileHandle* aHandle,
+ nsresult aResult) {
+ LOG(
+ ("CacheIndex::OnFileOpenedInternal() [opener=%p, handle=%p, "
+ "result=0x%08" PRIx32 "]",
+ aOpener, aHandle, static_cast<uint32_t>(aResult)));
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsresult rv;
+
+ sLock.AssertCurrentThreadOwns();
+
+ MOZ_RELEASE_ASSERT(IsIndexUsable());
+
+ if (mState == READY && mShuttingDown) {
+ return;
+ }
+
+ switch (mState) {
+ case WRITING:
+ MOZ_ASSERT(aOpener == mIndexFileOpener);
+ mIndexFileOpener = nullptr;
+
+ if (NS_FAILED(aResult)) {
+ LOG(
+ ("CacheIndex::OnFileOpenedInternal() - Can't open index file for "
+ "writing [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(aResult)));
+ FinishWrite(false);
+ } else {
+ mIndexHandle = aHandle;
+ WriteRecords();
+ }
+ break;
+ case READING:
+ if (aOpener == mIndexFileOpener) {
+ mIndexFileOpener = nullptr;
+
+ if (NS_SUCCEEDED(aResult)) {
+ if (aHandle->FileSize() == 0) {
+ FinishRead(false);
+ CacheFileIOManager::DoomFile(aHandle, nullptr);
+ break;
+ }
+ mIndexHandle = aHandle;
+ } else {
+ FinishRead(false);
+ break;
+ }
+ } else if (aOpener == mJournalFileOpener) {
+ mJournalFileOpener = nullptr;
+ mJournalHandle = aHandle;
+ } else if (aOpener == mTmpFileOpener) {
+ mTmpFileOpener = nullptr;
+ mTmpHandle = aHandle;
+ } else {
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+
+ if (mIndexFileOpener || mJournalFileOpener || mTmpFileOpener) {
+        // Some opener hasn't finished yet.
+ break;
+ }
+
+      // We fail and cancel all other openers when opening the index file
+      // fails.
+ MOZ_ASSERT(mIndexHandle);
+
+ if (mTmpHandle) {
+ CacheFileIOManager::DoomFile(mTmpHandle, nullptr);
+ mTmpHandle = nullptr;
+
+ if (mJournalHandle) { // this shouldn't normally happen
+ LOG(
+ ("CacheIndex::OnFileOpenedInternal() - Unexpected state, all "
+ "files [%s, %s, %s] should never exist. Removing whole index.",
+ INDEX_NAME, JOURNAL_NAME, TEMP_INDEX_NAME));
+ FinishRead(false);
+ break;
+ }
+ }
+
+ if (mJournalHandle) {
+        // Rename the journal to make sure we update the index on the next
+        // start in case Firefox crashes.
+ rv = CacheFileIOManager::RenameFile(
+ mJournalHandle, nsLiteralCString(TEMP_INDEX_NAME), this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::OnFileOpenedInternal() - CacheFileIOManager::"
+ "RenameFile() failed synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishRead(false);
+ break;
+ }
+ } else {
+ StartReadingIndex();
+ }
+
+ break;
+ default:
+ MOZ_ASSERT(false, "Unexpected state!");
+ }
+}
+
+nsresult CacheIndex::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheIndex::OnFileOpened should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheIndex::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) {
+ LOG(("CacheIndex::OnDataWritten() [handle=%p, result=0x%08" PRIx32 "]",
+ aHandle, static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ nsresult rv;
+
+ StaticMutexAutoLock lock(sLock);
+
+ MOZ_RELEASE_ASSERT(IsIndexUsable());
+ MOZ_RELEASE_ASSERT(mRWPending);
+ mRWPending = false;
+
+ if (mState == READY && mShuttingDown) {
+ return NS_OK;
+ }
+
+ switch (mState) {
+ case WRITING:
+ MOZ_ASSERT(mIndexHandle == aHandle);
+
+ if (NS_FAILED(aResult)) {
+ FinishWrite(false);
+ } else {
+ if (mSkipEntries == mProcessEntries) {
+ rv = CacheFileIOManager::RenameFile(
+ mIndexHandle, nsLiteralCString(INDEX_NAME), this);
+ if (NS_FAILED(rv)) {
+ LOG(
+ ("CacheIndex::OnDataWritten() - CacheFileIOManager::"
+ "RenameFile() failed synchronously [rv=0x%08" PRIx32 "]",
+ static_cast<uint32_t>(rv)));
+ FinishWrite(false);
+ }
+ } else {
+ WriteRecords();
+ }
+ }
+ break;
+ default:
+ // Writing was canceled.
+ LOG(
+ ("CacheIndex::OnDataWritten() - ignoring notification since the "
+ "operation was previously canceled [state=%d]",
+ mState));
+ ReleaseBuffer();
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheIndex::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) {
+ LOG(("CacheIndex::OnDataRead() [handle=%p, result=0x%08" PRIx32 "]", aHandle,
+ static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ MOZ_RELEASE_ASSERT(IsIndexUsable());
+ MOZ_RELEASE_ASSERT(mRWPending);
+ mRWPending = false;
+
+ switch (mState) {
+ case READING:
+ MOZ_ASSERT(mIndexHandle == aHandle || mJournalHandle == aHandle);
+
+ if (NS_FAILED(aResult)) {
+ FinishRead(false);
+ } else {
+ if (!mIndexOnDiskIsValid) {
+ ParseRecords();
+ } else {
+ ParseJournal();
+ }
+ }
+ break;
+ default:
+ // Reading was canceled.
+ LOG(
+ ("CacheIndex::OnDataRead() - ignoring notification since the "
+ "operation was previously canceled [state=%d]",
+ mState));
+ ReleaseBuffer();
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheIndex::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheIndex::OnFileDoomed should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheIndex::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
+ MOZ_CRASH("CacheIndex::OnEOFSet should not be called!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+nsresult CacheIndex::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
+ LOG(("CacheIndex::OnFileRenamed() [handle=%p, result=0x%08" PRIx32 "]",
+ aHandle, static_cast<uint32_t>(aResult)));
+
+ MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
+
+ StaticMutexAutoLock lock(sLock);
+
+ MOZ_RELEASE_ASSERT(IsIndexUsable());
+
+ if (mState == READY && mShuttingDown) {
+ return NS_OK;
+ }
+
+ switch (mState) {
+ case WRITING:
+      // This is a result of renaming the new index, written to the temporary
+      // file, to the index file. This is the last step when writing the index
+      // and the whole writing process is successful iff renaming was
+      // successful.
+
+ if (mIndexHandle != aHandle) {
+ LOG(
+ ("CacheIndex::OnFileRenamed() - ignoring notification since it "
+ "belongs to previously canceled operation [state=%d]",
+ mState));
+ break;
+ }
+
+ FinishWrite(NS_SUCCEEDED(aResult));
+ break;
+ case READING:
+      // This is a result of renaming the journal file to the temporary file.
+      // It is renamed before we start reading the index and journal files and
+      // it should normally succeed. If it fails, we give up reading the index.
+
+ if (mJournalHandle != aHandle) {
+ LOG(
+ ("CacheIndex::OnFileRenamed() - ignoring notification since it "
+ "belongs to previously canceled operation [state=%d]",
+ mState));
+ break;
+ }
+
+ if (NS_FAILED(aResult)) {
+ FinishRead(false);
+ } else {
+ StartReadingIndex();
+ }
+ break;
+ default:
+ // Reading/writing was canceled.
+ LOG(
+ ("CacheIndex::OnFileRenamed() - ignoring notification since the "
+ "operation was previously canceled [state=%d]",
+ mState));
+ }
+
+ return NS_OK;
+}
+
+// Memory reporting
+
+size_t CacheIndex::SizeOfExcludingThisInternal(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ sLock.AssertCurrentThreadOwns();
+
+ size_t n = 0;
+ nsCOMPtr<nsISizeOf> sizeOf;
+
+ // mIndexHandle and mJournalHandle are reported via SizeOfHandlesRunnable
+ // in CacheFileIOManager::SizeOfExcludingThisInternal as part of special
+ // handles array.
+
+ sizeOf = do_QueryInterface(mCacheDirectory);
+ if (sizeOf) {
+ n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ sizeOf = do_QueryInterface(mUpdateTimer);
+ if (sizeOf) {
+ n += sizeOf->SizeOfIncludingThis(mallocSizeOf);
+ }
+
+ n += mallocSizeOf(mRWBuf);
+ n += mallocSizeOf(mRWHash);
+
+ n += mIndex.SizeOfExcludingThis(mallocSizeOf);
+ n += mPendingUpdates.SizeOfExcludingThis(mallocSizeOf);
+ n += mTmpJournal.SizeOfExcludingThis(mallocSizeOf);
+
+ // mFrecencyArray items are reported by mIndex/mPendingUpdates
+ n += mFrecencyArray.mRecs.ShallowSizeOfExcludingThis(mallocSizeOf);
+ n += mDiskConsumptionObservers.ShallowSizeOfExcludingThis(mallocSizeOf);
+
+ return n;
+}
+
+// static
+size_t CacheIndex::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ sLock.AssertCurrentThreadOwns();
+
+ if (!gInstance) return 0;
+
+ return gInstance->SizeOfExcludingThisInternal(mallocSizeOf);
+}
+
+// static
+size_t CacheIndex::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ StaticMutexAutoLock lock(sLock);
+
+ return mallocSizeOf(gInstance) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+// static
+void CacheIndex::UpdateTotalBytesWritten(uint32_t aBytesWritten) {
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+ if (!index) {
+ return;
+ }
+
+ index->mTotalBytesWritten += aBytesWritten;
+
+  // Send a telemetry report if enough data has been written and the index is
+  // in the READY state. The data is also available in the WRITING state, but
+  // we would need to deal with pending updates.
+ if (index->mTotalBytesWritten >= kTelemetryReportBytesLimit &&
+ index->mState == READY && !index->mIndexNeedsUpdate &&
+ !index->mShuttingDown) {
+ index->DoTelemetryReport();
+ index->mTotalBytesWritten = 0;
+ return;
+ }
+}
+
+void CacheIndex::DoTelemetryReport() {
+ static const nsLiteralCString
+ contentTypeNames[nsICacheEntry::CONTENT_TYPE_LAST] = {
+ "UNKNOWN"_ns, "OTHER"_ns, "JAVASCRIPT"_ns, "IMAGE"_ns,
+ "MEDIA"_ns, "STYLESHEET"_ns, "WASM"_ns};
+
+ for (uint32_t i = 0; i < nsICacheEntry::CONTENT_TYPE_LAST; ++i) {
+ if (mIndexStats.Size() > 0) {
+ Telemetry::Accumulate(
+ Telemetry::NETWORK_CACHE_SIZE_SHARE, contentTypeNames[i],
+ round(static_cast<double>(mIndexStats.SizeByType(i)) * 100.0 /
+ static_cast<double>(mIndexStats.Size())));
+ }
+
+ if (mIndexStats.Count() > 0) {
+ Telemetry::Accumulate(
+ Telemetry::NETWORK_CACHE_ENTRY_COUNT_SHARE, contentTypeNames[i],
+ round(static_cast<double>(mIndexStats.CountByType(i)) * 100.0 /
+ static_cast<double>(mIndexStats.Count())));
+ }
+ }
+
+ nsCString probeKey;
+ if (CacheObserver::SmartCacheSizeEnabled()) {
+ probeKey = "SMARTSIZE"_ns;
+ } else {
+ probeKey = "USERDEFINEDSIZE"_ns;
+ }
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_ENTRY_COUNT, probeKey,
+ mIndexStats.Count());
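+  // mIndexStats.Size() is in kB, so the size probe below is reported in MB.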
+ Telemetry::Accumulate(Telemetry::NETWORK_CACHE_SIZE, probeKey,
+ mIndexStats.Size() >> 10);
+}
+
+// static
+void CacheIndex::OnAsyncEviction(bool aEvicting) {
+ StaticMutexAutoLock lock(sLock);
+
+ RefPtr<CacheIndex> index = gInstance;
+ if (!index) {
+ return;
+ }
+
+ index->mAsyncGetDiskConsumptionBlocked = aEvicting;
+ if (!aEvicting) {
+ index->NotifyAsyncGetDiskConsumptionCallbacks();
+ }
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheIndex.h b/netwerk/cache2/CacheIndex.h
new file mode 100644
index 0000000000..bf98333435
--- /dev/null
+++ b/netwerk/cache2/CacheIndex.h
@@ -0,0 +1,1277 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheIndex__h__
+#define CacheIndex__h__
+
+#include "CacheLog.h"
+#include "CacheFileIOManager.h"
+#include "nsIRunnable.h"
+#include "CacheHashUtils.h"
+#include "nsICacheStorageService.h"
+#include "nsICacheEntry.h"
+#include "nsILoadContextInfo.h"
+#include "nsIWeakReferenceUtils.h"
+#include "nsTHashtable.h"
+#include "nsThreadUtils.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/SHA1.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/UniquePtr.h"
+
+class nsIFile;
+class nsIDirectoryEnumerator;
+class nsITimer;
+
+#ifdef DEBUG
+# define DEBUG_STATS 1
+#endif
+
+namespace mozilla {
+namespace net {
+
+class CacheFileMetadata;
+class FileOpenHelper;
+class CacheIndexIterator;
+
+const uint16_t kIndexTimeNotAvailable = 0xFFFFU;
+const uint16_t kIndexTimeOutOfBound = 0xFFFEU;
+
+typedef struct {
+ // Version of the index. The index must be ignored and deleted when the file
+ // on disk was written with a newer version.
+ uint32_t mVersion;
+
+  // Timestamp of when the last successful write of the index started. During
+  // the update process we use this timestamp for a quick validation of entry
+  // files. If a file's last modified time is older than this timestamp, we
+  // skip parsing that file since the information in the index should be up to
+  // date.
+ uint32_t mTimeStamp;
+
+  // We set this flag as soon as possible after parsing the index during
+  // startup and clear it after we write the journal to disk during shutdown.
+  // We ignore the journal and start the update process whenever this flag is
+  // set during index parsing.
+ uint32_t mIsDirty;
+
+ // The amount of data written to the cache. When it reaches
+ // kTelemetryReportBytesLimit a telemetry report is sent and the counter is
+ // reset.
+ uint32_t mKBWritten;
+} CacheIndexHeader;
+
+static_assert(sizeof(CacheIndexHeader::mVersion) +
+ sizeof(CacheIndexHeader::mTimeStamp) +
+ sizeof(CacheIndexHeader::mIsDirty) +
+ sizeof(CacheIndexHeader::mKBWritten) ==
+ sizeof(CacheIndexHeader),
+ "Unexpected sizeof(CacheIndexHeader)!");
+
+#pragma pack(push, 1)
+struct CacheIndexRecord {
+ SHA1Sum::Hash mHash;
+ uint32_t mFrecency;
+ OriginAttrsHash mOriginAttrsHash;
+ uint16_t mOnStartTime;
+ uint16_t mOnStopTime;
+ uint8_t mContentType;
+
+ /*
+ * 1000 0000 0000 0000 0000 0000 0000 0000 : initialized
+ * 0100 0000 0000 0000 0000 0000 0000 0000 : anonymous
+ * 0010 0000 0000 0000 0000 0000 0000 0000 : removed
+ * 0001 0000 0000 0000 0000 0000 0000 0000 : dirty
+ * 0000 1000 0000 0000 0000 0000 0000 0000 : fresh
+ * 0000 0100 0000 0000 0000 0000 0000 0000 : pinned
+ * 0000 0010 0000 0000 0000 0000 0000 0000 : has cached alt data
+ * 0000 0001 0000 0000 0000 0000 0000 0000 : reserved
+ * 0000 0000 1111 1111 1111 1111 1111 1111 : file size (in kB)
+ */
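+  // For example, an initialized, fresh, pinned entry occupying 2048 kB has
+  // mFlags == 0x8C000800.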
+ uint32_t mFlags;
+
+ CacheIndexRecord()
+ : mFrecency(0),
+ mOriginAttrsHash(0),
+ mOnStartTime(kIndexTimeNotAvailable),
+ mOnStopTime(kIndexTimeNotAvailable),
+ mContentType(nsICacheEntry::CONTENT_TYPE_UNKNOWN),
+ mFlags(0) {}
+
+ ~CacheIndexRecord();
+};
+#pragma pack(pop)
+
+static_assert(sizeof(CacheIndexRecord::mHash) +
+ sizeof(CacheIndexRecord::mFrecency) +
+ sizeof(CacheIndexRecord::mOriginAttrsHash) +
+ sizeof(CacheIndexRecord::mOnStartTime) +
+ sizeof(CacheIndexRecord::mOnStopTime) +
+ sizeof(CacheIndexRecord::mContentType) +
+ sizeof(CacheIndexRecord::mFlags) ==
+ sizeof(CacheIndexRecord),
+ "Unexpected sizeof(CacheIndexRecord)!");
+
+class CacheIndexEntry : public PLDHashEntryHdr {
+ public:
+ typedef const SHA1Sum::Hash& KeyType;
+ typedef const SHA1Sum::Hash* KeyTypePointer;
+
+ explicit CacheIndexEntry(KeyTypePointer aKey) {
+ MOZ_COUNT_CTOR(CacheIndexEntry);
+ mRec = MakeUnique<CacheIndexRecord>();
+ LOG(("CacheIndexEntry::CacheIndexEntry() - Created record [rec=%p]",
+ mRec.get()));
+ memcpy(&mRec->mHash, aKey, sizeof(SHA1Sum::Hash));
+ }
+ CacheIndexEntry(const CacheIndexEntry& aOther) {
+ MOZ_ASSERT_UNREACHABLE("CacheIndexEntry copy constructor is forbidden!");
+ }
+ ~CacheIndexEntry() {
+ MOZ_COUNT_DTOR(CacheIndexEntry);
+ LOG(("CacheIndexEntry::~CacheIndexEntry() - Deleting record [rec=%p]",
+ mRec.get()));
+ }
+
+ // KeyEquals(): does this entry match this key?
+ bool KeyEquals(KeyTypePointer aKey) const {
+ return memcmp(&mRec->mHash, aKey, sizeof(SHA1Sum::Hash)) == 0;
+ }
+
+ // KeyToPointer(): Convert KeyType to KeyTypePointer
+ static KeyTypePointer KeyToPointer(KeyType aKey) { return &aKey; }
+
+ // HashKey(): calculate the hash number
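+  // The SHA-1 key is already well distributed, so its first 32 bits are used
+  // directly.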
+ static PLDHashNumber HashKey(KeyTypePointer aKey) {
+ return (reinterpret_cast<const uint32_t*>(aKey))[0];
+ }
+
+ // ALLOW_MEMMOVE can we move this class with memmove(), or do we have
+ // to use the copy constructor?
+ enum { ALLOW_MEMMOVE = true };
+
+ bool operator==(const CacheIndexEntry& aOther) const {
+ return KeyEquals(&aOther.mRec->mHash);
+ }
+
+ CacheIndexEntry& operator=(const CacheIndexEntry& aOther) {
+ MOZ_ASSERT(
+ memcmp(&mRec->mHash, &aOther.mRec->mHash, sizeof(SHA1Sum::Hash)) == 0);
+ mRec->mFrecency = aOther.mRec->mFrecency;
+ mRec->mOriginAttrsHash = aOther.mRec->mOriginAttrsHash;
+ mRec->mOnStartTime = aOther.mRec->mOnStartTime;
+ mRec->mOnStopTime = aOther.mRec->mOnStopTime;
+ mRec->mContentType = aOther.mRec->mContentType;
+ mRec->mFlags = aOther.mRec->mFlags;
+ return *this;
+ }
+
+ void InitNew() {
+ mRec->mFrecency = 0;
+ mRec->mOriginAttrsHash = 0;
+ mRec->mOnStartTime = kIndexTimeNotAvailable;
+ mRec->mOnStopTime = kIndexTimeNotAvailable;
+ mRec->mContentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ mRec->mFlags = 0;
+ }
+
+ void Init(OriginAttrsHash aOriginAttrsHash, bool aAnonymous, bool aPinned) {
+ MOZ_ASSERT(mRec->mFrecency == 0);
+ MOZ_ASSERT(mRec->mOriginAttrsHash == 0);
+ MOZ_ASSERT(mRec->mOnStartTime == kIndexTimeNotAvailable);
+ MOZ_ASSERT(mRec->mOnStopTime == kIndexTimeNotAvailable);
+ MOZ_ASSERT(mRec->mContentType == nsICacheEntry::CONTENT_TYPE_UNKNOWN);
+ // When we init the entry it must be fresh and may be dirty
+ MOZ_ASSERT((mRec->mFlags & ~kDirtyMask) == kFreshMask);
+
+ mRec->mOriginAttrsHash = aOriginAttrsHash;
+ mRec->mFlags |= kInitializedMask;
+ if (aAnonymous) {
+ mRec->mFlags |= kAnonymousMask;
+ }
+ if (aPinned) {
+ mRec->mFlags |= kPinnedMask;
+ }
+ }
+
+ const SHA1Sum::Hash* Hash() const { return &mRec->mHash; }
+
+ bool IsInitialized() const { return !!(mRec->mFlags & kInitializedMask); }
+
+ mozilla::net::OriginAttrsHash OriginAttrsHash() const {
+ return mRec->mOriginAttrsHash;
+ }
+
+ bool Anonymous() const { return !!(mRec->mFlags & kAnonymousMask); }
+
+ bool IsRemoved() const { return !!(mRec->mFlags & kRemovedMask); }
+ void MarkRemoved() { mRec->mFlags |= kRemovedMask; }
+
+ bool IsDirty() const { return !!(mRec->mFlags & kDirtyMask); }
+ void MarkDirty() { mRec->mFlags |= kDirtyMask; }
+ void ClearDirty() { mRec->mFlags &= ~kDirtyMask; }
+
+ bool IsFresh() const { return !!(mRec->mFlags & kFreshMask); }
+ void MarkFresh() { mRec->mFlags |= kFreshMask; }
+
+ bool IsPinned() const { return !!(mRec->mFlags & kPinnedMask); }
+
+ void SetFrecency(uint32_t aFrecency) { mRec->mFrecency = aFrecency; }
+ uint32_t GetFrecency() const { return mRec->mFrecency; }
+
+ void SetHasAltData(bool aHasAltData) {
+ aHasAltData ? mRec->mFlags |= kHasAltDataMask
+ : mRec->mFlags &= ~kHasAltDataMask;
+ }
+ bool GetHasAltData() const { return !!(mRec->mFlags & kHasAltDataMask); }
+
+ void SetOnStartTime(uint16_t aTime) { mRec->mOnStartTime = aTime; }
+ uint16_t GetOnStartTime() const { return mRec->mOnStartTime; }
+
+ void SetOnStopTime(uint16_t aTime) { mRec->mOnStopTime = aTime; }
+ uint16_t GetOnStopTime() const { return mRec->mOnStopTime; }
+
+ void SetContentType(uint8_t aType) { mRec->mContentType = aType; }
+ uint8_t GetContentType() const { return GetContentType(mRec.get()); }
+ static uint8_t GetContentType(CacheIndexRecord* aRec) {
+ if (aRec->mContentType >= nsICacheEntry::CONTENT_TYPE_LAST) {
+ LOG(
+ ("CacheIndexEntry::GetContentType() - Found invalid content type "
+ "[hash=%08x%08x%08x%08x%08x, contentType=%u]",
+ LOGSHA1(aRec->mHash), aRec->mContentType));
+ return nsICacheEntry::CONTENT_TYPE_UNKNOWN;
+ }
+ return aRec->mContentType;
+ }
+
+ // Sets filesize in kilobytes.
+ void SetFileSize(uint32_t aFileSize) {
+ if (aFileSize > kFileSizeMask) {
+ LOG(
+ ("CacheIndexEntry::SetFileSize() - FileSize is too large, "
+ "truncating to %u",
+ kFileSizeMask));
+ aFileSize = kFileSizeMask;
+ }
+ mRec->mFlags &= ~kFileSizeMask;
+ mRec->mFlags |= aFileSize;
+ }
+ // Returns filesize in kilobytes.
+ uint32_t GetFileSize() const { return GetFileSize(*mRec); }
+ static uint32_t GetFileSize(const CacheIndexRecord& aRec) {
+ return aRec.mFlags & kFileSizeMask;
+ }
+ static uint32_t IsPinned(CacheIndexRecord* aRec) {
+ return aRec->mFlags & kPinnedMask;
+ }
+ bool IsFileEmpty() const { return GetFileSize() == 0; }
+
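+  // On disk, each record is serialized in network byte order as 41 bytes:
+  // 20 (SHA-1 hash) + 4 (frecency) + 8 (origin attrs hash) + 2 + 2 (on start/
+  // stop time) + 1 (content type) + 4 (flags).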
+ void WriteToBuf(void* aBuf) {
+ uint8_t* ptr = static_cast<uint8_t*>(aBuf);
+ memcpy(ptr, mRec->mHash, sizeof(SHA1Sum::Hash));
+ ptr += sizeof(SHA1Sum::Hash);
+ NetworkEndian::writeUint32(ptr, mRec->mFrecency);
+ ptr += sizeof(uint32_t);
+ NetworkEndian::writeUint64(ptr, mRec->mOriginAttrsHash);
+ ptr += sizeof(uint64_t);
+ NetworkEndian::writeUint16(ptr, mRec->mOnStartTime);
+ ptr += sizeof(uint16_t);
+ NetworkEndian::writeUint16(ptr, mRec->mOnStopTime);
+ ptr += sizeof(uint16_t);
+ *ptr = mRec->mContentType;
+ ptr += sizeof(uint8_t);
+    // Dirty and fresh flags should never go to disk, since they make sense
+    // only during the current session.
+ NetworkEndian::writeUint32(ptr, mRec->mFlags & ~(kDirtyMask | kFreshMask));
+ }
+
+ void ReadFromBuf(void* aBuf) {
+ const uint8_t* ptr = static_cast<const uint8_t*>(aBuf);
+ MOZ_ASSERT(memcmp(&mRec->mHash, ptr, sizeof(SHA1Sum::Hash)) == 0);
+ ptr += sizeof(SHA1Sum::Hash);
+ mRec->mFrecency = NetworkEndian::readUint32(ptr);
+ ptr += sizeof(uint32_t);
+ mRec->mOriginAttrsHash = NetworkEndian::readUint64(ptr);
+ ptr += sizeof(uint64_t);
+ mRec->mOnStartTime = NetworkEndian::readUint16(ptr);
+ ptr += sizeof(uint16_t);
+ mRec->mOnStopTime = NetworkEndian::readUint16(ptr);
+ ptr += sizeof(uint16_t);
+ mRec->mContentType = *ptr;
+ ptr += sizeof(uint8_t);
+ mRec->mFlags = NetworkEndian::readUint32(ptr);
+ }
+
+ void Log() const {
+ LOG(
+ ("CacheIndexEntry::Log() [this=%p, hash=%08x%08x%08x%08x%08x, fresh=%u,"
+ " initialized=%u, removed=%u, dirty=%u, anonymous=%u, "
+ "originAttrsHash=%" PRIx64 ", frecency=%u, hasAltData=%u, "
+ "onStartTime=%u, onStopTime=%u, contentType=%u, size=%u]",
+ this, LOGSHA1(mRec->mHash), IsFresh(), IsInitialized(), IsRemoved(),
+ IsDirty(), Anonymous(), OriginAttrsHash(), GetFrecency(),
+ GetHasAltData(), GetOnStartTime(), GetOnStopTime(), GetContentType(),
+ GetFileSize()));
+ }
+
+ static bool RecordMatchesLoadContextInfo(CacheIndexRecord* aRec,
+ nsILoadContextInfo* aInfo) {
+ MOZ_ASSERT(aInfo);
+
+ if (!aInfo->IsPrivate() &&
+ GetOriginAttrsHash(*aInfo->OriginAttributesPtr()) ==
+ aRec->mOriginAttrsHash &&
+ aInfo->IsAnonymous() == !!(aRec->mFlags & kAnonymousMask)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(mRec.get());
+ }
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+ }
+
+ private:
+ friend class CacheIndexEntryUpdate;
+ friend class CacheIndex;
+ friend class CacheIndexEntryAutoManage;
+ friend struct CacheIndexRecord;
+
+ static const uint32_t kInitializedMask = 0x80000000;
+ static const uint32_t kAnonymousMask = 0x40000000;
+
+ // This flag is set when the entry was removed. We need to keep this
+ // information in memory until we write the index file.
+ static const uint32_t kRemovedMask = 0x20000000;
+
+ // This flag is set when the information in memory is not in sync with the
+ // information in index file on disk.
+ static const uint32_t kDirtyMask = 0x10000000;
+
+ // This flag is set when the information about the entry is fresh, i.e.
+ // we've created or opened this entry during this session, or we've seen
+ // this entry during update or build process.
+ static const uint32_t kFreshMask = 0x08000000;
+
+ // Indicates a pinned entry.
+ static const uint32_t kPinnedMask = 0x04000000;
+
+ // Indicates there is cached alternative data in the entry.
+ static const uint32_t kHasAltDataMask = 0x02000000;
+ static const uint32_t kReservedMask = 0x01000000;
+
+ // FileSize in kilobytes
+ static const uint32_t kFileSizeMask = 0x00FFFFFF;
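+  // The largest representable size is 2^24 - 1 kB (just under 16 GiB); files
+  // larger than that are clamped in SetFileSize().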
+
+ UniquePtr<CacheIndexRecord> mRec;
+};
+
+class CacheIndexEntryUpdate : public CacheIndexEntry {
+ public:
+ explicit CacheIndexEntryUpdate(CacheIndexEntry::KeyTypePointer aKey)
+ : CacheIndexEntry(aKey), mUpdateFlags(0) {
+ MOZ_COUNT_CTOR(CacheIndexEntryUpdate);
+ LOG(("CacheIndexEntryUpdate::CacheIndexEntryUpdate()"));
+ }
+ ~CacheIndexEntryUpdate() {
+ MOZ_COUNT_DTOR(CacheIndexEntryUpdate);
+ LOG(("CacheIndexEntryUpdate::~CacheIndexEntryUpdate()"));
+ }
+
+ CacheIndexEntryUpdate& operator=(const CacheIndexEntry& aOther) {
+ MOZ_ASSERT(
+ memcmp(&mRec->mHash, &aOther.mRec->mHash, sizeof(SHA1Sum::Hash)) == 0);
+ mUpdateFlags = 0;
+ *(static_cast<CacheIndexEntry*>(this)) = aOther;
+ return *this;
+ }
+
+ void InitNew() {
+ mUpdateFlags = kFrecencyUpdatedMask | kHasAltDataUpdatedMask |
+ kOnStartTimeUpdatedMask | kOnStopTimeUpdatedMask |
+ kContentTypeUpdatedMask | kFileSizeUpdatedMask;
+ CacheIndexEntry::InitNew();
+ }
+
+ void SetFrecency(uint32_t aFrecency) {
+ mUpdateFlags |= kFrecencyUpdatedMask;
+ CacheIndexEntry::SetFrecency(aFrecency);
+ }
+
+ void SetHasAltData(bool aHasAltData) {
+ mUpdateFlags |= kHasAltDataUpdatedMask;
+ CacheIndexEntry::SetHasAltData(aHasAltData);
+ }
+
+ void SetOnStartTime(uint16_t aTime) {
+ mUpdateFlags |= kOnStartTimeUpdatedMask;
+ CacheIndexEntry::SetOnStartTime(aTime);
+ }
+
+ void SetOnStopTime(uint16_t aTime) {
+ mUpdateFlags |= kOnStopTimeUpdatedMask;
+ CacheIndexEntry::SetOnStopTime(aTime);
+ }
+
+ void SetContentType(uint8_t aType) {
+ mUpdateFlags |= kContentTypeUpdatedMask;
+ CacheIndexEntry::SetContentType(aType);
+ }
+
+ void SetFileSize(uint32_t aFileSize) {
+ mUpdateFlags |= kFileSizeUpdatedMask;
+ CacheIndexEntry::SetFileSize(aFileSize);
+ }
+
+ void ApplyUpdate(CacheIndexEntry* aDst) {
+ MOZ_ASSERT(
+ memcmp(&mRec->mHash, &aDst->mRec->mHash, sizeof(SHA1Sum::Hash)) == 0);
+ if (mUpdateFlags & kFrecencyUpdatedMask) {
+ aDst->mRec->mFrecency = mRec->mFrecency;
+ }
+ aDst->mRec->mOriginAttrsHash = mRec->mOriginAttrsHash;
+ if (mUpdateFlags & kOnStartTimeUpdatedMask) {
+ aDst->mRec->mOnStartTime = mRec->mOnStartTime;
+ }
+ if (mUpdateFlags & kOnStopTimeUpdatedMask) {
+ aDst->mRec->mOnStopTime = mRec->mOnStopTime;
+ }
+ if (mUpdateFlags & kContentTypeUpdatedMask) {
+ aDst->mRec->mContentType = mRec->mContentType;
+ }
+ if (mUpdateFlags & kHasAltDataUpdatedMask &&
+ ((aDst->mRec->mFlags ^ mRec->mFlags) & kHasAltDataMask)) {
+ // Toggle the bit if we need to.
+ aDst->mRec->mFlags ^= kHasAltDataMask;
+ }
+
+ if (mUpdateFlags & kFileSizeUpdatedMask) {
+ // Copy all flags except |HasAltData|.
+ aDst->mRec->mFlags |= (mRec->mFlags & ~kHasAltDataMask);
+ } else {
+ // Copy all flags except |HasAltData| and file size.
+ aDst->mRec->mFlags &= kFileSizeMask;
+ aDst->mRec->mFlags |= (mRec->mFlags & ~kHasAltDataMask & ~kFileSizeMask);
+ }
+ }
+
+ private:
+ static const uint32_t kFrecencyUpdatedMask = 0x00000001;
+ static const uint32_t kContentTypeUpdatedMask = 0x00000002;
+ static const uint32_t kFileSizeUpdatedMask = 0x00000004;
+ static const uint32_t kHasAltDataUpdatedMask = 0x00000008;
+ static const uint32_t kOnStartTimeUpdatedMask = 0x00000010;
+ static const uint32_t kOnStopTimeUpdatedMask = 0x00000020;
+
+ uint32_t mUpdateFlags;
+};
+
+class CacheIndexStats {
+ public:
+ CacheIndexStats()
+ : mCount(0),
+ mNotInitialized(0),
+ mRemoved(0),
+ mDirty(0),
+ mFresh(0),
+ mEmpty(0),
+ mSize(0)
+#ifdef DEBUG
+ ,
+ mStateLogged(false),
+ mDisableLogging(false)
+#endif
+ {
+ for (uint32_t i = 0; i < nsICacheEntry::CONTENT_TYPE_LAST; ++i) {
+ mCountByType[i] = 0;
+ mSizeByType[i] = 0;
+ }
+ }
+
+ bool operator==(const CacheIndexStats& aOther) const {
+ for (uint32_t i = 0; i < nsICacheEntry::CONTENT_TYPE_LAST; ++i) {
+ if (mCountByType[i] != aOther.mCountByType[i] ||
+ mSizeByType[i] != aOther.mSizeByType[i]) {
+ return false;
+ }
+ }
+
+ return
+#ifdef DEBUG
+ aOther.mStateLogged == mStateLogged &&
+#endif
+ aOther.mCount == mCount && aOther.mNotInitialized == mNotInitialized &&
+ aOther.mRemoved == mRemoved && aOther.mDirty == mDirty &&
+ aOther.mFresh == mFresh && aOther.mEmpty == mEmpty &&
+ aOther.mSize == mSize;
+ }
+
+#ifdef DEBUG
+ void DisableLogging() { mDisableLogging = true; }
+#endif
+
+ void Log() {
+ LOG(
+ ("CacheIndexStats::Log() [count=%u, notInitialized=%u, removed=%u, "
+ "dirty=%u, fresh=%u, empty=%u, size=%u]",
+ mCount, mNotInitialized, mRemoved, mDirty, mFresh, mEmpty, mSize));
+ }
+
+ void Clear() {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::Clear() - state logged!");
+
+ mCount = 0;
+ mNotInitialized = 0;
+ mRemoved = 0;
+ mDirty = 0;
+ mFresh = 0;
+ mEmpty = 0;
+ mSize = 0;
+ for (uint32_t i = 0; i < nsICacheEntry::CONTENT_TYPE_LAST; ++i) {
+ mCountByType[i] = 0;
+ mSizeByType[i] = 0;
+ }
+ }
+
+#ifdef DEBUG
+ bool StateLogged() { return mStateLogged; }
+#endif
+
+ uint32_t Count() {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::Count() - state logged!");
+ return mCount;
+ }
+
+ uint32_t CountByType(uint8_t aContentType) {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::CountByType() - state logged!");
+ MOZ_RELEASE_ASSERT(aContentType < nsICacheEntry::CONTENT_TYPE_LAST);
+ return mCountByType[aContentType];
+ }
+
+ uint32_t Dirty() {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::Dirty() - state logged!");
+ return mDirty;
+ }
+
+ uint32_t Fresh() {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::Fresh() - state logged!");
+ return mFresh;
+ }
+
+ uint32_t ActiveEntriesCount() {
+ MOZ_ASSERT(!mStateLogged,
+ "CacheIndexStats::ActiveEntriesCount() - state "
+ "logged!");
+ return mCount - mRemoved - mNotInitialized - mEmpty;
+ }
+
+ uint32_t Size() {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::Size() - state logged!");
+ return mSize;
+ }
+
+ uint32_t SizeByType(uint8_t aContentType) {
+ MOZ_ASSERT(!mStateLogged, "CacheIndexStats::SizeByType() - state logged!");
+ MOZ_RELEASE_ASSERT(aContentType < nsICacheEntry::CONTENT_TYPE_LAST);
+ return mSizeByType[aContentType];
+ }
+
+ void BeforeChange(const CacheIndexEntry* aEntry) {
+#ifdef DEBUG_STATS
+ if (!mDisableLogging) {
+ LOG(("CacheIndexStats::BeforeChange()"));
+ Log();
+ }
+#endif
+
+ MOZ_ASSERT(!mStateLogged,
+ "CacheIndexStats::BeforeChange() - state "
+ "logged!");
+#ifdef DEBUG
+ mStateLogged = true;
+#endif
+ if (aEntry) {
+ MOZ_ASSERT(mCount);
+ uint8_t contentType = aEntry->GetContentType();
+ mCount--;
+ mCountByType[contentType]--;
+ if (aEntry->IsDirty()) {
+ MOZ_ASSERT(mDirty);
+ mDirty--;
+ }
+ if (aEntry->IsFresh()) {
+ MOZ_ASSERT(mFresh);
+ mFresh--;
+ }
+ if (aEntry->IsRemoved()) {
+ MOZ_ASSERT(mRemoved);
+ mRemoved--;
+ } else {
+ if (!aEntry->IsInitialized()) {
+ MOZ_ASSERT(mNotInitialized);
+ mNotInitialized--;
+ } else {
+ if (aEntry->IsFileEmpty()) {
+ MOZ_ASSERT(mEmpty);
+ mEmpty--;
+ } else {
+ MOZ_ASSERT(mSize >= aEntry->GetFileSize());
+ mSize -= aEntry->GetFileSize();
+ mSizeByType[contentType] -= aEntry->GetFileSize();
+ }
+ }
+ }
+ }
+ }
+
+ void AfterChange(const CacheIndexEntry* aEntry) {
+ MOZ_ASSERT(mStateLogged,
+ "CacheIndexStats::AfterChange() - state not "
+ "logged!");
+#ifdef DEBUG
+ mStateLogged = false;
+#endif
+ if (aEntry) {
+ uint8_t contentType = aEntry->GetContentType();
+ ++mCount;
+ ++mCountByType[contentType];
+ if (aEntry->IsDirty()) {
+ mDirty++;
+ }
+ if (aEntry->IsFresh()) {
+ mFresh++;
+ }
+ if (aEntry->IsRemoved()) {
+ mRemoved++;
+ } else {
+ if (!aEntry->IsInitialized()) {
+ mNotInitialized++;
+ } else {
+ if (aEntry->IsFileEmpty()) {
+ mEmpty++;
+ } else {
+ mSize += aEntry->GetFileSize();
+ mSizeByType[contentType] += aEntry->GetFileSize();
+ }
+ }
+ }
+ }
+
+#ifdef DEBUG_STATS
+ if (!mDisableLogging) {
+ LOG(("CacheIndexStats::AfterChange()"));
+ Log();
+ }
+#endif
+ }
+
+ private:
+ uint32_t mCount;
+ uint32_t mCountByType[nsICacheEntry::CONTENT_TYPE_LAST];
+ uint32_t mNotInitialized;
+ uint32_t mRemoved;
+ uint32_t mDirty;
+ uint32_t mFresh;
+ uint32_t mEmpty;
+ uint32_t mSize;
+ uint32_t mSizeByType[nsICacheEntry::CONTENT_TYPE_LAST];
+#ifdef DEBUG
+  // We completely remove the data about an entry from the stats in
+  // BeforeChange() and set this flag to true. The entry is then modified,
+  // deleted or created, the data is put back into the stats, and this flag is
+  // set to false. Statistics must not be read during this time since the
+  // information is not correct.
+ bool mStateLogged;
+
+ // Disables logging in this instance of CacheIndexStats
+ bool mDisableLogging;
+#endif
+};
+
+class CacheIndex final : public CacheFileIOListener, public nsIRunnable {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIRUNNABLE
+
+ CacheIndex();
+
+ static nsresult Init(nsIFile* aCacheDirectory);
+ static nsresult PreShutdown();
+ static nsresult Shutdown();
+
+ // Following methods can be called only on IO thread.
+
+  // Add an entry to the index. The entry shouldn't be present in the index.
+  // This method is called whenever a new handle for a new entry file is
+  // created. The newly created entry is not initialized and it must be either
+  // initialized with InitEntry() or removed with RemoveEntry().
+ static nsresult AddEntry(const SHA1Sum::Hash* aHash);
+
+  // Inform the index about an existing entry that should be present in the
+  // index. This method is called whenever a new handle for an existing entry
+  // file is created. As in the case of AddEntry(), either InitEntry() or
+  // RemoveEntry() must be called on the entry, since the entry is not
+  // initialized if the index is outdated.
+ static nsresult EnsureEntryExists(const SHA1Sum::Hash* aHash);
+
+ // Initialize the entry. It MUST be present in index. Call to AddEntry() or
+ // EnsureEntryExists() must precede the call to this method.
+ static nsresult InitEntry(const SHA1Sum::Hash* aHash,
+ OriginAttrsHash aOriginAttrsHash, bool aAnonymous,
+ bool aPinned);
+
+  // Remove an entry from the index. The entry should be present in the index.
+ static nsresult RemoveEntry(const SHA1Sum::Hash* aHash);
+
+ // Update some information in entry. The entry MUST be present in index and
+ // MUST be initialized. Call to AddEntry() or EnsureEntryExists() and to
+ // InitEntry() must precede the call to this method.
+ // Pass nullptr if the value didn't change.
+ static nsresult UpdateEntry(const SHA1Sum::Hash* aHash,
+ const uint32_t* aFrecency,
+ const bool* aHasAltData,
+ const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType,
+ const uint32_t* aSize);
+
+ // Remove all entries from the index. Called when clearing the whole cache.
+ static nsresult RemoveAll();
+
+ enum EntryStatus { EXISTS = 0, DOES_NOT_EXIST = 1, DO_NOT_KNOW = 2 };
+
+  // Returns the status of the entry in the index for the given key. It can be
+  // called on any thread.
+  // If the optional aCB callback is given, it will be called with a
+  // CacheIndexEntry only if _retval is EXISTS when the method returns.
+ static nsresult HasEntry(
+ const nsACString& aKey, EntryStatus* _retval,
+ const std::function<void(const CacheIndexEntry*)>& aCB = nullptr);
+ static nsresult HasEntry(
+ const SHA1Sum::Hash& hash, EntryStatus* _retval,
+ const std::function<void(const CacheIndexEntry*)>& aCB = nullptr);
+
+  // Returns the hash of the least important entry that should be evicted if
+  // the cache size is over the limit, and also returns the total number of all
+  // entries in the index minus the number of forced-valid entries and unpinned
+  // entries that we encounter when searching (see below).
+ static nsresult GetEntryForEviction(bool aIgnoreEmptyEntries,
+ SHA1Sum::Hash* aHash, uint32_t* aCnt);
+
+ // Checks if a cache entry is currently forced valid. Used to prevent an entry
+ // (that has been forced valid) from being evicted when the cache size reaches
+ // its limit.
+ static bool IsForcedValidEntry(const SHA1Sum::Hash* aHash);
+
+ // Returns cache size in kB.
+ static nsresult GetCacheSize(uint32_t* _retval);
+
+ // Returns number of entry files in the cache
+ static nsresult GetEntryFileCount(uint32_t* _retval);
+
+  // Synchronously returns the disk occupation and number of entries per
+  // context. Callable on any thread. It ignores the load context info and gets
+  // stats for all entries if aInfo is nullptr.
+ static nsresult GetCacheStats(nsILoadContextInfo* aInfo, uint32_t* aSize,
+ uint32_t* aCount);
+
+ // Asynchronously gets the disk cache size, used for display in the UI.
+ static nsresult AsyncGetDiskConsumption(
+ nsICacheStorageConsumptionObserver* aObserver);
+
+  // Returns an iterator that returns entries matching a given context that
+  // were present in the index at the time this method was called. If aAddNew
+  // is true then the iterator will also return entries created after this
+  // call.
+  // NOTE: When an entry is removed from the index it is also removed from the
+  // iterator regardless of what was passed in aAddNew.
+ static nsresult GetIterator(nsILoadContextInfo* aInfo, bool aAddNew,
+ CacheIndexIterator** _retval);
+
+  // Returns true if we _think_ that the index is up to date, i.e. the state is
+  // READY or WRITING and both mIndexNeedsUpdate and mShuttingDown are false.
+ static nsresult IsUpToDate(bool* _retval);
+
+ // Called from CacheStorageService::Clear() and
+ // CacheFileContextEvictor::EvictEntries(), sets a flag that blocks
+ // notification to AsyncGetDiskConsumption.
+ static void OnAsyncEviction(bool aEvicting);
+
+  // We keep track of the total bytes written to the cache to be able to send
+  // a telemetry report after writing a certain amount of data to the cache.
+ static void UpdateTotalBytesWritten(uint32_t aBytesWritten);
+
+ // Memory reporting
+ static size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+ static size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ private:
+ friend class CacheIndexEntryAutoManage;
+ friend class FileOpenHelper;
+ friend class CacheIndexIterator;
+ friend struct CacheIndexRecord;
+
+ virtual ~CacheIndex();
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override;
+ void OnFileOpenedInternal(FileOpenHelper* aOpener, CacheFileHandle* aHandle,
+ nsresult aResult);
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override;
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) override;
+
+ nsresult InitInternal(nsIFile* aCacheDirectory);
+ void PreShutdownInternal();
+
+  // This method returns false when the index is not initialized or shut down.
+ bool IsIndexUsable();
+
+ // This method checks whether the entry has the same values of
+ // originAttributes and isAnonymous. We don't expect to find a collision
+ // since these values are part of the key that we hash and we use a strong
+ // hash function.
+ static bool IsCollision(CacheIndexEntry* aEntry,
+ OriginAttrsHash aOriginAttrsHash, bool aAnonymous);
+
+ // Checks whether any of the information about the entry has changed.
+ static bool HasEntryChanged(CacheIndexEntry* aEntry,
+ const uint32_t* aFrecency,
+ const bool* aHasAltData,
+ const uint16_t* aOnStartTime,
+ const uint16_t* aOnStopTime,
+ const uint8_t* aContentType,
+ const uint32_t* aSize);
+
+ // Merge all pending operations from mPendingUpdates into mIndex.
+ void ProcessPendingOperations();
+
+ // Following methods perform writing of the index file.
+ //
+  // The index is written periodically, but no more often than once per
+  // kMinDumpInterval, and there must be at least kMinUnwrittenChanges
+  // differences between the index on disk and in memory. The index is always
+  // first written to a temporary file and the old index file is replaced when
+  // the writing process succeeds.
+  //
+  // Starts writing the index when both limits (minimal delay between writes
+  // and minimum number of changes in the index) have been exceeded.
+  bool WriteIndexToDiskIfNeeded();
+  // Starts writing the index file.
+  void WriteIndexToDisk();
+  // Serializes part of the mIndex hashtable to the write buffer and writes the
+  // buffer to the file.
+ void WriteRecords();
+ // Finalizes writing process.
+ void FinishWrite(bool aSucceeded);
+
+  // The following methods perform writing of the journal during shutdown. All
+  // these methods must be called only during shutdown since they write/delete
+  // files directly on the main thread instead of using CacheFileIOManager,
+  // which does it asynchronously on the IO thread. The journal contains only
+  // entries that are dirty, i.e. changes that are not present in the index
+  // file on the disk. When the journal is written successfully, the dirty flag
+  // in the index file is cleared.
+ nsresult GetFile(const nsACString& aName, nsIFile** _retval);
+ void RemoveFile(const nsACString& aName);
+ void RemoveAllIndexFiles();
+ void RemoveJournalAndTempFile();
+ // Writes journal to the disk and clears dirty flag in index header.
+ nsresult WriteLogToDisk();
+
+  // The following methods perform reading of the index from the disk.
+  //
+  // The index is read at startup just after initializing the CacheIndex. There
+  // are 3 files used when manipulating the index: the index file, the journal
+  // file and a temporary file. All files contain a hash of the data, so we can
+  // check whether the content is valid and complete. The index file also
+  // contains a dirty flag in the index header which is unset on a clean
+  // shutdown. During opening and reading of the files we determine the status
+  // of the whole index from the states of the separate files. The following
+  // table shows all possible combinations:
+ //
+ // index, journal, tmpfile
+ // M * * - index is missing -> BUILD
+ // I * * - index is invalid -> BUILD
+ // D * * - index is dirty -> UPDATE
+ // C M * - index is dirty -> UPDATE
+ // C I * - unexpected state -> UPDATE
+ // C V E - unexpected state -> UPDATE
+ // C V M - index is up to date -> READY
+ //
+ // where the letters mean:
+ // * - any state
+ // E - file exists
+ // M - file is missing
+ // I - data is invalid (parsing failed or hash didn't match)
+ // D - dirty (data in index file is correct, but dirty flag is set)
+ // C - clean (index file is clean)
+ // V - valid (data in journal file is correct)
+ //
+ // Note: We accept the data from journal only when the index is up to date as
+ // a whole (i.e. C,V,M state).
+ //
+  // We rename the journal file to the temporary file as soon as possible after
+  // the initial test to ensure that we start the update process on the next
+  // startup if Firefox crashes during parsing of the index.
+ //
+ // Initiates reading index from disk.
+ void ReadIndexFromDisk();
+ // Starts reading data from index file.
+ void StartReadingIndex();
+ // Parses data read from index file.
+ void ParseRecords();
+ // Starts reading data from journal file.
+ void StartReadingJournal();
+ // Parses data read from journal file.
+ void ParseJournal();
+ // Merges entries from journal into mIndex.
+ void MergeJournal();
+  // In a debug build this method checks that we have no fresh entry in mIndex
+  // after we finish reading the index and before we process pending
+  // operations.
+  void EnsureNoFreshEntry();
+  // In a debug build this method is called after processing pending operations
+  // to make sure mIndexStats contains correct information.
+ void EnsureCorrectStats();
+ // Finalizes reading process.
+ void FinishRead(bool aSucceeded);
+
+  // The following methods perform updating and building of the index.
+  // Timer callback that starts the update or build process.
+  static void DelayedUpdate(nsITimer* aTimer, void* aClosure);
+  void DelayedUpdateLocked();
+  // Posts a timer event that starts the update or build process.
+ nsresult ScheduleUpdateTimer(uint32_t aDelay);
+ nsresult SetupDirectoryEnumerator();
+ nsresult InitEntryFromDiskData(CacheIndexEntry* aEntry,
+ CacheFileMetadata* aMetaData,
+ int64_t aFileSize);
+  // Returns true when either a timer is scheduled or an event is posted.
+  bool IsUpdatePending();
+  // Iterates through all files in the entries directory that we didn't
+  // create/open during this session, parses them and adds the entries to the
+  // index.
+ void BuildIndex();
+
+ bool StartUpdatingIndexIfNeeded(bool aSwitchingToReadyState = false);
+  // Starts the update or build process or fires a timer when it is too early
+  // after startup.
+  void StartUpdatingIndex(bool aRebuild);
+  // Iterates through all files in the entries directory that we didn't
+  // create/open during this session and whose last modified time is newer than
+  // the timestamp in the index header. Parses the files and adds the entries
+  // to the index.
+ void UpdateIndex();
+ // Finalizes update or build process.
+ void FinishUpdate(bool aSucceeded);
+
+ void RemoveNonFreshEntries();
+
+ enum EState {
+ // Initial state in which the index is not usable
+ // Possible transitions:
+ // -> READING
+ INITIAL = 0,
+
+ // Index is being read from the disk.
+ // Possible transitions:
+ // -> INITIAL - We failed to dispatch a read event.
+ // -> BUILDING - No or corrupted index file was found.
+ // -> UPDATING - No or corrupted journal file was found.
+ // - Dirty flag was set in index header.
+ // -> READY - Index was read successfully or was interrupted by
+ // pre-shutdown.
+ // -> SHUTDOWN - This could happen only in case of pre-shutdown failure.
+ READING = 1,
+
+ // Index is being written to the disk.
+ // Possible transitions:
+ // -> READY - Writing of index finished or was interrupted by
+    //                 pre-shutdown.
+ // -> UPDATING - Writing of index finished, but index was found outdated
+ // during writing.
+ // -> SHUTDOWN - This could happen only in case of pre-shutdown failure.
+ WRITING = 2,
+
+    // Index is being built.
+ // Possible transitions:
+ // -> READY - Building of index finished or was interrupted by
+ // pre-shutdown.
+ // -> SHUTDOWN - This could happen only in case of pre-shutdown failure.
+ BUILDING = 3,
+
+ // Index is being updated.
+ // Possible transitions:
+ // -> READY - Updating of index finished or was interrupted by
+ // pre-shutdown.
+ // -> SHUTDOWN - This could happen only in case of pre-shutdown failure.
+ UPDATING = 4,
+
+ // Index is ready.
+ // Possible transitions:
+ // -> UPDATING - Index was found outdated.
+ // -> SHUTDOWN - Index is shutting down.
+ READY = 5,
+
+ // Index is shutting down.
+ SHUTDOWN = 6
+ };
+
+ static char const* StateString(EState aState);
+ void ChangeState(EState aNewState);
+ void NotifyAsyncGetDiskConsumptionCallbacks();
+
+ // Allocates and releases buffer used for reading and writing index.
+ void AllocBuffer();
+ void ReleaseBuffer();
+
+ // Methods used by CacheIndexEntryAutoManage to keep the iterators up to date.
+ void AddRecordToIterators(CacheIndexRecord* aRecord);
+ void RemoveRecordFromIterators(CacheIndexRecord* aRecord);
+ void ReplaceRecordInIterators(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord);
+
+ // Memory reporting (private part)
+ size_t SizeOfExcludingThisInternal(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ // Reports telemetry about cache, i.e. size, entry count and content type
+ // stats.
+ void DoTelemetryReport();
+
+ static mozilla::StaticRefPtr<CacheIndex> gInstance;
+ static StaticMutex sLock;
+
+ nsCOMPtr<nsIFile> mCacheDirectory;
+
+ EState mState;
+  // Timestamp of when the index was initialized. We use it to delay the
+  // initial update or build of the index.
+  TimeStamp mStartTime;
+  // Set to true in PreShutdown(); it is checked at various places to prevent
+  // starting any process (write, update, etc.) during shutdown.
+ bool mShuttingDown;
+ // When set to true, update process should start as soon as possible. This
+ // flag is set whenever we find some inconsistency which would be fixed by
+ // update process. The flag is checked always when switching to READY state.
+ // To make sure we start the update process as soon as possible, methods that
+ // set this flag should also call StartUpdatingIndexIfNeeded() to cover the
+ // case when we are currently in READY state.
+ bool mIndexNeedsUpdate;
+  // Set at the beginning of RemoveAll() which clears the whole index. When
+  // removing all entries we must stop any pending reading, writing, updating
+  // or building operation. This flag is checked at various places and it
+  // prevents us from starting another operation (e.g. canceling reading of the
+  // index would normally start the update or build process).
+ bool mRemovingAll;
+ // Whether the index file on disk exists and is valid.
+ bool mIndexOnDiskIsValid;
+ // When something goes wrong during updating or building process, we don't
+ // mark index clean (and also don't write journal) to ensure that update or
+ // build will be initiated on the next start.
+ bool mDontMarkIndexClean;
+  // Timestamp value from the index file. It is used during the update process
+  // to skip entries that were last modified before this timestamp.
+ uint32_t mIndexTimeStamp;
+  // Timestamp of the last time the index was dumped to disk.
+  // NOTE: The index might not necessarily have been dumped at this time. The
+  // value is used to schedule the next dump of the index.
+ TimeStamp mLastDumpTime;
+
+ // Timer of delayed update/build.
+ nsCOMPtr<nsITimer> mUpdateTimer;
+  // True when a build or update event is posted.
+ bool mUpdateEventPending;
+
+  // Helper members used when reading/writing the index from/to disk.
+  // Contains the number of entries that should be skipped:
+  //  - in the hashtable when writing the index because they were already
+  //    written
+  //  - in the index file when reading the index because they were already read
+  uint32_t mSkipEntries;
+  // Number of entries that should be written to disk. This is the number of
+  // entries in the hashtable that are initialized and are not marked as
+  // removed when writing begins.
+ uint32_t mProcessEntries;
+ char* mRWBuf;
+ uint32_t mRWBufSize;
+ uint32_t mRWBufPos;
+ RefPtr<CacheHash> mRWHash;
+
+ // True if read or write operation is pending. It is used to ensure that
+ // mRWBuf is not freed until OnDataRead or OnDataWritten is called.
+ bool mRWPending;
+
+ // Reading of journal succeeded if true.
+ bool mJournalReadSuccessfully;
+
+ // Handle used for writing and reading index file.
+ RefPtr<CacheFileHandle> mIndexHandle;
+ // Handle used for reading journal file.
+ RefPtr<CacheFileHandle> mJournalHandle;
+ // Used to check the existence of the file during reading process.
+ RefPtr<CacheFileHandle> mTmpHandle;
+
+ RefPtr<FileOpenHelper> mIndexFileOpener;
+ RefPtr<FileOpenHelper> mJournalFileOpener;
+ RefPtr<FileOpenHelper> mTmpFileOpener;
+
+ // Directory enumerator used when building and updating index.
+ nsCOMPtr<nsIDirectoryEnumerator> mDirEnumerator;
+
+ // Main index hashtable.
+ nsTHashtable<CacheIndexEntry> mIndex;
+
+ // We cannot add, remove or change any entry in mIndex in states READING and
+ // WRITING. We track all changes in mPendingUpdates during these states.
+ nsTHashtable<CacheIndexEntryUpdate> mPendingUpdates;
+
+  // Contains statistics about mIndex + mPendingUpdates.
+ CacheIndexStats mIndexStats;
+
+ // When reading journal, we must first parse the whole file and apply the
+ // changes iff the journal was read successfully. mTmpJournal is used to store
+ // entries from the journal file. We throw away all these entries if parsing
+ // of the journal fails or the hash does not match.
+ nsTHashtable<CacheIndexEntry> mTmpJournal;
+
+  // FrecencyArray maintains the order of entry records for eviction. Ideally,
+  // the records would be ordered by frecency all the time, but since this
+  // would be quite expensive, we allow a certain number of entries to be out
+  // of order. When the frecency is updated the new value is always bigger than
+  // the old one. Instead of keeping updated entries at the same position, we
+  // move them to the end of the array. This protects recently updated entries
+  // from eviction. The array is sorted once we hit the limit of maximum
+  // unsorted entries.
+ class FrecencyArray {
+ class Iterator {
+ public:
+ explicit Iterator(nsTArray<CacheIndexRecord*>* aRecs)
+ : mRecs(aRecs), mIdx(0) {
+ while (!Done() && !(*mRecs)[mIdx]) {
+ mIdx++;
+ }
+ }
+
+ bool Done() const { return mIdx == mRecs->Length(); }
+
+ CacheIndexRecord* Get() const {
+ MOZ_ASSERT(!Done());
+ return (*mRecs)[mIdx];
+ }
+
+ void Next() {
+ MOZ_ASSERT(!Done());
+ ++mIdx;
+ while (!Done() && !(*mRecs)[mIdx]) {
+ mIdx++;
+ }
+ }
+
+ private:
+ nsTArray<CacheIndexRecord*>* mRecs;
+ uint32_t mIdx;
+ };
+
+ public:
+ Iterator Iter() { return Iterator(&mRecs); }
+
+ FrecencyArray() : mUnsortedElements(0), mRemovedElements(0) {}
+
+ // Methods used by CacheIndexEntryAutoManage to keep the array up to date.
+ void AppendRecord(CacheIndexRecord* aRecord);
+ void RemoveRecord(CacheIndexRecord* aRecord);
+ void ReplaceRecord(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord);
+ bool RecordExisted(CacheIndexRecord* aRecord);
+ void SortIfNeeded();
+
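+    // Note that Length() counts only live records; removed records stay as
+    // null placeholders in mRecs until the next sort strips them out.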
+ size_t Length() const { return mRecs.Length() - mRemovedElements; }
+ void Clear() { mRecs.Clear(); }
+
+ private:
+ friend class CacheIndex;
+
+ nsTArray<CacheIndexRecord*> mRecs;
+ uint32_t mUnsortedElements;
+ // Instead of removing elements from the array immediately, we null them out
+ // and the iterator skips them when accessing the array. The null pointers
+ // are placed at the end during sorting and we strip them out all at once.
+ // This saves moving a lot of memory in nsTArray::RemoveElementsAt.
+ uint32_t mRemovedElements;
+ };
+
+ FrecencyArray mFrecencyArray;
+
+ nsTArray<CacheIndexIterator*> mIterators;
+
+  // This flag is true iff we are between CacheStorageService::Clear() and
+  // processing all contexts to be evicted. It makes the UI show "calculating"
+  // instead of any intermediate cache size.
+ bool mAsyncGetDiskConsumptionBlocked;
+
+ class DiskConsumptionObserver : public Runnable {
+ public:
+ static DiskConsumptionObserver* Init(
+ nsICacheStorageConsumptionObserver* aObserver) {
+ nsWeakPtr observer = do_GetWeakReference(aObserver);
+ if (!observer) return nullptr;
+
+ return new DiskConsumptionObserver(observer);
+ }
+
+ void OnDiskConsumption(int64_t aSize) {
+ mSize = aSize;
+ NS_DispatchToMainThread(this);
+ }
+
+ private:
+ explicit DiskConsumptionObserver(nsWeakPtr const& aWeakObserver)
+ : Runnable("net::CacheIndex::DiskConsumptionObserver"),
+ mObserver(aWeakObserver),
+ mSize(0) {}
+ virtual ~DiskConsumptionObserver() {
+ if (mObserver && !NS_IsMainThread()) {
+ NS_ReleaseOnMainThread("DiskConsumptionObserver::mObserver",
+ mObserver.forget());
+ }
+ }
+
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ nsCOMPtr<nsICacheStorageConsumptionObserver> observer =
+ do_QueryReferent(mObserver);
+
+ mObserver = nullptr;
+
+ if (observer) {
+ observer->OnNetworkCacheDiskConsumption(mSize);
+ }
+
+ return NS_OK;
+ }
+
+ nsWeakPtr mObserver;
+ int64_t mSize;
+ };
+
+ // List of async observers that want to get disk consumption information
+ nsTArray<RefPtr<DiskConsumptionObserver> > mDiskConsumptionObservers;
+
+ // Number of bytes written to the cache since the last telemetry report
+ uint64_t mTotalBytesWritten;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
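A minimal standalone sketch of the lazy-removal scheme described in the FrecencyArray comments above (illustration only, not Mozilla code; std::vector and int stand in for nsTArray and CacheIndexRecord, and the re-sort threshold is passed in explicitly):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // FrecencyArray-style lazy removal: removed slots are nulled out and
    // skipped on iteration; nulls are stripped only when the array is sorted.
    struct LazyFrecencyArray {
      std::vector<const int*> recs;  // nullptr marks a removed slot
      std::size_t unsorted = 0;      // appends since the last sort
      std::size_t removed = 0;       // nulled-out slots not yet stripped

      void Append(const int* r) {
        recs.push_back(r);
        ++unsorted;
      }

      void Remove(const int* r) {
        auto it = std::find(recs.begin(), recs.end(), r);
        if (it != recs.end()) {
          *it = nullptr;  // no memory is moved here, unlike RemoveElementsAt
          ++removed;
        }
      }

      std::size_t Length() const { return recs.size() - removed; }

      void SortIfNeeded(std::size_t maxUnsorted) {
        if (unsorted <= maxUnsorted) return;
        // Order by value, sink the null (removed) slots to the end, then
        // strip them all at once.
        std::sort(recs.begin(), recs.end(), [](const int* a, const int* b) {
          if (!a) return false;
          if (!b) return true;
          return *a < *b;
        });
        recs.resize(recs.size() - removed);
        removed = 0;
        unsorted = 0;
      }
    };

The real class additionally re-appends records whose frecency was updated, which this sketch does not attempt to reproduce.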
diff --git a/netwerk/cache2/CacheIndexContextIterator.cpp b/netwerk/cache2/CacheIndexContextIterator.cpp
new file mode 100644
index 0000000000..0c53049158
--- /dev/null
+++ b/netwerk/cache2/CacheIndexContextIterator.cpp
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheIndexContextIterator.h"
+#include "CacheIndex.h"
+#include "nsString.h"
+
+namespace mozilla {
+namespace net {
+
+CacheIndexContextIterator::CacheIndexContextIterator(CacheIndex* aIndex,
+ bool aAddNew,
+ nsILoadContextInfo* aInfo)
+ : CacheIndexIterator(aIndex, aAddNew), mInfo(aInfo) {}
+
+void CacheIndexContextIterator::AddRecord(CacheIndexRecord* aRecord) {
+ if (CacheIndexEntry::RecordMatchesLoadContextInfo(aRecord, mInfo)) {
+ CacheIndexIterator::AddRecord(aRecord);
+ }
+}
+
+void CacheIndexContextIterator::AddRecords(
+ const nsTArray<CacheIndexRecord*>& aRecords) {
+  // Add records one by one so that those with a wrong context are ignored.
+ for (uint32_t i = 0; i < aRecords.Length(); ++i) {
+ AddRecord(aRecords[i]);
+ }
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheIndexContextIterator.h b/netwerk/cache2/CacheIndexContextIterator.h
new file mode 100644
index 0000000000..6217783d75
--- /dev/null
+++ b/netwerk/cache2/CacheIndexContextIterator.h
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheIndexContextIterator__h__
+#define CacheIndexContextIterator__h__
+
+#include "CacheIndexIterator.h"
+
+class nsILoadContextInfo;
+
+namespace mozilla {
+namespace net {
+
+class CacheIndexContextIterator : public CacheIndexIterator {
+ public:
+ CacheIndexContextIterator(CacheIndex* aIndex, bool aAddNew,
+ nsILoadContextInfo* aInfo);
+ virtual ~CacheIndexContextIterator() = default;
+
+ private:
+ virtual void AddRecord(CacheIndexRecord* aRecord) override;
+ virtual void AddRecords(const nsTArray<CacheIndexRecord*>& aRecords);
+
+ nsCOMPtr<nsILoadContextInfo> mInfo;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheIndexIterator.cpp b/netwerk/cache2/CacheIndexIterator.cpp
new file mode 100644
index 0000000000..274b0bcc01
--- /dev/null
+++ b/netwerk/cache2/CacheIndexIterator.cpp
@@ -0,0 +1,102 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheIndexIterator.h"
+#include "CacheIndex.h"
+#include "nsString.h"
+#include "mozilla/DebugOnly.h"
+
+namespace mozilla {
+namespace net {
+
+CacheIndexIterator::CacheIndexIterator(CacheIndex* aIndex, bool aAddNew)
+ : mStatus(NS_OK), mIndex(aIndex), mAddNew(aAddNew) {
+ LOG(("CacheIndexIterator::CacheIndexIterator() [this=%p]", this));
+}
+
+CacheIndexIterator::~CacheIndexIterator() {
+ LOG(("CacheIndexIterator::~CacheIndexIterator() [this=%p]", this));
+
+ Close();
+}
+
+nsresult CacheIndexIterator::GetNextHash(SHA1Sum::Hash* aHash) {
+ LOG(("CacheIndexIterator::GetNextHash() [this=%p]", this));
+
+ StaticMutexAutoLock lock(CacheIndex::sLock);
+
+ if (NS_FAILED(mStatus)) {
+ return mStatus;
+ }
+
+ if (!mRecords.Length()) {
+ CloseInternal(NS_ERROR_NOT_AVAILABLE);
+ return mStatus;
+ }
+
+ memcpy(aHash, mRecords.PopLastElement()->mHash, sizeof(SHA1Sum::Hash));
+
+ return NS_OK;
+}
+
+nsresult CacheIndexIterator::Close() {
+ LOG(("CacheIndexIterator::Close() [this=%p]", this));
+
+ StaticMutexAutoLock lock(CacheIndex::sLock);
+
+ return CloseInternal(NS_ERROR_NOT_AVAILABLE);
+}
+
+nsresult CacheIndexIterator::CloseInternal(nsresult aStatus) {
+ LOG(("CacheIndexIterator::CloseInternal() [this=%p, status=0x%08" PRIx32 "]",
+ this, static_cast<uint32_t>(aStatus)));
+
+ // Make sure status will be a failure
+ MOZ_ASSERT(NS_FAILED(aStatus));
+ if (NS_SUCCEEDED(aStatus)) {
+ aStatus = NS_ERROR_UNEXPECTED;
+ }
+
+ if (NS_FAILED(mStatus)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ DebugOnly<bool> removed = mIndex->mIterators.RemoveElement(this);
+ MOZ_ASSERT(removed);
+ mStatus = aStatus;
+
+ return NS_OK;
+}
+
+void CacheIndexIterator::AddRecord(CacheIndexRecord* aRecord) {
+ LOG(("CacheIndexIterator::AddRecord() [this=%p, record=%p]", this, aRecord));
+
+ mRecords.AppendElement(aRecord);
+}
+
+bool CacheIndexIterator::RemoveRecord(CacheIndexRecord* aRecord) {
+ LOG(("CacheIndexIterator::RemoveRecord() [this=%p, record=%p]", this,
+ aRecord));
+
+ return mRecords.RemoveElement(aRecord);
+}
+
+bool CacheIndexIterator::ReplaceRecord(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord) {
+ LOG(
+ ("CacheIndexIterator::ReplaceRecord() [this=%p, oldRecord=%p, "
+ "newRecord=%p]",
+ this, aOldRecord, aNewRecord));
+
+ if (RemoveRecord(aOldRecord)) {
+ AddRecord(aNewRecord);
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheIndexIterator.h b/netwerk/cache2/CacheIndexIterator.h
new file mode 100644
index 0000000000..8b952f190a
--- /dev/null
+++ b/netwerk/cache2/CacheIndexIterator.h
@@ -0,0 +1,57 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheIndexIterator__h__
+#define CacheIndexIterator__h__
+
+#include "nsTArray.h"
+#include "nsCOMPtr.h"
+#include "mozilla/SHA1.h"
+
+namespace mozilla {
+namespace net {
+
+class CacheIndex;
+struct CacheIndexRecord;
+
+class CacheIndexIterator {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CacheIndexIterator)
+
+ CacheIndexIterator(CacheIndex* aIndex, bool aAddNew);
+
+ protected:
+ virtual ~CacheIndexIterator();
+
+ public:
+  // Returns the hash of the next entry. When there are no more entries,
+  // NS_ERROR_NOT_AVAILABLE is returned and the iterator is closed. A different
+  // error is returned if the iterator was closed for another reason, e.g.
+  // shutdown. (A usage sketch follows this header.)
+ nsresult GetNextHash(SHA1Sum::Hash* aHash);
+
+ // Closes the iterator. This means the iterator is removed from the list of
+ // iterators in CacheIndex.
+ nsresult Close();
+
+ protected:
+ friend class CacheIndex;
+
+ nsresult CloseInternal(nsresult aStatus);
+
+ bool ShouldBeNewAdded() { return mAddNew; }
+ virtual void AddRecord(CacheIndexRecord* aRecord);
+ bool RemoveRecord(CacheIndexRecord* aRecord);
+ bool ReplaceRecord(CacheIndexRecord* aOldRecord,
+ CacheIndexRecord* aNewRecord);
+
+ nsresult mStatus;
+ RefPtr<CacheIndex> mIndex;
+ nsTArray<CacheIndexRecord*> mRecords;
+ bool mAddNew;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
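The intended consumption pattern for GetNextHash() is a simple drain loop, the same pattern WalkDiskCacheRunnable uses later in CacheStorageService.cpp. A hedged sketch, assuming aIter was obtained from CacheIndex::GetIterator():

    // Drain a CacheIndexIterator. GetNextHash() returns NS_ERROR_NOT_AVAILABLE
    // once depleted and closes the iterator itself, so no explicit Close() is
    // needed on this path.
    static void DrainIterator(CacheIndexIterator* aIter) {
      SHA1Sum::Hash hash;
      while (NS_SUCCEEDED(aIter->GetNextHash(&hash))) {
        // Process the entry identified by |hash| here, e.g. hand it to
        // CacheFileIOManager::GetEntryInfo() as the disk-cache walker does.
      }
    }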
diff --git a/netwerk/cache2/CacheLog.cpp b/netwerk/cache2/CacheLog.cpp
new file mode 100644
index 0000000000..67d879a6c4
--- /dev/null
+++ b/netwerk/cache2/CacheLog.cpp
@@ -0,0 +1,22 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+
+namespace mozilla {
+namespace net {
+
+// Log module for cache2 (2013) cache implementation logging...
+//
+// To enable logging (see prlog.h for full details):
+//
+// set MOZ_LOG=cache2:5
+// set MOZ_LOG_FILE=network.log
+//
+// This enables LogLevel::Debug level information and places all output in
+// the file network.log.
+LazyLogModule gCache2Log("cache2");
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheLog.h b/netwerk/cache2/CacheLog.h
new file mode 100644
index 0000000000..f4117ea4f7
--- /dev/null
+++ b/netwerk/cache2/CacheLog.h
@@ -0,0 +1,20 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Cache2Log__h__
+#define Cache2Log__h__
+
+#include "mozilla/Logging.h"
+
+namespace mozilla {
+namespace net {
+
+extern LazyLogModule gCache2Log;
+#define LOG(x) MOZ_LOG(gCache2Log, mozilla::LogLevel::Debug, x)
+#define LOG_ENABLED() MOZ_LOG_TEST(gCache2Log, mozilla::LogLevel::Debug)
+
+} // namespace net
+} // namespace mozilla
+
+#endif
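The LOG()/LOG_ENABLED() macros above are used throughout the cache2 sources; a hedged sketch of the common pattern of guarding expensive argument construction behind LOG_ENABLED() (the function and its arguments are illustrative only):

    static void LogUriSpec(nsIURI* aURI) {
      if (LOG_ENABLED()) {
        // Only build the string when cache2 logging is actually enabled.
        nsAutoCString spec;
        aURI->GetSpec(spec);
        LOG(("LogUriSpec [uri=%s]", spec.get()));
      }
    }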
diff --git a/netwerk/cache2/CacheObserver.cpp b/netwerk/cache2/CacheObserver.cpp
new file mode 100644
index 0000000000..398cd494ec
--- /dev/null
+++ b/netwerk/cache2/CacheObserver.cpp
@@ -0,0 +1,257 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheObserver.h"
+
+#include "CacheStorageService.h"
+#include "CacheFileIOManager.h"
+#include "LoadContextInfo.h"
+#include "nsICacheStorage.h"
+#include "nsIObserverService.h"
+#include "mozilla/Services.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/TimeStamp.h"
+#include "nsServiceManagerUtils.h"
+#include "mozilla/net/NeckoCommon.h"
+#include "prsystem.h"
+#include <time.h>
+#include <math.h>
+
+namespace mozilla {
+namespace net {
+
+StaticRefPtr<CacheObserver> CacheObserver::sSelf;
+
+static float const kDefaultHalfLifeHours = 24.0F; // 24 hours
+float CacheObserver::sHalfLifeHours = kDefaultHalfLifeHours;
+
+// The default value will be overwritten as soon as the correct smart size is
+// calculated by CacheFileIOManager::UpdateSmartCacheSize(). It's limited to
+// 1GB just in case the size is never calculated, which might in theory happen
+// if GetDiskSpaceAvailable() always fails.
+Atomic<uint32_t, Relaxed> CacheObserver::sSmartDiskCacheCapacity(1024 * 1024);
+
+Atomic<PRIntervalTime> CacheObserver::sShutdownDemandedTime(
+ PR_INTERVAL_NO_TIMEOUT);
+
+NS_IMPL_ISUPPORTS(CacheObserver, nsIObserver, nsISupportsWeakReference)
+
+// static
+nsresult CacheObserver::Init() {
+ if (IsNeckoChild()) {
+ return NS_OK;
+ }
+
+ if (sSelf) {
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+ if (!obs) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ sSelf = new CacheObserver();
+
+ obs->AddObserver(sSelf, "prefservice:after-app-defaults", true);
+ obs->AddObserver(sSelf, "profile-do-change", true);
+ obs->AddObserver(sSelf, "browser-delayed-startup-finished", true);
+ obs->AddObserver(sSelf, "profile-before-change", true);
+ obs->AddObserver(sSelf, "xpcom-shutdown", true);
+ obs->AddObserver(sSelf, "last-pb-context-exited", true);
+ obs->AddObserver(sSelf, "memory-pressure", true);
+
+ return NS_OK;
+}
+
+// static
+nsresult CacheObserver::Shutdown() {
+ if (!sSelf) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ sSelf = nullptr;
+ return NS_OK;
+}
+
+void CacheObserver::AttachToPreferences() {
+ mozilla::Preferences::GetComplex(
+ "browser.cache.disk.parent_directory", NS_GET_IID(nsIFile),
+ getter_AddRefs(mCacheParentDirectoryOverride));
+
+ sHalfLifeHours = std::max(
+ 0.01F, std::min(1440.0F, mozilla::Preferences::GetFloat(
+ "browser.cache.frecency_half_life_hours",
+ kDefaultHalfLifeHours)));
+}
+
+// static
+uint32_t CacheObserver::MemoryCacheCapacity() {
+ if (StaticPrefs::browser_cache_memory_capacity() >= 0) {
+ return StaticPrefs::browser_cache_memory_capacity();
+ }
+
+  // Cache of the calculated memory capacity based on the system memory size
+  // in KB (C++11 guarantees local statics will be initialized once and in a
+  // thread-safe way).
+ static int32_t sAutoMemoryCacheCapacity = ([] {
+ uint64_t bytes = PR_GetPhysicalMemorySize();
+ // If getting the physical memory failed, arbitrarily assume
+ // 32 MB of RAM. We use a low default to have a reasonable
+ // size on all the devices we support.
+ if (bytes == 0) {
+ bytes = 32 * 1024 * 1024;
+ }
+    // Conversion from an unsigned 64-bit integer to double doesn't work on
+    // all platforms. We need to clamp the value at INT64_MAX to make sure we
+    // don't overflow.
+ if (bytes > INT64_MAX) {
+ bytes = INT64_MAX;
+ }
+ uint64_t kbytes = bytes >> 10;
+ double kBytesD = double(kbytes);
+ double x = log(kBytesD) / log(2.0) - 14;
+
+ int32_t capacity = 0;
+ if (x > 0) {
+ // 0.1 is added here for rounding
+ capacity = (int32_t)(x * x / 3.0 + x + 2.0 / 3 + 0.1);
+ if (capacity > 32) {
+ capacity = 32;
+ }
+ capacity <<= 10;
+ }
+ return capacity;
+ })();
+
+ return sAutoMemoryCacheCapacity;
+}
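A worked example of the heuristic above; the arithmetic is illustrative, not taken from the source:

    // For a machine reporting 4 GiB of physical memory:
    //   kbytes   = (4 GiB) >> 10                        = 4,194,304 KB
    //   x        = log2(4,194,304) - 14                 = 22 - 14 = 8
    //   capacity = (int32_t)(8*8/3.0 + 8 + 2/3.0 + 0.1) = 30
    //   capacity <<= 10                                  -> 30,720 KB (~30 MB)
    // The 32 << 10 = 32,768 KB (32 MB) ceiling is reached at roughly 5 GiB of
    // physical memory and above.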
+
+// static
+void CacheObserver::SetSmartDiskCacheCapacity(uint32_t aCapacity) {
+ sSmartDiskCacheCapacity = aCapacity;
+}
+
+// static
+uint32_t CacheObserver::DiskCacheCapacity() {
+ return SmartCacheSizeEnabled() ? sSmartDiskCacheCapacity
+ : StaticPrefs::browser_cache_disk_capacity();
+}
+
+// static
+void CacheObserver::ParentDirOverride(nsIFile** aDir) {
+ if (NS_WARN_IF(!aDir)) return;
+
+ *aDir = nullptr;
+
+ if (!sSelf) return;
+ if (!sSelf->mCacheParentDirectoryOverride) return;
+
+ sSelf->mCacheParentDirectoryOverride->Clone(aDir);
+}
+
+// static
+bool CacheObserver::EntryIsTooBig(int64_t aSize, bool aUsingDisk) {
+ // If custom limit is set, check it.
+ int64_t preferredLimit =
+ aUsingDisk ? MaxDiskEntrySize() : MaxMemoryEntrySize();
+
+ // do not convert to bytes when the limit is -1, which means no limit
+ if (preferredLimit > 0) {
+ preferredLimit <<= 10;
+ }
+
+ if (preferredLimit != -1 && aSize > preferredLimit) return true;
+
+  // Otherwise (or when within the custom limit), check against the limit
+  // derived from the global limit. It's 1/8 of the respective capacity.
+ int64_t derivedLimit =
+ aUsingDisk ? DiskCacheCapacity() : MemoryCacheCapacity();
+ derivedLimit <<= (10 - 3);
+
+ if (aSize > derivedLimit) return true;
+
+ return false;
+}
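To make the two limits concrete, a worked example with hypothetical pref values (the numbers are illustrative, not defaults taken from the source):

    // With MaxDiskEntrySize() = 50,000 KB and DiskCacheCapacity() = 256,000 KB:
    //   preferredLimit = 50,000 << 10        = 51,200,000 bytes
    //   derivedLimit   = 256,000 << (10 - 3) = 32,768,000 bytes
    //                    (i.e. 1/8 of the 256,000 KB capacity, in bytes)
    // An entry is rejected as "too big" when aSize exceeds either limit, so
    // here the effective cap is the derived 32,768,000 bytes.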
+
+// static
+bool CacheObserver::IsPastShutdownIOLag() {
+#ifdef DEBUG
+ return false;
+#endif
+
+ if (sShutdownDemandedTime == PR_INTERVAL_NO_TIMEOUT ||
+ MaxShutdownIOLag() == UINT32_MAX) {
+ return false;
+ }
+
+ static const PRIntervalTime kMaxShutdownIOLag =
+ PR_SecondsToInterval(MaxShutdownIOLag());
+
+ if ((PR_IntervalNow() - sShutdownDemandedTime) > kMaxShutdownIOLag) {
+ return true;
+ }
+
+ return false;
+}
+
+NS_IMETHODIMP
+CacheObserver::Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData) {
+ if (!strcmp(aTopic, "prefservice:after-app-defaults")) {
+ CacheFileIOManager::Init();
+ return NS_OK;
+ }
+
+ if (!strcmp(aTopic, "profile-do-change")) {
+ AttachToPreferences();
+ CacheFileIOManager::Init();
+ CacheFileIOManager::OnProfile();
+ return NS_OK;
+ }
+
+ if (!strcmp(aTopic, "browser-delayed-startup-finished")) {
+ CacheStorageService::CleaupCacheDirectories();
+ return NS_OK;
+ }
+
+ if (!strcmp(aTopic, "profile-change-net-teardown") ||
+ !strcmp(aTopic, "profile-before-change") ||
+ !strcmp(aTopic, "xpcom-shutdown")) {
+ if (sShutdownDemandedTime == PR_INTERVAL_NO_TIMEOUT) {
+ sShutdownDemandedTime = PR_IntervalNow();
+ }
+
+ RefPtr<CacheStorageService> service = CacheStorageService::Self();
+ if (service) {
+ service->Shutdown();
+ }
+
+ CacheFileIOManager::Shutdown();
+ return NS_OK;
+ }
+
+ if (!strcmp(aTopic, "last-pb-context-exited")) {
+ RefPtr<CacheStorageService> service = CacheStorageService::Self();
+ if (service) {
+ service->DropPrivateBrowsingEntries();
+ }
+
+ return NS_OK;
+ }
+
+ if (!strcmp(aTopic, "memory-pressure")) {
+ RefPtr<CacheStorageService> service = CacheStorageService::Self();
+ if (service)
+ service->PurgeFromMemory(nsICacheStorageService::PURGE_EVERYTHING);
+
+ return NS_OK;
+ }
+
+ MOZ_ASSERT(false, "Missing observer handler");
+ return NS_OK;
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheObserver.h b/netwerk/cache2/CacheObserver.h
new file mode 100644
index 0000000000..633af7eec5
--- /dev/null
+++ b/netwerk/cache2/CacheObserver.h
@@ -0,0 +1,109 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheObserver__h__
+#define CacheObserver__h__
+
+#include "nsIObserver.h"
+#include "nsIFile.h"
+#include "nsCOMPtr.h"
+#include "nsWeakReference.h"
+#include "mozilla/StaticPrefs_browser.h"
+#include "mozilla/StaticPrefs_privacy.h"
+#include "mozilla/StaticPtr.h"
+#include <algorithm>
+
+namespace mozilla {
+namespace net {
+
+class CacheObserver : public nsIObserver, public nsSupportsWeakReference {
+ virtual ~CacheObserver() = default;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIOBSERVER
+
+ static nsresult Init();
+ static nsresult Shutdown();
+ static CacheObserver* Self() { return sSelf; }
+
+ // Access to preferences
+ static bool UseDiskCache() {
+ return StaticPrefs::browser_cache_disk_enable();
+ }
+ static bool UseMemoryCache() {
+ return StaticPrefs::browser_cache_memory_enable();
+ }
+ static uint32_t MetadataMemoryLimit() // result in kilobytes.
+ {
+ return StaticPrefs::browser_cache_disk_metadata_memory_limit();
+ }
+ static uint32_t MemoryCacheCapacity(); // result in kilobytes.
+ static uint32_t DiskCacheCapacity(); // result in kilobytes.
+ static void SetSmartDiskCacheCapacity(uint32_t); // parameter in kilobytes.
+ static uint32_t DiskFreeSpaceSoftLimit() // result in kilobytes.
+ {
+ return StaticPrefs::browser_cache_disk_free_space_soft_limit();
+ }
+ static uint32_t DiskFreeSpaceHardLimit() // result in kilobytes.
+ {
+ return StaticPrefs::browser_cache_disk_free_space_hard_limit();
+ }
+ static bool SmartCacheSizeEnabled() {
+ return StaticPrefs::browser_cache_disk_smart_size_enabled();
+ }
+ static uint32_t PreloadChunkCount() {
+ return StaticPrefs::browser_cache_disk_preload_chunk_count();
+ }
+ static uint32_t MaxMemoryEntrySize() // result in kilobytes.
+ {
+ return StaticPrefs::browser_cache_memory_max_entry_size();
+ }
+ static uint32_t MaxDiskEntrySize() // result in kilobytes.
+ {
+ return StaticPrefs::browser_cache_disk_max_entry_size();
+ }
+ static uint32_t MaxDiskChunksMemoryUsage(
+ bool aPriority) // result in kilobytes.
+ {
+ return aPriority
+ ? StaticPrefs::
+ browser_cache_disk_max_priority_chunks_memory_usage()
+ : StaticPrefs::browser_cache_disk_max_chunks_memory_usage();
+ }
+ static uint32_t HalfLifeSeconds() { return sHalfLifeHours * 60.0F * 60.0F; }
+ static bool ClearCacheOnShutdown() {
+ return StaticPrefs::privacy_sanitize_sanitizeOnShutdown() &&
+ StaticPrefs::privacy_clearOnShutdown_cache();
+ }
+ static void ParentDirOverride(nsIFile** aDir);
+
+ static bool EntryIsTooBig(int64_t aSize, bool aUsingDisk);
+
+ static uint32_t MaxShutdownIOLag() {
+ return StaticPrefs::browser_cache_max_shutdown_io_lag();
+ }
+ static bool IsPastShutdownIOLag();
+
+ static bool ShuttingDown() {
+ return sShutdownDemandedTime != PR_INTERVAL_NO_TIMEOUT;
+ }
+
+ private:
+ static StaticRefPtr<CacheObserver> sSelf;
+
+ void AttachToPreferences();
+
+ static int32_t sAutoMemoryCacheCapacity;
+ static Atomic<uint32_t, Relaxed> sSmartDiskCacheCapacity;
+ static float sHalfLifeHours;
+ static Atomic<PRIntervalTime> sShutdownDemandedTime;
+
+ // Non static properties, accessible via sSelf
+ nsCOMPtr<nsIFile> mCacheParentDirectoryOverride;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheStorage.cpp b/netwerk/cache2/CacheStorage.cpp
new file mode 100644
index 0000000000..5f3d047bf1
--- /dev/null
+++ b/netwerk/cache2/CacheStorage.cpp
@@ -0,0 +1,253 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheStorage.h"
+#include "CacheStorageService.h"
+#include "CacheEntry.h"
+#include "CacheObserver.h"
+
+#include "OldWrappers.h"
+
+#include "nsICacheEntryDoomCallback.h"
+
+#include "nsIApplicationCache.h"
+#include "nsIApplicationCacheService.h"
+#include "nsIURI.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsServiceManagerUtils.h"
+
+namespace mozilla::net {
+
+NS_IMPL_ISUPPORTS(CacheStorage, nsICacheStorage)
+
+CacheStorage::CacheStorage(nsILoadContextInfo* aInfo, bool aAllowDisk,
+ bool aLookupAppCache, bool aSkipSizeCheck,
+ bool aPinning)
+ : mLoadContextInfo(aInfo ? GetLoadContextInfo(aInfo) : nullptr),
+ mWriteToDisk(aAllowDisk),
+ mLookupAppCache(aLookupAppCache),
+ mSkipSizeCheck(aSkipSizeCheck),
+ mPinning(aPinning) {}
+
+NS_IMETHODIMP CacheStorage::AsyncOpenURI(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ uint32_t aFlags,
+ nsICacheEntryOpenCallback* aCallback) {
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ if (MOZ_UNLIKELY(!CacheObserver::UseDiskCache()) && mWriteToDisk &&
+ !(aFlags & OPEN_INTERCEPTED)) {
+ aCallback->OnCacheEntryAvailable(nullptr, false, nullptr,
+ NS_ERROR_NOT_AVAILABLE);
+ return NS_OK;
+ }
+
+ if (MOZ_UNLIKELY(!CacheObserver::UseMemoryCache()) && !mWriteToDisk &&
+ !(aFlags & OPEN_INTERCEPTED)) {
+ aCallback->OnCacheEntryAvailable(nullptr, false, nullptr,
+ NS_ERROR_NOT_AVAILABLE);
+ return NS_OK;
+ }
+
+ NS_ENSURE_ARG(aURI);
+ NS_ENSURE_ARG(aCallback);
+
+ nsresult rv;
+
+ bool truncate = aFlags & nsICacheStorage::OPEN_TRUNCATE;
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString asciiSpec;
+ rv = noRefURI->GetAsciiSpec(asciiSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIApplicationCache> appCache;
+ if (LookupAppCache()) {
+ rv = ChooseApplicationCache(noRefURI, getter_AddRefs(appCache));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (appCache) {
+      // From a chosen appcache, open only as read-only
+ aFlags &= ~nsICacheStorage::OPEN_TRUNCATE;
+ }
+ }
+
+ if (appCache) {
+ nsAutoCString scheme;
+ rv = noRefURI->GetScheme(scheme);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<_OldCacheLoad> appCacheLoad =
+ new _OldCacheLoad(scheme, asciiSpec, aCallback, appCache, LoadInfo(),
+ WriteToDisk(), aFlags);
+ rv = appCacheLoad->Start();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ LOG(("CacheStorage::AsyncOpenURI loading from appcache"));
+ return NS_OK;
+ }
+
+ RefPtr<CacheEntryHandle> entry;
+ rv = CacheStorageService::Self()->AddStorageEntry(
+ this, asciiSpec, aIdExtension,
+ truncate, // replace any existing one?
+ getter_AddRefs(entry));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // May invoke the callback synchronously
+ entry->Entry()->AsyncOpen(aCallback, aFlags);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorage::OpenTruncate(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntry** aCacheEntry) {
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString asciiSpec;
+ rv = noRefURI->GetAsciiSpec(asciiSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<CacheEntryHandle> handle;
+ rv = CacheStorageService::Self()->AddStorageEntry(
+ this, asciiSpec, aIdExtension,
+ true, // replace any existing one
+ getter_AddRefs(handle));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Just open w/o callback, similar to nsICacheEntry.recreate().
+ handle->Entry()->AsyncOpen(nullptr, OPEN_TRUNCATE);
+
+  // Return a write handle; the consumer is supposed to fill in the entry.
+ RefPtr<CacheEntryHandle> writeHandle = handle->Entry()->NewWriteHandle();
+ writeHandle.forget(aCacheEntry);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorage::Exists(nsIURI* aURI, const nsACString& aIdExtension,
+ bool* aResult) {
+ NS_ENSURE_ARG(aURI);
+ NS_ENSURE_ARG(aResult);
+
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString asciiSpec;
+ rv = noRefURI->GetAsciiSpec(asciiSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return CacheStorageService::Self()->CheckStorageEntry(this, asciiSpec,
+ aIdExtension, aResult);
+}
+
+NS_IMETHODIMP
+CacheStorage::GetCacheIndexEntryAttrs(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ bool* aHasAltData, uint32_t* aSizeInKB) {
+ NS_ENSURE_ARG(aURI);
+ NS_ENSURE_ARG(aHasAltData);
+ NS_ENSURE_ARG(aSizeInKB);
+ if (!CacheStorageService::Self()) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString asciiSpec;
+ rv = noRefURI->GetAsciiSpec(asciiSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return CacheStorageService::Self()->GetCacheIndexEntryAttrs(
+ this, asciiSpec, aIdExtension, aHasAltData, aSizeInKB);
+}
+
+NS_IMETHODIMP CacheStorage::AsyncDoomURI(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntryDoomCallback* aCallback) {
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString asciiSpec;
+ rv = noRefURI->GetAsciiSpec(asciiSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = CacheStorageService::Self()->DoomStorageEntry(this, asciiSpec,
+ aIdExtension, aCallback);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorage::AsyncEvictStorage(
+ nsICacheEntryDoomCallback* aCallback) {
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv =
+ CacheStorageService::Self()->DoomStorageEntries(this, aCallback);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorage::AsyncVisitStorage(nsICacheStorageVisitor* aVisitor,
+ bool aVisitEntries) {
+ LOG(("CacheStorage::AsyncVisitStorage [this=%p, cb=%p, disk=%d]", this,
+ aVisitor, (bool)mWriteToDisk));
+ if (!CacheStorageService::Self()) return NS_ERROR_NOT_INITIALIZED;
+
+ nsresult rv = CacheStorageService::Self()->WalkStorageEntries(
+ this, aVisitEntries, aVisitor);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+// Internal
+
+nsresult CacheStorage::ChooseApplicationCache(nsIURI* aURI,
+ nsIApplicationCache** aCache) {
+ nsresult rv;
+
+ nsCOMPtr<nsIApplicationCacheService> appCacheService =
+ do_GetService(NS_APPLICATIONCACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString cacheKey;
+ rv = aURI->GetAsciiSpec(cacheKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = appCacheService->ChooseApplicationCache(cacheKey, LoadInfo(), aCache);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+} // namespace mozilla::net
diff --git a/netwerk/cache2/CacheStorage.h b/netwerk/cache2/CacheStorage.h
new file mode 100644
index 0000000000..c6fd9cabb8
--- /dev/null
+++ b/netwerk/cache2/CacheStorage.h
@@ -0,0 +1,72 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheStorage__h__
+#define CacheStorage__h__
+
+#include "nsICacheStorage.h"
+#include "CacheEntry.h"
+#include "LoadContextInfo.h"
+
+#include "nsRefPtrHashtable.h"
+#include "nsThreadUtils.h"
+#include "nsCOMPtr.h"
+#include "nsILoadContextInfo.h"
+#include "nsIApplicationCache.h"
+
+class nsIURI;
+class nsIApplicationCache;
+
+namespace mozilla {
+namespace net {
+
+// This dance is needed to make CacheEntryTable forward-declarable in headers
+// without exporting the CacheEntry.h file, so that nsNetModule.cpp compiles.
+typedef nsRefPtrHashtable<nsCStringHashKey, CacheEntry> TCacheEntryTable;
+class CacheEntryTable : public TCacheEntryTable {
+ public:
+ enum EType { MEMORY_ONLY, ALL_ENTRIES };
+
+ explicit CacheEntryTable(EType aType) : mType(aType) {}
+ EType Type() const { return mType; }
+
+ private:
+ EType const mType;
+ CacheEntryTable() = delete;
+};
+
+class CacheStorage : public nsICacheStorage {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHESTORAGE
+
+ public:
+ CacheStorage(nsILoadContextInfo* aInfo, bool aAllowDisk, bool aLookupAppCache,
+ bool aSkipSizeCheck, bool aPinning);
+
+ protected:
+ virtual ~CacheStorage() = default;
+
+ nsresult ChooseApplicationCache(nsIURI* aURI, nsIApplicationCache** aCache);
+
+ RefPtr<LoadContextInfo> mLoadContextInfo;
+ bool mWriteToDisk : 1;
+ bool mLookupAppCache : 1;
+ bool mSkipSizeCheck : 1;
+ bool mPinning : 1;
+
+ public:
+ nsILoadContextInfo* LoadInfo() const { return mLoadContextInfo; }
+ bool WriteToDisk() const {
+ return mWriteToDisk &&
+ (!mLoadContextInfo || !mLoadContextInfo->IsPrivate());
+ }
+ bool LookupAppCache() const { return mLookupAppCache; }
+ bool SkipSizeCheck() const { return mSkipSizeCheck; }
+ bool Pinning() const { return mPinning; }
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/CacheStorageService.cpp b/netwerk/cache2/CacheStorageService.cpp
new file mode 100644
index 0000000000..d06c420ff4
--- /dev/null
+++ b/netwerk/cache2/CacheStorageService.cpp
@@ -0,0 +1,2376 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CacheLog.h"
+#include "CacheStorageService.h"
+#include "CacheFileIOManager.h"
+#include "CacheObserver.h"
+#include "CacheIndex.h"
+#include "CacheIndexIterator.h"
+#include "CacheStorage.h"
+#include "AppCacheStorage.h"
+#include "CacheEntry.h"
+#include "CacheFileUtils.h"
+
+#include "OldWrappers.h"
+#include "nsCacheService.h"
+#include "nsDeleteDir.h"
+
+#include "nsICacheStorageVisitor.h"
+#include "nsIObserverService.h"
+#include "nsIFile.h"
+#include "nsIURI.h"
+#include "nsCOMPtr.h"
+#include "nsContentUtils.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsServiceManagerUtils.h"
+#include "nsXULAppAPI.h"
+#include "mozilla/AtomicBitfields.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Services.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+namespace mozilla {
+namespace net {
+
+namespace {
+
+void AppendMemoryStorageTag(nsAutoCString& key) {
+  // Using DEL as the very last ASCII-7 character we can use in the list of
+  // attributes.
+ key.Append('\x7f');
+ key.Append(',');
+}
+
+} // namespace
+
+// Not defining as static or class member of CacheStorageService since
+// it would otherwise need to include CacheEntry.h and that then would
+// need to be exported to make nsNetModule.cpp compilable.
+typedef nsClassHashtable<nsCStringHashKey, CacheEntryTable> GlobalEntryTables;
+
+/**
+ * Keeps tables of entries. There is one entry table for each distinct load
+ * context type. The distinction is based on the following load context info
+ * states: <isPrivate|isAnon|inIsolatedMozBrowser>, which build a mapping
+ * key.
+ *
+ * Thread-safe to access, protected by the service mutex.
+ */
+static GlobalEntryTables* sGlobalEntryTables;
+
+CacheMemoryConsumer::CacheMemoryConsumer(uint32_t aFlags)
+ : mReportedMemoryConsumption(0), mFlags(aFlags) {}
+
+void CacheMemoryConsumer::DoMemoryReport(uint32_t aCurrentSize) {
+ if (!(mFlags & DONT_REPORT) && CacheStorageService::Self()) {
+ CacheStorageService::Self()->OnMemoryConsumptionChange(this, aCurrentSize);
+ }
+}
+
+CacheStorageService::MemoryPool::MemoryPool(EType aType)
+ : mType(aType), mMemorySize(0) {}
+
+CacheStorageService::MemoryPool::~MemoryPool() {
+ if (mMemorySize != 0) {
+ NS_ERROR(
+ "Network cache reported memory consumption is not at 0, probably "
+ "leaking?");
+ }
+}
+
+uint32_t CacheStorageService::MemoryPool::Limit() const {
+ uint32_t limit = 0;
+
+ switch (mType) {
+ case DISK:
+ limit = CacheObserver::MetadataMemoryLimit();
+ break;
+ case MEMORY:
+ limit = CacheObserver::MemoryCacheCapacity();
+ break;
+ default:
+ MOZ_CRASH("Bad pool type");
+ }
+
+ static const uint32_t kMaxLimit = 0x3FFFFF;
+ if (limit > kMaxLimit) {
+ LOG((" a memory limit (%u) is unexpectedly high, clipping to %u", limit,
+ kMaxLimit));
+ limit = kMaxLimit;
+ }
+
+ return limit << 10;
+}
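A short note on the clipping above (an inference from the code, not documented intent): the limit is computed in kilobytes and shifted left by 10 to produce bytes, so the clip keeps the shifted value representable in the returned uint32_t:

    // 0x3FFFFF KB << 10 = 0xFFFFFC00 bytes (just under 4 GiB), which still
    // fits in a uint32_t; anything larger would overflow after the shift.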
+
+NS_IMPL_ISUPPORTS(CacheStorageService, nsICacheStorageService,
+ nsIMemoryReporter, nsITimerCallback, nsICacheTesting,
+ nsINamed)
+
+CacheStorageService* CacheStorageService::sSelf = nullptr;
+
+CacheStorageService::CacheStorageService()
+ : mLock("CacheStorageService.mLock"),
+ mForcedValidEntriesLock("CacheStorageService.mForcedValidEntriesLock"),
+ mShutdown(false),
+ mDiskPool(MemoryPool::DISK),
+ mMemoryPool(MemoryPool::MEMORY)
+#ifdef MOZ_TSAN
+ ,
+ mPurgeTimerActive(false)
+#endif
+{
+ CacheFileIOManager::Init();
+
+ MOZ_ASSERT(XRE_IsParentProcess());
+ MOZ_ASSERT(!sSelf);
+
+ sSelf = this;
+ sGlobalEntryTables = new GlobalEntryTables();
+
+ RegisterStrongMemoryReporter(this);
+}
+
+CacheStorageService::~CacheStorageService() {
+ LOG(("CacheStorageService::~CacheStorageService"));
+ sSelf = nullptr;
+}
+
+void CacheStorageService::Shutdown() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) return;
+
+ LOG(("CacheStorageService::Shutdown - start"));
+
+ mShutdown = true;
+
+ nsCOMPtr<nsIRunnable> event =
+ NewRunnableMethod("net::CacheStorageService::ShutdownBackground", this,
+ &CacheStorageService::ShutdownBackground);
+ Dispatch(event);
+
+#ifdef NS_FREE_PERMANENT_DATA
+ sGlobalEntryTables->Clear();
+ delete sGlobalEntryTables;
+#endif
+ sGlobalEntryTables = nullptr;
+
+ LOG(("CacheStorageService::Shutdown - done"));
+}
+
+void CacheStorageService::ShutdownBackground() {
+ LOG(("CacheStorageService::ShutdownBackground - start"));
+
+ MOZ_ASSERT(IsOnManagementThread());
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ // Cancel purge timer to avoid leaking.
+ if (mPurgeTimer) {
+ LOG((" freeing the timer"));
+ mPurgeTimer->Cancel();
+ }
+ }
+
+#ifdef NS_FREE_PERMANENT_DATA
+ Pool(false).mFrecencyArray.Clear();
+ Pool(false).mExpirationArray.Clear();
+ Pool(true).mFrecencyArray.Clear();
+ Pool(true).mExpirationArray.Clear();
+#endif
+
+ LOG(("CacheStorageService::ShutdownBackground - done"));
+}
+
+// Internal management methods
+
+namespace {
+
+// WalkCacheRunnable
+// Base class for particular storage entries visiting
+class WalkCacheRunnable : public Runnable,
+ public CacheStorageService::EntryInfoCallback {
+ protected:
+ WalkCacheRunnable(nsICacheStorageVisitor* aVisitor, bool aVisitEntries)
+ : Runnable("net::WalkCacheRunnable"),
+ mService(CacheStorageService::Self()),
+ mCallback(aVisitor),
+ mSize(0),
+ mCancel(false) {
+ MOZ_ASSERT(NS_IsMainThread());
+ StoreNotifyStorage(true);
+ StoreVisitEntries(aVisitEntries);
+ }
+
+ virtual ~WalkCacheRunnable() {
+ if (mCallback) {
+ ProxyReleaseMainThread("WalkCacheRunnable::mCallback", mCallback);
+ }
+ }
+
+ RefPtr<CacheStorageService> mService;
+ nsCOMPtr<nsICacheStorageVisitor> mCallback;
+
+ uint64_t mSize;
+
+ // clang-format off
+ MOZ_ATOMIC_BITFIELDS(mAtomicBitfields, 8, (
+ (bool, NotifyStorage, 1),
+ (bool, VisitEntries, 1)
+ ))
+ // clang-format on
+
+ Atomic<bool> mCancel;
+};
+
+// WalkMemoryCacheRunnable
+// Responsible for visiting the memory storage and walking
+// all entries in it asynchronously.
+class WalkMemoryCacheRunnable : public WalkCacheRunnable {
+ public:
+ WalkMemoryCacheRunnable(nsILoadContextInfo* aLoadInfo, bool aVisitEntries,
+ nsICacheStorageVisitor* aVisitor)
+ : WalkCacheRunnable(aVisitor, aVisitEntries) {
+ CacheFileUtils::AppendKeyPrefix(aLoadInfo, mContextKey);
+ MOZ_ASSERT(NS_IsMainThread());
+ }
+
+ nsresult Walk() { return mService->Dispatch(this); }
+
+ private:
+ NS_IMETHOD Run() override {
+ if (CacheStorageService::IsOnManagementThread()) {
+ LOG(("WalkMemoryCacheRunnable::Run - collecting [this=%p]", this));
+ // First, walk, count and grab all entries from the storage
+
+ mozilla::MutexAutoLock lock(CacheStorageService::Self()->Lock());
+
+ if (!CacheStorageService::IsRunning()) return NS_ERROR_NOT_INITIALIZED;
+
+ for (auto iterGlobal = sGlobalEntryTables->Iter(); !iterGlobal.Done();
+ iterGlobal.Next()) {
+ CacheEntryTable* entries = iterGlobal.UserData();
+ if (entries->Type() != CacheEntryTable::MEMORY_ONLY) {
+ continue;
+ }
+
+ for (auto iter = entries->Iter(); !iter.Done(); iter.Next()) {
+ CacheEntry* entry = iter.UserData();
+
+ MOZ_ASSERT(!entry->IsUsingDisk());
+
+ mSize += entry->GetMetadataMemoryConsumption();
+
+ int64_t size;
+ if (NS_SUCCEEDED(entry->GetDataSize(&size))) {
+ mSize += size;
+ }
+ mEntryArray.AppendElement(entry);
+ }
+ }
+
+ // Next, we dispatch to the main thread
+ } else if (NS_IsMainThread()) {
+ LOG(("WalkMemoryCacheRunnable::Run - notifying [this=%p]", this));
+
+ if (LoadNotifyStorage()) {
+ LOG((" storage"));
+
+ uint64_t capacity = CacheObserver::MemoryCacheCapacity();
+ capacity <<= 10; // kilobytes to bytes
+
+ // Second, notify overall storage info
+ mCallback->OnCacheStorageInfo(mEntryArray.Length(), mSize, capacity,
+ nullptr);
+ if (!LoadVisitEntries()) return NS_OK; // done
+
+ StoreNotifyStorage(false);
+
+ } else {
+ LOG((" entry [left=%zu, canceled=%d]", mEntryArray.Length(),
+ (bool)mCancel));
+
+ // Third, notify each entry until depleted or canceled
+ if (!mEntryArray.Length() || mCancel) {
+ mCallback->OnCacheEntryVisitCompleted();
+ return NS_OK; // done
+ }
+
+ // Grab the next entry
+ RefPtr<CacheEntry> entry = mEntryArray[0];
+ mEntryArray.RemoveElementAt(0);
+
+ // Invokes this->OnEntryInfo, that calls the callback with all
+ // information of the entry.
+ CacheStorageService::GetCacheEntryInfo(entry, this);
+ }
+ } else {
+ MOZ_CRASH("Bad thread");
+ return NS_ERROR_FAILURE;
+ }
+
+ NS_DispatchToMainThread(this);
+ return NS_OK;
+ }
+
+ virtual ~WalkMemoryCacheRunnable() {
+ if (mCallback)
+ ProxyReleaseMainThread("WalkMemoryCacheRunnable::mCallback", mCallback);
+ }
+
+ virtual void OnEntryInfo(const nsACString& aURISpec,
+ const nsACString& aIdEnhance, int64_t aDataSize,
+ int32_t aFetchCount, uint32_t aLastModifiedTime,
+ uint32_t aExpirationTime, bool aPinned,
+ nsILoadContextInfo* aInfo) override {
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> uri;
+ rv = NS_NewURI(getter_AddRefs(uri), aURISpec);
+ if (NS_FAILED(rv)) {
+ return;
+ }
+
+ rv = mCallback->OnCacheEntryInfo(uri, aIdEnhance, aDataSize, aFetchCount,
+ aLastModifiedTime, aExpirationTime,
+ aPinned, aInfo);
+ if (NS_FAILED(rv)) {
+ LOG((" callback failed, canceling the walk"));
+ mCancel = true;
+ }
+ }
+
+ private:
+ nsCString mContextKey;
+ nsTArray<RefPtr<CacheEntry>> mEntryArray;
+};
+
+// WalkDiskCacheRunnable
+// Using the cache index information to get the list of files per context.
+class WalkDiskCacheRunnable : public WalkCacheRunnable {
+ public:
+ WalkDiskCacheRunnable(nsILoadContextInfo* aLoadInfo, bool aVisitEntries,
+ nsICacheStorageVisitor* aVisitor)
+ : WalkCacheRunnable(aVisitor, aVisitEntries),
+ mLoadInfo(aLoadInfo),
+ mPass(COLLECT_STATS),
+ mCount(0) {}
+
+ nsresult Walk() {
+    // TODO, bug 998693
+    // Initial index build should be forced here so that about:cache soon
+    // after startup gives some meaningful results.
+
+    // Dispatch at the INDEX level in the hope that very recent cache entry
+    // information gets to the index list before we grab the index iterator
+    // for the first time. This tries to avoid missing entries that have
+    // been created right before the visit is required.
+ RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
+ NS_ENSURE_TRUE(thread, NS_ERROR_NOT_INITIALIZED);
+
+ return thread->Dispatch(this, CacheIOThread::INDEX);
+ }
+
+ private:
+  // Invokes the OnCacheEntryInfo callback for each found entry.
+  // There is one instance of this class per entry.
+ class OnCacheEntryInfoRunnable : public Runnable {
+ public:
+ explicit OnCacheEntryInfoRunnable(WalkDiskCacheRunnable* aWalker)
+ : Runnable("net::WalkDiskCacheRunnable::OnCacheEntryInfoRunnable"),
+ mWalker(aWalker),
+ mDataSize(0),
+ mFetchCount(0),
+ mLastModifiedTime(0),
+ mExpirationTime(0),
+ mPinned(false) {}
+
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ nsresult rv;
+
+ nsCOMPtr<nsIURI> uri;
+ rv = NS_NewURI(getter_AddRefs(uri), mURISpec);
+ if (NS_FAILED(rv)) {
+ return NS_OK;
+ }
+
+ rv = mWalker->mCallback->OnCacheEntryInfo(
+ uri, mIdEnhance, mDataSize, mFetchCount, mLastModifiedTime,
+ mExpirationTime, mPinned, mInfo);
+ if (NS_FAILED(rv)) {
+ mWalker->mCancel = true;
+ }
+
+ return NS_OK;
+ }
+
+ RefPtr<WalkDiskCacheRunnable> mWalker;
+
+ nsCString mURISpec;
+ nsCString mIdEnhance;
+ int64_t mDataSize;
+ int32_t mFetchCount;
+ uint32_t mLastModifiedTime;
+ uint32_t mExpirationTime;
+ bool mPinned;
+ nsCOMPtr<nsILoadContextInfo> mInfo;
+ };
+
+ NS_IMETHOD Run() override {
+ // The main loop
+ nsresult rv;
+
+ if (CacheStorageService::IsOnManagementThread()) {
+ switch (mPass) {
+ case COLLECT_STATS:
+ // Get quickly the cache stats.
+ uint32_t size;
+ rv = CacheIndex::GetCacheStats(mLoadInfo, &size, &mCount);
+ if (NS_FAILED(rv)) {
+ if (LoadVisitEntries()) {
+ // both onStorageInfo and onCompleted are expected
+ NS_DispatchToMainThread(this);
+ }
+ return NS_DispatchToMainThread(this);
+ }
+
+ mSize = static_cast<uint64_t>(size) << 10;
+
+ // Invoke onCacheStorageInfo with valid information.
+ NS_DispatchToMainThread(this);
+
+ if (!LoadVisitEntries()) {
+ return NS_OK; // done
+ }
+
+ mPass = ITERATE_METADATA;
+ [[fallthrough]];
+
+ case ITERATE_METADATA:
+ // Now grab the context iterator.
+ if (!mIter) {
+ rv =
+ CacheIndex::GetIterator(mLoadInfo, true, getter_AddRefs(mIter));
+ if (NS_FAILED(rv)) {
+ // Invoke onCacheEntryVisitCompleted now
+ return NS_DispatchToMainThread(this);
+ }
+ }
+
+ while (!mCancel && !CacheObserver::ShuttingDown()) {
+ if (CacheIOThread::YieldAndRerun()) return NS_OK;
+
+ SHA1Sum::Hash hash;
+ rv = mIter->GetNextHash(&hash);
+ if (NS_FAILED(rv)) break; // done (or error?)
+
+ // This synchronously invokes OnEntryInfo on this class where we
+ // redispatch to the main thread for the consumer callback.
+ CacheFileIOManager::GetEntryInfo(&hash, this);
+ }
+
+ // Invoke onCacheEntryVisitCompleted on the main thread
+ NS_DispatchToMainThread(this);
+ }
+ } else if (NS_IsMainThread()) {
+ if (LoadNotifyStorage()) {
+ nsCOMPtr<nsIFile> dir;
+ CacheFileIOManager::GetCacheDirectory(getter_AddRefs(dir));
+ uint64_t capacity = CacheObserver::DiskCacheCapacity();
+ capacity <<= 10; // kilobytes to bytes
+ mCallback->OnCacheStorageInfo(mCount, mSize, capacity, dir);
+ StoreNotifyStorage(false);
+ } else {
+ mCallback->OnCacheEntryVisitCompleted();
+ }
+ } else {
+ MOZ_CRASH("Bad thread");
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+ }
+
+ virtual void OnEntryInfo(const nsACString& aURISpec,
+ const nsACString& aIdEnhance, int64_t aDataSize,
+ int32_t aFetchCount, uint32_t aLastModifiedTime,
+ uint32_t aExpirationTime, bool aPinned,
+ nsILoadContextInfo* aInfo) override {
+ // Called directly from CacheFileIOManager::GetEntryInfo.
+
+ // Invoke onCacheEntryInfo on the main thread for this entry.
+ RefPtr<OnCacheEntryInfoRunnable> info = new OnCacheEntryInfoRunnable(this);
+ info->mURISpec = aURISpec;
+ info->mIdEnhance = aIdEnhance;
+ info->mDataSize = aDataSize;
+ info->mFetchCount = aFetchCount;
+ info->mLastModifiedTime = aLastModifiedTime;
+ info->mExpirationTime = aExpirationTime;
+ info->mPinned = aPinned;
+ info->mInfo = aInfo;
+
+ NS_DispatchToMainThread(info);
+ }
+
+ RefPtr<nsILoadContextInfo> mLoadInfo;
+ enum {
+ // First, we collect stats for the load context.
+ COLLECT_STATS,
+
+    // Second, if demanded, we iterate over the entries gathered
+ // from the iterator and call CacheFileIOManager::GetEntryInfo
+ // for each found entry.
+ ITERATE_METADATA,
+ } mPass;
+
+ RefPtr<CacheIndexIterator> mIter;
+ uint32_t mCount;
+};
+
+} // namespace
+
+void CacheStorageService::DropPrivateBrowsingEntries() {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) return;
+
+ nsTArray<nsCString> keys;
+ for (auto iter = sGlobalEntryTables->Iter(); !iter.Done(); iter.Next()) {
+ const nsACString& key = iter.Key();
+ nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(key);
+ if (info && info->IsPrivate()) {
+ keys.AppendElement(key);
+ }
+ }
+
+ for (uint32_t i = 0; i < keys.Length(); ++i) {
+ DoomStorageEntries(keys[i], nullptr, true, false, nullptr);
+ }
+}
+
+namespace {
+
+class CleaupCacheDirectoriesRunnable : public Runnable {
+ public:
+ NS_DECL_NSIRUNNABLE
+ static bool Post();
+
+ private:
+ CleaupCacheDirectoriesRunnable()
+ : Runnable("net::CleaupCacheDirectoriesRunnable") {
+ nsCacheService::GetDiskCacheDirectory(getter_AddRefs(mCache1Dir));
+ CacheFileIOManager::GetCacheDirectory(getter_AddRefs(mCache2Dir));
+#if defined(MOZ_WIDGET_ANDROID)
+ CacheFileIOManager::GetProfilelessCacheDirectory(
+ getter_AddRefs(mCache2Profileless));
+#endif
+ }
+
+ virtual ~CleaupCacheDirectoriesRunnable() = default;
+ nsCOMPtr<nsIFile> mCache1Dir, mCache2Dir;
+#if defined(MOZ_WIDGET_ANDROID)
+ nsCOMPtr<nsIFile> mCache2Profileless;
+#endif
+};
+
+// static
+bool CleaupCacheDirectoriesRunnable::Post() {
+  // To obtain the cache1 directory we must unfortunately instantiate the old
+  // cache service even though it may not be used at all... This also
+  // initializes nsDeleteDir.
+ nsCOMPtr<nsICacheService> service = do_GetService(NS_CACHESERVICE_CONTRACTID);
+ if (!service) return false;
+
+ nsCOMPtr<nsIEventTarget> thread;
+ service->GetCacheIOTarget(getter_AddRefs(thread));
+ if (!thread) return false;
+
+ RefPtr<CleaupCacheDirectoriesRunnable> r =
+ new CleaupCacheDirectoriesRunnable();
+ thread->Dispatch(r, NS_DISPATCH_NORMAL);
+ return true;
+}
+
+NS_IMETHODIMP CleaupCacheDirectoriesRunnable::Run() {
+ MOZ_ASSERT(!NS_IsMainThread());
+
+ if (mCache1Dir) {
+ nsDeleteDir::RemoveOldTrashes(mCache1Dir);
+ }
+ if (mCache2Dir) {
+ nsDeleteDir::RemoveOldTrashes(mCache2Dir);
+ }
+#if defined(MOZ_WIDGET_ANDROID)
+ if (mCache2Profileless) {
+ nsDeleteDir::RemoveOldTrashes(mCache2Profileless);
+ // Always delete the profileless cache on Android
+ nsDeleteDir::DeleteDir(mCache2Profileless, true, 30000);
+ }
+#endif
+
+ if (mCache1Dir) {
+ nsDeleteDir::DeleteDir(mCache1Dir, true, 30000);
+ }
+
+ return NS_OK;
+}
+
+} // namespace
+
+// static
+void CacheStorageService::CleaupCacheDirectories() {
+ // Make sure we schedule just once in case CleaupCacheDirectories gets called
+  // multiple times for some reason.
+ static bool runOnce = CleaupCacheDirectoriesRunnable::Post();
+ if (!runOnce) {
+ NS_WARNING("Could not start cache trashes cleanup");
+ }
+}
+
+// Helper methods
+
+// static
+bool CacheStorageService::IsOnManagementThread() {
+ RefPtr<CacheStorageService> service = Self();
+ if (!service) return false;
+
+ nsCOMPtr<nsIEventTarget> target = service->Thread();
+ if (!target) return false;
+
+ bool currentThread;
+ nsresult rv = target->IsOnCurrentThread(&currentThread);
+ return NS_SUCCEEDED(rv) && currentThread;
+}
+
+already_AddRefed<nsIEventTarget> CacheStorageService::Thread() const {
+ return CacheFileIOManager::IOTarget();
+}
+
+nsresult CacheStorageService::Dispatch(nsIRunnable* aEvent) {
+ RefPtr<CacheIOThread> cacheIOThread = CacheFileIOManager::IOThread();
+ if (!cacheIOThread) return NS_ERROR_NOT_AVAILABLE;
+
+ return cacheIOThread->Dispatch(aEvent, CacheIOThread::MANAGEMENT);
+}
+
+namespace CacheStorageEvictHelper {
+
+nsresult ClearStorage(bool const aPrivate, bool const aAnonymous,
+ OriginAttributes& aOa) {
+ nsresult rv;
+
+ aOa.SyncAttributesWithPrivateBrowsing(aPrivate);
+ RefPtr<LoadContextInfo> info = GetLoadContextInfo(aAnonymous, aOa);
+
+ nsCOMPtr<nsICacheStorage> storage;
+ RefPtr<CacheStorageService> service = CacheStorageService::Self();
+ NS_ENSURE_TRUE(service, NS_ERROR_FAILURE);
+
+ // Clear disk storage
+ rv = service->DiskCacheStorage(info, false, getter_AddRefs(storage));
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = storage->AsyncEvictStorage(nullptr);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Clear memory storage
+ rv = service->MemoryCacheStorage(info, getter_AddRefs(storage));
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = storage->AsyncEvictStorage(nullptr);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+nsresult Run(OriginAttributes& aOa) {
+ nsresult rv;
+
+ // Clear all [private X anonymous] combinations
+ rv = ClearStorage(false, false, aOa);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = ClearStorage(false, true, aOa);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = ClearStorage(true, false, aOa);
+ NS_ENSURE_SUCCESS(rv, rv);
+ rv = ClearStorage(true, true, aOa);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+} // namespace CacheStorageEvictHelper
+
+// nsICacheStorageService
+
+NS_IMETHODIMP CacheStorageService::MemoryCacheStorage(
+ nsILoadContextInfo* aLoadContextInfo, nsICacheStorage** _retval) {
+ NS_ENSURE_ARG(_retval);
+
+ nsCOMPtr<nsICacheStorage> storage =
+ new CacheStorage(aLoadContextInfo, false, false, false, false);
+ storage.forget(_retval);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::DiskCacheStorage(
+ nsILoadContextInfo* aLoadContextInfo, bool aLookupAppCache,
+ nsICacheStorage** _retval) {
+ NS_ENSURE_ARG(_retval);
+
+ // TODO save some heap granularity - cache commonly used storages.
+
+ // When disk cache is disabled, still provide a storage, but just keep stuff
+ // in memory.
+ bool useDisk = CacheObserver::UseDiskCache();
+
+ nsCOMPtr<nsICacheStorage> storage =
+ new CacheStorage(aLoadContextInfo, useDisk, aLookupAppCache,
+ false /* size limit */, false /* don't pin */);
+ storage.forget(_retval);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::PinningCacheStorage(
+ nsILoadContextInfo* aLoadContextInfo, nsICacheStorage** _retval) {
+ NS_ENSURE_ARG(aLoadContextInfo);
+ NS_ENSURE_ARG(_retval);
+
+ // When disk cache is disabled don't pretend we cache.
+ if (!CacheObserver::UseDiskCache()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ nsCOMPtr<nsICacheStorage> storage = new CacheStorage(
+ aLoadContextInfo, true /* use disk */, false /* no appcache */,
+ true /* ignore size checks */, true /* pin */);
+ storage.forget(_retval);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::AppCacheStorage(
+ nsILoadContextInfo* aLoadContextInfo,
+ nsIApplicationCache* aApplicationCache, nsICacheStorage** _retval) {
+ NS_ENSURE_ARG(_retval);
+
+ nsCOMPtr<nsICacheStorage> storage;
+  // Fully qualify the class name; otherwise the compiler resolves the name to
+  // this method, which has the same name as the desired class...
+ storage =
+ new mozilla::net::AppCacheStorage(aLoadContextInfo, aApplicationCache);
+
+ storage.forget(_retval);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::SynthesizedCacheStorage(
+ nsILoadContextInfo* aLoadContextInfo, nsICacheStorage** _retval) {
+ NS_ENSURE_ARG(aLoadContextInfo);
+ NS_ENSURE_ARG(_retval);
+
+ nsCOMPtr<nsICacheStorage> storage =
+ new CacheStorage(aLoadContextInfo, false, false,
+ true /* skip size checks for synthesized cache */,
+ false /* no pinning */);
+ storage.forget(_retval);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::Clear() {
+ nsresult rv;
+
+  // Tell the index to block notifications to AsyncGetDiskConsumption.
+  // They will be allowed again from CacheFileContextEvictor::EvictEntries()
+  // when all the contexts have been removed from disk.
+ CacheIndex::OnAsyncEviction(true);
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ {
+ mozilla::MutexAutoLock forcedValidEntriesLock(mForcedValidEntriesLock);
+ mForcedValidEntries.Clear();
+ }
+
+ NS_ENSURE_TRUE(!mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ nsTArray<nsCString> keys;
+ for (auto iter = sGlobalEntryTables->Iter(); !iter.Done(); iter.Next()) {
+ keys.AppendElement(iter.Key());
+ }
+
+ for (uint32_t i = 0; i < keys.Length(); ++i) {
+ DoomStorageEntries(keys[i], nullptr, true, false, nullptr);
+ }
+
+ // Passing null as a load info means to evict all contexts.
+ // EvictByContext() respects the entry pinning. EvictAll() does not.
+ rv = CacheFileIOManager::EvictByContext(nullptr, false, u""_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::ClearOrigin(nsIPrincipal* aPrincipal) {
+ nsresult rv;
+
+ if (NS_WARN_IF(!aPrincipal)) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsAutoString origin;
+ rv = nsContentUtils::GetUTFOrigin(aPrincipal, origin);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = ClearOriginInternal(origin, aPrincipal->OriginAttributesRef(), true);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = ClearOriginInternal(origin, aPrincipal->OriginAttributesRef(), false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::ClearOriginAttributes(
+ const nsAString& aOriginAttributes) {
+ nsresult rv;
+
+ if (NS_WARN_IF(aOriginAttributes.IsEmpty())) {
+ return NS_ERROR_FAILURE;
+ }
+
+ OriginAttributes oa;
+ if (!oa.Init(aOriginAttributes)) {
+ NS_ERROR("Could not parse the argument for OriginAttributes");
+ return NS_ERROR_FAILURE;
+ }
+
+ rv = CacheStorageEvictHelper::Run(oa);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+static bool RemoveExactEntry(CacheEntryTable* aEntries, nsACString const& aKey,
+ CacheEntry* aEntry, bool aOverwrite) {
+ RefPtr<CacheEntry> existingEntry;
+ if (!aEntries->Get(aKey, getter_AddRefs(existingEntry))) {
+ LOG(("RemoveExactEntry [entry=%p already gone]", aEntry));
+ return false; // Already removed...
+ }
+
+ if (!aOverwrite && existingEntry != aEntry) {
+ LOG(("RemoveExactEntry [entry=%p already replaced]", aEntry));
+ return false; // Already replaced...
+ }
+
+ LOG(("RemoveExactEntry [entry=%p removed]", aEntry));
+ aEntries->Remove(aKey);
+ return true;
+}
+
+nsresult CacheStorageService::ClearOriginInternal(
+ const nsAString& aOrigin, const OriginAttributes& aOriginAttributes,
+ bool aAnonymous) {
+ nsresult rv;
+
+ RefPtr<LoadContextInfo> info =
+ GetLoadContextInfo(aAnonymous, aOriginAttributes);
+ if (NS_WARN_IF(!info)) {
+ return NS_ERROR_FAILURE;
+ }
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (sGlobalEntryTables) {
+ for (auto iter = sGlobalEntryTables->Iter(); !iter.Done(); iter.Next()) {
+ bool matches = false;
+ rv =
+ CacheFileUtils::KeyMatchesLoadContextInfo(iter.Key(), info, &matches);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (!matches) {
+ continue;
+ }
+
+ CacheEntryTable* table = iter.UserData();
+ MOZ_ASSERT(table);
+
+ nsTArray<RefPtr<CacheEntry>> entriesToDelete;
+
+ for (auto entryIter = table->Iter(); !entryIter.Done();
+ entryIter.Next()) {
+ CacheEntry* entry = entryIter.UserData();
+
+ nsCOMPtr<nsIURI> uri;
+ rv = NS_NewURI(getter_AddRefs(uri), entry->GetURI());
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoString origin;
+ rv = nsContentUtils::GetUTFOrigin(uri, origin);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (origin != aOrigin) {
+ continue;
+ }
+
+ entriesToDelete.AppendElement(entry);
+ }
+
+ for (RefPtr<CacheEntry>& entry : entriesToDelete) {
+ nsAutoCString entryKey;
+ rv = entry->HashingKey(entryKey);
+ if (NS_FAILED(rv)) {
+ NS_ERROR("aEntry->HashingKey() failed?");
+ return rv;
+ }
+
+ RemoveExactEntry(table, entryKey, entry, false /* don't overwrite */);
+ }
+ }
+ }
+
+ rv = CacheFileIOManager::EvictByContext(info, false /* pinned */, aOrigin);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::PurgeFromMemory(uint32_t aWhat) {
+ uint32_t what;
+
+ switch (aWhat) {
+ case PURGE_DISK_DATA_ONLY:
+ what = CacheEntry::PURGE_DATA_ONLY_DISK_BACKED;
+ break;
+
+ case PURGE_DISK_ALL:
+ what = CacheEntry::PURGE_WHOLE_ONLY_DISK_BACKED;
+ break;
+
+ case PURGE_EVERYTHING:
+ what = CacheEntry::PURGE_WHOLE;
+ break;
+
+ default:
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ nsCOMPtr<nsIRunnable> event = new PurgeFromMemoryRunnable(this, what);
+
+ return Dispatch(event);
+}
+
+NS_IMETHODIMP CacheStorageService::PurgeFromMemoryRunnable::Run() {
+ if (NS_IsMainThread()) {
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (observerService) {
+ observerService->NotifyObservers(
+ nullptr, "cacheservice:purge-memory-pools", nullptr);
+ }
+
+ return NS_OK;
+ }
+
+ if (mService) {
+ // TODO not all flags apply to both pools
+ mService->Pool(true).PurgeAll(mWhat);
+ mService->Pool(false).PurgeAll(mWhat);
+ mService = nullptr;
+ }
+
+ NS_DispatchToMainThread(this);
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::AsyncGetDiskConsumption(
+ nsICacheStorageConsumptionObserver* aObserver) {
+ NS_ENSURE_ARG(aObserver);
+
+ nsresult rv;
+
+ rv = CacheIndex::AsyncGetDiskConsumption(aObserver);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::GetIoTarget(nsIEventTarget** aEventTarget) {
+ NS_ENSURE_ARG(aEventTarget);
+
+ nsCOMPtr<nsIEventTarget> ioTarget = CacheFileIOManager::IOTarget();
+ ioTarget.forget(aEventTarget);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheStorageService::AsyncVisitAllStorages(
+ nsICacheStorageVisitor* aVisitor, bool aVisitEntries) {
+ LOG(("CacheStorageService::AsyncVisitAllStorages [cb=%p]", aVisitor));
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ // Walking the disk cache also walks the memory cache.
+ RefPtr<WalkDiskCacheRunnable> event =
+ new WalkDiskCacheRunnable(nullptr, aVisitEntries, aVisitor);
+ return event->Walk();
+}
+
+// Methods used by CacheEntry for management of in-memory structures.
+
+namespace {
+
+class FrecencyComparator {
+ public:
+ bool Equals(CacheEntry* a, CacheEntry* b) const {
+ return a->GetFrecency() == b->GetFrecency();
+ }
+ bool LessThan(CacheEntry* a, CacheEntry* b) const {
+ // We deliberately want to keep the '0' frecency entries at the tail of the
+    // array, because these are new entries and would just slow down purging of
+ // the pools based on frecency.
+ if (a->GetFrecency() == 0.0 && b->GetFrecency() > 0.0) {
+ return false;
+ }
+ if (a->GetFrecency() > 0.0 && b->GetFrecency() == 0.0) {
+ return true;
+ }
+
+ return a->GetFrecency() < b->GetFrecency();
+ }
+};
+
+class ExpirationComparator {
+ public:
+ bool Equals(CacheEntry* a, CacheEntry* b) const {
+ return a->GetExpirationTime() == b->GetExpirationTime();
+ }
+ bool LessThan(CacheEntry* a, CacheEntry* b) const {
+ return a->GetExpirationTime() < b->GetExpirationTime();
+ }
+};
+
+} // namespace
+
+void CacheStorageService::RegisterEntry(CacheEntry* aEntry) {
+ MOZ_ASSERT(IsOnManagementThread());
+
+ if (mShutdown || !aEntry->CanRegister()) return;
+
+ TelemetryRecordEntryCreation(aEntry);
+
+ LOG(("CacheStorageService::RegisterEntry [entry=%p]", aEntry));
+
+ MemoryPool& pool = Pool(aEntry->IsUsingDisk());
+ pool.mFrecencyArray.AppendElement(aEntry);
+ pool.mExpirationArray.AppendElement(aEntry);
+
+ aEntry->SetRegistered(true);
+}
+
+void CacheStorageService::UnregisterEntry(CacheEntry* aEntry) {
+ MOZ_ASSERT(IsOnManagementThread());
+
+ if (!aEntry->IsRegistered()) return;
+
+ TelemetryRecordEntryRemoval(aEntry);
+
+ LOG(("CacheStorageService::UnregisterEntry [entry=%p]", aEntry));
+
+ MemoryPool& pool = Pool(aEntry->IsUsingDisk());
+ mozilla::DebugOnly<bool> removedFrecency =
+ pool.mFrecencyArray.RemoveElement(aEntry);
+ mozilla::DebugOnly<bool> removedExpiration =
+ pool.mExpirationArray.RemoveElement(aEntry);
+
+ MOZ_ASSERT(mShutdown || (removedFrecency && removedExpiration));
+
+  // Note: aEntry->CanRegister() will return false from now on
+ aEntry->SetRegistered(false);
+}
+
+static bool AddExactEntry(CacheEntryTable* aEntries, nsACString const& aKey,
+ CacheEntry* aEntry, bool aOverwrite) {
+ RefPtr<CacheEntry> existingEntry;
+ if (!aOverwrite && aEntries->Get(aKey, getter_AddRefs(existingEntry))) {
+ bool equals = existingEntry == aEntry;
+ LOG(("AddExactEntry [entry=%p equals=%d]", aEntry, equals));
+ return equals; // Already there...
+ }
+
+ LOG(("AddExactEntry [entry=%p put]", aEntry));
+ aEntries->Put(aKey, RefPtr{aEntry});
+ return true;
+}
+
+bool CacheStorageService::RemoveEntry(CacheEntry* aEntry,
+ bool aOnlyUnreferenced) {
+ LOG(("CacheStorageService::RemoveEntry [entry=%p]", aEntry));
+
+ nsAutoCString entryKey;
+ nsresult rv = aEntry->HashingKey(entryKey);
+ if (NS_FAILED(rv)) {
+ NS_ERROR("aEntry->HashingKey() failed?");
+ return false;
+ }
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) {
+ LOG((" after shutdown"));
+ return false;
+ }
+
+ if (aOnlyUnreferenced) {
+ if (aEntry->IsReferenced()) {
+ LOG((" still referenced, not removing"));
+ return false;
+ }
+
+ if (!aEntry->IsUsingDisk() &&
+ IsForcedValidEntry(aEntry->GetStorageID(), entryKey)) {
+ LOG((" forced valid, not removing"));
+ return false;
+ }
+ }
+
+ CacheEntryTable* entries;
+ if (sGlobalEntryTables->Get(aEntry->GetStorageID(), &entries))
+ RemoveExactEntry(entries, entryKey, aEntry, false /* don't overwrite */);
+
+ nsAutoCString memoryStorageID(aEntry->GetStorageID());
+ AppendMemoryStorageTag(memoryStorageID);
+
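+  // Memory-only entries are also recorded in a separate table keyed with the
+  // memory-storage tag (see RecordMemoryOnlyEntry), so remove from there too.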
+ if (sGlobalEntryTables->Get(memoryStorageID, &entries))
+ RemoveExactEntry(entries, entryKey, aEntry, false /* don't overwrite */);
+
+ return true;
+}
+
+void CacheStorageService::RecordMemoryOnlyEntry(CacheEntry* aEntry,
+ bool aOnlyInMemory,
+ bool aOverwrite) {
+ LOG(
+ ("CacheStorageService::RecordMemoryOnlyEntry [entry=%p, memory=%d, "
+ "overwrite=%d]",
+ aEntry, aOnlyInMemory, aOverwrite));
+  // This method is responsible for putting this entry into a special record
+  // hashtable that contains only entries stored in memory.
+  // Keep in mind that every entry, regardless of whether it is in-memory-only
+  // or not, is always recorded in the master storage hash table, the one
+  // identified by CacheEntry.StorageID().
+
+ mLock.AssertCurrentThreadOwns();
+
+ if (mShutdown) {
+ LOG((" after shutdown"));
+ return;
+ }
+
+ nsresult rv;
+
+ nsAutoCString entryKey;
+ rv = aEntry->HashingKey(entryKey);
+ if (NS_FAILED(rv)) {
+ NS_ERROR("aEntry->HashingKey() failed?");
+ return;
+ }
+
+ CacheEntryTable* entries = nullptr;
+ nsAutoCString memoryStorageID(aEntry->GetStorageID());
+ AppendMemoryStorageTag(memoryStorageID);
+
+ if (!sGlobalEntryTables->Get(memoryStorageID, &entries)) {
+ if (!aOnlyInMemory) {
+ LOG((" not recorded as memory only"));
+ return;
+ }
+
+ entries = new CacheEntryTable(CacheEntryTable::MEMORY_ONLY);
+ sGlobalEntryTables->Put(memoryStorageID, entries);
+ LOG((" new memory-only storage table for %s", memoryStorageID.get()));
+ }
+
+ if (aOnlyInMemory) {
+ AddExactEntry(entries, entryKey, aEntry, aOverwrite);
+ } else {
+ RemoveExactEntry(entries, entryKey, aEntry, aOverwrite);
+ }
+}
+
+// Checks if a cache entry is forced valid (will be loaded directly from cache
+// without further validation) - see nsICacheEntry.idl for further details
+bool CacheStorageService::IsForcedValidEntry(nsACString const& aContextKey,
+ nsACString const& aEntryKey) {
+ return IsForcedValidEntry(aContextKey + aEntryKey);
+}
+
+bool CacheStorageService::IsForcedValidEntry(
+ nsACString const& aContextEntryKey) {
+ mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
+
+ TimeStamp validUntil;
+
+ if (!mForcedValidEntries.Get(aContextEntryKey, &validUntil)) {
+ return false;
+ }
+
+ if (validUntil.IsNull()) {
+ return false;
+ }
+
+ // Entry timeout not reached yet
+ if (TimeStamp::NowLoRes() <= validUntil) {
+ return true;
+ }
+
+ // Entry timeout has been reached
+ mForcedValidEntries.Remove(aContextEntryKey);
+ return false;
+}
+
+// Allows a cache entry to be loaded directly from cache without further
+// validation - see nsICacheEntry.idl for further details
+void CacheStorageService::ForceEntryValidFor(nsACString const& aContextKey,
+ nsACString const& aEntryKey,
+ uint32_t aSecondsToTheFuture) {
+ mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
+
+ TimeStamp now = TimeStamp::NowLoRes();
+ ForcedValidEntriesPrune(now);
+
+ // This will be the timeout
+ TimeStamp validUntil = now + TimeDuration::FromSeconds(aSecondsToTheFuture);
+
+ mForcedValidEntries.Put(aContextKey + aEntryKey, validUntil);
+}
+
+void CacheStorageService::RemoveEntryForceValid(nsACString const& aContextKey,
+ nsACString const& aEntryKey) {
+ mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
+
+ LOG(("CacheStorageService::RemoveEntryForceValid context='%s' entryKey=%s",
+ aContextKey.BeginReading(), aEntryKey.BeginReading()));
+ mForcedValidEntries.Remove(aContextKey + aEntryKey);
+}
+
+// Cleans out the old entries in mForcedValidEntries
+void CacheStorageService::ForcedValidEntriesPrune(TimeStamp& now) {
+ static TimeDuration const oneMinute = TimeDuration::FromSeconds(60);
+ static TimeStamp dontPruneUntil = now + oneMinute;
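+  // Throttle: prune at most once per minute. The static above is seeded on the
+  // first call and pushed forward again after each prune below.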
+ if (now < dontPruneUntil) return;
+
+ for (auto iter = mForcedValidEntries.Iter(); !iter.Done(); iter.Next()) {
+ if (iter.Data() < now) {
+ iter.Remove();
+ }
+ }
+ dontPruneUntil = now + oneMinute;
+}
+
+void CacheStorageService::OnMemoryConsumptionChange(
+ CacheMemoryConsumer* aConsumer, uint32_t aCurrentMemoryConsumption) {
+ LOG(("CacheStorageService::OnMemoryConsumptionChange [consumer=%p, size=%u]",
+ aConsumer, aCurrentMemoryConsumption));
+
+ uint32_t savedMemorySize = aConsumer->mReportedMemoryConsumption;
+ if (savedMemorySize == aCurrentMemoryConsumption) return;
+
+ // Exchange saved size with current one.
+ aConsumer->mReportedMemoryConsumption = aCurrentMemoryConsumption;
+
+ bool usingDisk = !(aConsumer->mFlags & CacheMemoryConsumer::MEMORY_ONLY);
+ bool overLimit = Pool(usingDisk).OnMemoryConsumptionChange(
+ savedMemorySize, aCurrentMemoryConsumption);
+
+ if (!overLimit) return;
+
+ // It's likely the timer has already been set when we get here,
+ // check outside the lock to save resources.
+#ifdef MOZ_TSAN
+ if (mPurgeTimerActive) {
+#else
+ if (mPurgeTimer) {
+#endif
+ return;
+ }
+
+  // We don't know if this is called under the service lock or not, so we
+  // dispatch rather than call SchedulePurgeOverMemoryLimit() directly.
+ RefPtr<nsIEventTarget> cacheIOTarget = Thread();
+ if (!cacheIOTarget) return;
+
+ // Dispatch as a priority task, we want to set the purge timer
+ // ASAP to prevent vain redispatch of this event.
+ nsCOMPtr<nsIRunnable> event = NewRunnableMethod(
+ "net::CacheStorageService::SchedulePurgeOverMemoryLimit", this,
+ &CacheStorageService::SchedulePurgeOverMemoryLimit);
+ cacheIOTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
+}
+
+bool CacheStorageService::MemoryPool::OnMemoryConsumptionChange(
+ uint32_t aSavedMemorySize, uint32_t aCurrentMemoryConsumption) {
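+  // Swap the previously reported size of this consumer for the new one in the
+  // pool-wide total; the return value tells the caller whether the pool is now
+  // over its limit and a purge should be scheduled.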
+ mMemorySize -= aSavedMemorySize;
+ mMemorySize += aCurrentMemoryConsumption;
+
+ LOG((" mMemorySize=%u (+%u,-%u)", uint32_t(mMemorySize),
+ aCurrentMemoryConsumption, aSavedMemorySize));
+
+  // Bypass purging when memory consumption has not grown significantly
+ if (aCurrentMemoryConsumption <= aSavedMemorySize) return false;
+
+ return mMemorySize > Limit();
+}
+
+void CacheStorageService::SchedulePurgeOverMemoryLimit() {
+ LOG(("CacheStorageService::SchedulePurgeOverMemoryLimit"));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) {
+ LOG((" past shutdown"));
+ return;
+ }
+
+ if (mPurgeTimer) {
+ LOG((" timer already up"));
+ return;
+ }
+
+ mPurgeTimer = NS_NewTimer();
+ if (mPurgeTimer) {
+#ifdef MOZ_TSAN
+ mPurgeTimerActive = true;
+#endif
+ nsresult rv;
+ rv = mPurgeTimer->InitWithCallback(this, 1000, nsITimer::TYPE_ONE_SHOT);
+ LOG((" timer init rv=0x%08" PRIx32, static_cast<uint32_t>(rv)));
+ }
+}
+
+NS_IMETHODIMP
+CacheStorageService::Notify(nsITimer* aTimer) {
+ LOG(("CacheStorageService::Notify"));
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (aTimer == mPurgeTimer) {
+#ifdef MOZ_TSAN
+ mPurgeTimerActive = false;
+#endif
+ mPurgeTimer = nullptr;
+
+ nsCOMPtr<nsIRunnable> event =
+ NewRunnableMethod("net::CacheStorageService::PurgeOverMemoryLimit",
+ this, &CacheStorageService::PurgeOverMemoryLimit);
+ Dispatch(event);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheStorageService::GetName(nsACString& aName) {
+ aName.AssignLiteral("CacheStorageService");
+ return NS_OK;
+}
+
+void CacheStorageService::PurgeOverMemoryLimit() {
+ MOZ_ASSERT(IsOnManagementThread());
+
+ LOG(("CacheStorageService::PurgeOverMemoryLimit"));
+
+ static TimeDuration const kFourSeconds = TimeDuration::FromSeconds(4);
+ TimeStamp now = TimeStamp::NowLoRes();
+
+ if (!mLastPurgeTime.IsNull() && now - mLastPurgeTime < kFourSeconds) {
+ LOG((" bypassed, too soon"));
+ return;
+ }
+
+ mLastPurgeTime = now;
+
+ Pool(true).PurgeOverMemoryLimit();
+ Pool(false).PurgeOverMemoryLimit();
+}
+
+void CacheStorageService::MemoryPool::PurgeOverMemoryLimit() {
+ TimeStamp start(TimeStamp::Now());
+
+ uint32_t const memoryLimit = Limit();
+ if (mMemorySize > memoryLimit) {
+ LOG((" memory data consumption over the limit, abandon expired entries"));
+ PurgeExpired();
+ }
+
+  // The following code no longer makes sense since:
+  // Memory entries are never purged partially, only as a whole when the memory
+  // cache limit is overreached.
+  // Disk entries throw the data away ASAP so that only metadata are kept.
+  // TODO: once the concept of two separate pools has proven to work, this code
+  // should be cleaned up.
+#if 0
+ if (mMemorySize > memoryLimit) {
+ LOG((" memory data consumption over the limit, abandon disk backed data"));
+ PurgeByFrecency(CacheEntry::PURGE_DATA_ONLY_DISK_BACKED);
+ }
+
+ if (mMemorySize > memoryLimit) {
+    LOG((" metadata consumption over the limit, abandon disk backed entries"));
+ PurgeByFrecency(CacheEntry::PURGE_WHOLE_ONLY_DISK_BACKED);
+ }
+#endif
+
+ if (mMemorySize > memoryLimit) {
+ LOG((" memory data consumption over the limit, abandon any entry"));
+ PurgeByFrecency(CacheEntry::PURGE_WHOLE);
+ }
+
+ LOG((" purging took %1.2fms", (TimeStamp::Now() - start).ToMilliseconds()));
+}
+
+void CacheStorageService::MemoryPool::PurgeExpired() {
+ MOZ_ASSERT(IsOnManagementThread());
+
+ mExpirationArray.Sort(ExpirationComparator());
+ uint32_t now = NowInSeconds();
+
+ uint32_t const memoryLimit = Limit();
+
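+  // Purge only entries that have actually expired, and only while the pool is
+  // still over its limit; the loop stops as soon as we drop below it.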
+ for (uint32_t i = 0;
+ mMemorySize > memoryLimit && i < mExpirationArray.Length();) {
+ if (CacheIOThread::YieldAndRerun()) return;
+
+ RefPtr<CacheEntry> entry = mExpirationArray[i];
+
+ uint32_t expirationTime = entry->GetExpirationTime();
+ if (expirationTime > 0 && expirationTime <= now &&
+ entry->Purge(CacheEntry::PURGE_WHOLE)) {
+ LOG((" purged expired, entry=%p, exptime=%u (now=%u)", entry.get(),
+ entry->GetExpirationTime(), now));
+ continue;
+ }
+
+ // not purged, move to the next one
+ ++i;
+ }
+}
+
+void CacheStorageService::MemoryPool::PurgeByFrecency(uint32_t aWhat) {
+ MOZ_ASSERT(IsOnManagementThread());
+
+  // Pretend the limit is 10% lower so that we get rid of more entries in one
+  // shot and save the sorting below.
+ uint32_t const memoryLimit = Limit() * 0.9;
+
+ // Let's do our best and try to shorten the array to at least this size so
+ // that it doesn't overgrow. We will ignore higher priority events and keep
+  // looping to try to purge while the array is larger than this size.
+ static size_t const kFrecencyArrayLengthLimit = 2000;
+
+ LOG(("MemoryPool::PurgeByFrecency, len=%zu", mFrecencyArray.Length()));
+
+ mFrecencyArray.Sort(FrecencyComparator());
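+  // After the sort the least frecent entries come first and are purged first;
+  // zero-frecency (i.e. freshly registered) entries sort to the tail, see
+  // FrecencyComparator above.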
+
+ for (uint32_t i = 0;
+ mMemorySize > memoryLimit && i < mFrecencyArray.Length();) {
+ if (mFrecencyArray.Length() <= kFrecencyArrayLengthLimit &&
+ CacheIOThread::YieldAndRerun()) {
+ LOG(("MemoryPool::PurgeByFrecency interrupted"));
+ return;
+ }
+
+ RefPtr<CacheEntry> entry = mFrecencyArray[i];
+ if (entry->Purge(aWhat)) {
+ LOG((" abandoned (%d), entry=%p, frecency=%1.10f", aWhat, entry.get(),
+ entry->GetFrecency()));
+ continue;
+ }
+
+ // not purged, move to the next one
+ ++i;
+ }
+
+ LOG(("MemoryPool::PurgeByFrecency done"));
+}
+
+void CacheStorageService::MemoryPool::PurgeAll(uint32_t aWhat) {
+ LOG(("CacheStorageService::MemoryPool::PurgeAll aWhat=%d", aWhat));
+ MOZ_ASSERT(IsOnManagementThread());
+
+ for (uint32_t i = 0; i < mFrecencyArray.Length();) {
+ if (CacheIOThread::YieldAndRerun()) return;
+
+ RefPtr<CacheEntry> entry = mFrecencyArray[i];
+
+ if (entry->Purge(aWhat)) {
+ LOG((" abandoned entry=%p", entry.get()));
+ continue;
+ }
+
+ // not purged, move to the next one
+ ++i;
+ }
+}
+
+// Methods exposed to and used by CacheStorage.
+
+nsresult CacheStorageService::AddStorageEntry(CacheStorage const* aStorage,
+ const nsACString& aURI,
+ const nsACString& aIdExtension,
+ bool aReplace,
+ CacheEntryHandle** aResult) {
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_ARG(aStorage);
+
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ return AddStorageEntry(contextKey, aURI, aIdExtension,
+ aStorage->WriteToDisk(), aStorage->SkipSizeCheck(),
+ aStorage->Pinning(), aReplace, aResult);
+}
+
+nsresult CacheStorageService::AddStorageEntry(
+ const nsACString& aContextKey, const nsACString& aURI,
+ const nsACString& aIdExtension, bool aWriteToDisk, bool aSkipSizeCheck,
+ bool aPin, bool aReplace, CacheEntryHandle** aResult) {
+ nsresult rv;
+
+ nsAutoCString entryKey;
+ rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
+ NS_ENSURE_SUCCESS(rv, rv);
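+  // Note the entry key deliberately carries no context prefix; the context is
+  // the key of the per-context table looked up in sGlobalEntryTables below.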
+
+ LOG(("CacheStorageService::AddStorageEntry [entryKey=%s, contextKey=%s]",
+ entryKey.get(), aContextKey.BeginReading()));
+
+ RefPtr<CacheEntry> entry;
+ RefPtr<CacheEntryHandle> handle;
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ // Ensure storage table
+ CacheEntryTable* entries;
+ if (!sGlobalEntryTables->Get(aContextKey, &entries)) {
+ entries = new CacheEntryTable(CacheEntryTable::ALL_ENTRIES);
+ sGlobalEntryTables->Put(aContextKey, entries);
+ LOG((" new storage entries table for context '%s'",
+ aContextKey.BeginReading()));
+ }
+
+ bool entryExists = entries->Get(entryKey, getter_AddRefs(entry));
+
+ if (entryExists && !aReplace) {
+ // check whether we want to turn this entry to a memory-only.
+ if (MOZ_UNLIKELY(!aWriteToDisk) && MOZ_LIKELY(entry->IsUsingDisk())) {
+ LOG((" entry is persistent but we want mem-only, replacing it"));
+ aReplace = true;
+ }
+ }
+
+ // If truncate is demanded, delete and doom the current entry
+ if (entryExists && aReplace) {
+ entries->Remove(entryKey);
+
+ LOG((" dooming entry %p for %s because of OPEN_TRUNCATE", entry.get(),
+ entryKey.get()));
+ // On purpose called under the lock to prevent races of doom and open on
+      // the I/O thread. No need to remove from both memory-only and all-entries
+ // tables. The new entry will overwrite the shadow entry in its ctor.
+ entry->DoomAlreadyRemoved();
+
+ entry = nullptr;
+ entryExists = false;
+
+ // Would only lead to deleting force-valid timestamp again. We don't need
+ // the replace information anymore after this point anyway.
+ aReplace = false;
+ }
+
+ // Ensure entry for the particular URL
+ if (!entryExists) {
+ // When replacing with a new entry, always remove the current force-valid
+ // timestamp, this is the only place to do it.
+ if (aReplace) {
+ RemoveEntryForceValid(aContextKey, entryKey);
+ }
+
+ // Entry is not in the hashtable or has just been truncated...
+ entry = new CacheEntry(aContextKey, aURI, aIdExtension, aWriteToDisk,
+ aSkipSizeCheck, aPin);
+ entries->Put(entryKey, RefPtr{entry});
+ LOG((" new entry %p for %s", entry.get(), entryKey.get()));
+ }
+
+ if (entry) {
+      // If this entry has not been referenced by any consumer for a long time,
+      // it gets its first 'handles count' reference again here.
+ handle = entry->NewHandle();
+ }
+ }
+
+ handle.forget(aResult);
+ return NS_OK;
+}
+
+nsresult CacheStorageService::CheckStorageEntry(CacheStorage const* aStorage,
+ const nsACString& aURI,
+ const nsACString& aIdExtension,
+ bool* aResult) {
+ nsresult rv;
+
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ if (!aStorage->WriteToDisk()) {
+ AppendMemoryStorageTag(contextKey);
+ }
+
+ LOG(("CacheStorageService::CheckStorageEntry [uri=%s, eid=%s, contextKey=%s]",
+ aURI.BeginReading(), aIdExtension.BeginReading(), contextKey.get()));
+
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ nsAutoCString entryKey;
+ rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ CacheEntryTable* entries;
+ if ((*aResult = sGlobalEntryTables->Get(contextKey, &entries)) &&
+ entries->GetWeak(entryKey, aResult)) {
+ LOG((" found in hash tables"));
+ return NS_OK;
+ }
+ }
+
+ if (!aStorage->WriteToDisk()) {
+ // Memory entry, nothing more to do.
+ LOG((" not found in hash tables"));
+ return NS_OK;
+ }
+
+ // Disk entry, not found in the hashtable, check the index.
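+  // Unlike the in-memory tables, the index is keyed by the full key including
+  // the context prefix, hence the different hashing key below.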
+ nsAutoCString fileKey;
+ rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, fileKey);
+
+ CacheIndex::EntryStatus status;
+ rv = CacheIndex::HasEntry(fileKey, &status);
+ if (NS_FAILED(rv) || status == CacheIndex::DO_NOT_KNOW) {
+ LOG((" index doesn't know, rv=0x%08" PRIx32, static_cast<uint32_t>(rv)));
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *aResult = status == CacheIndex::EXISTS;
+ LOG((" %sfound in index", *aResult ? "" : "not "));
+ return NS_OK;
+}
+
+nsresult CacheStorageService::GetCacheIndexEntryAttrs(
+ CacheStorage const* aStorage, const nsACString& aURI,
+ const nsACString& aIdExtension, bool* aHasAltData, uint32_t* aFileSizeKb) {
+ nsresult rv;
+
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ LOG(
+ ("CacheStorageService::GetCacheIndexEntryAttrs [uri=%s, eid=%s, "
+ "contextKey=%s]",
+ aURI.BeginReading(), aIdExtension.BeginReading(), contextKey.get()));
+
+ nsAutoCString fileKey;
+ rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, fileKey);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ *aHasAltData = false;
+ *aFileSizeKb = 0;
+ auto closure = [&aHasAltData, &aFileSizeKb](const CacheIndexEntry* entry) {
+ *aHasAltData = entry->GetHasAltData();
+ *aFileSizeKb = entry->GetFileSize();
+ };
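+  // The closure is invoked by CacheIndex::HasEntry with the index entry when
+  // one is found, filling the out parameters before the status check below.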
+
+ CacheIndex::EntryStatus status;
+ rv = CacheIndex::HasEntry(fileKey, &status, closure);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ if (status != CacheIndex::EXISTS) {
+ return NS_ERROR_CACHE_KEY_NOT_FOUND;
+ }
+
+ return NS_OK;
+}
+
+namespace {
+
+class CacheEntryDoomByKeyCallback : public CacheFileIOListener,
+ public nsIRunnable {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIRUNNABLE
+
+ explicit CacheEntryDoomByKeyCallback(nsICacheEntryDoomCallback* aCallback)
+ : mCallback(aCallback), mResult(NS_ERROR_NOT_INITIALIZED) {}
+
+ private:
+ virtual ~CacheEntryDoomByKeyCallback();
+
+ NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
+ return NS_OK;
+ }
+ NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
+ nsresult aResult) override {
+ return NS_OK;
+ }
+ NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
+ nsresult aResult) override {
+ return NS_OK;
+ }
+ NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
+ NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
+ return NS_OK;
+ }
+ NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
+ nsresult aResult) override {
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
+ nsresult mResult;
+};
+
+CacheEntryDoomByKeyCallback::~CacheEntryDoomByKeyCallback() {
+ if (mCallback)
+ ProxyReleaseMainThread("CacheEntryDoomByKeyCallback::mCallback", mCallback);
+}
+
+NS_IMETHODIMP CacheEntryDoomByKeyCallback::OnFileDoomed(
+ CacheFileHandle* aHandle, nsresult aResult) {
+ if (!mCallback) return NS_OK;
+
+ mResult = aResult;
+ if (NS_IsMainThread()) {
+ Run();
+ } else {
+ NS_DispatchToMainThread(this);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP CacheEntryDoomByKeyCallback::Run() {
+ mCallback->OnCacheEntryDoomed(mResult);
+ return NS_OK;
+}
+
+NS_IMPL_ISUPPORTS(CacheEntryDoomByKeyCallback, CacheFileIOListener,
+ nsIRunnable);
+
+} // namespace
+
+nsresult CacheStorageService::DoomStorageEntry(
+ CacheStorage const* aStorage, const nsACString& aURI,
+ const nsACString& aIdExtension, nsICacheEntryDoomCallback* aCallback) {
+ LOG(("CacheStorageService::DoomStorageEntry"));
+
+ NS_ENSURE_ARG(aStorage);
+
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ nsAutoCString entryKey;
+ nsresult rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<CacheEntry> entry;
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ CacheEntryTable* entries;
+ if (sGlobalEntryTables->Get(contextKey, &entries)) {
+ if (entries->Get(entryKey, getter_AddRefs(entry))) {
+ if (aStorage->WriteToDisk() || !entry->IsUsingDisk()) {
+ // When evicting from disk storage, purge
+ // When evicting from memory storage and the entry is memory-only,
+ // purge
+ LOG(
+ (" purging entry %p for %s [storage use disk=%d, entry use "
+ "disk=%d]",
+ entry.get(), entryKey.get(), aStorage->WriteToDisk(),
+ entry->IsUsingDisk()));
+ entries->Remove(entryKey);
+ } else {
+ // Otherwise, leave it
+ LOG(
+ (" leaving entry %p for %s [storage use disk=%d, entry use "
+ "disk=%d]",
+ entry.get(), entryKey.get(), aStorage->WriteToDisk(),
+ entry->IsUsingDisk()));
+ entry = nullptr;
+ }
+ }
+ }
+
+ if (!entry) {
+ RemoveEntryForceValid(contextKey, entryKey);
+ }
+ }
+
+ if (entry) {
+ LOG((" dooming entry %p for %s", entry.get(), entryKey.get()));
+ return entry->AsyncDoom(aCallback);
+ }
+
+ LOG((" no entry loaded for %s", entryKey.get()));
+
+ if (aStorage->WriteToDisk()) {
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, entryKey);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ LOG((" dooming file only for %s", entryKey.get()));
+
+ RefPtr<CacheEntryDoomByKeyCallback> callback(
+ new CacheEntryDoomByKeyCallback(aCallback));
+ rv = CacheFileIOManager::DoomFileByKey(entryKey, callback);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+ }
+
+ class Callback : public Runnable {
+ public:
+ explicit Callback(nsICacheEntryDoomCallback* aCallback)
+ : mozilla::Runnable("Callback"), mCallback(aCallback) {}
+ NS_IMETHOD Run() override {
+ mCallback->OnCacheEntryDoomed(NS_ERROR_NOT_AVAILABLE);
+ return NS_OK;
+ }
+ nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
+ };
+
+ if (aCallback) {
+ RefPtr<Runnable> callback = new Callback(aCallback);
+ return NS_DispatchToMainThread(callback);
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheStorageService::DoomStorageEntries(
+ CacheStorage const* aStorage, nsICacheEntryDoomCallback* aCallback) {
+ LOG(("CacheStorageService::DoomStorageEntries"));
+
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+ NS_ENSURE_ARG(aStorage);
+
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ return DoomStorageEntries(contextKey, aStorage->LoadInfo(),
+ aStorage->WriteToDisk(), aStorage->Pinning(),
+ aCallback);
+}
+
+nsresult CacheStorageService::DoomStorageEntries(
+ const nsACString& aContextKey, nsILoadContextInfo* aContext,
+ bool aDiskStorage, bool aPinned, nsICacheEntryDoomCallback* aCallback) {
+ LOG(("CacheStorageService::DoomStorageEntries [context=%s]",
+ aContextKey.BeginReading()));
+
+ mLock.AssertCurrentThreadOwns();
+
+ NS_ENSURE_TRUE(!mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ nsAutoCString memoryStorageID(aContextKey);
+ AppendMemoryStorageTag(memoryStorageID);
+
+ if (aDiskStorage) {
+ LOG((" dooming disk+memory storage of %s", aContextKey.BeginReading()));
+
+ // Walk one by one and remove entries according their pin status
+ CacheEntryTable *diskEntries, *memoryEntries;
+ if (sGlobalEntryTables->Get(aContextKey, &diskEntries)) {
+ sGlobalEntryTables->Get(memoryStorageID, &memoryEntries);
+
+ for (auto iter = diskEntries->Iter(); !iter.Done(); iter.Next()) {
+ auto entry = iter.Data();
+ if (entry->DeferOrBypassRemovalOnPinStatus(aPinned)) {
+ continue;
+ }
+
+ if (memoryEntries) {
+ RemoveExactEntry(memoryEntries, iter.Key(), entry, false);
+ }
+ iter.Remove();
+ }
+ }
+
+ if (aContext && !aContext->IsPrivate()) {
+ LOG((" dooming disk entries"));
+ CacheFileIOManager::EvictByContext(aContext, aPinned, u""_ns);
+ }
+ } else {
+ LOG((" dooming memory-only storage of %s", aContextKey.BeginReading()));
+
+ // Remove the memory entries table from the global tables.
+ // Since we store memory entries also in the disk entries table
+ // we need to remove the memory entries from the disk table one
+ // by one manually.
+ mozilla::UniquePtr<CacheEntryTable> memoryEntries;
+ sGlobalEntryTables->Remove(memoryStorageID, &memoryEntries);
+
+ CacheEntryTable* diskEntries;
+ if (memoryEntries && sGlobalEntryTables->Get(aContextKey, &diskEntries)) {
+ for (auto iter = memoryEntries->Iter(); !iter.Done(); iter.Next()) {
+ auto entry = iter.Data();
+ RemoveExactEntry(diskEntries, iter.Key(), entry, false);
+ }
+ }
+ }
+
+ {
+ mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
+
+ if (aContext) {
+ for (auto iter = mForcedValidEntries.Iter(); !iter.Done(); iter.Next()) {
+ bool matches;
+ DebugOnly<nsresult> rv = CacheFileUtils::KeyMatchesLoadContextInfo(
+ iter.Key(), aContext, &matches);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+ if (matches) {
+ iter.Remove();
+ }
+ }
+ } else {
+ mForcedValidEntries.Clear();
+ }
+ }
+
+  // An artificial callback. This is a candidate for removal, though. In the new
+  // cache any 'doom' or 'evict' function ensures that the entry or entries
+  // being doomed is/are not accessible after the function returns, so there is
+  // probably no need for a callback - it has no meaning. But for compatibility
+  // with the old cache that is still in the tree we keep the API similar to be
+  // able to make tests as well as other consumers work for now.
+ class Callback : public Runnable {
+ public:
+ explicit Callback(nsICacheEntryDoomCallback* aCallback)
+ : mozilla::Runnable("Callback"), mCallback(aCallback) {}
+ NS_IMETHOD Run() override {
+ mCallback->OnCacheEntryDoomed(NS_OK);
+ return NS_OK;
+ }
+ nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
+ };
+
+ if (aCallback) {
+ RefPtr<Runnable> callback = new Callback(aCallback);
+ return NS_DispatchToMainThread(callback);
+ }
+
+ return NS_OK;
+}
+
+nsresult CacheStorageService::WalkStorageEntries(
+ CacheStorage const* aStorage, bool aVisitEntries,
+ nsICacheStorageVisitor* aVisitor) {
+ LOG(("CacheStorageService::WalkStorageEntries [cb=%p, visitentries=%d]",
+ aVisitor, aVisitEntries));
+ NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
+
+ NS_ENSURE_ARG(aStorage);
+
+ if (aStorage->WriteToDisk()) {
+ RefPtr<WalkDiskCacheRunnable> event = new WalkDiskCacheRunnable(
+ aStorage->LoadInfo(), aVisitEntries, aVisitor);
+ return event->Walk();
+ }
+
+ RefPtr<WalkMemoryCacheRunnable> event = new WalkMemoryCacheRunnable(
+ aStorage->LoadInfo(), aVisitEntries, aVisitor);
+ return event->Walk();
+}
+
+void CacheStorageService::CacheFileDoomed(nsILoadContextInfo* aLoadContextInfo,
+ const nsACString& aIdExtension,
+ const nsACString& aURISpec) {
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aLoadContextInfo, contextKey);
+
+ nsAutoCString entryKey;
+ CacheEntry::HashingKey(""_ns, aIdExtension, aURISpec, entryKey);
+
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) {
+ return;
+ }
+
+ CacheEntryTable* entries;
+ RefPtr<CacheEntry> entry;
+
+ if (sGlobalEntryTables->Get(contextKey, &entries) &&
+ entries->Get(entryKey, getter_AddRefs(entry))) {
+ if (entry->IsFileDoomed()) {
+ // Need to remove under the lock to avoid possible race leading
+ // to duplication of the entry per its key.
+ RemoveExactEntry(entries, entryKey, entry, false);
+ entry->DoomAlreadyRemoved();
+ }
+
+    // Otherwise the entry found is not the one that has been doomed by the
+    // lower eviction layer; leave everything unchanged. Either way we're done.
+ return;
+ }
+
+ RemoveEntryForceValid(contextKey, entryKey);
+}
+
+bool CacheStorageService::GetCacheEntryInfo(
+ nsILoadContextInfo* aLoadContextInfo, const nsACString& aIdExtension,
+ const nsACString& aURISpec, EntryInfoCallback* aCallback) {
+ nsAutoCString contextKey;
+ CacheFileUtils::AppendKeyPrefix(aLoadContextInfo, contextKey);
+
+ nsAutoCString entryKey;
+ CacheEntry::HashingKey(""_ns, aIdExtension, aURISpec, entryKey);
+
+ RefPtr<CacheEntry> entry;
+ {
+ mozilla::MutexAutoLock lock(mLock);
+
+ if (mShutdown) {
+ return false;
+ }
+
+ CacheEntryTable* entries;
+ if (!sGlobalEntryTables->Get(contextKey, &entries)) {
+ return false;
+ }
+
+ if (!entries->Get(entryKey, getter_AddRefs(entry))) {
+ return false;
+ }
+ }
+
+ GetCacheEntryInfo(entry, aCallback);
+ return true;
+}
+
+// static
+void CacheStorageService::GetCacheEntryInfo(CacheEntry* aEntry,
+ EntryInfoCallback* aCallback) {
+ nsCString const uriSpec = aEntry->GetURI();
+ nsCString const enhanceId = aEntry->GetEnhanceID();
+
+ nsAutoCString entryKey;
+ aEntry->HashingKeyWithStorage(entryKey);
+
+ nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(entryKey);
+
+ uint32_t dataSize;
+ if (NS_FAILED(aEntry->GetStorageDataSize(&dataSize))) {
+ dataSize = 0;
+ }
+ int32_t fetchCount;
+ if (NS_FAILED(aEntry->GetFetchCount(&fetchCount))) {
+ fetchCount = 0;
+ }
+ uint32_t lastModified;
+ if (NS_FAILED(aEntry->GetLastModified(&lastModified))) {
+ lastModified = 0;
+ }
+ uint32_t expirationTime;
+ if (NS_FAILED(aEntry->GetExpirationTime(&expirationTime))) {
+ expirationTime = 0;
+ }
+
+ aCallback->OnEntryInfo(uriSpec, enhanceId, dataSize, fetchCount, lastModified,
+ expirationTime, aEntry->IsPinned(), info);
+}
+
+// static
+uint32_t CacheStorageService::CacheQueueSize(bool highPriority) {
+ RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
+ // The thread will be null at shutdown.
+ if (!thread) {
+ return 0;
+ }
+ return thread->QueueSize(highPriority);
+}
+
+// Telemetry collection
+
+namespace {
+
+bool TelemetryEntryKey(CacheEntry const* entry, nsAutoCString& key) {
+ nsAutoCString entryKey;
+ nsresult rv = entry->HashingKey(entryKey);
+ if (NS_FAILED(rv)) return false;
+
+ if (entry->GetStorageID().IsEmpty()) {
+ // Hopefully this will be const-copied, saves some memory
+ key = entryKey;
+ } else {
+ key.Assign(entry->GetStorageID());
+ key.Append(':');
+ key.Append(entryKey);
+ }
+
+ return true;
+}
+
+} // namespace
+
+void CacheStorageService::TelemetryPrune(TimeStamp& now) {
+ static TimeDuration const oneMinute = TimeDuration::FromSeconds(60);
+ static TimeStamp dontPruneUntil = now + oneMinute;
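+  // Same throttling pattern as ForcedValidEntriesPrune: prune at most once a
+  // minute.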
+ if (now < dontPruneUntil) return;
+
+ static TimeDuration const fifteenMinutes = TimeDuration::FromSeconds(900);
+ for (auto iter = mPurgeTimeStamps.Iter(); !iter.Done(); iter.Next()) {
+ if (now - iter.Data() > fifteenMinutes) {
+ // We are not interested in resurrection of entries after 15 minutes
+ // of time. This is also the limit for the telemetry.
+ iter.Remove();
+ }
+ }
+ dontPruneUntil = now + oneMinute;
+}
+
+void CacheStorageService::TelemetryRecordEntryCreation(
+ CacheEntry const* entry) {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+ nsAutoCString key;
+ if (!TelemetryEntryKey(entry, key)) return;
+
+ TimeStamp now = TimeStamp::NowLoRes();
+ TelemetryPrune(now);
+
+  // When an entry is created (registered actually) we check if there is
+  // a timestamp marking when this very same cache entry has been removed
+  // (deregistered) because of over-memory-limit purging. If such a timestamp
+  // is found, accumulate telemetry on how long the entry was away.
+ TimeStamp timeStamp;
+ if (!mPurgeTimeStamps.Get(key, &timeStamp)) return;
+
+ mPurgeTimeStamps.Remove(key);
+
+ Telemetry::AccumulateTimeDelta(Telemetry::HTTP_CACHE_ENTRY_RELOAD_TIME,
+ timeStamp, TimeStamp::NowLoRes());
+}
+
+void CacheStorageService::TelemetryRecordEntryRemoval(CacheEntry const* entry) {
+ MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
+
+  // Doomed entries must not be considered; we are only interested in purged
+  // entries. Note that the mIsDoomed flag is always set before deregistration
+  // happens.
+ if (entry->IsDoomed()) return;
+
+ nsAutoCString key;
+ if (!TelemetryEntryKey(entry, key)) return;
+
+  // When an entry is removed (deregistered actually) we put a timestamp for
+  // this entry into the hashtable so that when the entry is created
+  // (registered) again we know how long it was away. Also accumulate the number
+  // of AsyncOpen calls on the entry; this tells us how efficiently the pool works.
+
+ TimeStamp now = TimeStamp::NowLoRes();
+ TelemetryPrune(now);
+ mPurgeTimeStamps.Put(key, now);
+
+ Telemetry::Accumulate(Telemetry::HTTP_CACHE_ENTRY_REUSE_COUNT,
+ entry->UseCount());
+ Telemetry::AccumulateTimeDelta(Telemetry::HTTP_CACHE_ENTRY_ALIVE_TIME,
+ entry->LoadStart(), TimeStamp::NowLoRes());
+}
+
+// nsIMemoryReporter
+
+size_t CacheStorageService::SizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
+
+ size_t n = 0;
+  // The elements are referenced by sGlobalEntryTables and are reported from
+  // there.
+ n += Pool(true).mFrecencyArray.ShallowSizeOfExcludingThis(mallocSizeOf);
+ n += Pool(true).mExpirationArray.ShallowSizeOfExcludingThis(mallocSizeOf);
+ n += Pool(false).mFrecencyArray.ShallowSizeOfExcludingThis(mallocSizeOf);
+ n += Pool(false).mExpirationArray.ShallowSizeOfExcludingThis(mallocSizeOf);
+ // Entries reported manually in CacheStorageService::CollectReports callback
+ if (sGlobalEntryTables) {
+ n += sGlobalEntryTables->ShallowSizeOfIncludingThis(mallocSizeOf);
+ }
+ n += mPurgeTimeStamps.SizeOfExcludingThis(mallocSizeOf);
+
+ return n;
+}
+
+size_t CacheStorageService::SizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
+}
+
+NS_IMETHODIMP
+CacheStorageService::CollectReports(nsIHandleReportCallback* aHandleReport,
+ nsISupports* aData, bool aAnonymize) {
+ MOZ_COLLECT_REPORT("explicit/network/cache2/io", KIND_HEAP, UNITS_BYTES,
+ CacheFileIOManager::SizeOfIncludingThis(MallocSizeOf),
+ "Memory used by the cache IO manager.");
+
+ MOZ_COLLECT_REPORT("explicit/network/cache2/index", KIND_HEAP, UNITS_BYTES,
+ CacheIndex::SizeOfIncludingThis(MallocSizeOf),
+ "Memory used by the cache index.");
+
+ MutexAutoLock lock(mLock);
+
+  // Report the service instance; entries are reported separately below.
+ MOZ_COLLECT_REPORT("explicit/network/cache2/service", KIND_HEAP, UNITS_BYTES,
+ SizeOfIncludingThis(MallocSizeOf),
+ "Memory used by the cache storage service.");
+
+ // Report all entries, each storage separately (by the context key)
+ //
+ // References are:
+ // sGlobalEntryTables to N CacheEntryTable
+ // CacheEntryTable to N CacheEntry
+ // CacheEntry to 1 CacheFile
+ // CacheFile to
+ // N CacheFileChunk (keeping the actual data)
+ // 1 CacheFileMetadata (keeping http headers etc.)
+ // 1 CacheFileOutputStream
+ // N CacheFileInputStream
+ if (sGlobalEntryTables) {
+ for (auto iter1 = sGlobalEntryTables->Iter(); !iter1.Done(); iter1.Next()) {
+ CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
+
+ CacheEntryTable* table = iter1.UserData();
+
+ size_t size = 0;
+ mozilla::MallocSizeOf mallocSizeOf = CacheStorageService::MallocSizeOf;
+
+ size += table->ShallowSizeOfIncludingThis(mallocSizeOf);
+ for (auto iter2 = table->Iter(); !iter2.Done(); iter2.Next()) {
+ size += iter2.Key().SizeOfExcludingThisIfUnshared(mallocSizeOf);
+
+ // Bypass memory-only entries, those will be reported when iterating the
+ // memory only table. Memory-only entries are stored in both ALL_ENTRIES
+ // and MEMORY_ONLY hashtables.
+ RefPtr<mozilla::net::CacheEntry> const& entry = iter2.Data();
+ if (table->Type() == CacheEntryTable::MEMORY_ONLY ||
+ entry->IsUsingDisk()) {
+ size += entry->SizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+
+ aHandleReport->Callback(
+ ""_ns,
+ nsPrintfCString(
+ "explicit/network/cache2/%s-storage(%s)",
+ table->Type() == CacheEntryTable::MEMORY_ONLY ? "memory" : "disk",
+ aAnonymize ? "<anonymized>" : iter1.Key().BeginReading()),
+ nsIMemoryReporter::KIND_HEAP, nsIMemoryReporter::UNITS_BYTES, size,
+ "Memory used by the cache storage."_ns, aData);
+ }
+ }
+
+ return NS_OK;
+}
+
+// nsICacheTesting
+
+NS_IMETHODIMP
+CacheStorageService::IOThreadSuspender::Run() {
+ MonitorAutoLock mon(mMon);
+ while (!mSignaled) {
+ mon.Wait();
+ }
+ return NS_OK;
+}
+
+void CacheStorageService::IOThreadSuspender::Notify() {
+ MonitorAutoLock mon(mMon);
+ mSignaled = true;
+ mon.Notify();
+}
+
+NS_IMETHODIMP
+CacheStorageService::SuspendCacheIOThread(uint32_t aLevel) {
+ RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
+ if (!thread) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ MOZ_ASSERT(!mActiveIOSuspender);
+ mActiveIOSuspender = new IOThreadSuspender();
+ return thread->Dispatch(mActiveIOSuspender, aLevel);
+}
+
+NS_IMETHODIMP
+CacheStorageService::ResumeCacheIOThread() {
+ MOZ_ASSERT(mActiveIOSuspender);
+
+ RefPtr<IOThreadSuspender> suspender;
+ suspender.swap(mActiveIOSuspender);
+ suspender->Notify();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+CacheStorageService::Flush(nsIObserver* aObserver) {
+ RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
+ if (!thread) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (!observerService) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+  // Adding as weak; the consumer is responsible for keeping the reference
+  // alive until notified.
+ observerService->AddObserver(aObserver, "cacheservice:purge-memory-pools",
+ false);
+
+ // This runnable will do the purging and when done, notifies the above
+ // observer. We dispatch it to the CLOSE level, so all data writes scheduled
+ // up to this time will be done before this purging happens.
+ RefPtr<CacheStorageService::PurgeFromMemoryRunnable> r =
+ new CacheStorageService::PurgeFromMemoryRunnable(this,
+ CacheEntry::PURGE_WHOLE);
+
+ return thread->Dispatch(r, CacheIOThread::WRITE);
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/CacheStorageService.h b/netwerk/cache2/CacheStorageService.h
new file mode 100644
index 0000000000..107e574701
--- /dev/null
+++ b/netwerk/cache2/CacheStorageService.h
@@ -0,0 +1,437 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CacheStorageService__h__
+#define CacheStorageService__h__
+
+#include "nsICacheStorageService.h"
+#include "nsIMemoryReporter.h"
+#include "nsINamed.h"
+#include "nsITimer.h"
+#include "nsICacheTesting.h"
+
+#include "nsClassHashtable.h"
+#include "nsDataHashtable.h"
+#include "nsString.h"
+#include "nsThreadUtils.h"
+#include "nsProxyRelease.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
+
+class nsIURI;
+class nsICacheEntryDoomCallback;
+class nsICacheStorageVisitor;
+class nsIRunnable;
+class nsIThread;
+class nsIEventTarget;
+
+namespace mozilla {
+
+class OriginAttributes;
+
+namespace net {
+
+class CacheStorageService;
+class CacheStorage;
+class CacheEntry;
+class CacheEntryHandle;
+
+class CacheMemoryConsumer {
+ private:
+ friend class CacheStorageService;
+ uint32_t mReportedMemoryConsumption : 30;
+ uint32_t mFlags : 2;
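+  // 30 bits for the last reported size, 2 bits for the NORMAL / MEMORY_ONLY /
+  // DONT_REPORT flags declared below.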
+
+ private:
+ CacheMemoryConsumer() = delete;
+
+ protected:
+ enum {
+    // No special treatment, always reports to the disk-entries pool.
+ NORMAL = 0,
+    // This consumer belongs to a memory-only cache entry, used to decide
+    // which of the two pools (disk or memory) this consumption is counted at.
+ MEMORY_ONLY = 1 << 0,
+ // Prevent reports of this consumer at all, used for disk data chunks since
+ // we throw them away as soon as the entry is not used by any consumer and
+ // don't want to make them wipe the whole pool out during their short life.
+ DONT_REPORT = 1 << 1
+ };
+
+ explicit CacheMemoryConsumer(uint32_t aFlags);
+ ~CacheMemoryConsumer() { DoMemoryReport(0); }
+ void DoMemoryReport(uint32_t aCurrentSize);
+};
+
+class CacheStorageService final : public nsICacheStorageService,
+ public nsIMemoryReporter,
+ public nsITimerCallback,
+ public nsICacheTesting,
+ public nsINamed {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHESTORAGESERVICE
+ NS_DECL_NSIMEMORYREPORTER
+ NS_DECL_NSITIMERCALLBACK
+ NS_DECL_NSICACHETESTING
+ NS_DECL_NSINAMED
+
+ CacheStorageService();
+
+ void Shutdown();
+ void DropPrivateBrowsingEntries();
+
+ // Takes care of deleting any pending trashes for both cache1 and cache2
+  // as well as the old cache directory.
+ static void CleaupCacheDirectories();
+
+ static CacheStorageService* Self() { return sSelf; }
+ static nsISupports* SelfISupports() {
+ return static_cast<nsICacheStorageService*>(Self());
+ }
+ nsresult Dispatch(nsIRunnable* aEvent);
+ static bool IsRunning() { return sSelf && !sSelf->mShutdown; }
+ static bool IsOnManagementThread();
+ already_AddRefed<nsIEventTarget> Thread() const;
+ mozilla::Mutex& Lock() { return mLock; }
+
+ // Tracks entries that may be forced valid in a pruned hashtable.
+ nsDataHashtable<nsCStringHashKey, TimeStamp> mForcedValidEntries;
+ void ForcedValidEntriesPrune(TimeStamp& now);
+
+  // Helper thread-safe interface to pass entry info; the only difference from
+  // nsICacheStorageVisitor is that only the URI spec is passed instead of an
+  // nsIURI.
+ class EntryInfoCallback {
+ public:
+ virtual void OnEntryInfo(const nsACString& aURISpec,
+ const nsACString& aIdEnhance, int64_t aDataSize,
+ int32_t aFetchCount, uint32_t aLastModifiedTime,
+ uint32_t aExpirationTime, bool aPinned,
+ nsILoadContextInfo* aInfo) = 0;
+ };
+
+ // Invokes OnEntryInfo for the given aEntry, synchronously.
+ static void GetCacheEntryInfo(CacheEntry* aEntry,
+ EntryInfoCallback* aVisitor);
+
+ nsresult GetCacheIndexEntryAttrs(CacheStorage const* aStorage,
+ const nsACString& aURI,
+ const nsACString& aIdExtension,
+ bool* aHasAltData, uint32_t* aFileSizeKb);
+
+ static uint32_t CacheQueueSize(bool highPriority);
+
+ // Memory reporting
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
+
+ private:
+ virtual ~CacheStorageService();
+ void ShutdownBackground();
+
+ private:
+ // The following methods may only be called on the management
+ // thread.
+ friend class CacheEntry;
+
+ /**
+ * Registers the entry in management ordered arrays, a mechanism
+ * helping with weighted purge of entries.
+   * Management arrays keep a hard reference to the entry. The entry is
+   * responsible for removing itself, or the service is responsible for
+   * removing the entry when it's no longer needed.
+ */
+ void RegisterEntry(CacheEntry* aEntry);
+
+ /**
+ * Deregisters the entry from management arrays. References are
+ * then released.
+ */
+ void UnregisterEntry(CacheEntry* aEntry);
+
+ /**
+ * Removes the entry from the related entry hash table, if still present.
+ */
+ bool RemoveEntry(CacheEntry* aEntry, bool aOnlyUnreferenced = false);
+
+ /**
+ * Tells the storage service whether this entry is only to be stored in
+ * memory.
+ */
+ void RecordMemoryOnlyEntry(CacheEntry* aEntry, bool aOnlyInMemory,
+ bool aOverwrite);
+
+ /**
+ * Sets a cache entry valid (overrides the default loading behavior by loading
+ * directly from cache) for the given number of seconds
+ * See nsICacheEntry.idl for more details
+ */
+ void ForceEntryValidFor(nsACString const& aContextKey,
+ nsACString const& aEntryKey,
+ uint32_t aSecondsToTheFuture);
+
+ /**
+ * Remove the validity info
+ */
+ void RemoveEntryForceValid(nsACString const& aContextKey,
+ nsACString const& aEntryKey);
+
+ /**
+ * Retrieves the status of the cache entry to see if it has been forced valid
+   * (so it will be loaded directly from cache without further validation).
+ */
+ bool IsForcedValidEntry(nsACString const& aContextKey,
+ nsACString const& aEntryKey);
+
+ private:
+ friend class CacheIndex;
+
+ /**
+   * CacheIndex uses this to prevent a cache entry from being preemptively
+   * thrown away when forced valid.
+ * See nsICacheEntry.idl for more details
+ */
+ bool IsForcedValidEntry(nsACString const& aEntryKeyWithContext);
+
+ private:
+ // These are helpers for telemetry monitoring of the memory pools.
+ void TelemetryPrune(TimeStamp& now);
+ void TelemetryRecordEntryCreation(CacheEntry const* entry);
+ void TelemetryRecordEntryRemoval(CacheEntry const* entry);
+
+ private:
+ // Following methods are thread safe to call.
+ friend class CacheStorage;
+
+ /**
+ * Get, or create when not existing and demanded, an entry for the storage
+ * and uri+id extension.
+ */
+ nsresult AddStorageEntry(CacheStorage const* aStorage, const nsACString& aURI,
+ const nsACString& aIdExtension, bool aReplace,
+ CacheEntryHandle** aResult);
+
+ /**
+   * Check existence of an entry. This may throw NS_ERROR_NOT_AVAILABLE
+ * when the information cannot be obtained synchronously w/o blocking.
+ */
+ nsresult CheckStorageEntry(CacheStorage const* aStorage,
+ const nsACString& aURI,
+ const nsACString& aIdExtension, bool* aResult);
+
+ /**
+ * Removes the entry from the related entry hash table, if still present
+ * and returns it.
+ */
+ nsresult DoomStorageEntry(CacheStorage const* aStorage,
+ const nsACString& aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntryDoomCallback* aCallback);
+
+ /**
+ * Removes and returns entry table for the storage.
+ */
+ nsresult DoomStorageEntries(CacheStorage const* aStorage,
+ nsICacheEntryDoomCallback* aCallback);
+
+ /**
+   * Walk all entries belonging to the storage.
+ */
+ nsresult WalkStorageEntries(CacheStorage const* aStorage, bool aVisitEntries,
+ nsICacheStorageVisitor* aVisitor);
+
+ private:
+ friend class CacheFileIOManager;
+
+ /**
+ * CacheFileIOManager uses this method to notify CacheStorageService that
+ * an active entry was removed. This method is called even if the entry
+ * removal was originated by CacheStorageService.
+ */
+ void CacheFileDoomed(nsILoadContextInfo* aLoadContextInfo,
+ const nsACString& aIdExtension,
+ const nsACString& aURISpec);
+
+ /**
+ * Tries to find an existing entry in the hashtables and synchronously call
+ * OnCacheEntryInfo of the aVisitor callback when found.
+   * @returns
+   *   true, when the entry has been found, which also implies the callback has
+   *   been invoked
+ * false, when an entry has not been found
+ */
+ bool GetCacheEntryInfo(nsILoadContextInfo* aLoadContextInfo,
+ const nsACString& aIdExtension,
+ const nsACString& aURISpec,
+ EntryInfoCallback* aCallback);
+
+ private:
+ friend class CacheMemoryConsumer;
+
+ /**
+ * When memory consumption of this entry radically changes, this method
+ * is called to reflect the size of allocated memory. This call may purge
+ * unspecified number of entries from memory (but not from disk).
+ */
+ void OnMemoryConsumptionChange(CacheMemoryConsumer* aConsumer,
+ uint32_t aCurrentMemoryConsumption);
+
+ /**
+ * If not already pending, it schedules mPurgeTimer that fires after 1 second
+ * and dispatches PurgeOverMemoryLimit().
+ */
+ void SchedulePurgeOverMemoryLimit();
+
+ /**
+ * Called on the management thread, removes all expired and then least used
+ * entries from the memory, first from the disk pool and then from the memory
+ * pool.
+ */
+ void PurgeOverMemoryLimit();
+
+ private:
+ nsresult DoomStorageEntries(const nsACString& aContextKey,
+ nsILoadContextInfo* aContext, bool aDiskStorage,
+ bool aPin, nsICacheEntryDoomCallback* aCallback);
+ nsresult AddStorageEntry(const nsACString& aContextKey,
+ const nsACString& aURI,
+ const nsACString& aIdExtension, bool aWriteToDisk,
+ bool aSkipSizeCheck, bool aPin, bool aReplace,
+ CacheEntryHandle** aResult);
+
+ nsresult ClearOriginInternal(
+ const nsAString& aOrigin,
+ const mozilla::OriginAttributes& aOriginAttributes, bool aAnonymous);
+
+ static CacheStorageService* sSelf;
+
+ mozilla::Mutex mLock;
+ mozilla::Mutex mForcedValidEntriesLock;
+
+ Atomic<bool, Relaxed> mShutdown;
+
+ // Accessible only on the service thread
+ class MemoryPool {
+ public:
+ enum EType {
+ DISK,
+ MEMORY,
+ } mType;
+
+ explicit MemoryPool(EType aType);
+ ~MemoryPool();
+
+ nsTArray<RefPtr<CacheEntry>> mFrecencyArray;
+ nsTArray<RefPtr<CacheEntry>> mExpirationArray;
+ Atomic<uint32_t, Relaxed> mMemorySize;
+
+ bool OnMemoryConsumptionChange(uint32_t aSavedMemorySize,
+ uint32_t aCurrentMemoryConsumption);
+ /**
+ * Purges entries from memory based on the frecency ordered array.
+ */
+ void PurgeOverMemoryLimit();
+ void PurgeExpired();
+ void PurgeByFrecency(uint32_t aWhat);
+ void PurgeAll(uint32_t aWhat);
+
+ private:
+ uint32_t Limit() const;
+ MemoryPool() = delete;
+ };
+
+ MemoryPool mDiskPool;
+ MemoryPool mMemoryPool;
+ TimeStamp mLastPurgeTime;
+ MemoryPool& Pool(bool aUsingDisk) {
+ return aUsingDisk ? mDiskPool : mMemoryPool;
+ }
+ MemoryPool const& Pool(bool aUsingDisk) const {
+ return aUsingDisk ? mDiskPool : mMemoryPool;
+ }
+
+ nsCOMPtr<nsITimer> mPurgeTimer;
+#ifdef MOZ_TSAN
+ // In OnMemoryConsumptionChange() we check whether the timer exists, but we
+ // cannot grab the lock there (see comment 6 in bug 1614637) and TSan reports
+ // a data race. This data race is harmless, so we use this atomic flag only in
+ // TSan build to suppress it.
+ Atomic<bool, Relaxed> mPurgeTimerActive;
+#endif
+
+ class PurgeFromMemoryRunnable : public Runnable {
+ public:
+ PurgeFromMemoryRunnable(CacheStorageService* aService, uint32_t aWhat)
+ : Runnable("net::CacheStorageService::PurgeFromMemoryRunnable"),
+ mService(aService),
+ mWhat(aWhat) {}
+
+ private:
+ virtual ~PurgeFromMemoryRunnable() = default;
+
+ NS_IMETHOD Run() override;
+
+ RefPtr<CacheStorageService> mService;
+ uint32_t mWhat;
+ };
+
+ // Used just for telemetry purposes, accessed only on the management thread.
+  // Note: not included in the memory reporter; this is not expected to be huge
+  // and would also be complicated to report since reporting happens on the main
+  // thread while this table is manipulated on the management thread.
+ nsDataHashtable<nsCStringHashKey, mozilla::TimeStamp> mPurgeTimeStamps;
+
+ // nsICacheTesting
+ class IOThreadSuspender : public Runnable {
+ public:
+ IOThreadSuspender()
+ : Runnable("net::CacheStorageService::IOThreadSuspender"),
+ mMon("IOThreadSuspender"),
+ mSignaled(false) {}
+ void Notify();
+
+ private:
+ virtual ~IOThreadSuspender() = default;
+ NS_IMETHOD Run() override;
+
+ Monitor mMon;
+ bool mSignaled;
+ };
+
+ RefPtr<IOThreadSuspender> mActiveIOSuspender;
+};
+
+template <class T>
+void ProxyRelease(const char* aName, nsCOMPtr<T>& object,
+ nsIEventTarget* target) {
+ NS_ProxyRelease(aName, target, object.forget());
+}
+
+template <class T>
+void ProxyReleaseMainThread(const char* aName, nsCOMPtr<T>& object) {
+ ProxyRelease(aName, object, GetMainThreadEventTarget());
+}
+
+} // namespace net
+} // namespace mozilla
+
+#define NS_CACHE_STORAGE_SERVICE_CID \
+ { \
+ 0xea70b098, 0x5014, 0x4e21, { \
+ 0xae, 0xe1, 0x75, 0xe6, 0xb2, 0xc4, 0xb8, 0xe0 \
+ } \
+ }
+
+#define NS_CACHE_STORAGE_SERVICE_CONTRACTID \
+ "@mozilla.org/netwerk/cache-storage-service;1"
+
+#define NS_CACHE_STORAGE_SERVICE_CONTRACTID2 \
+ "@mozilla.org/network/cache-storage-service;1"
+
+#endif
diff --git a/netwerk/cache2/OldWrappers.cpp b/netwerk/cache2/OldWrappers.cpp
new file mode 100644
index 0000000000..b96dc2ff29
--- /dev/null
+++ b/netwerk/cache2/OldWrappers.cpp
@@ -0,0 +1,1037 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Stuff to link the old implementation to the new API - will go away!
+
+#include "CacheLog.h"
+#include "OldWrappers.h"
+#include "CacheStorage.h"
+#include "CacheStorageService.h"
+#include "LoadContextInfo.h"
+#include "nsCacheService.h"
+
+#include "nsIURI.h"
+#include "nsICacheSession.h"
+#include "nsIApplicationCache.h"
+#include "nsIApplicationCacheService.h"
+#include "nsIStreamTransportService.h"
+#include "nsIFile.h"
+#include "nsICacheEntryDoomCallback.h"
+#include "nsICacheListener.h"
+#include "nsICacheStorageVisitor.h"
+
+#include "nsServiceManagerUtils.h"
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsProxyRelease.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Telemetry.h"
+
+static NS_DEFINE_CID(kStreamTransportServiceCID, NS_STREAMTRANSPORTSERVICE_CID);
+
+static uint32_t const CHECK_MULTITHREADED =
+ nsICacheStorage::CHECK_MULTITHREADED;
+
+namespace mozilla {
+namespace net {
+
+namespace {
+
+// Fires the doom callback back on the main thread
+// after the cache I/O thread is looped.
+
+class DoomCallbackSynchronizer : public Runnable {
+ public:
+ explicit DoomCallbackSynchronizer(nsICacheEntryDoomCallback* cb)
+ : Runnable("net::DoomCallbackSynchronizer"), mCB(cb) {}
+ nsresult Dispatch();
+
+ private:
+ virtual ~DoomCallbackSynchronizer() = default;
+
+ NS_DECL_NSIRUNNABLE
+ nsCOMPtr<nsICacheEntryDoomCallback> mCB;
+};
+
+nsresult DoomCallbackSynchronizer::Dispatch() {
+ nsresult rv;
+
+ nsCOMPtr<nsICacheService> serv =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIEventTarget> eventTarget;
+ rv = serv->GetCacheIOTarget(getter_AddRefs(eventTarget));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = eventTarget->Dispatch(this, NS_DISPATCH_NORMAL);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP DoomCallbackSynchronizer::Run() {
+ if (!NS_IsMainThread()) {
+ NS_DispatchToMainThread(this);
+ } else {
+ if (mCB) mCB->OnCacheEntryDoomed(NS_OK);
+ }
+ return NS_OK;
+}
+
+// Receives doom callback from the old API and forwards to the new API
+
+class DoomCallbackWrapper : public nsICacheListener {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHELISTENER
+
+ explicit DoomCallbackWrapper(nsICacheEntryDoomCallback* cb) : mCB(cb) {}
+
+ private:
+ virtual ~DoomCallbackWrapper() = default;
+
+ nsCOMPtr<nsICacheEntryDoomCallback> mCB;
+};
+
+NS_IMPL_ISUPPORTS(DoomCallbackWrapper, nsICacheListener);
+
+NS_IMETHODIMP DoomCallbackWrapper::OnCacheEntryAvailable(
+ nsICacheEntryDescriptor* descriptor, nsCacheAccessMode accessGranted,
+ nsresult status) {
+ return NS_OK;
+}
+
+NS_IMETHODIMP DoomCallbackWrapper::OnCacheEntryDoomed(nsresult status) {
+ if (!mCB) return NS_ERROR_NULL_POINTER;
+
+ mCB->OnCacheEntryDoomed(status);
+ mCB = nullptr;
+ return NS_OK;
+}
+
+} // namespace
+
+// _OldVisitCallbackWrapper
+// Receives visit callbacks from the old API and forwards it to the new API
+
+NS_IMPL_ISUPPORTS(_OldVisitCallbackWrapper, nsICacheVisitor)
+
+_OldVisitCallbackWrapper::~_OldVisitCallbackWrapper() {
+ if (!mHit) {
+ // The device has not been found, to not break the chain, simulate
+ // storage info callback.
+ mCB->OnCacheStorageInfo(0, 0, 0, nullptr);
+ }
+
+ if (mVisitEntries) {
+ mCB->OnCacheEntryVisitCompleted();
+ }
+}
+
+NS_IMETHODIMP _OldVisitCallbackWrapper::VisitDevice(
+ const char* deviceID, nsICacheDeviceInfo* deviceInfo, bool* _retval) {
+ if (!mCB) return NS_ERROR_NULL_POINTER;
+
+ *_retval = false;
+ if (strcmp(deviceID, mDeviceID)) {
+ // Not the device we want to visit
+ return NS_OK;
+ }
+
+ mHit = true;
+
+ nsresult rv;
+
+ uint32_t capacity;
+ rv = deviceInfo->GetMaximumSize(&capacity);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> dir;
+ if (!strcmp(mDeviceID, "disk")) {
+ nsCacheService::GetDiskCacheDirectory(getter_AddRefs(dir));
+ } else if (!strcmp(mDeviceID, "offline")) {
+ nsCacheService::GetAppCacheDirectory(getter_AddRefs(dir));
+ }
+
+ if (mLoadInfo && mLoadInfo->IsAnonymous()) {
+ // Anonymous visiting reports 0, 0 since we cannot count the number of
+ // anonymous entries this early.
+ mCB->OnCacheStorageInfo(0, 0, capacity, dir);
+ } else {
+ // A non-anonymous visitor counts all non-anonymous entries plus ALL
+ // anonymous entries; there is no way to tell them apart when using the
+ // old cache APIs - they have no concept of anonymous storage.
+ uint32_t entryCount;
+ rv = deviceInfo->GetEntryCount(&entryCount);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t totalSize;
+ rv = deviceInfo->GetTotalSize(&totalSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mCB->OnCacheStorageInfo(entryCount, totalSize, capacity, dir);
+ }
+
+ *_retval = mVisitEntries;
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldVisitCallbackWrapper::VisitEntry(const char* deviceID,
+ nsICacheEntryInfo* entryInfo,
+ bool* _retval) {
+ MOZ_ASSERT(!strcmp(deviceID, mDeviceID));
+
+ nsresult rv;
+
+ *_retval = true;
+
+ // Read all informative properties from the entry.
+ nsAutoCString clientId;
+ rv = entryInfo->GetClientID(clientId);
+ if (NS_FAILED(rv)) return NS_OK;
+
+ if (mLoadInfo->IsPrivate() !=
+ StringBeginsWith(clientId, "HTTP-memory-only-PB"_ns)) {
+ return NS_OK;
+ }
+
+ nsAutoCString cacheKey, enhanceId;
+ rv = entryInfo->GetKey(cacheKey);
+ if (NS_FAILED(rv)) return NS_OK;
+
+ if (StringBeginsWith(cacheKey, "anon&"_ns)) {
+ if (!mLoadInfo->IsAnonymous()) return NS_OK;
+
+ cacheKey = Substring(cacheKey, 5, cacheKey.Length());
+ } else if (mLoadInfo->IsAnonymous()) {
+ return NS_OK;
+ }
+
+ if (StringBeginsWith(cacheKey, "id="_ns)) {
+ int32_t uriSpecEnd = cacheKey.Find("&uri=");
+ if (uriSpecEnd == kNotFound) // Corrupted, ignore
+ return NS_OK;
+
+ enhanceId = Substring(cacheKey, 3, uriSpecEnd - 3);
+ cacheKey = Substring(cacheKey, uriSpecEnd + 1, cacheKey.Length());
+ }
+
+ if (StringBeginsWith(cacheKey, "uri="_ns)) {
+ cacheKey = Substring(cacheKey, 4, cacheKey.Length());
+ }
+
+ nsCOMPtr<nsIURI> uri;
+ // cacheKey has been stripped of any prefixes
+ rv = NS_NewURI(getter_AddRefs(uri), cacheKey);
+ if (NS_FAILED(rv)) return NS_OK;
+
+ uint32_t dataSize;
+ if (NS_FAILED(entryInfo->GetDataSize(&dataSize))) dataSize = 0;
+ int32_t fetchCount;
+ if (NS_FAILED(entryInfo->GetFetchCount(&fetchCount))) fetchCount = 0;
+ uint32_t expirationTime;
+ if (NS_FAILED(entryInfo->GetExpirationTime(&expirationTime)))
+ expirationTime = 0;
+ uint32_t lastModified;
+ if (NS_FAILED(entryInfo->GetLastModified(&lastModified))) lastModified = 0;
+
+ // Send them to the consumer.
+ rv = mCB->OnCacheEntryInfo(uri, enhanceId, (int64_t)dataSize, fetchCount,
+ lastModified, expirationTime, false, mLoadInfo);
+
+ *_retval = NS_SUCCEEDED(rv);
+ return NS_OK;
+}
+
+// _OldGetDiskConsumption
+
+// static
+nsresult _OldGetDiskConsumption::Get(
+ nsICacheStorageConsumptionObserver* aCallback) {
+ nsresult rv;
+
+ nsCOMPtr<nsICacheService> serv =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<_OldGetDiskConsumption> cb = new _OldGetDiskConsumption(aCallback);
+
+ // _OldGetDiskConsumption stores the found size value, but until dispatched
+ // to the main thread it doesn't notify the consumption observer. See below.
+ rv = serv->VisitEntries(cb);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // We are called from CacheStorageService::AsyncGetDiskConsumption whose IDL
+ // documentation claims the callback is always delivered asynchronously
+ // back to the main thread. Even though we know the result synchronously when
+ // querying the old cache, we need to keep that promise and dispatch the result
+ // to the main thread asynchronously. Hence the dispatch here.
+ return NS_DispatchToMainThread(cb);
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(_OldGetDiskConsumption, Runnable, nsICacheVisitor)
+
+_OldGetDiskConsumption::_OldGetDiskConsumption(
+ nsICacheStorageConsumptionObserver* aCallback)
+ : Runnable("net::_OldGetDiskConsumption"), mCallback(aCallback), mSize(0) {}
+
+NS_IMETHODIMP
+_OldGetDiskConsumption::Run() {
+ mCallback->OnNetworkCacheDiskConsumption(mSize);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+_OldGetDiskConsumption::VisitDevice(const char* deviceID,
+ nsICacheDeviceInfo* deviceInfo,
+ bool* _retval) {
+ if (!strcmp(deviceID, "disk")) {
+ uint32_t size;
+ nsresult rv = deviceInfo->GetTotalSize(&size);
+ if (NS_SUCCEEDED(rv)) mSize = (int64_t)size;
+ }
+
+ *_retval = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+_OldGetDiskConsumption::VisitEntry(const char* deviceID,
+ nsICacheEntryInfo* entryInfo,
+ bool* _retval) {
+ MOZ_CRASH("Unexpected");
+ return NS_OK;
+}
+
+// _OldCacheEntryWrapper
+
+_OldCacheEntryWrapper::_OldCacheEntryWrapper(nsICacheEntryDescriptor* desc)
+ : mOldDesc(desc), mOldInfo(desc), mCacheEntryId(CacheEntry::GetNextId()) {
+ LOG(("Creating _OldCacheEntryWrapper %p for descriptor %p", this, desc));
+}
+
+_OldCacheEntryWrapper::_OldCacheEntryWrapper(nsICacheEntryInfo* info)
+ : mOldDesc(nullptr),
+ mOldInfo(info),
+ mCacheEntryId(CacheEntry::GetNextId()) {
+ LOG(("Creating _OldCacheEntryWrapper %p for info %p", this, info));
+}
+
+_OldCacheEntryWrapper::~_OldCacheEntryWrapper() {
+ LOG(("Destroying _OldCacheEntryWrapper %p for descriptor %p", this,
+ mOldInfo.get()));
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::GetIsForcedValid(bool* aIsForcedValid) {
+ // Unused stub
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::ForceValidFor(
+ uint32_t aSecondsToTheFuture) {
+ // Unused stub
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMPL_ISUPPORTS(_OldCacheEntryWrapper, nsICacheEntry)
+
+NS_IMETHODIMP _OldCacheEntryWrapper::AsyncDoom(
+ nsICacheEntryDoomCallback* listener) {
+ RefPtr<DoomCallbackWrapper> cb =
+ listener ? new DoomCallbackWrapper(listener) : nullptr;
+ return AsyncDoom(cb);
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::GetDataSize(int64_t* aSize) {
+ uint32_t size;
+ nsresult rv = GetDataSize(&size);
+ if (NS_FAILED(rv)) return rv;
+
+ *aSize = size;
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::GetAltDataSize(int64_t* aSize) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::GetAltDataType(nsACString& aType) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::GetPersistent(bool* aPersistToDisk) {
+ if (!mOldDesc) {
+ return NS_ERROR_NULL_POINTER;
+ }
+
+ nsresult rv;
+
+ nsCacheStoragePolicy policy;
+ rv = mOldDesc->GetStoragePolicy(&policy);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *aPersistToDisk = policy != nsICache::STORE_IN_MEMORY;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::Recreate(bool aMemoryOnly,
+ nsICacheEntry** aResult) {
+ NS_ENSURE_TRUE(mOldDesc, NS_ERROR_NOT_AVAILABLE);
+
+ nsCacheAccessMode mode;
+ nsresult rv = mOldDesc->GetAccessGranted(&mode);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!(mode & nsICache::ACCESS_WRITE)) return NS_ERROR_NOT_AVAILABLE;
+
+ LOG(("_OldCacheEntryWrapper::Recreate [this=%p]", this));
+
+ if (aMemoryOnly) mOldDesc->SetStoragePolicy(nsICache::STORE_IN_MEMORY);
+
+ nsCOMPtr<nsICacheEntry> self(this);
+ self.forget(aResult);
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::OpenInputStream(int64_t offset,
+ nsIInputStream** _retval) {
+ if (offset > PR_UINT32_MAX) return NS_ERROR_INVALID_ARG;
+
+ return OpenInputStream(uint32_t(offset), _retval);
+}
+NS_IMETHODIMP _OldCacheEntryWrapper::OpenOutputStream(
+ int64_t offset, int64_t predictedSize, nsIOutputStream** _retval) {
+ if (offset > PR_UINT32_MAX) return NS_ERROR_INVALID_ARG;
+
+ return OpenOutputStream(uint32_t(offset), _retval);
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::MaybeMarkValid() {
+ LOG(("_OldCacheEntryWrapper::MaybeMarkValid [this=%p]", this));
+
+ NS_ENSURE_TRUE(mOldDesc, NS_ERROR_NULL_POINTER);
+
+ nsCacheAccessMode mode;
+ nsresult rv = mOldDesc->GetAccessGranted(&mode);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (mode & nsICache::ACCESS_WRITE) {
+ LOG(("Marking cache entry valid [entry=%p, descr=%p]", this, mOldDesc));
+ return mOldDesc->MarkValid();
+ }
+
+ LOG(("Not marking read-only cache entry valid [entry=%p, descr=%p]", this,
+ mOldDesc));
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldCacheEntryWrapper::HasWriteAccess(bool aWriteAllowed_unused,
+ bool* aWriteAccess) {
+ NS_ENSURE_TRUE(mOldDesc, NS_ERROR_NULL_POINTER);
+ NS_ENSURE_ARG(aWriteAccess);
+
+ nsCacheAccessMode mode;
+ nsresult rv = mOldDesc->GetAccessGranted(&mode);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *aWriteAccess = !!(mode & nsICache::ACCESS_WRITE);
+
+ LOG(("_OldCacheEntryWrapper::HasWriteAccess [this=%p, write-access=%d]", this,
+ *aWriteAccess));
+
+ return NS_OK;
+}
+
+namespace {
+
+class MetaDataVisitorWrapper : public nsICacheMetaDataVisitor {
+ virtual ~MetaDataVisitorWrapper() = default;
+
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSICACHEMETADATAVISITOR
+ explicit MetaDataVisitorWrapper(nsICacheEntryMetaDataVisitor* cb) : mCB(cb) {}
+ nsCOMPtr<nsICacheEntryMetaDataVisitor> mCB;
+};
+
+NS_IMPL_ISUPPORTS(MetaDataVisitorWrapper, nsICacheMetaDataVisitor)
+
+NS_IMETHODIMP
+MetaDataVisitorWrapper::VisitMetaDataElement(char const* key, char const* value,
+ bool* goon) {
+ *goon = true;
+ return mCB->OnMetaDataElement(key, value);
+}
+
+} // namespace
+
+NS_IMETHODIMP _OldCacheEntryWrapper::VisitMetaData(
+ nsICacheEntryMetaDataVisitor* cb) {
+ RefPtr<MetaDataVisitorWrapper> w = new MetaDataVisitorWrapper(cb);
+ return mOldDesc->VisitMetaData(w);
+}
+
+namespace {
+
+void GetCacheSessionNameForStoragePolicy(const nsACString& scheme,
+ nsCacheStoragePolicy storagePolicy,
+ bool isPrivate,
+ OriginAttributes const* originAttribs,
+ nsACString& sessionName) {
+ MOZ_ASSERT(!isPrivate || storagePolicy == nsICache::STORE_IN_MEMORY);
+
+ // HTTP
+ if (scheme.EqualsLiteral("http") || scheme.EqualsLiteral("https")) {
+ switch (storagePolicy) {
+ case nsICache::STORE_IN_MEMORY:
+ if (isPrivate)
+ sessionName.AssignLiteral("HTTP-memory-only-PB");
+ else
+ sessionName.AssignLiteral("HTTP-memory-only");
+ break;
+ case nsICache::STORE_OFFLINE:
+ // XXX This is actually never used, only added to prevent
+ // any compatibility damage.
+ sessionName.AssignLiteral("HTTP-offline");
+ break;
+ default:
+ sessionName.AssignLiteral("HTTP");
+ break;
+ }
+ }
+ // FTP
+ else if (scheme.EqualsLiteral("ftp")) {
+ if (isPrivate)
+ sessionName.AssignLiteral("FTP-private");
+ else
+ sessionName.AssignLiteral("FTP");
+ }
+ // all remaining URL scheme
+ else {
+ // Since with the new API a consumer cannot specify its own session name
+ // and cache partitioning is handled strictly by the cache back-end
+ // internally, we will use a separate session name to mimic the
+ // functionality of the new API wrapping Darin's cache for all other
+ // URL schemes.
+ // Deliberately omitting |anonymous| since the other session types don't
+ // recognize it either.
+ sessionName.AssignLiteral("other");
+ if (isPrivate) sessionName.AppendLiteral("-private");
+ }
+
+ nsAutoCString suffix;
+ originAttribs->CreateSuffix(suffix);
+ sessionName.Append(suffix);
+}
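+
+// A minimal sketch of the mapping above (illustrative inputs; assuming an
+// empty origin attributes suffix):
+//
+//   "https" + STORE_ANYWHERE,  not private  -> "HTTP"
+//   "https" + STORE_IN_MEMORY, private      -> "HTTP-memory-only-PB"
+//   "ftp"   + STORE_IN_MEMORY, private      -> "FTP-private"
+//   any other scheme,          not private  -> "other"
+//
+// A non-empty origin attributes suffix (e.g. "^userContextId=2") is appended
+// to the chosen name.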
+
+nsresult GetCacheSession(const nsACString& aScheme, bool aWriteToDisk,
+ nsILoadContextInfo* aLoadInfo,
+ nsIApplicationCache* aAppCache,
+ nsICacheSession** _result) {
+ nsresult rv;
+
+ nsCacheStoragePolicy storagePolicy;
+ if (aAppCache)
+ storagePolicy = nsICache::STORE_OFFLINE;
+ else if (!aWriteToDisk || aLoadInfo->IsPrivate())
+ storagePolicy = nsICache::STORE_IN_MEMORY;
+ else
+ storagePolicy = nsICache::STORE_ANYWHERE;
+
+ nsAutoCString clientId;
+ if (aAppCache) {
+ aAppCache->GetClientID(clientId);
+ } else {
+ GetCacheSessionNameForStoragePolicy(
+ aScheme, storagePolicy, aLoadInfo->IsPrivate(),
+ aLoadInfo->OriginAttributesPtr(), clientId);
+ }
+
+ LOG((" GetCacheSession for client=%s, policy=%d", clientId.get(),
+ storagePolicy));
+
+ nsCOMPtr<nsICacheService> serv =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsICacheSession> session;
+ rv = nsCacheService::GlobalInstance()->CreateSessionInternal(
+ clientId.get(), storagePolicy, nsICache::STREAM_BASED,
+ getter_AddRefs(session));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = session->SetIsPrivate(aLoadInfo->IsPrivate());
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = session->SetDoomEntriesIfExpired(false);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (aAppCache) {
+ nsCOMPtr<nsIFile> profileDirectory;
+ aAppCache->GetProfileDirectory(getter_AddRefs(profileDirectory));
+ if (profileDirectory) rv = session->SetProfileDirectory(profileDirectory);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ session.forget(_result);
+ return NS_OK;
+}
+
+} // namespace
+
+NS_IMPL_ISUPPORTS_INHERITED(_OldCacheLoad, Runnable, nsICacheListener)
+
+_OldCacheLoad::_OldCacheLoad(const nsACString& aScheme,
+ const nsACString& aCacheKey,
+ nsICacheEntryOpenCallback* aCallback,
+ nsIApplicationCache* aAppCache,
+ nsILoadContextInfo* aLoadInfo, bool aWriteToDisk,
+ uint32_t aFlags)
+ : Runnable("net::_OldCacheLoad"),
+ mScheme(aScheme),
+ mCacheKey(aCacheKey),
+ mCallback(aCallback),
+ mLoadInfo(GetLoadContextInfo(aLoadInfo)),
+ mFlags(aFlags),
+ mWriteToDisk(aWriteToDisk),
+ mNew(true),
+ mOpening(true),
+ mSync(false),
+ mStatus(NS_ERROR_UNEXPECTED),
+ mRunCount(0),
+ mAppCache(aAppCache) {}
+
+_OldCacheLoad::~_OldCacheLoad() {
+ ProxyReleaseMainThread("_OldCacheLoad::mAppCache", mAppCache);
+}
+
+nsresult _OldCacheLoad::Start() {
+ LOG(("_OldCacheLoad::Start [this=%p, key=%s]", this, mCacheKey.get()));
+
+ mLoadStart = mozilla::TimeStamp::Now();
+
+ nsresult rv;
+
+ // Consumers that may invoke this code first and off the main thread are
+ // responsible for initializing these two services on the main thread
+ // beforehand. Currently no one does that.
+
+ // XXX: Start the cache service; otherwise DispatchToCacheIOThread will
+ // fail.
+ nsCOMPtr<nsICacheService> service =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+
+ // Ensure the stream transport service gets initialized on the main thread
+ if (NS_SUCCEEDED(rv) && NS_IsMainThread()) {
+ nsCOMPtr<nsIStreamTransportService> sts =
+ do_GetService(kStreamTransportServiceCID, &rv);
+ }
+
+ if (NS_SUCCEEDED(rv)) {
+ rv = service->GetCacheIOTarget(getter_AddRefs(mCacheThread));
+ }
+
+ if (NS_SUCCEEDED(rv)) {
+ bool onCacheTarget;
+ rv = mCacheThread->IsOnCurrentThread(&onCacheTarget);
+ if (NS_SUCCEEDED(rv) && onCacheTarget) {
+ mSync = true;
+ }
+ }
+
+ if (NS_SUCCEEDED(rv)) {
+ if (mSync) {
+ rv = Run();
+ } else {
+ rv = mCacheThread->Dispatch(this, NS_DISPATCH_NORMAL);
+ }
+ }
+
+ return rv;
+}
+
+NS_IMETHODIMP
+_OldCacheLoad::Run() {
+ LOG(("_OldCacheLoad::Run [this=%p, key=%s, cb=%p]", this, mCacheKey.get(),
+ mCallback.get()));
+
+ nsresult rv;
+
+ if (mOpening) {
+ mOpening = false;
+ nsCOMPtr<nsICacheSession> session;
+ rv = GetCacheSession(mScheme, mWriteToDisk, mLoadInfo, mAppCache,
+ getter_AddRefs(session));
+ if (NS_SUCCEEDED(rv)) {
+ // AsyncOpenCacheEntry isn't really async when it's called on the
+ // cache service thread.
+
+ nsCacheAccessMode cacheAccess;
+ if (mFlags & nsICacheStorage::OPEN_TRUNCATE)
+ cacheAccess = nsICache::ACCESS_WRITE;
+ else if ((mFlags & nsICacheStorage::OPEN_READONLY) || mAppCache)
+ cacheAccess = nsICache::ACCESS_READ;
+ else
+ cacheAccess = nsICache::ACCESS_READ_WRITE;
+
+ LOG((" session->AsyncOpenCacheEntry with access=%d", cacheAccess));
+
+ bool bypassBusy = mFlags & nsICacheStorage::OPEN_BYPASS_IF_BUSY;
+
+ if (mSync && cacheAccess == nsICache::ACCESS_WRITE) {
+ nsCOMPtr<nsICacheEntryDescriptor> entry;
+ rv = session->OpenCacheEntry(mCacheKey, cacheAccess, bypassBusy,
+ getter_AddRefs(entry));
+
+ nsCacheAccessMode grantedAccess = 0;
+ if (NS_SUCCEEDED(rv)) {
+ entry->GetAccessGranted(&grantedAccess);
+ }
+
+ return OnCacheEntryAvailable(entry, grantedAccess, rv);
+ }
+
+ rv = session->AsyncOpenCacheEntry(mCacheKey, cacheAccess, this,
+ bypassBusy);
+ if (NS_SUCCEEDED(rv)) return NS_OK;
+ }
+
+ // Opening failed, propagate the error to the consumer
+ LOG((" Opening cache entry failed with rv=0x%08" PRIx32,
+ static_cast<uint32_t>(rv)));
+ mStatus = rv;
+ mNew = false;
+ NS_DispatchToMainThread(this);
+ } else {
+ if (!mCallback) {
+ LOG((" duplicate call, bypassed"));
+ return NS_OK;
+ }
+
+ if (NS_SUCCEEDED(mStatus)) {
+ if (mFlags & nsICacheStorage::OPEN_TRUNCATE) {
+ mozilla::Telemetry::AccumulateTimeDelta(
+ mozilla::Telemetry::NETWORK_CACHE_V1_TRUNCATE_TIME_MS, mLoadStart);
+ } else if (mNew) {
+ mozilla::Telemetry::AccumulateTimeDelta(
+ mozilla::Telemetry::NETWORK_CACHE_V1_MISS_TIME_MS, mLoadStart);
+ } else {
+ mozilla::Telemetry::AccumulateTimeDelta(
+ mozilla::Telemetry::NETWORK_CACHE_V1_HIT_TIME_MS, mLoadStart);
+ }
+ }
+
+ if (!(mFlags & CHECK_MULTITHREADED)) Check();
+
+ // break cycles
+ nsCOMPtr<nsICacheEntryOpenCallback> cb = std::move(mCallback);
+ mCacheThread = nullptr;
+ nsCOMPtr<nsICacheEntry> entry = std::move(mCacheEntry);
+
+ rv = cb->OnCacheEntryAvailable(entry, mNew, mAppCache, mStatus);
+
+ if (NS_FAILED(rv) && entry) {
+ LOG((" cb->OnCacheEntryAvailable failed with rv=0x%08" PRIx32,
+ static_cast<uint32_t>(rv)));
+ if (mNew)
+ entry->AsyncDoom(nullptr);
+ else
+ entry->Close();
+ }
+ }
+
+ return rv;
+}
+
+NS_IMETHODIMP
+_OldCacheLoad::OnCacheEntryAvailable(nsICacheEntryDescriptor* entry,
+ nsCacheAccessMode access,
+ nsresult status) {
+ LOG(
+ ("_OldCacheLoad::OnCacheEntryAvailable [this=%p, ent=%p, cb=%p, "
+ "appcache=%p, access=%x]",
+ this, entry, mCallback.get(), mAppCache.get(), access));
+
+ // XXX Bug 759805: Sometimes we will call this method directly from
+ // HttpCacheQuery::Run when AsyncOpenCacheEntry fails, but
+ // AsyncOpenCacheEntry will also call this method. As a workaround, we just
+ // ensure we only execute this code once.
+ NS_ENSURE_TRUE(mRunCount == 0, NS_ERROR_UNEXPECTED);
+ ++mRunCount;
+
+ mCacheEntry = entry ? new _OldCacheEntryWrapper(entry) : nullptr;
+ mStatus = status;
+ mNew = access == nsICache::ACCESS_WRITE;
+
+ if (mFlags & CHECK_MULTITHREADED) Check();
+
+ if (mSync) return Run();
+
+ return NS_DispatchToMainThread(this);
+}
+
+void _OldCacheLoad::Check() {
+ if (!mCacheEntry) return;
+
+ if (mNew) return;
+
+ uint32_t result;
+ nsresult rv = mCallback->OnCacheEntryCheck(mCacheEntry, mAppCache, &result);
+ LOG((" OnCacheEntryCheck result ent=%p, cb=%p, appcache=%p, rv=0x%08" PRIx32
+ ", result=%d",
+ mCacheEntry.get(), mCallback.get(), mAppCache.get(),
+ static_cast<uint32_t>(rv), result));
+
+ if (NS_FAILED(rv)) {
+ NS_WARNING("cache check failed");
+ }
+
+ if (NS_FAILED(rv) || result == nsICacheEntryOpenCallback::ENTRY_NOT_WANTED) {
+ mCacheEntry->Close();
+ mCacheEntry = nullptr;
+ mStatus = NS_ERROR_CACHE_KEY_NOT_FOUND;
+ }
+}
+
+NS_IMETHODIMP
+_OldCacheLoad::OnCacheEntryDoomed(nsresult) { return NS_ERROR_NOT_IMPLEMENTED; }
+
+// nsICacheStorage old cache wrapper
+
+NS_IMPL_ISUPPORTS(_OldStorage, nsICacheStorage)
+
+_OldStorage::_OldStorage(nsILoadContextInfo* aInfo, bool aAllowDisk,
+ bool aLookupAppCache, bool aOfflineStorage,
+ nsIApplicationCache* aAppCache)
+ : mLoadInfo(GetLoadContextInfo(aInfo)),
+ mAppCache(aAppCache),
+ mWriteToDisk(aAllowDisk),
+ mLookupAppCache(aLookupAppCache),
+ mOfflineStorage(aOfflineStorage) {}
+
+_OldStorage::~_OldStorage() = default;
+
+NS_IMETHODIMP _OldStorage::AsyncOpenURI(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ uint32_t aFlags,
+ nsICacheEntryOpenCallback* aCallback) {
+ NS_ENSURE_ARG(aURI);
+ NS_ENSURE_ARG(aCallback);
+
+#ifdef MOZ_LOGGING
+ nsAutoCString uriSpec;
+ aURI->GetAsciiSpec(uriSpec);
+ LOG(("_OldStorage::AsyncOpenURI [this=%p, uri=%s, ide=%s, flags=%x]", this,
+ uriSpec.get(), aIdExtension.BeginReading(), aFlags));
+#endif
+
+ nsresult rv;
+
+ nsAutoCString cacheKey, scheme;
+ rv = AssembleCacheKey(aURI, aIdExtension, cacheKey, scheme);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!mAppCache && (mLookupAppCache || mOfflineStorage)) {
+ rv = ChooseApplicationCache(cacheKey, getter_AddRefs(mAppCache));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (mAppCache) {
+ // From a chosen appcache open only as readonly
+ aFlags &= ~nsICacheStorage::OPEN_TRUNCATE;
+ }
+ }
+
+ RefPtr<_OldCacheLoad> cacheLoad = new _OldCacheLoad(
+ scheme, cacheKey, aCallback, mAppCache, mLoadInfo, mWriteToDisk, aFlags);
+
+ rv = cacheLoad->Start();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldStorage::OpenTruncate(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntry** aCacheEntry) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP _OldStorage::Exists(nsIURI* aURI, const nsACString& aIdExtension,
+ bool* aResult) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP _OldStorage::AsyncDoomURI(nsIURI* aURI,
+ const nsACString& aIdExtension,
+ nsICacheEntryDoomCallback* aCallback) {
+ LOG(("_OldStorage::AsyncDoomURI"));
+
+ nsresult rv;
+
+ nsAutoCString cacheKey, scheme;
+ rv = AssembleCacheKey(aURI, aIdExtension, cacheKey, scheme);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsICacheSession> session;
+ rv = GetCacheSession(scheme, mWriteToDisk, mLoadInfo, mAppCache,
+ getter_AddRefs(session));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ RefPtr<DoomCallbackWrapper> cb =
+ aCallback ? new DoomCallbackWrapper(aCallback) : nullptr;
+ rv = session->DoomEntry(cacheKey, cb);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldStorage::AsyncEvictStorage(
+ nsICacheEntryDoomCallback* aCallback) {
+ LOG(("_OldStorage::AsyncEvictStorage"));
+
+ nsresult rv;
+
+ if (!mAppCache && mOfflineStorage) {
+ nsCOMPtr<nsIApplicationCacheService> appCacheService =
+ do_GetService(NS_APPLICATIONCACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = appCacheService->Evict(mLoadInfo);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else if (mAppCache) {
+ nsCOMPtr<nsICacheSession> session;
+ rv = GetCacheSession(""_ns, mWriteToDisk, mLoadInfo, mAppCache,
+ getter_AddRefs(session));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = session->EvictEntries();
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ // Oh, I'll be so happy when session names are gone...
+ nsCOMPtr<nsICacheSession> session;
+ rv = GetCacheSession("http"_ns, mWriteToDisk, mLoadInfo, mAppCache,
+ getter_AddRefs(session));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = session->EvictEntries();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // This clears any data from schemes other than http or ftp.
+ rv = GetCacheSession(""_ns, mWriteToDisk, mLoadInfo, mAppCache,
+ getter_AddRefs(session));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = session->EvictEntries();
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (aCallback) {
+ RefPtr<DoomCallbackSynchronizer> sync =
+ new DoomCallbackSynchronizer(aCallback);
+ rv = sync->Dispatch();
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldStorage::AsyncVisitStorage(nsICacheStorageVisitor* aVisitor,
+ bool aVisitEntries) {
+ LOG(("_OldStorage::AsyncVisitStorage"));
+
+ NS_ENSURE_ARG(aVisitor);
+
+ nsresult rv;
+
+ nsCOMPtr<nsICacheService> serv =
+ do_GetService(NS_CACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ char* deviceID;
+ if (mAppCache || mOfflineStorage) {
+ deviceID = const_cast<char*>("offline");
+ } else if (!mWriteToDisk || mLoadInfo->IsPrivate()) {
+ deviceID = const_cast<char*>("memory");
+ } else {
+ deviceID = const_cast<char*>("disk");
+ }
+
+ RefPtr<_OldVisitCallbackWrapper> cb = new _OldVisitCallbackWrapper(
+ deviceID, aVisitor, aVisitEntries, mLoadInfo);
+ rv = nsCacheService::GlobalInstance()->VisitEntriesInternal(cb);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP _OldStorage::GetCacheIndexEntryAttrs(
+ nsIURI* aURI, const nsACString& aIdExtension, bool* aHasAltData,
+ uint32_t* aSizeInKB) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// Internal
+
+nsresult _OldStorage::AssembleCacheKey(nsIURI* aURI,
+ nsACString const& aIdExtension,
+ nsACString& aCacheKey,
+ nsACString& aScheme) {
+ // Copied from nsHttpChannel::AssembleCacheKey
+
+ aCacheKey.Truncate();
+
+ nsresult rv;
+
+ rv = aURI->GetScheme(aScheme);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsAutoCString uriSpec;
+ if (aScheme.EqualsLiteral("http") || aScheme.EqualsLiteral("https")) {
+ if (mLoadInfo->IsAnonymous()) {
+ aCacheKey.AssignLiteral("anon&");
+ }
+
+ if (!aIdExtension.IsEmpty()) {
+ aCacheKey.AppendPrintf("id=%s&", aIdExtension.BeginReading());
+ }
+
+ nsCOMPtr<nsIURI> noRefURI;
+ rv = NS_GetURIWithoutRef(aURI, getter_AddRefs(noRefURI));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = noRefURI->GetAsciiSpec(uriSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!aCacheKey.IsEmpty()) {
+ aCacheKey.AppendLiteral("uri=");
+ }
+ } else {
+ rv = aURI->GetAsciiSpec(uriSpec);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ aCacheKey.Append(uriSpec);
+
+ return NS_OK;
+}
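+
+// For illustration (hypothetical inputs): an anonymous load of
+// https://example.com/page#top with aIdExtension "alt" yields
+//
+//   aScheme   = "https"
+//   aCacheKey = "anon&id=alt&uri=https://example.com/page"
+//
+// which is the same prefixed form that _OldVisitCallbackWrapper::VisitEntry
+// parses back apart.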
+
+nsresult _OldStorage::ChooseApplicationCache(const nsACString& cacheKey,
+ nsIApplicationCache** aCache) {
+ nsresult rv;
+
+ nsCOMPtr<nsIApplicationCacheService> appCacheService =
+ do_GetService(NS_APPLICATIONCACHESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = appCacheService->ChooseApplicationCache(cacheKey, mLoadInfo, aCache);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return NS_OK;
+}
+
+} // namespace net
+} // namespace mozilla
diff --git a/netwerk/cache2/OldWrappers.h b/netwerk/cache2/OldWrappers.h
new file mode 100644
index 0000000000..46a2f222b6
--- /dev/null
+++ b/netwerk/cache2/OldWrappers.h
@@ -0,0 +1,268 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Stuff to link the old imp to the new api - will go away!
+
+#ifndef OLDWRAPPERS__H__
+#define OLDWRAPPERS__H__
+
+#include "nsICacheEntry.h"
+#include "nsICacheListener.h"
+#include "nsICacheStorage.h"
+
+#include "nsCOMPtr.h"
+#include "nsICacheEntryOpenCallback.h"
+#include "nsICacheEntryDescriptor.h"
+#include "nsICacheStorageVisitor.h"
+#include "nsThreadUtils.h"
+#include "mozilla/TimeStamp.h"
+
+class nsIURI;
+class nsICacheEntryOpenCallback;
+class nsICacheStorageConsumptionObserver;
+class nsIApplicationCache;
+class nsILoadContextInfo;
+
+namespace mozilla {
+namespace net {
+
+class CacheStorage;
+
+class _OldCacheEntryWrapper : public nsICacheEntry {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ // nsICacheEntryDescriptor
+ NS_IMETHOD SetExpirationTime(uint32_t expirationTime) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->SetExpirationTime(expirationTime);
+ }
+ nsresult OpenInputStream(uint32_t offset, nsIInputStream** _retval) {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->OpenInputStream(offset, _retval);
+ }
+ nsresult OpenOutputStream(uint32_t offset, nsIOutputStream** _retval) {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->OpenOutputStream(offset, _retval);
+ }
+ NS_IMETHOD OpenAlternativeOutputStream(
+ const nsACString& type, int64_t predictedSize,
+ nsIAsyncOutputStream** _retval) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD OpenAlternativeInputStream(const nsACString& type,
+ nsIInputStream** _retval) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ NS_IMETHOD GetSecurityInfo(nsISupports** aSecurityInfo) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->GetSecurityInfo(aSecurityInfo);
+ }
+ NS_IMETHOD SetSecurityInfo(nsISupports* aSecurityInfo) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->SetSecurityInfo(aSecurityInfo);
+ }
+ NS_IMETHOD GetStorageDataSize(uint32_t* aStorageDataSize) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->GetStorageDataSize(aStorageDataSize);
+ }
+ nsresult AsyncDoom(nsICacheListener* listener) {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER : mOldDesc->AsyncDoom(listener);
+ }
+ NS_IMETHOD MarkValid(void) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER : mOldDesc->MarkValid();
+ }
+ NS_IMETHOD Close(void) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER : mOldDesc->Close();
+ }
+ NS_IMETHOD GetMetaDataElement(const char* key, char** _retval) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->GetMetaDataElement(key, _retval);
+ }
+ NS_IMETHOD SetMetaDataElement(const char* key, const char* value) override {
+ return !mOldDesc ? NS_ERROR_NULL_POINTER
+ : mOldDesc->SetMetaDataElement(key, value);
+ }
+
+ NS_IMETHOD GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ // nsICacheEntryInfo
+ NS_IMETHOD GetKey(nsACString& aKey) override {
+ return mOldInfo->GetKey(aKey);
+ }
+ NS_IMETHOD GetCacheEntryId(uint64_t* aCacheEntryId) override {
+ *aCacheEntryId = mCacheEntryId;
+ return NS_OK;
+ }
+ NS_IMETHOD GetFetchCount(int32_t* aFetchCount) override {
+ return mOldInfo->GetFetchCount(aFetchCount);
+ }
+ NS_IMETHOD GetLastFetched(uint32_t* aLastFetched) override {
+ return mOldInfo->GetLastFetched(aLastFetched);
+ }
+ NS_IMETHOD GetLastModified(uint32_t* aLastModified) override {
+ return mOldInfo->GetLastModified(aLastModified);
+ }
+ NS_IMETHOD GetExpirationTime(uint32_t* aExpirationTime) override {
+ return mOldInfo->GetExpirationTime(aExpirationTime);
+ }
+ nsresult GetDataSize(uint32_t* aDataSize) {
+ return mOldInfo->GetDataSize(aDataSize);
+ }
+ NS_IMETHOD GetOnStartTime(uint64_t* aTime) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetOnStopTime(uint64_t* aTime) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD SetNetworkTimes(uint64_t aOnStartTime,
+ uint64_t aOnStopTime) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD SetContentType(uint8_t aContentType) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetLoadContextInfo(nsILoadContextInfo** aInfo) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD Dismiss() override { return NS_ERROR_NOT_IMPLEMENTED; }
+
+ NS_IMETHOD AsyncDoom(nsICacheEntryDoomCallback* listener) override;
+ NS_IMETHOD GetPersistent(bool* aPersistToDisk) override;
+ NS_IMETHOD GetIsForcedValid(bool* aIsForcedValid) override;
+ NS_IMETHOD ForceValidFor(uint32_t aSecondsToTheFuture) override;
+ NS_IMETHOD SetValid() override { return NS_OK; }
+ NS_IMETHOD MetaDataReady() override { return NS_OK; }
+ NS_IMETHOD Recreate(bool, nsICacheEntry**) override;
+ NS_IMETHOD GetDataSize(int64_t* size) override;
+ NS_IMETHOD GetAltDataSize(int64_t* size) override;
+ NS_IMETHOD GetAltDataType(nsACString& aType) override;
+ NS_IMETHOD OpenInputStream(int64_t offset, nsIInputStream** _retval) override;
+ NS_IMETHOD OpenOutputStream(int64_t offset, int64_t predictedSize,
+ nsIOutputStream** _retval) override;
+ NS_IMETHOD MaybeMarkValid() override;
+ NS_IMETHOD HasWriteAccess(bool aWriteOnly, bool* aWriteAccess) override;
+ NS_IMETHOD VisitMetaData(nsICacheEntryMetaDataVisitor*) override;
+
+ explicit _OldCacheEntryWrapper(nsICacheEntryDescriptor* desc);
+ explicit _OldCacheEntryWrapper(nsICacheEntryInfo* info);
+
+ private:
+ virtual ~_OldCacheEntryWrapper();
+
+ _OldCacheEntryWrapper() = delete;
+ nsICacheEntryDescriptor* mOldDesc; // ref held in mOldInfo
+ nsCOMPtr<nsICacheEntryInfo> mOldInfo;
+
+ const uint64_t mCacheEntryId;
+};
+
+class _OldCacheLoad : public Runnable, public nsICacheListener {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSICACHELISTENER
+
+ _OldCacheLoad(const nsACString& aScheme, const nsACString& aCacheKey,
+ nsICacheEntryOpenCallback* aCallback,
+ nsIApplicationCache* aAppCache, nsILoadContextInfo* aLoadInfo,
+ bool aWriteToDisk, uint32_t aFlags);
+
+ nsresult Start();
+
+ protected:
+ virtual ~_OldCacheLoad();
+
+ private:
+ void Check();
+
+ nsCOMPtr<nsIEventTarget> mCacheThread;
+
+ nsCString const mScheme;
+ nsCString const mCacheKey;
+ nsCOMPtr<nsICacheEntryOpenCallback> mCallback;
+ nsCOMPtr<nsILoadContextInfo> mLoadInfo;
+ uint32_t const mFlags;
+
+ bool const mWriteToDisk : 1;
+ bool mNew : 1;
+ bool mOpening : 1;
+ bool mSync : 1;
+
+ nsCOMPtr<nsICacheEntry> mCacheEntry;
+ nsresult mStatus;
+ uint32_t mRunCount;
+ nsCOMPtr<nsIApplicationCache> mAppCache;
+
+ mozilla::TimeStamp mLoadStart;
+};
+
+class _OldStorage : public nsICacheStorage {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHESTORAGE
+
+ public:
+ _OldStorage(nsILoadContextInfo* aInfo, bool aAllowDisk, bool aLookupAppCache,
+ bool aOfflineStorage, nsIApplicationCache* aAppCache);
+
+ private:
+ virtual ~_OldStorage();
+ nsresult AssembleCacheKey(nsIURI* aURI, nsACString const& aIdExtension,
+ nsACString& aCacheKey, nsACString& aScheme);
+ nsresult ChooseApplicationCache(const nsACString& cacheKey,
+ nsIApplicationCache** aCache);
+
+ nsCOMPtr<nsILoadContextInfo> mLoadInfo;
+ nsCOMPtr<nsIApplicationCache> mAppCache;
+ bool const mWriteToDisk : 1;
+ bool const mLookupAppCache : 1;
+ bool const mOfflineStorage : 1;
+};
+
+class _OldVisitCallbackWrapper : public nsICacheVisitor {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSICACHEVISITOR
+
+ _OldVisitCallbackWrapper(char const* deviceID, nsICacheStorageVisitor* cb,
+ bool visitEntries, nsILoadContextInfo* aInfo)
+ : mCB(cb),
+ mVisitEntries(visitEntries),
+ mDeviceID(deviceID),
+ mLoadInfo(aInfo),
+ mHit(false) {}
+
+ private:
+ virtual ~_OldVisitCallbackWrapper();
+ nsCOMPtr<nsICacheStorageVisitor> mCB;
+ bool mVisitEntries;
+ char const* mDeviceID;
+ nsCOMPtr<nsILoadContextInfo> mLoadInfo;
+ bool mHit; // set to true when the device was found
+};
+
+class _OldGetDiskConsumption : public Runnable, public nsICacheVisitor {
+ public:
+ static nsresult Get(nsICacheStorageConsumptionObserver* aCallback);
+
+ private:
+ explicit _OldGetDiskConsumption(
+ nsICacheStorageConsumptionObserver* aCallback);
+ virtual ~_OldGetDiskConsumption() = default;
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSICACHEVISITOR
+ NS_DECL_NSIRUNNABLE
+
+ nsCOMPtr<nsICacheStorageConsumptionObserver> mCallback;
+ int64_t mSize;
+};
+
+} // namespace net
+} // namespace mozilla
+
+#endif
diff --git a/netwerk/cache2/moz.build b/netwerk/cache2/moz.build
new file mode 100644
index 0000000000..f46600cfdf
--- /dev/null
+++ b/netwerk/cache2/moz.build
@@ -0,0 +1,62 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Core", "Networking: Cache")
+
+XPIDL_SOURCES += [
+ "nsICacheEntry.idl",
+ "nsICacheEntryDoomCallback.idl",
+ "nsICacheEntryOpenCallback.idl",
+ "nsICacheStorage.idl",
+ "nsICacheStorageService.idl",
+ "nsICacheStorageVisitor.idl",
+ "nsICacheTesting.idl",
+]
+
+XPIDL_MODULE = "necko_cache2"
+
+EXPORTS += [
+ "CacheObserver.h",
+ "CacheStorageService.h",
+]
+
+SOURCES += [
+ "AppCacheStorage.cpp",
+ "CacheStorage.cpp",
+]
+
+
+UNIFIED_SOURCES += [
+ "CacheEntry.cpp",
+ "CacheFile.cpp",
+ "CacheFileChunk.cpp",
+ "CacheFileContextEvictor.cpp",
+ "CacheFileInputStream.cpp",
+ "CacheFileIOManager.cpp",
+ "CacheFileMetadata.cpp",
+ "CacheFileOutputStream.cpp",
+ "CacheFileUtils.cpp",
+ "CacheHashUtils.cpp",
+ "CacheIndex.cpp",
+ "CacheIndexContextIterator.cpp",
+ "CacheIndexIterator.cpp",
+ "CacheIOThread.cpp",
+ "CacheLog.cpp",
+ "CacheObserver.cpp",
+ "CacheStorageService.cpp",
+ "OldWrappers.cpp",
+]
+
+LOCAL_INCLUDES += [
+ "/netwerk/base",
+ "/netwerk/cache",
+]
+
+FINAL_LIBRARY = "xul"
+
+if CONFIG["CC_TYPE"] in ("clang", "gcc"):
+ CXXFLAGS += ["-Wno-error=shadow"]
diff --git a/netwerk/cache2/nsICacheEntry.idl b/netwerk/cache2/nsICacheEntry.idl
new file mode 100644
index 0000000000..c9732496b1
--- /dev/null
+++ b/netwerk/cache2/nsICacheEntry.idl
@@ -0,0 +1,364 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsICache.idl"
+
+interface nsIInputStream;
+interface nsIOutputStream;
+interface nsIAsyncOutputStream;
+interface nsICacheEntryDoomCallback;
+
+interface nsICacheListener;
+interface nsIFile;
+interface nsICacheEntryMetaDataVisitor;
+interface nsILoadContextInfo;
+
+[scriptable, uuid(607c2a2c-0a48-40b9-a956-8cf2bb9857cf)]
+interface nsICacheEntry : nsISupports
+{
+ const unsigned long CONTENT_TYPE_UNKNOWN = 0;
+ const unsigned long CONTENT_TYPE_OTHER = 1;
+ const unsigned long CONTENT_TYPE_JAVASCRIPT = 2;
+ const unsigned long CONTENT_TYPE_IMAGE = 3;
+ const unsigned long CONTENT_TYPE_MEDIA = 4;
+ const unsigned long CONTENT_TYPE_STYLESHEET = 5;
+ const unsigned long CONTENT_TYPE_WASM = 6;
+ /**
+ * Content type that is used internally to check whether the value parsed
+ * from disk is within allowed limits. Don't pass CONTENT_TYPE_LAST to the
+ * setContentType method.
+ */
+ const unsigned long CONTENT_TYPE_LAST = 7;
+
+ /**
+ * Placeholder for the initial value of expiration time.
+ */
+ const unsigned long NO_EXPIRATION_TIME = 0xFFFFFFFF;
+
+ /**
+ * Get the key identifying the cache entry.
+ */
+ readonly attribute ACString key;
+
+ /**
+ * The unique ID for every nsICacheEntry instance, which can be used to check
+ * whether two pieces of information are from the same nsICacheEntry instance.
+ */
+ readonly attribute uint64_t cacheEntryId;
+
+ /**
+ * Whether the entry is memory-only or persisted to disk.
+ * Note: private browsing entries are reported as persistent for consistency
+ * even though they are not actually persisted to disk.
+ */
+ readonly attribute boolean persistent;
+
+ /**
+ * Get the number of times the cache entry has been opened.
+ */
+ readonly attribute long fetchCount;
+
+ /**
+ * Get the last time the cache entry was opened (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t lastFetched;
+
+ /**
+ * Get the last time the cache entry was modified (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t lastModified;
+
+ /**
+ * Get the expiration time of the cache entry (in seconds since the Epoch).
+ */
+ readonly attribute uint32_t expirationTime;
+
+ /**
+ * Set the time at which the cache entry should be considered invalid (in
+ * seconds since the Epoch).
+ */
+ void setExpirationTime(in uint32_t expirationTime);
+
+ /**
+ * Get the last network response times for onStartRequest/onStopRequest (in ms).
+ * @throws
+ * - NS_ERROR_NOT_AVAILABLE if onStartTime/onStopTime does not exist.
+ */
+ readonly attribute uint64_t onStartTime;
+ readonly attribute uint64_t onStopTime;
+
+ /**
+ * Set the network response times for onStartRequest/onStopRequest (in ms).
+ */
+ void setNetworkTimes(in uint64_t onStartTime, in uint64_t onStopTime);
+
+ /**
+ * Set content type. Available types are defined at the beginning of this file.
+ * The content type is used internally for cache partitioning and telemetry
+ * purposes so there is no getter.
+ */
+ void setContentType(in uint8_t contentType);
+
+ /**
+ * This method is intended to override the per-spec cache validation
+ * decisions for a duration specified in seconds. The current state can
+ * be examined with isForcedValid (see below). This value is not persisted,
+ * so it will not survive session restart. Cache entries that are forced valid
+ * will not be evicted from the cache for the duration of forced validity.
+ * This means that there is a potential problem if the number of forced valid
+ * entries grows to take up more space than the cache size allows.
+ *
+ * NOTE: entries that have been forced valid will STILL be ignored by HTTP
+ * channels if they have expired AND the resource in question requires
+ * validation after expiring. This is to avoid using known-stale content.
+ *
+ * @param aSecondsToTheFuture
+ * the number of seconds the default cache validation behavior will be
+ * overridden before it returns to normal
+ */
+ void forceValidFor(in unsigned long aSecondsToTheFuture);
+
+ /**
+ * The state variable for whether this entry is currently forced valid.
+ * Defaults to false for normal cache validation behavior, and will return
+ * true if the number of seconds set by forceValidFor() has yet to be reached.
+ */
+ readonly attribute boolean isForcedValid;
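+
+ /**
+ * For example (illustrative): calling forceValidFor(10) keeps the entry
+ * treated as valid - and protected from eviction - for the next ten seconds,
+ * after which isForcedValid drops back to false.
+ */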
+
+ /**
+ * Open blocking input stream to cache data. Use the stream transport
+ * service to asynchronously read this stream on a background thread.
+ * The returned stream MAY implement nsISeekableStream.
+ *
+ * @param offset
+ * read starting from this offset into the cached data. an offset
+ * beyond the end of the stream has undefined consequences.
+ *
+ * @return non-blocking, buffered input stream.
+ */
+ nsIInputStream openInputStream(in long long offset);
+
+ /**
+ * Open non-blocking output stream to cache data. The returned stream
+ * MAY implement nsISeekableStream.
+ *
+ * If opening an output stream to existing cached data, the data will be
+ * truncated to the specified offset.
+ *
+ * @param offset
+ * write starting from this offset into the cached data. an offset
+ * beyond the end of the stream has undefined consequences.
+ * @param predictedSize
+ * Predicted size of the data that will be written. It's used to decide
+ * whether the resulting entry would exceed size limit, in which case
+ * an error is thrown. If the size isn't known in advance, -1 should be
+ * passed.
+ *
+ * @return blocking, buffered output stream.
+ */
+ nsIOutputStream openOutputStream(in long long offset, in long long predictedSize);
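+
+ /**
+ * A minimal usage sketch (C++, illustrative; `entry` is assumed to be an
+ * already opened nsICacheEntry, error handling omitted):
+ *
+ *   nsCOMPtr<nsIOutputStream> out;
+ *   // Truncate any cached data and write from offset 0; final size unknown.
+ *   rv = entry->OpenOutputStream(0, -1, getter_AddRefs(out));
+ *
+ *   nsCOMPtr<nsIInputStream> in;
+ *   // Later, read the cached data back from the start.
+ *   rv = entry->OpenInputStream(0, getter_AddRefs(in));
+ */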
+
+ /**
+ * Get/set security info on the cache entry for this descriptor.
+ */
+ attribute nsISupports securityInfo;
+
+ /**
+ * Get the size of the cache entry data, as stored. This may differ
+ * from the entry's dataSize, if the entry is compressed.
+ */
+ readonly attribute unsigned long storageDataSize;
+
+ /**
+ * Asynchronously doom an entry. Listener will be notified about the status
+ * of the operation. Null may be passed if caller doesn't care about the
+ * result.
+ */
+ void asyncDoom(in nsICacheEntryDoomCallback listener);
+
+ /**
+ * Methods for accessing meta data. Meta data is a table of key/value
+ * string pairs. The strings do not have to conform to any particular
+ * charset, but they must be null terminated.
+ */
+ string getMetaDataElement(in string key);
+ void setMetaDataElement(in string key, in string value);
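+
+ /**
+ * Illustrative sketch (C++; `entry` is an open entry, the key and value are
+ * hypothetical, error handling omitted):
+ *
+ *   rv = entry->SetMetaDataElement("request-method", "GET");
+ *   nsCString value;
+ *   rv = entry->GetMetaDataElement("request-method", getter_Copies(value));
+ */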
+
+ /**
+ * Obtain the list of metadata keys this entry keeps.
+ *
+ * NOTE: The callback is invoked under the CacheFile's lock. This means
+ * the visitor must not make any calls back to the entry, and if the values
+ * need to be processed somehow, it's better to copy them and process them
+ * outside the callback.
+ */
+ void visitMetaData(in nsICacheEntryMetaDataVisitor visitor);
+
+ /**
+ * Declares that all metadata on this entry is up-to-date and that the entry
+ * can now be delivered to other waiting consumers.
+ *
+ * We need such a method since metadata must be delivered synchronously.
+ */
+ void metaDataReady();
+
+ /**
+ * Called by the consumer upon a 304/206 response from the server. This marks
+ * the entry content as positively revalidated.
+ * The consumer uses this method after it has returned ENTRY_NEEDS_REVALIDATION
+ * from onCacheEntryCheck and after successful revalidation with the server.
+ */
+ void setValid();
+
+ /**
+ * Explicitly tell the cache backend that this consumer is no longer going to
+ * modify this cache entry's data or metadata. In case the consumer was
+ * responsible for either writing the cache entry or revalidating it, calling
+ * this method reverts the state to initial (as never written) or to
+ * not-validated and immediately notifies the next consumer in line waiting
+ * for this entry.
+ * This is the way to prevent deadlocks when someone other than the responsible
+ * channel references a cache entry that is in a non-written or revalidating
+ * state.
+ */
+ void dismiss();
+
+ /**
+ * Returns the size in kilobytes used to store the cache entry on disk.
+ */
+ readonly attribute uint32_t diskStorageSizeInKB;
+
+ /**
+ * Doom this entry and open a new, empty entry for writing. The consumer has
+ * to exchange the entry this method is called on for the newly created one.
+ * Used on 200 responses to conditional requests.
+ *
+ * @param aMemoryOnly
+ * - whether the entry is to be created as memory-only regardless of how
+ * the persistence of the entry being recreated is set
+ * @returns
+ * - an entry that can be written to
+ * @throws
+ * - NS_ERROR_NOT_AVAILABLE when the entry cannot for some reason be
+ * recreated for write
+ */
+ nsICacheEntry recreate([optional] in boolean aMemoryOnly);
+
+ /**
+ * Returns the length of data this entry holds.
+ * @throws
+ * NS_ERROR_IN_PROGRESS when the write is still in progress.
+ */
+ readonly attribute long long dataSize;
+
+ /**
+ * Returns the length of the alternative data this entry holds.
+ * @throws
+ * - NS_ERROR_IN_PROGRESS when a write is still in progress (either real
+ * content or alt data).
+ * - NS_ERROR_NOT_AVAILABLE if alt data does not exist.
+ */
+ readonly attribute long long altDataSize;
+
+ /**
+ * Returns the type of the saved alt data.
+ * @throws
+ * - NS_ERROR_NOT_AVAILABLE if alt data does not exist.
+ */
+ readonly attribute ACString altDataType;
+
+ /**
+ * Opens and returns an output stream that a consumer may use to save an
+ * alternate representation of the data.
+ *
+ * @param type
+ * type of the alternative data representation
+ * @param predictedSize
+ * Predicted size of the data that will be written. It's used to decide
+ * whether the resulting entry would exceed size limit, in which case
+ * an error is thrown. If the size isn't known in advance, -1 should be
+ * passed.
+ *
+ * @throws
+ * - NS_ERROR_NOT_AVAILABLE if the real data hasn't been written.
+ * - NS_ERROR_IN_PROGRESS when writing regular content or alt-data to
+ * the cache entry is still in progress.
+ *
+ * If there is alt-data already saved, it will be overwritten.
+ */
+ nsIAsyncOutputStream openAlternativeOutputStream(in ACString type, in long long predictedSize);
+
+ /**
+ * Opens and returns an input stream that can be used to read the alternative
+ * representation previously saved in the cache.
+ * If this call is made while writing alt-data is still in progress, it is
+ * still possible to read content from the input stream as it's being written.
+ * @throws
+ * - NS_ERROR_NOT_AVAILABLE if the alt-data representation doesn't exist at
+ * all or if alt-data of the given type doesn't exist.
+ */
+ nsIInputStream openAlternativeInputStream(in ACString type);
+
+ /**
+ * Get the nsILoadContextInfo of the cache entry
+ */
+ readonly attribute nsILoadContextInfo loadContextInfo;
+
+ /****************************************************************************
+ * The following methods might be added to some nsICacheEntryInternal
+ * interface since we want to remove them as soon as the old cache backend is
+ * completely removed.
+ */
+
+ /**
+ * @deprecated
+ * FOR BACKWARD COMPATIBILITY ONLY
+ * When the old cache backend is eventually removed, this method
+ * can be removed too.
+ *
+ * In the new backend: this method is no-op
+ * In the old backend: this method delegates to nsICacheEntryDescriptor.close()
+ */
+ void close();
+
+ /**
+ * @deprecated
+ * FOR BACKWARD COMPATIBILITY ONLY
+ * Marks the entry as valid so that others can use it and get only readonly
+ * access when the entry is held by the 1st writer.
+ */
+ void markValid();
+
+ /**
+ * @deprecated
+ * FOR BACKWARD COMPATIBILITY ONLY
+ * Marks the entry as valid when write access is acquired.
+ */
+ void maybeMarkValid();
+
+ /**
+ * @deprecated
+ * FOR BACKWARD COMPATIBILITY ONLY / KINDA HACK
+ * @param aWriteAllowed
+ * The consumer indicates whether writing to the entry is allowed for it.
+ * How the flag is handled depends on the implementation.
+ * @returns
+ * true when write access is acquired for this entry,
+ * false otherwise
+ */
+ boolean hasWriteAccess(in boolean aWriteAllowed);
+};
+
+/**
+ * Argument for nsICacheEntry.visitMetaData, provides access to all metadata
+ * keys and values stored on the entry.
+ */
+[scriptable, uuid(fea3e276-6ba5-4ceb-a581-807d1f43f6d0)]
+interface nsICacheEntryMetaDataVisitor : nsISupports
+{
+ /**
+ * Called over each key / value pair.
+ */
+ void onMetaDataElement(in string key, in string value);
+};
diff --git a/netwerk/cache2/nsICacheEntryDoomCallback.idl b/netwerk/cache2/nsICacheEntryDoomCallback.idl
new file mode 100644
index 0000000000..a16a738292
--- /dev/null
+++ b/netwerk/cache2/nsICacheEntryDoomCallback.idl
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+[scriptable, uuid(2f8896be-232f-4140-afb3-1faffb56f3c6)]
+interface nsICacheEntryDoomCallback : nsISupports
+{
+ /**
+ * Callback invoked after an entry or entries has/have been
+ * doomed from the cache.
+ */
+ void onCacheEntryDoomed(in nsresult aResult);
+};
diff --git a/netwerk/cache2/nsICacheEntryOpenCallback.idl b/netwerk/cache2/nsICacheEntryOpenCallback.idl
new file mode 100644
index 0000000000..5cafd63770
--- /dev/null
+++ b/netwerk/cache2/nsICacheEntryOpenCallback.idl
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsICacheEntry;
+interface nsIApplicationCache;
+
+[scriptable, uuid(1fc9fe11-c6ac-4748-94bd-8555a5a12b94)]
+interface nsICacheEntryOpenCallback : nsISupports
+{
+ /**
+ * State of the entry determined by onCacheEntryCheck.
+ *
+ * ENTRY_WANTED - the consumer is interested in the entry, we will pass it.
+ * RECHECK_AFTER_WRITE_FINISHED - the consumer cannot use the entry while data is
+ * still being written and wants to check it again after the current write is
+ * finished. This actually prevents concurrent read/write and is used with
+ * non-resumable HTTP responses.
+ * ENTRY_NEEDS_REVALIDATION - the entry needs to be revalidated with the origin
+ * server first; this means the loading channel will decide whether to use the
+ * entry content as-is after it gets a positive response from the server about
+ * the validity of the content; when new content needs to be loaded from the
+ * server, the loading channel opens a new entry with the OPEN_TRUNCATE flag,
+ * which dooms the one this check has been made for.
+ * ENTRY_NOT_WANTED - the consumer is not interested in the entry, we will not pass it.
+ */
+ const unsigned long ENTRY_WANTED = 0;
+ const unsigned long RECHECK_AFTER_WRITE_FINISHED = 1;
+ const unsigned long ENTRY_NEEDS_REVALIDATION = 2;
+ const unsigned long ENTRY_NOT_WANTED = 3;
+
+ /**
+ * Callback to perform any validity checks before the entry should be used.
+ * Called before the onCacheEntryAvailable callback; depending on the result
+ * it may be called more than once.
+ *
+ * This callback is guaranteed to be called on the same thread on which
+ * asyncOpenURI has been called, unless the nsICacheStorage.CHECK_MULTITHREADED
+ * flag has been specified. In that case this callback can be invoked on any
+ * thread, usually the cache I/O or cache management thread.
+ *
+ * IMPORTANT NOTE:
+ * This callback may be invoked sooner than the respective asyncOpenURI call exits.
+ *
+ * @param aEntry
+ * An entry to examine. Consumer has a chance to decide whether the
+ * entry is valid or not.
+ * @param aApplicationCache
+ * Optional, application cache the entry has been found in, if any.
+ * @return
+ * State of the entry, see the constants just above.
+ */
+ unsigned long onCacheEntryCheck(in nsICacheEntry aEntry,
+ in nsIApplicationCache aApplicationCache);
+
+ /**
+ * Callback giving the actual result of asyncOpenURI. It may give the consumer
+ * the cache entry, or a failure result when it's not possible to open it for
+ * some reason.
+ * This callback is guaranteed to be called on the same thread on which
+ * asyncOpenURI has been called.
+ *
+ * IMPORTANT NOTE:
+ * This callback may be invoked sooner than the respective asyncOpenURI call exits.
+ *
+ * @param aEntry
+ * The entry bound to the originally requested URI. May be null when
+ * loading from a particular application cache and the URI has not
+ * been found in that application cache.
+ * @param aNew
+ * Whether no data so far has been stored for this entry, i.e. reading
+ * it will just fail. When aNew is true, a server request should be
+ * made and data stored to this new entry.
+ * @param aApplicationCache
+ * When an entry had been found in an application cache, this is the
+ * given application cache. It should be associated with the loading
+ * channel.
+ * @param aResult
+ * Result of the request. This may be a failure only when one of these
+ * issues occurs:
+ * - the cache storage service could not be started due to some unexpected
+ * failure
+ * - there is not enough disk space to create new entries
+ * - cache entry was not found in a given application cache
+ */
+ void onCacheEntryAvailable(in nsICacheEntry aEntry,
+ in boolean aNew,
+ in nsIApplicationCache aApplicationCache,
+ in nsresult aResult);
+};
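+
+/**
+ * A minimal consumer sketch (C++, illustrative only; the class name and the
+ * accept-everything policy are hypothetical):
+ *
+ *   class MyOpenCallback final : public nsICacheEntryOpenCallback {
+ *     NS_DECL_ISUPPORTS
+ *
+ *     NS_IMETHOD OnCacheEntryCheck(nsICacheEntry* aEntry,
+ *                                  nsIApplicationCache* aAppCache,
+ *                                  uint32_t* aResult) override {
+ *       *aResult = nsICacheEntryOpenCallback::ENTRY_WANTED;
+ *       return NS_OK;
+ *     }
+ *     NS_IMETHOD OnCacheEntryAvailable(nsICacheEntry* aEntry, bool aNew,
+ *                                      nsIApplicationCache* aAppCache,
+ *                                      nsresult aResult) override {
+ *       // aNew == true means the entry is empty and must be filled from the
+ *       // network; otherwise its content can be read right away.
+ *       return NS_OK;
+ *     }
+ *   };
+ */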
diff --git a/netwerk/cache2/nsICacheStorage.idl b/netwerk/cache2/nsICacheStorage.idl
new file mode 100644
index 0000000000..8169c9b730
--- /dev/null
+++ b/netwerk/cache2/nsICacheStorage.idl
@@ -0,0 +1,145 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIURI;
+interface nsICacheEntry;
+interface nsICacheEntryOpenCallback;
+interface nsICacheEntryDoomCallback;
+interface nsICacheStorageVisitor;
+
+/**
+ * Representation of a cache storage. It can be memory-only,
+ * memory+disk, memory+disk+app-cache, or just a specific
+ * app-cache storage.
+ */
+[scriptable, uuid(35d104a6-d252-4fd4-8a56-3c14657cad3b)]
+interface nsICacheStorage : nsISupports
+{
+ /**
+ * Placeholder for specifying "no special flags" during open.
+ */
+ const uint32_t OPEN_NORMALLY = 0;
+
+ /**
+ * Rewrite any existing data when opening a URL.
+ */
+ const uint32_t OPEN_TRUNCATE = 1 << 0;
+
+ /**
+ * Only open an existing entry. Don't create a new one.
+ */
+ const uint32_t OPEN_READONLY = 1 << 1;
+
+ /**
+ * Use for first-paint blocking loads.
+ */
+ const uint32_t OPEN_PRIORITY = 1 << 2;
+
+ /**
+ * Bypass the cache load when write is still in progress.
+ */
+ const uint32_t OPEN_BYPASS_IF_BUSY = 1 << 3;
+
+ /**
+ * Perform the cache entry check (onCacheEntryCheck invocation) on any thread
+ * for better performance. If this flag is not specified, onCacheEntryCheck is
+ * guaranteed to be called on the same thread the respective asyncOpenURI has
+ * been called on.
+ */
+ const uint32_t CHECK_MULTITHREADED = 1 << 4;
+
+ /**
+ * Don't automatically update any 'last used' metadata of the entry.
+ */
+ const uint32_t OPEN_SECRETLY = 1 << 5;
+
+ /**
+ * Entry is being opened as part of a service worker interception. Do not
+ * allow the cache to be disabled in this case.
+ */
+ const uint32_t OPEN_INTERCEPTED = 1 << 6;
+
+ /**
+ * Asynchronously opens a cache entry for the specified URI.
+ * Result is fetched asynchronously via the callback.
+ *
+ * @param aURI
+ * The URI to look up in the cache or to open for writing.
+ * @param aIdExtension
+ * Any string that will extend (distinguish) the entry. Two entries
+ * with the same aURI but different aIdExtension are completely
+ * different entries. If you don't know what aIdExtension should be,
+ * leave it empty.
+ * @param aFlags
+ * OPEN_NORMALLY - open the cache entry normally for read and write
+ * OPEN_TRUNCATE - delete any existing entry before opening it
+ * OPEN_READONLY - don't create an entry if there is none
+ * OPEN_PRIORITY - give this request priority over others
+ * OPEN_BYPASS_IF_BUSY - backward compatibility only,
+ * LOAD_BYPASS_LOCAL_CACHE_IF_BUSY
+ * CHECK_MULTITHREADED - onCacheEntryCheck may be called on any thread; the
+ * consumer implementation must be thread-safe
+ * @param aCallback
+ * The consumer that receives the result.
+ * IMPORTANT: The callback may be called sooner than the method returns.
+ */
+ void asyncOpenURI(in nsIURI aURI, in ACString aIdExtension,
+ in uint32_t aFlags,
+ in nsICacheEntryOpenCallback aCallback);
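+
+ /*
+ * Non-normative C++ example, assuming 'storage' is an nsICacheStorage,
+ * 'uri' an nsIURI and 'callback' an nsICacheEntryOpenCallback implementation:
+ *
+ *   nsresult rv = storage->AsyncOpenURI(
+ *       uri, ""_ns,
+ *       nsICacheStorage::OPEN_NORMALLY | nsICacheStorage::CHECK_MULTITHREADED,
+ *       callback);
+ */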
+
+ /**
+ * Immediately opens a new, empty cache entry in the storage; any existing
+ * entry is immediately doomed. This is similar to the recreate() method
+ * on nsICacheEntry.
+ *
+ * A storage may not implement this method and throw NS_ERROR_NOT_IMPLEMENTED
+ * instead. In that case the consumer must use asyncOpenURI with the
+ * OPEN_TRUNCATE flag and get the new entry via the callback.
+ *
+ * @param aURI @see asyncOpenURI
+ * @param aIdExtension @see asyncOpenURI
+ */
+ nsICacheEntry openTruncate(in nsIURI aURI,
+ in ACString aIdExtension);
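+
+ /*
+ * Non-normative C++ example of the suggested fallback, with 'storage', 'uri'
+ * and 'callback' as for asyncOpenURI above:
+ *
+ *   nsCOMPtr<nsICacheEntry> entry;
+ *   nsresult rv = storage->OpenTruncate(uri, ""_ns, getter_AddRefs(entry));
+ *   if (rv == NS_ERROR_NOT_IMPLEMENTED) {
+ *     rv = storage->AsyncOpenURI(uri, ""_ns, nsICacheStorage::OPEN_TRUNCATE,
+ *                                callback);
+ *   }
+ */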
+
+ /**
+ * Synchronously check for the existence of an entry. For disk entries
+ * this uses information from the cache index. When the index data are not
+ * up to date or the index is still being built, NS_ERROR_NOT_AVAILABLE is
+ * thrown. The same error may be thrown by any storage implementation that
+ * cannot determine the entry state without blocking the caller.
+ */
+ boolean exists(in nsIURI aURI, in ACString aIdExtension);
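+
+ /*
+ * Non-normative C++ example: treat NS_ERROR_NOT_AVAILABLE as "unknown",
+ * not as "does not exist":
+ *
+ *   bool found = false;
+ *   nsresult rv = storage->Exists(uri, ""_ns, &found);
+ *   if (NS_SUCCEEDED(rv)) {
+ *     // 'found' is authoritative
+ *   } else if (rv == NS_ERROR_NOT_AVAILABLE) {
+ *     // the index cannot answer without blocking; fall back to asyncOpenURI
+ *   }
+ */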
+
+ /**
+ * Synchronously check for the existence of alternative data and the size of
+ * the content. When the index data are not up to date or the index is still
+ * being built, NS_ERROR_NOT_AVAILABLE is thrown. The same error may be thrown
+ * by any storage implementation that cannot determine the entry state without
+ * blocking the caller.
+ */
+ void getCacheIndexEntryAttrs(in nsIURI aURI,
+ in ACString aIdExtension,
+ out bool aHasAltData,
+ out uint32_t aSizeInKB);
+
+ /**
+ * Asynchronously removes an entry belonging to the URI from the cache.
+ */
+ void asyncDoomURI(in nsIURI aURI, in ACString aIdExtension,
+ in nsICacheEntryDoomCallback aCallback);
+
+ /**
+ * Asynchronously removes all cached entries under this storage.
+ * NOTE: Disk storage also evicts memory storage.
+ */
+ void asyncEvictStorage(in nsICacheEntryDoomCallback aCallback);
+
+ /**
+ * Visits the storage and its entries.
+ * NOTE: Disk storage also visits memory storage.
+ */
+ void asyncVisitStorage(in nsICacheStorageVisitor aVisitor,
+ in boolean aVisitEntries);
+
+};
diff --git a/netwerk/cache2/nsICacheStorageService.idl b/netwerk/cache2/nsICacheStorageService.idl
new file mode 100644
index 0000000000..c3aa08e555
--- /dev/null
+++ b/netwerk/cache2/nsICacheStorageService.idl
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsICacheStorage;
+interface nsILoadContextInfo;
+interface nsIApplicationCache;
+interface nsIEventTarget;
+interface nsICacheStorageConsumptionObserver;
+interface nsICacheStorageVisitor;
+interface nsIPrincipal;
+
+/**
+ * Provides access to particular cache storages of the network URI cache.
+ */
+[scriptable, uuid(ae29c44b-fbc3-4552-afaf-0a157ce771e7)]
+interface nsICacheStorageService : nsISupports
+{
+ /**
+ * Get a storage whose entries will only remain in memory and are never
+ * written to disk.
+ *
+ * NOTE: Any existing disk entry for [URL|id-extension] will be doomed
+ * prior to opening an entry using this memory-only storage. The result of
+ * asyncOpenURI will be a new, empty memory-only entry. Using the
+ * OPEN_READONLY flag has no effect on this behavior.
+ *
+ * @param aLoadContextInfo
+ * Information about the loading context; this determines the storage jar and
+ * respects separate storage for private browsing.
+ */
+ nsICacheStorage memoryCacheStorage(in nsILoadContextInfo aLoadContextInfo);
+
+ /**
+ * Get a storage whose entries will be written to disk when not forbidden by
+ * response headers.
+ *
+ * @param aLookupAppCache
+ * When true (set for top-level document loading channels), the application
+ * cache is checked first for entries.
+ */
+ nsICacheStorage diskCacheStorage(in nsILoadContextInfo aLoadContextInfo,
+ in bool aLookupAppCache);
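+
+ /*
+ * Non-normative C++ example of obtaining a disk storage. The contract id is
+ * the one assumed for this service and 'loadContextInfo' is an already
+ * obtained nsILoadContextInfo:
+ *
+ *   nsCOMPtr<nsICacheStorageService> svc =
+ *       do_GetService("@mozilla.org/netwerk/cache-storage-service;1");
+ *   nsCOMPtr<nsICacheStorage> disk;
+ *   if (svc) {
+ *     svc->DiskCacheStorage(loadContextInfo, false,  // false = aLookupAppCache
+ *                           getter_AddRefs(disk));
+ *   }
+ */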
+
+ /**
+ * Get a storage whose entries will be written to disk and marked as pinned.
+ * Pinned entries are immune to over-limit eviction and to calls to clear()
+ * on this service.
+ */
+ nsICacheStorage pinningCacheStorage(in nsILoadContextInfo aLoadContextInfo);
+
+ /**
+ * Get a storage for a specific application cache obtained by some other
+ * mechanism.
+ *
+ * @param aLoadContextInfo
+ * Mandatory reference to the load context information.
+ * @param aApplicationCache
+ * Optional reference to an existing appcache. When left null, this will
+ * work with the offline cache as a whole.
+ */
+ nsICacheStorage appCacheStorage(in nsILoadContextInfo aLoadContextInfo,
+ in nsIApplicationCache aApplicationCache);
+
+ /**
+ * Get the storage for synthesized cache entries, currently used for
+ * ServiceWorker interception in non-e10s mode.
+ *
+ * This cache storage has no limit on file size, to allow the ServiceWorker
+ * to intercept large files.
+ */
+ nsICacheStorage synthesizedCacheStorage(in nsILoadContextInfo aLoadContextInfo);
+
+ /**
+ * Evict any cache entry having the same origin as aPrincipal.
+ *
+ * @param aPrincipal
+ * The principal to compare the entries with.
+ */
+ void clearOrigin(in nsIPrincipal aPrincipal);
+
+ /**
+ * Evict any cache entry having the same originAttributes.
+ *
+ * @param aOriginAttributes
+ * The origin attributes in string format to compare the entries with.
+ */
+ void clearOriginAttributes(in AString aOriginAttributes);
+
+ /**
+ * Evict the whole cache.
+ */
+ void clear();
+
+ /**
+ * Purge only the data of disk-backed entries. Metadata is kept for
+ * performance reasons.
+ */
+ const uint32_t PURGE_DISK_DATA_ONLY = 1;
+ /**
+ * Purge disk-backed entries from memory entirely. Disk files are
+ * left untouched.
+ */
+ const uint32_t PURGE_DISK_ALL = 2;
+ /**
+ * Purge all entries we keep in memory, including memory-storage
+ * entries. This may be dangerous to use.
+ */
+ const uint32_t PURGE_EVERYTHING = 3;
+ /**
+ * Purges data we keep warmed in memory. Use for tests and for
+ * saving memory.
+ */
+ void purgeFromMemory(in uint32_t aWhat);
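+
+ /*
+ * Non-normative C++ example: drop memory copies of disk-backed data while
+ * keeping metadata warm, 'svc' being this service:
+ *
+ *   svc->PurgeFromMemory(nsICacheStorageService::PURGE_DISK_DATA_ONLY);
+ */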
+
+ /**
+ * The I/O thread target to use for any operations on disk.
+ */
+ readonly attribute nsIEventTarget ioTarget;
+
+ /**
+ * Asynchronously determine how many bytes of disk space the cache occupies.
+ * @see nsICacheStorageConsumptionObserver
+ * @param aObserver
+ * A mandatory (weakly referenced) observer. Documented at
+ * nsICacheStorageConsumptionObserver.
+ * NOTE: the observer MUST implement nsISupportsWeakReference.
+ */
+ void asyncGetDiskConsumption(in nsICacheStorageConsumptionObserver aObserver);
+
+ /**
+ * Asynchronously visits all storages of the disk cache and memory cache.
+ * @see nsICacheStorageVisitor
+ * @param aVisitor
+ * A visitor callback.
+ * @param aVisitEntries
+ * A boolean indicating whether to visit individual entries as well.
+ */
+ void asyncVisitAllStorages(in nsICacheStorageVisitor aVisitor,
+ in boolean aVisitEntries);
+};
+
+[scriptable, uuid(7728ab5b-4c01-4483-a606-32bf5b8136cb)]
+interface nsICacheStorageConsumptionObserver : nsISupports
+{
+ /**
+ * Callback invoked to answer the asyncGetDiskConsumption call. Always triggered
+ * on the main thread.
+ * NOTE: implementers must also implement nsISupportsWeakReference.
+ *
+ * @param aDiskSize
+ * The disk consumption in bytes.
+ */
+ void onNetworkCacheDiskConsumption(in int64_t aDiskSize);
+};
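+
+/*
+ * Non-normative example: a minimal C++ observer with a hypothetical class
+ * name. Because the service keeps only a weak reference, the caller must
+ * keep the observer alive until the callback fires.
+ *
+ *   #include "nsWeakReference.h"
+ *
+ *   class CacheSizeObserver final : public nsICacheStorageConsumptionObserver,
+ *                                   public nsSupportsWeakReference {
+ *    public:
+ *     NS_DECL_ISUPPORTS
+ *     NS_DECL_NSICACHESTORAGECONSUMPTIONOBSERVER
+ *    private:
+ *     ~CacheSizeObserver() = default;
+ *   };
+ *
+ *   NS_IMPL_ISUPPORTS(CacheSizeObserver, nsICacheStorageConsumptionObserver,
+ *                     nsISupportsWeakReference)
+ *
+ *   NS_IMETHODIMP
+ *   CacheSizeObserver::OnNetworkCacheDiskConsumption(int64_t aDiskSize) {
+ *     // aDiskSize is the total cache disk consumption in bytes.
+ *     return NS_OK;
+ *   }
+ */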
diff --git a/netwerk/cache2/nsICacheStorageVisitor.idl b/netwerk/cache2/nsICacheStorageVisitor.idl
new file mode 100644
index 0000000000..2d16b26615
--- /dev/null
+++ b/netwerk/cache2/nsICacheStorageVisitor.idl
@@ -0,0 +1,35 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIURI;
+interface nsIFile;
+interface nsILoadContextInfo;
+
+[scriptable, uuid(6cc7c253-93b6-482b-8e9d-1e04d8e9d655)]
+interface nsICacheStorageVisitor : nsISupports
+{
+ /**
+ * Called once with overall information about the visited storage.
+ */
+ void onCacheStorageInfo(in uint32_t aEntryCount,
+ in uint64_t aConsumption,
+ in uint64_t aCapacity,
+ in nsIFile aDiskDirectory);
+
+ /**
+ * Called for each entry in the storage. Only invoked when entries have
+ * been requested (aVisitEntries passed as true).
+ */
+ void onCacheEntryInfo(in nsIURI aURI,
+ in ACString aIdEnhance,
+ in int64_t aDataSize,
+ in long aFetchCount,
+ in uint32_t aLastModifiedTime,
+ in uint32_t aExpirationTime,
+ in boolean aPinned,
+ in nsILoadContextInfo aInfo);
+
+ /**
+ * Called when the visit of the storage is finished.
+ */
+ void onCacheEntryVisitCompleted();
+};
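+
+/*
+ * Non-normative example: a minimal C++ visitor with a hypothetical class
+ * name. onCacheStorageInfo is called once per visited storage,
+ * onCacheEntryInfo once per entry (only when entries were requested) and
+ * onCacheEntryVisitCompleted when the walk is finished.
+ *
+ *   class LoggingVisitor final : public nsICacheStorageVisitor {
+ *    public:
+ *     NS_DECL_ISUPPORTS
+ *     NS_DECL_NSICACHESTORAGEVISITOR
+ *    private:
+ *     ~LoggingVisitor() = default;
+ *   };
+ *
+ *   NS_IMPL_ISUPPORTS(LoggingVisitor, nsICacheStorageVisitor)
+ *
+ *   NS_IMETHODIMP
+ *   LoggingVisitor::OnCacheStorageInfo(uint32_t aEntryCount,
+ *                                      uint64_t aConsumption,
+ *                                      uint64_t aCapacity,
+ *                                      nsIFile* aDiskDirectory) {
+ *     return NS_OK;
+ *   }
+ *
+ *   NS_IMETHODIMP
+ *   LoggingVisitor::OnCacheEntryInfo(nsIURI* aURI, const nsACString& aIdEnhance,
+ *                                    int64_t aDataSize, int32_t aFetchCount,
+ *                                    uint32_t aLastModifiedTime,
+ *                                    uint32_t aExpirationTime, bool aPinned,
+ *                                    nsILoadContextInfo* aInfo) {
+ *     return NS_OK;
+ *   }
+ *
+ *   NS_IMETHODIMP
+ *   LoggingVisitor::OnCacheEntryVisitCompleted() { return NS_OK; }
+ */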
diff --git a/netwerk/cache2/nsICacheTesting.idl b/netwerk/cache2/nsICacheTesting.idl
new file mode 100644
index 0000000000..15704f7caa
--- /dev/null
+++ b/netwerk/cache2/nsICacheTesting.idl
@@ -0,0 +1,20 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIObserver;
+
+/**
+ * This is an internal interface used only for testing purposes.
+ *
+ * THIS IS NOT AN API TO BE USED BY EXTENSIONS! ONLY USED BY MOZILLA TESTS.
+ */
+[scriptable, builtinclass, uuid(4e8ba935-92e1-4a74-944b-b1a2f02a7480)]
+interface nsICacheTesting : nsISupports
+{
+ void suspendCacheIOThread(in uint32_t aLevel);
+ void resumeCacheIOThread();
+ void flush(in nsIObserver aObserver);
+};