author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /mfbt
parent    Initial commit. (diff)
download  firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
          firefox-26a029d407be480d791972afb5975cf62c9360a6.zip

Adding upstream version 124.0.1. (upstream/124.0.1)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mfbt')
-rw-r--r--  mfbt/Algorithm.h | 128
-rw-r--r--  mfbt/Alignment.h | 138
-rw-r--r--  mfbt/AllocPolicy.h | 175
-rw-r--r--  mfbt/AlreadyAddRefed.h | 183
-rw-r--r--  mfbt/Array.h | 110
-rw-r--r--  mfbt/ArrayUtils.h | 188
-rw-r--r--  mfbt/Assertions.cpp | 52
-rw-r--r--  mfbt/Assertions.h | 709
-rw-r--r--  mfbt/AtomicBitfields.h | 468
-rw-r--r--  mfbt/Atomics.h | 520
-rw-r--r--  mfbt/Attributes.h | 1034
-rw-r--r--  mfbt/BinarySearch.h | 247
-rw-r--r--  mfbt/BitSet.h | 177
-rw-r--r--  mfbt/BloomFilter.h | 338
-rw-r--r--  mfbt/Buffer.h | 197
-rw-r--r--  mfbt/BufferList.h | 605
-rw-r--r--  mfbt/Casting.h | 229
-rw-r--r--  mfbt/ChaosMode.cpp | 17
-rw-r--r--  mfbt/ChaosMode.h | 90
-rw-r--r--  mfbt/Char16.h | 142
-rw-r--r--  mfbt/CheckedInt.h | 804
-rw-r--r--  mfbt/CompactPair.h | 244
-rw-r--r--  mfbt/Compiler.h | 34
-rw-r--r--  mfbt/Compression.cpp | 182
-rw-r--r--  mfbt/Compression.h | 218
-rw-r--r--  mfbt/DbgMacro.h | 206
-rw-r--r--  mfbt/DebugOnly.h | 102
-rw-r--r--  mfbt/DefineEnum.h | 156
-rw-r--r--  mfbt/DoublyLinkedList.h | 578
-rw-r--r--  mfbt/EndianUtils.h | 611
-rw-r--r--  mfbt/EnumSet.h | 350
-rw-r--r--  mfbt/EnumTypeTraits.h | 113
-rw-r--r--  mfbt/EnumeratedArray.h | 89
-rw-r--r--  mfbt/EnumeratedRange.h | 211
-rw-r--r--  mfbt/FStream.h | 124
-rw-r--r--  mfbt/FastBernoulliTrial.h | 381
-rw-r--r--  mfbt/FloatingPoint.cpp | 41
-rw-r--r--  mfbt/FloatingPoint.h | 606
-rw-r--r--  mfbt/FunctionRef.h | 219
-rw-r--r--  mfbt/FunctionTypeTraits.h | 114
-rw-r--r--  mfbt/Fuzzing.h | 91
-rw-r--r--  mfbt/HashFunctions.cpp | 37
-rw-r--r--  mfbt/HashFunctions.h | 420
-rw-r--r--  mfbt/HashTable.h | 2278
-rw-r--r--  mfbt/HelperMacros.h | 18
-rw-r--r--  mfbt/InitializedOnce.h | 247
-rw-r--r--  mfbt/IntegerRange.h | 192
-rw-r--r--  mfbt/IntegerTypeTraits.h | 86
-rw-r--r--  mfbt/JSONWriter.cpp | 47
-rw-r--r--  mfbt/JSONWriter.h | 545
-rw-r--r--  mfbt/JsRust.h | 21
-rw-r--r--  mfbt/Latin1.h | 262
-rw-r--r--  mfbt/Likely.h | 23
-rw-r--r--  mfbt/LinkedList.h | 748
-rw-r--r--  mfbt/Literals.h | 39
-rw-r--r--  mfbt/MacroArgs.h | 97
-rw-r--r--  mfbt/MacroForEach.h | 219
-rw-r--r--  mfbt/MathAlgorithms.h | 492
-rw-r--r--  mfbt/Maybe.h | 977
-rw-r--r--  mfbt/MaybeOneOf.h | 172
-rw-r--r--  mfbt/MaybeStorageBase.h | 92
-rw-r--r--  mfbt/MemoryChecking.h | 127
-rw-r--r--  mfbt/MemoryReporting.h | 30
-rw-r--r--  mfbt/MoveOnlyFunction.h | 47
-rw-r--r--  mfbt/MruCache.h | 165
-rw-r--r--  mfbt/NeverDestroyed.h | 66
-rw-r--r--  mfbt/NonDereferenceable.h | 125
-rw-r--r--  mfbt/NotNull.h | 449
-rw-r--r--  mfbt/Opaque.h | 41
-rw-r--r--  mfbt/OperatorNewExtensions.h | 50
-rw-r--r--  mfbt/PairHash.h | 75
-rw-r--r--  mfbt/Path.h | 31
-rw-r--r--  mfbt/PodOperations.h | 160
-rw-r--r--  mfbt/Poison.cpp | 205
-rw-r--r--  mfbt/Poison.h | 109
-rw-r--r--  mfbt/RandomNum.cpp | 146
-rw-r--r--  mfbt/RandomNum.h | 51
-rw-r--r--  mfbt/Range.h | 82
-rw-r--r--  mfbt/RangedArray.h | 66
-rw-r--r--  mfbt/RangedPtr.h | 311
-rw-r--r--  mfbt/ReentrancyGuard.h | 50
-rw-r--r--  mfbt/RefCountType.h | 37
-rw-r--r--  mfbt/RefCounted.h | 327
-rw-r--r--  mfbt/RefPtr.h | 646
-rw-r--r--  mfbt/Result.h | 873
-rw-r--r--  mfbt/ResultExtensions.h | 371
-rw-r--r--  mfbt/ResultVariant.h | 61
-rw-r--r--  mfbt/ReverseIterator.h | 173
-rw-r--r--  mfbt/RollingMean.h | 93
-rw-r--r--  mfbt/SHA1.cpp | 405
-rw-r--r--  mfbt/SHA1.h | 61
-rw-r--r--  mfbt/SPSCQueue.h | 420
-rw-r--r--  mfbt/STYLE | 11
-rw-r--r--  mfbt/Saturate.h | 248
-rw-r--r--  mfbt/ScopeExit.h | 126
-rw-r--r--  mfbt/SegmentedVector.h | 359
-rw-r--r--  mfbt/SharedLibrary.h | 47
-rw-r--r--  mfbt/SmallPointerArray.h | 270
-rw-r--r--  mfbt/Span.h | 973
-rw-r--r--  mfbt/SplayTree.h | 305
-rw-r--r--  mfbt/StaticAnalysisFunctions.h | 70
-rw-r--r--  mfbt/TaggedAnonymousMemory.cpp | 83
-rw-r--r--  mfbt/TaggedAnonymousMemory.h | 82
-rw-r--r--  mfbt/Tainting.h | 348
-rw-r--r--  mfbt/TemplateLib.h | 126
-rw-r--r--  mfbt/TextUtils.h | 295
-rw-r--r--  mfbt/ThreadLocal.h | 256
-rw-r--r--  mfbt/ThreadSafeWeakPtr.h | 309
-rw-r--r--  mfbt/ThreadSafety.h | 140
-rw-r--r--  mfbt/ToString.h | 30
-rw-r--r--  mfbt/Try.h | 41
-rw-r--r--  mfbt/TsanOptions.h | 95
-rw-r--r--  mfbt/TypedEnumBits.h | 135
-rw-r--r--  mfbt/Types.h | 140
-rw-r--r--  mfbt/UniquePtr.h | 737
-rw-r--r--  mfbt/UniquePtrExtensions.cpp | 35
-rw-r--r--  mfbt/UniquePtrExtensions.h | 315
-rw-r--r--  mfbt/Unused.cpp | 13
-rw-r--r--  mfbt/Unused.h | 41
-rw-r--r--  mfbt/Utf8.cpp | 38
-rw-r--r--  mfbt/Utf8.h | 596
-rw-r--r--  mfbt/Variant.h | 928
-rw-r--r--  mfbt/Vector.h | 1653
-rw-r--r--  mfbt/WasiAtomic.h | 200
-rw-r--r--  mfbt/WeakPtr.h | 358
-rw-r--r--  mfbt/WindowsVersion.h | 80
-rw-r--r--  mfbt/WrappingOperations.h | 262
-rw-r--r--  mfbt/XorShift128PlusRNG.h | 122
-rw-r--r--  mfbt/double-conversion/LICENSE | 26
-rw-r--r--  mfbt/double-conversion/add-mfbt-api-markers.patch | 207
-rw-r--r--  mfbt/double-conversion/debug-only-functions.patch | 39
-rw-r--r--  mfbt/double-conversion/double-conversion/README.md | 55
-rw-r--r--  mfbt/double-conversion/double-conversion/bignum-dtoa.cc | 641
-rw-r--r--  mfbt/double-conversion/double-conversion/bignum-dtoa.h | 84
-rw-r--r--  mfbt/double-conversion/double-conversion/bignum.cc | 797
-rw-r--r--  mfbt/double-conversion/double-conversion/bignum.h | 152
-rw-r--r--  mfbt/double-conversion/double-conversion/cached-powers.cc | 175
-rw-r--r--  mfbt/double-conversion/double-conversion/cached-powers.h | 64
-rw-r--r--  mfbt/double-conversion/double-conversion/diy-fp.h | 137
-rw-r--r--  mfbt/double-conversion/double-conversion/double-conversion.h | 34
-rw-r--r--  mfbt/double-conversion/double-conversion/double-to-string.cc | 443
-rw-r--r--  mfbt/double-conversion/double-conversion/double-to-string.h | 471
-rw-r--r--  mfbt/double-conversion/double-conversion/fast-dtoa.cc | 665
-rw-r--r--  mfbt/double-conversion/double-conversion/fast-dtoa.h | 88
-rw-r--r--  mfbt/double-conversion/double-conversion/fixed-dtoa.cc | 405
-rw-r--r--  mfbt/double-conversion/double-conversion/fixed-dtoa.h | 56
-rw-r--r--  mfbt/double-conversion/double-conversion/ieee.h | 447
-rw-r--r--  mfbt/double-conversion/double-conversion/string-to-double.cc | 818
-rw-r--r--  mfbt/double-conversion/double-conversion/string-to-double.h | 239
-rw-r--r--  mfbt/double-conversion/double-conversion/strtod.cc | 610
-rw-r--r--  mfbt/double-conversion/double-conversion/strtod.h | 64
-rw-r--r--  mfbt/double-conversion/double-conversion/utils.h | 421
-rw-r--r--  mfbt/double-conversion/moz.yaml | 48
-rw-r--r--  mfbt/double-conversion/to-fixed-dbl-max.patch | 51
-rw-r--r--  mfbt/double-conversion/use-mozilla-assertions.patch | 60
-rw-r--r--  mfbt/fallible.h | 64
-rw-r--r--  mfbt/lz4/LICENSE | 24
-rw-r--r--  mfbt/lz4/README.md | 169
-rw-r--r--  mfbt/lz4/README.mozilla | 18
-rw-r--r--  mfbt/lz4/lz4.c | 2722
-rw-r--r--  mfbt/lz4/lz4.h | 842
-rw-r--r--  mfbt/lz4/lz4file.c | 311
-rw-r--r--  mfbt/lz4/lz4file.h | 93
-rw-r--r--  mfbt/lz4/lz4frame.c | 2078
-rw-r--r--  mfbt/lz4/lz4frame.h | 692
-rw-r--r--  mfbt/lz4/lz4frame_static.h | 47
-rw-r--r--  mfbt/lz4/lz4hc.c | 1631
-rw-r--r--  mfbt/lz4/lz4hc.h | 413
-rw-r--r--  mfbt/lz4/xxhash.c | 43
-rw-r--r--  mfbt/lz4/xxhash.h | 6773
-rw-r--r--  mfbt/moz.build | 214
-rw-r--r--  mfbt/tests/TestAlgorithm.cpp | 68
-rw-r--r--  mfbt/tests/TestArray.cpp | 31
-rw-r--r--  mfbt/tests/TestArrayUtils.cpp | 301
-rw-r--r--  mfbt/tests/TestAtomicBitfields.cpp | 189
-rw-r--r--  mfbt/tests/TestAtomics.cpp | 274
-rw-r--r--  mfbt/tests/TestBinarySearch.cpp | 158
-rw-r--r--  mfbt/tests/TestBitSet.cpp | 117
-rw-r--r--  mfbt/tests/TestBloomFilter.cpp | 142
-rw-r--r--  mfbt/tests/TestBufferList.cpp | 372
-rw-r--r--  mfbt/tests/TestCasting.cpp | 255
-rw-r--r--  mfbt/tests/TestCeilingFloor.cpp | 81
-rw-r--r--  mfbt/tests/TestCheckedInt.cpp | 615
-rw-r--r--  mfbt/tests/TestCompactPair.cpp | 160
-rw-r--r--  mfbt/tests/TestCountPopulation.cpp | 30
-rw-r--r--  mfbt/tests/TestCountZeroes.cpp | 92
-rw-r--r--  mfbt/tests/TestDefineEnum.cpp | 78
-rw-r--r--  mfbt/tests/TestDoublyLinkedList.cpp | 306
-rw-r--r--  mfbt/tests/TestEndian.cpp | 501
-rw-r--r--  mfbt/tests/TestEnumSet.cpp | 306
-rw-r--r--  mfbt/tests/TestEnumTypeTraits.cpp | 159
-rw-r--r--  mfbt/tests/TestEnumeratedArray.cpp | 46
-rw-r--r--  mfbt/tests/TestFastBernoulliTrial.cpp | 177
-rw-r--r--  mfbt/tests/TestFloatingPoint.cpp | 730
-rw-r--r--  mfbt/tests/TestFunctionRef.cpp | 144
-rw-r--r--  mfbt/tests/TestFunctionTypeTraits.cpp | 232
-rw-r--r--  mfbt/tests/TestHashTable.cpp | 103
-rw-r--r--  mfbt/tests/TestIntegerRange.cpp | 150
-rw-r--r--  mfbt/tests/TestJSONWriter.cpp | 657
-rw-r--r--  mfbt/tests/TestLinkedList.cpp | 399
-rw-r--r--  mfbt/tests/TestMacroArgs.cpp | 38
-rw-r--r--  mfbt/tests/TestMacroForEach.cpp | 44
-rw-r--r--  mfbt/tests/TestMathAlgorithms.cpp | 545
-rw-r--r--  mfbt/tests/TestMaybe.cpp | 1473
-rw-r--r--  mfbt/tests/TestNonDereferenceable.cpp | 171
-rw-r--r--  mfbt/tests/TestNotNull.cpp | 386
-rw-r--r--  mfbt/tests/TestPoisonArea.cpp | 530
-rw-r--r--  mfbt/tests/TestRandomNum.cpp | 61
-rw-r--r--  mfbt/tests/TestRange.cpp | 29
-rw-r--r--  mfbt/tests/TestRefPtr.cpp | 131
-rw-r--r--  mfbt/tests/TestResult.cpp | 870
-rw-r--r--  mfbt/tests/TestRollingMean.cpp | 114
-rw-r--r--  mfbt/tests/TestSHA1.cpp | 204
-rw-r--r--  mfbt/tests/TestSIMD.cpp | 631
-rw-r--r--  mfbt/tests/TestSPSCQueue.cpp | 302
-rw-r--r--  mfbt/tests/TestSaturate.cpp | 181
-rw-r--r--  mfbt/tests/TestScopeExit.cpp | 55
-rw-r--r--  mfbt/tests/TestSegmentedVector.cpp | 388
-rw-r--r--  mfbt/tests/TestSmallPointerArray.cpp | 237
-rw-r--r--  mfbt/tests/TestSplayTree.cpp | 208
-rw-r--r--  mfbt/tests/TestTextUtils.cpp | 1064
-rw-r--r--  mfbt/tests/TestThreadSafeWeakPtr.cpp | 127
-rw-r--r--  mfbt/tests/TestTypedEnum.cpp | 502
-rw-r--r--  mfbt/tests/TestUniquePtr.cpp | 609
-rw-r--r--  mfbt/tests/TestUtf8.cpp | 755
-rw-r--r--  mfbt/tests/TestVariant.cpp | 1153
-rw-r--r--  mfbt/tests/TestVector.cpp | 792
-rw-r--r--  mfbt/tests/TestWeakPtr.cpp | 145
-rw-r--r--  mfbt/tests/TestWinArchDefs.cpp | 58
-rw-r--r--  mfbt/tests/TestWrappingOperations.cpp | 587
-rw-r--r--  mfbt/tests/TestXorShift128PlusRNG.cpp | 101
-rw-r--r--  mfbt/tests/gtest/TestAlgorithm.cpp | 191
-rw-r--r--  mfbt/tests/gtest/TestBuffer.cpp | 96
-rw-r--r--  mfbt/tests/gtest/TestInitializedOnce.cpp | 200
-rw-r--r--  mfbt/tests/gtest/TestLinkedList.cpp | 78
-rw-r--r--  mfbt/tests/gtest/TestMainThreadWeakPtr.cpp | 42
-rw-r--r--  mfbt/tests/gtest/TestMozDbg.cpp | 170
-rw-r--r--  mfbt/tests/gtest/TestResultExtensions.cpp | 579
-rw-r--r--  mfbt/tests/gtest/TestReverseIterator.cpp | 104
-rw-r--r--  mfbt/tests/gtest/TestSpan.cpp | 2355
-rw-r--r--  mfbt/tests/gtest/TestTainting.cpp | 485
-rw-r--r--  mfbt/tests/gtest/moz.build | 32
-rw-r--r--  mfbt/tests/moz.build | 117
243 files changed, 81197 insertions, 0 deletions
diff --git a/mfbt/Algorithm.h b/mfbt/Algorithm.h
new file mode 100644
index 0000000000..33d666de49
--- /dev/null
+++ b/mfbt/Algorithm.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A polyfill for `<algorithm>`. */
+
+#ifndef mozilla_Algorithm_h
+#define mozilla_Algorithm_h
+
+#include "mozilla/Result.h"
+
+#include <iterator>
+#include <type_traits>
+
+namespace mozilla {
+
+// Returns true if all elements in the range [aFirst, aLast)
+// satisfy the predicate aPred.
+template <class Iter, class Pred>
+constexpr bool AllOf(Iter aFirst, Iter aLast, Pred aPred) {
+  for (; aFirst != aLast; ++aFirst) {
+    if (!aPred(*aFirst)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Like C++20's `std::any_of`.
+template <typename Iter, typename Pred>
+constexpr bool AnyOf(Iter aFirst, Iter aLast, Pred aPred) {
+  for (; aFirst != aLast; ++aFirst) {
+    if (aPred(*aFirst)) {
+      return true;
+    }
+  }
+
+  return false;
+}
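+
+// Illustrative usage of AllOf/AnyOf (editor's sketch, not part of the
+// upstream header):
+//
+//   int values[] = {2, 4, 6};
+//   bool allEven = AllOf(std::begin(values), std::end(values),
+//                        [](int v) { return v % 2 == 0; });  // true
+//   bool anyOdd = AnyOf(std::begin(values), std::end(values),
+//                       [](int v) { return v % 2 != 0; });   // false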
+
+namespace detail {
+template <typename Transform, typename SrcIter>
+using ArrayElementTransformType = typename std::invoke_result_t<
+    Transform, typename std::iterator_traits<SrcIter>::reference>;
+
+template <typename Transform, typename SrcIter>
+struct TransformTraits {
+  using result_type = ArrayElementTransformType<Transform, SrcIter>;
+
+  using result_ok_type = typename result_type::ok_type;
+  using result_err_type = typename result_type::err_type;
+};
+}  // namespace detail
+
+// An algorithm similar to TransformAbortOnErr, combined with a condition that
+// allows skipping elements. At most std::distance(aIter, aEnd) elements will
+// be inserted into aDst.
+//
+// Type requirements, in addition to those specified in TransformAbortOnErr:
+// - Cond must be compatible with signature
+//     bool (const SrcIter::value_type&)
+template <typename SrcIter, typename DstIter, typename Cond, typename Transform>
+Result<Ok,
+       typename detail::TransformTraits<Transform, SrcIter>::result_err_type>
+TransformIfAbortOnErr(SrcIter aIter, SrcIter aEnd, DstIter aDst, Cond aCond,
+                      Transform aTransform) {
+  for (; aIter != aEnd; ++aIter) {
+    if (!aCond(static_cast<std::add_const_t<
+                   typename std::iterator_traits<SrcIter>::value_type>&>(
+            *aIter))) {
+      continue;
+    }
+
+    auto res = aTransform(*aIter);
+    if (res.isErr()) {
+      return Err(res.unwrapErr());
+    }
+
+    *aDst++ = res.unwrap();
+  }
+  return Ok{};
+}
+
+template <typename SrcRange, typename DstIter, typename Cond,
+          typename Transform>
+auto TransformIfAbortOnErr(SrcRange& aRange, DstIter aDst, Cond aCond,
+                           Transform aTransform) {
+  using std::begin;
+  using std::end;
+  return TransformIfAbortOnErr(begin(aRange), end(aRange), aDst, aCond,
+                               aTransform);
+}
+
+// An algorithm similar to std::transform, adapted to error handling based on
+// mozilla::Result<V, E>. It iterates through the input range [aIter, aEnd) and
+// inserts the result of applying aTransform to each element into aDst, if
+// aTransform returns a success result. On the first error result, iterating is
+// aborted, and the error result is returned as an overall result. If all
+// transformations return a success result, Ok is returned as an overall result.
+//
+// Type requirements:
+// - SrcIter must be an InputIterator.
+// - DstIter must be an OutputIterator.
+// - Transform must be compatible with signature
+//     Result<DstIter::value_type, E> (SrcIter::reference)
+template <typename SrcIter, typename DstIter, typename Transform>
+Result<Ok,
+       typename detail::TransformTraits<Transform, SrcIter>::result_err_type>
+TransformAbortOnErr(SrcIter aIter, SrcIter aEnd, DstIter aDst,
+                    Transform aTransform) {
+  return TransformIfAbortOnErr(
+      aIter, aEnd, aDst, [](const auto&) { return true; }, aTransform);
+}
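+
+// Illustrative usage (editor's sketch; std::vector and std::to_string are
+// used only for the example): transform ints into strings, aborting on the
+// first error result.
+//
+//   std::vector<int> values = {1, 2, 3};
+//   std::vector<std::string> strings;
+//   auto res = TransformAbortOnErr(
+//       values.begin(), values.end(), std::back_inserter(strings),
+//       [](int v) -> Result<std::string, const char*> {
+//         if (v < 0) {
+//           return Err("negative input");
+//         }
+//         return std::to_string(v);
+//       });
+//   // res.isOk() here, and strings holds {"1", "2", "3"}.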
+
+template <typename SrcRange, typename DstIter, typename Transform>
+auto TransformAbortOnErr(SrcRange& aRange, DstIter aDst, Transform aTransform) {
+  using std::begin;
+  using std::end;
+  return TransformIfAbortOnErr(
+      begin(aRange), end(aRange), aDst, [](const auto&) { return true; },
+      aTransform);
+}
+
+} // namespace mozilla
+
+#endif // mozilla_Algorithm_h
diff --git a/mfbt/Alignment.h b/mfbt/Alignment.h
new file mode 100644
index 0000000000..c38e00d12c
--- /dev/null
+++ b/mfbt/Alignment.h
@@ -0,0 +1,138 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Functionality related to memory alignment. */
+
+#ifndef mozilla_Alignment_h
+#define mozilla_Alignment_h
+
+#include "mozilla/Attributes.h"
+#include <stddef.h>
+#include <stdint.h>
+
+namespace mozilla {
+
+/*
+ * This class, and the corresponding macro MOZ_ALIGNOF, figures out how many
+ * bytes of alignment a given type needs.
+ */
+template <typename T>
+class AlignmentFinder {
+  struct Aligner {
+    char mChar;
+    T mT;
+
+    // Aligner may be used to check alignment of types with deleted dtors. This
+    // results in such specializations having implicitly deleted dtors, which
+    // causes fatal warnings on MSVC (see bug 1481005). As we don't create
+    // Aligners, we can avoid this warning by explicitly deleting the dtor.
+    ~Aligner() = delete;
+  };
+
+ public:
+  static const size_t alignment = sizeof(Aligner) - sizeof(T);
+};
+
+#define MOZ_ALIGNOF(T) mozilla::AlignmentFinder<T>::alignment
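+
+// Editor's note: in Aligner, mT is laid out after a single char, so the
+// compiler inserts exactly enough padding to align mT; subtracting sizeof(T)
+// from sizeof(Aligner) therefore recovers T's alignment. For example, on
+// typical platforms:
+//
+//   static_assert(MOZ_ALIGNOF(uint32_t) == 4, "editor's illustration");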
+
+namespace detail {
+template <typename T>
+struct AlignasHelper {
+  T mT;
+};
+}  // namespace detail
+
+/*
+ * Use this instead of alignof to align a struct field as if it were inside
+ * a struct. On some platforms, some types have a different alignment when
+ * used on their own than when used as a struct field.
+ *
+ * Known examples are 64-bit types (uint64_t, double) on 32-bit Linux,
+ * where they have 8-byte alignment on their own and 4-byte alignment
+ * when in a struct.
+ */
+#define MOZ_ALIGNAS_IN_STRUCT(T) alignas(mozilla::detail::AlignasHelper<T>)
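+
+// Illustrative usage (editor's sketch): reserve raw storage for a uint64_t
+// with the alignment a uint64_t field would receive inside a struct.
+//
+//   struct Slot {
+//     MOZ_ALIGNAS_IN_STRUCT(uint64_t) unsigned char mStorage[sizeof(uint64_t)];
+//   };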
+
+/*
+ * Declare the MOZ_ALIGNED_DECL macro for declaring aligned types.
+ *
+ * For instance,
+ *
+ *   MOZ_ALIGNED_DECL(8, char arr[2]);
+ *
+ * will declare a two-character array |arr| aligned to 8 bytes.
+ */
+
+#if defined(__GNUC__)
+# define MOZ_ALIGNED_DECL(_align, _type) _type __attribute__((aligned(_align)))
+#elif defined(_MSC_VER)
+# define MOZ_ALIGNED_DECL(_align, _type) __declspec(align(_align)) _type
+#else
+# warning "We don't know how to align variables on this compiler."
+# define MOZ_ALIGNED_DECL(_align, _type) _type
+#endif
+
+/*
+ * AlignedElem<N> is a structure whose alignment is guaranteed to be at least N
+ * bytes.
+ *
+ * We support 1, 2, 4, 8, and 16-byte alignment.
+ */
+template <size_t Align>
+struct AlignedElem;
+
+/*
+ * We have to specialize this template because GCC doesn't like
+ * __attribute__((aligned(foo))) where foo is a template parameter.
+ */
+
+template <>
+struct AlignedElem<1> {
+  MOZ_ALIGNED_DECL(1, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<2> {
+  MOZ_ALIGNED_DECL(2, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<4> {
+  MOZ_ALIGNED_DECL(4, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<8> {
+  MOZ_ALIGNED_DECL(8, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<16> {
+  MOZ_ALIGNED_DECL(16, uint8_t elem);
+};
+
+template <typename T>
+struct MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS AlignedStorage2 {
+  union U {
+    char mBytes[sizeof(T)];
+    uint64_t mDummy;
+  } u;
+
+  const T* addr() const { return reinterpret_cast<const T*>(u.mBytes); }
+  T* addr() { return static_cast<T*>(static_cast<void*>(u.mBytes)); }
+
+  AlignedStorage2() = default;
+
+  // AlignedStorage2 is non-copyable: the default copy constructor violates
+  // strict aliasing rules, per bug 1269319.
+  AlignedStorage2(const AlignedStorage2&) = delete;
+  void operator=(const AlignedStorage2&) = delete;
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Alignment_h */
diff --git a/mfbt/AllocPolicy.h b/mfbt/AllocPolicy.h
new file mode 100644
index 0000000000..e5c62bcd64
--- /dev/null
+++ b/mfbt/AllocPolicy.h
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * An allocation policy concept, usable for structures and algorithms to
+ * control how memory is allocated and how failures are handled.
+ */
+
+#ifndef mozilla_AllocPolicy_h
+#define mozilla_AllocPolicy_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/TemplateLib.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+namespace mozilla {
+
+/*
+ * Allocation policies are used to implement the standard allocation behaviors
+ * in a customizable way. Additionally, custom behaviors may be added to these
+ * behaviors, such as additionally reporting an error through an out-of-band
+ * mechanism when OOM occurs. The concept modeled here is as follows:
+ *
+ * - public copy constructor, assignment, destructor
+ * - template <typename T> T* maybe_pod_malloc(size_t)
+ *     Fallible, but doesn't report an error on OOM.
+ * - template <typename T> T* maybe_pod_calloc(size_t)
+ *     Fallible, but doesn't report an error on OOM.
+ * - template <typename T> T* maybe_pod_realloc(T*, size_t, size_t)
+ *     Fallible, but doesn't report an error on OOM. The old allocation
+ *     size is passed in, in addition to the new allocation size requested.
+ * - template <typename T> T* pod_malloc(size_t)
+ *     Responsible for OOM reporting when null is returned.
+ * - template <typename T> T* pod_calloc(size_t)
+ *     Responsible for OOM reporting when null is returned.
+ * - template <typename T> T* pod_realloc(T*, size_t, size_t)
+ *     Responsible for OOM reporting when null is returned. The old allocation
+ *     size is passed in, in addition to the new allocation size requested.
+ * - template <typename T> void free_(T*, size_t)
+ *     The capacity passed in must match the old allocation size.
+ * - template <typename T> void free_(T*)
+ *     Frees a buffer without knowing its allocated size. This might not be
+ *     implemented by allocation policies that need the allocation size.
+ * - void reportAllocOverflow() const
+ *     Called on allocation overflow (that is, an allocation implicitly tried
+ *     to allocate more than the available memory space -- think allocating an
+ *     array of large-size objects, where N * size overflows) before null is
+ *     returned.
+ * - bool checkSimulatedOOM() const
+ *     Some clients generally allocate memory yet in some circumstances won't
+ *     need to do so. For example, appending to a vector with a small amount of
+ *     inline storage generally allocates memory, but no allocation occurs
+ *     unless appending exceeds inline storage. But for testing purposes, it
+ *     can be useful to treat *every* operation as allocating.
+ *     Clients (such as this hypothetical append method implementation) should
+ *     call this method in situations that don't allocate, but could generally,
+ *     to support this. The default behavior should return true; more
+ *     complicated behavior might be to return false only after a certain
+ *     number of allocations-or-check-simulated-OOMs (coordinating with the
+ *     other AllocPolicy methods) have occurred.
+ *
+ * mfbt provides (and typically uses by default) only MallocAllocPolicy, which
+ * does nothing more than delegate to the malloc/calloc/realloc/free functions.
+ */
+
+/*
+ * A policy that straightforwardly uses malloc/calloc/realloc/free and adds no
+ * extra behaviors.
+ */
+class MallocAllocPolicy {
+ public:
+  template <typename T>
+  T* maybe_pod_malloc(size_t aNumElems) {
+    if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+      return nullptr;
+    }
+    return static_cast<T*>(malloc(aNumElems * sizeof(T)));
+  }
+
+  template <typename T>
+  T* maybe_pod_calloc(size_t aNumElems) {
+    return static_cast<T*>(calloc(aNumElems, sizeof(T)));
+  }
+
+  template <typename T>
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+      return nullptr;
+    }
+    return static_cast<T*>(realloc(aPtr, aNewSize * sizeof(T)));
+  }
+
+  template <typename T>
+  T* pod_malloc(size_t aNumElems) {
+    return maybe_pod_malloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems) {
+    return maybe_pod_calloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
+  }
+
+  template <typename T>
+  void free_(T* aPtr, size_t aNumElems = 0) {
+    free(aPtr);
+  }
+
+  void reportAllocOverflow() const {}
+
+  [[nodiscard]] bool checkSimulatedOOM() const { return true; }
+};
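+
+// Illustrative usage (editor's sketch): allocation policies are normally
+// consumed as template parameters by containers such as Vector, but the
+// methods can also be called directly.
+//
+//   MallocAllocPolicy policy;
+//   int* elems = policy.pod_malloc<int>(16);
+//   if (elems) {
+//     policy.free_(elems, 16);
+//   }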
+
+/*
+ * A policy which always fails to allocate memory, returning nullptr. Methods
+ * which expect an existing allocation assert.
+ *
+ * This type should be used in situations where you want to use an MFBT type
+ * with inline storage, and don't want to allow it to allocate on the heap.
+ */
+class NeverAllocPolicy {
+ public:
+  template <typename T>
+  T* maybe_pod_malloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* maybe_pod_calloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    MOZ_CRASH("NeverAllocPolicy::maybe_pod_realloc");
+  }
+
+  template <typename T>
+  T* pod_malloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    MOZ_CRASH("NeverAllocPolicy::pod_realloc");
+  }
+
+  template <typename T>
+  void free_(T* aPtr, size_t aNumElems = 0) {
+    MOZ_CRASH("NeverAllocPolicy::free_");
+  }
+
+  void reportAllocOverflow() const {}
+
+  [[nodiscard]] bool checkSimulatedOOM() const { return true; }
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_AllocPolicy_h */
diff --git a/mfbt/AlreadyAddRefed.h b/mfbt/AlreadyAddRefed.h
new file mode 100644
index 0000000000..fd2b7f52bd
--- /dev/null
+++ b/mfbt/AlreadyAddRefed.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Typed temporary pointers for reference-counted smart pointers. */
+
+#ifndef AlreadyAddRefed_h
+#define AlreadyAddRefed_h
+
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+struct unused_t;
+
+} // namespace mozilla
+
+/**
+ * already_AddRefed cooperates with reference counting smart pointers to enable
+ * you to assign in a pointer _without_ |AddRef|ing it. You might want to use
+ * this as a return type from a function that returns an already |AddRef|ed
+ * pointer. Or, you might want to use this as a parameter type in a function
+ * that wants to force a transfer-of-ownership from a RefPtr in the caller (for
+ * example, if the function expects callers to pass in a newly-created object,
+ * which the function then takes ownership of).
+ *
+ * TODO Move already_AddRefed to namespace mozilla. This has not yet been done
+ * because of the sheer number of usages of already_AddRefed.
+ *
+ * When should you use already_AddRefed<>?
+ * * Ensure a consumer takes ownership of a reference
+ * * Pass ownership without calling AddRef/Release (sometimes required in
+ * off-main-thread code)
+ * * The ref pointer type you're using doesn't support move construction
+ *
+ * Otherwise, use std::move(RefPtr/nsCOMPtr/etc).
+ */
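+/*
+ * Illustrative sketch (editor's addition; Widget and CreateWidget are
+ * hypothetical):
+ *
+ *   already_AddRefed<Widget> CreateWidget() {
+ *     RefPtr<Widget> widget = new Widget();
+ *     return widget.forget();  // transfers the reference, no extra AddRef
+ *   }
+ *
+ *   RefPtr<Widget> w = CreateWidget();  // adopts the reference
+ */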
+template <class T>
+struct
+#if !defined(MOZ_CLANG_PLUGIN) && !defined(XGILL_PLUGIN)
+    [[nodiscard]]
+#endif
+    MOZ_NON_AUTOABLE already_AddRefed {
+  already_AddRefed() : mRawPtr(nullptr) {}
+
+  // For simplicity, allow returning nullptr from functions returning
+  // already_AddRefed<T>. Don't permit returning raw T*, though; it's preferred
+  // to create already_AddRefed<T> from a reference-counting smart pointer.
+  MOZ_IMPLICIT already_AddRefed(decltype(nullptr)) : mRawPtr(nullptr) {}
+  explicit already_AddRefed(T* aRawPtr) : mRawPtr(aRawPtr) {}
+
+  // Disallow copy constructor and copy assignment operator: move semantics used
+  // instead.
+  already_AddRefed(const already_AddRefed<T>& aOther) = delete;
+  already_AddRefed<T>& operator=(const already_AddRefed<T>& aOther) = delete;
+
+  // WARNING: sketchiness ahead.
+  //
+  // The x86-64 ABI for Unix-like operating systems requires structures to be
+  // returned via invisible reference if they are non-trivial for the purposes
+  // of calls according to the C++ ABI[1]. For our consideration here, that
+  // means that if we have a non-trivial move constructor or destructor,
+  // already_AddRefed must be returned by invisible reference. But
+  // already_AddRefed is small enough and so commonly used that it would be
+  // beneficial to return it via registers instead. So we need to figure out
+  // a way to make the move constructor and the destructor trivial.
+  //
+  // Our destructor is normally non-trivial, because it asserts that the
+  // stored pointer has been taken by somebody else prior to destruction.
+  // However, since the assert in question is compiled only for DEBUG builds,
+  // we can make the destructor trivial in non-DEBUG builds by simply defining
+  // it with `= default`.
+  //
+  // We now have to make the move constructor trivial as well. It is normally
+  // non-trivial, because the incoming object has its pointer nulled during
+  // the move. This nulling is done to satisfy the assert in the destructor.
+  // But since that destructor has no assert in non-DEBUG builds, the clearing
+  // is unnecessary in such builds; all we really need to perform is a copy of
+  // the pointer from the incoming object. So we can let the compiler define
+  // a trivial move constructor for us, and already_AddRefed can now be
+  // returned in registers rather than needing to allocate a stack slot for
+  // an invisible reference.
+  //
+  // The above considerations apply to Unix-like operating systems only; the
+  // conditions for the same optimization to apply on x86-64 Windows are much
+  // more stringent and are basically impossible for already_AddRefed to
+  // satisfy[2]. But we do get some benefit from this optimization on Windows
+  // because we removed the nulling of the pointer during the move, so that's
+  // a codesize win.
+  //
+  // [1] https://itanium-cxx-abi.github.io/cxx-abi/abi.html#non-trivial
+  // [2] https://docs.microsoft.com/en-us/cpp/build/return-values-cpp
+
+  already_AddRefed(already_AddRefed<T>&& aOther)
+#ifdef DEBUG
+      : mRawPtr(aOther.take()) {}
+#else
+      = default;
+#endif
+
+  already_AddRefed<T>&
+  operator=(already_AddRefed<T>&& aOther) {
+    mRawPtr = aOther.take();
+    return *this;
+  }
+
+  /**
+   * This helper is useful in cases like
+   *
+   *   already_AddRefed<BaseClass>
+   *   Foo()
+   *   {
+   *     RefPtr<SubClass> x = ...;
+   *     return x.forget();
+   *   }
+   *
+   * The autoconversion allows one to omit the idiom
+   *
+   *   RefPtr<BaseClass> y = x.forget();
+   *   return y.forget();
+   *
+   * Note that RefPtr is Mozilla's reference counting smart pointer class.
+   */
+  template <typename U>
+  MOZ_IMPLICIT already_AddRefed(already_AddRefed<U>&& aOther)
+      : mRawPtr(aOther.take()) {}
+
+  ~already_AddRefed()
+#ifdef DEBUG
+  {
+    MOZ_ASSERT(!mRawPtr);
+  }
+#else
+      = default;
+#endif
+
+  // Specialize the unused operator<< for already_AddRefed, to allow
+  //   nsCOMPtr<nsIFoo> foo;
+  //   Unused << foo.forget();
+  // Note that nsCOMPtr is the XPCOM reference counting smart pointer class.
+  friend void operator<<(const mozilla::unused_t& aUnused,
+                         const already_AddRefed<T>& aRhs) {
+    auto mutableAlreadyAddRefed = const_cast<already_AddRefed<T>*>(&aRhs);
+    aUnused << mutableAlreadyAddRefed->take();
+  }
+
+  [[nodiscard]] T* take() {
+    T* rawPtr = mRawPtr;
+    mRawPtr = nullptr;
+    return rawPtr;
+  }
+
+  /**
+   * This helper provides a static_cast replacement for already_AddRefed, so
+   * if you have
+   *
+   *   already_AddRefed<Parent> F();
+   *
+   * you can write
+   *
+   *   already_AddRefed<Child>
+   *   G()
+   *   {
+   *     return F().downcast<Child>();
+   *   }
+   */
+  template <class U>
+  already_AddRefed<U> downcast() {
+    U* tmp = static_cast<U*>(mRawPtr);
+    mRawPtr = nullptr;
+    return already_AddRefed<U>(tmp);
+  }
+
+ private:
+  T* MOZ_OWNING_REF mRawPtr;
+};
+
+#endif // AlreadyAddRefed_h
diff --git a/mfbt/Array.h b/mfbt/Array.h
new file mode 100644
index 0000000000..55b724a288
--- /dev/null
+++ b/mfbt/Array.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A compile-time constant-length array with bounds-checking assertions. */
+
+#ifndef mozilla_Array_h
+#define mozilla_Array_h
+
+#include <stddef.h>
+
+#include <iterator>
+#include <ostream>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+
+namespace mozilla {
+
+template <typename T, size_t _Length>
+class Array {
+  T mArr[_Length];
+
+ public:
+  using ElementType = T;
+  static constexpr size_t Length = _Length;
+
+  constexpr Array() = default;
+
+  template <typename... Args>
+  MOZ_IMPLICIT constexpr Array(Args&&... aArgs)
+      : mArr{std::forward<Args>(aArgs)...} {
+    static_assert(sizeof...(aArgs) == Length,
+                  "The number of arguments should be equal to the template "
+                  "parameter Length");
+  }
+
+  T& operator[](size_t aIndex) {
+    if (MOZ_UNLIKELY(aIndex >= Length)) {
+      detail::InvalidArrayIndex_CRASH(aIndex, Length);
+    }
+    return mArr[aIndex];
+  }
+
+  const T& operator[](size_t aIndex) const {
+    if (MOZ_UNLIKELY(aIndex >= Length)) {
+      detail::InvalidArrayIndex_CRASH(aIndex, Length);
+    }
+    return mArr[aIndex];
+  }
+
+  bool operator==(const Array<T, Length>& aOther) const {
+    for (size_t i = 0; i < Length; i++) {
+      if (mArr[i] != aOther[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  typedef T* iterator;
+  typedef const T* const_iterator;
+  typedef std::reverse_iterator<T*> reverse_iterator;
+  typedef std::reverse_iterator<const T*> const_reverse_iterator;
+
+  // Methods for range-based for loops.
+  iterator begin() { return mArr; }
+  constexpr const_iterator begin() const { return mArr; }
+  constexpr const_iterator cbegin() const { return begin(); }
+  iterator end() { return mArr + Length; }
+  constexpr const_iterator end() const { return mArr + Length; }
+  constexpr const_iterator cend() const { return end(); }
+
+  // Methods for reverse iterating.
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  const_reverse_iterator crbegin() const { return rbegin(); }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+  const_reverse_iterator crend() const { return rend(); }
+};
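+
+// Illustrative usage (editor's sketch): unlike a raw C array, out-of-bounds
+// indexing crashes deterministically instead of invoking undefined behavior.
+//
+//   mozilla::Array<int, 3> arr(1, 2, 3);
+//   int sum = 0;
+//   for (int v : arr) {
+//     sum += v;  // sum == 6 afterwards
+//   }
+//   // arr[3] would hit detail::InvalidArrayIndex_CRASH.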
+
+template <typename T>
+class Array<T, 0> {
+ public:
+  T& operator[](size_t aIndex) { MOZ_CRASH("indexing into zero-length array"); }
+
+  const T& operator[](size_t aIndex) const {
+    MOZ_CRASH("indexing into zero-length array");
+  }
+};
+
+// MOZ_DBG support
+
+template <typename T, size_t Length>
+std::ostream& operator<<(std::ostream& aOut, const Array<T, Length>& aArray) {
+  return aOut << Span(aArray);
+}
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Array_h */
diff --git a/mfbt/ArrayUtils.h b/mfbt/ArrayUtils.h
new file mode 100644
index 0000000000..0d55bb1f65
--- /dev/null
+++ b/mfbt/ArrayUtils.h
@@ -0,0 +1,188 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements various helper functions related to arrays.
+ */
+
+#ifndef mozilla_ArrayUtils_h
+#define mozilla_ArrayUtils_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+# include <algorithm>
+# include <type_traits>
+
+# include "mozilla/Alignment.h"
+
+namespace mozilla {
+
+template <typename T, size_t Length>
+class Array;
+template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+class EnumeratedArray;
+
+/*
+ * Safely subtract two pointers when it is known that aEnd >= aBegin, yielding a
+ * size_t result.
+ *
+ * Ordinary pointer subtraction yields a ptrdiff_t result, which, being signed,
+ * has insufficient range to express the distance between pointers at opposite
+ * ends of the address space. Furthermore, most compilers use ptrdiff_t to
+ * represent the intermediate byte address distance, before dividing by
+ * sizeof(T); if that intermediate result overflows, they'll produce results
+ * with the wrong sign even when the correct scaled distance would fit in a
+ * ptrdiff_t.
+ */
+template <class T>
+MOZ_ALWAYS_INLINE size_t PointerRangeSize(T* aBegin, T* aEnd) {
+  MOZ_ASSERT(aEnd >= aBegin);
+  return (size_t(aEnd) - size_t(aBegin)) / sizeof(T);
+}
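+
+// Illustrative usage (editor's sketch):
+//
+//   int data[8];
+//   size_t n = PointerRangeSize(data, data + 8);  // n == 8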
+
+/*
+ * Compute the length of an array with constant length. (Use of this method
+ * with a non-array pointer will not compile.)
+ *
+ * Beware of the implicit trailing '\0' when using this with string constants.
+ */
+template <typename T, size_t N>
+constexpr size_t ArrayLength(T (&aArr)[N]) {
+  return N;
+}
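+
+// Illustrative usage (editor's sketch), including the trailing-'\0' caveat:
+//
+//   static const char kPrefix[] = "moz";
+//   static_assert(ArrayLength(kPrefix) == 4,
+//                 "string literals include their terminating NUL");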
+
+template <typename T, size_t N>
+constexpr size_t ArrayLength(const Array<T, N>& aArr) {
+  return N;
+}
+
+template <typename E, E N, typename T>
+constexpr size_t ArrayLength(const EnumeratedArray<E, N, T>& aArr) {
+  return size_t(N);
+}
+
+/*
+ * Compute the address one past the last element of a constant-length array.
+ *
+ * Beware of the implicit trailing '\0' when using this with string constants.
+ */
+template <typename T, size_t N>
+constexpr T* ArrayEnd(T (&aArr)[N]) {
+  return aArr + ArrayLength(aArr);
+}
+
+template <typename T, size_t N>
+constexpr T* ArrayEnd(Array<T, N>& aArr) {
+  return &aArr[0] + ArrayLength(aArr);
+}
+
+template <typename T, size_t N>
+constexpr const T* ArrayEnd(const Array<T, N>& aArr) {
+  return &aArr[0] + ArrayLength(aArr);
+}
+
+/**
+ * std::equal has subpar ergonomics.
+ */
+
+template <typename T, typename U, size_t N>
+bool ArrayEqual(const T (&a)[N], const U (&b)[N]) {
+  return std::equal(a, a + N, b);
+}
+
+template <typename T, typename U>
+bool ArrayEqual(const T* const a, const U* const b, const size_t n) {
+  return std::equal(a, a + n, b);
+}
+
+namespace detail {
+
+template <typename AlignType, typename Pointee, typename = void>
+struct AlignedChecker {
+  static void test(const Pointee* aPtr) {
+    MOZ_ASSERT((uintptr_t(aPtr) % MOZ_ALIGNOF(AlignType)) == 0,
+               "performing a range-check with a misaligned pointer");
+  }
+};
+
+template <typename AlignType, typename Pointee>
+struct AlignedChecker<AlignType, Pointee,
+                      std::enable_if_t<std::is_void_v<AlignType>>> {
+  static void test(const Pointee* aPtr) {}
+};
+
+}  // namespace detail
+
+/**
+ * Determines whether |aPtr| points at an object in the range [aBegin, aEnd).
+ *
+ * |aPtr| must have the same alignment as |aBegin| and |aEnd|. This usually
+ * should be achieved by ensuring |aPtr| points at a |U|, not just that it
+ * points at a |T|.
+ *
+ * It is a usage error for any argument to be misaligned.
+ *
+ * It's okay for T* to be void*, and if so U* may also be void*. In the latter
+ * case no argument is required to be aligned (obviously, as void* implies no
+ * particular alignment).
+ */
+template <typename T, typename U>
+inline std::enable_if_t<std::is_same_v<T, U> || std::is_base_of_v<T, U> ||
+                            std::is_void_v<T>,
+                        bool>
+IsInRange(const T* aPtr, const U* aBegin, const U* aEnd) {
+  MOZ_ASSERT(aBegin <= aEnd);
+  detail::AlignedChecker<U, T>::test(aPtr);
+  detail::AlignedChecker<U, U>::test(aBegin);
+  detail::AlignedChecker<U, U>::test(aEnd);
+  return aBegin <= reinterpret_cast<const U*>(aPtr) &&
+         reinterpret_cast<const U*>(aPtr) < aEnd;
+}
+
+/**
+ * Convenience version of the above method when the valid range is specified as
+ * uintptr_t values. As above, |aPtr| must be aligned, and |aBegin| and |aEnd|
+ * must be aligned with respect to |T|.
+ */
+template <typename T>
+inline bool IsInRange(const T* aPtr, uintptr_t aBegin, uintptr_t aEnd) {
+  return IsInRange(aPtr, reinterpret_cast<const T*>(aBegin),
+                   reinterpret_cast<const T*>(aEnd));
+}
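+
+// Illustrative usage (editor's sketch):
+//
+//   int buf[16];
+//   MOZ_ASSERT(IsInRange(&buf[4], buf, buf + 16));      // inside [buf, buf+16)
+//   MOZ_ASSERT(!IsInRange(&buf[4], buf + 8, buf + 16)); // outside that range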
+
+namespace detail {
+
+/*
+ * Helper for the MOZ_ARRAY_LENGTH() macro to make the length a typesafe
+ * compile-time constant even on compilers lacking constexpr support.
+ */
+template <typename T, size_t N>
+char (&ArrayLengthHelper(T (&array)[N]))[N];
+
+} /* namespace detail */
+
+} /* namespace mozilla */
+
+#endif /* __cplusplus */
+
+/*
+ * MOZ_ARRAY_LENGTH() is an alternative to mozilla::ArrayLength() for C files
+ * that can't use C++ template functions and for static_assert() calls that
+ * can't call ArrayLength() when it is not a C++11 constexpr function.
+ */
+#ifdef __cplusplus
+# define MOZ_ARRAY_LENGTH(array) \
+   sizeof(mozilla::detail::ArrayLengthHelper(array))
+#else
+# define MOZ_ARRAY_LENGTH(array) (sizeof(array) / sizeof((array)[0]))
+#endif
+
+#endif /* mozilla_ArrayUtils_h */
diff --git a/mfbt/Assertions.cpp b/mfbt/Assertions.cpp
new file mode 100644
index 0000000000..7721677f19
--- /dev/null
+++ b/mfbt/Assertions.cpp
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Sprintf.h"
+
+#include <stdarg.h>
+
+MOZ_BEGIN_EXTERN_C
+
+/*
+ * The crash reason is defined as a global variable here rather than in the
+ * crash reporter itself to make it available to all code, even libraries like
+ * JS that don't link with the crash reporter directly. This value will only
+ * be consumed if the crash reporter is used by the target application.
+ */
+MFBT_DATA const char* gMozCrashReason = nullptr;
+
+static char sPrintfCrashReason[sPrintfCrashReasonSize] = {};
+
+// Accesses to this atomic are not included in web replay recordings, so that
+// if we crash in an area where recorded events are not allowed, the true
+// reason for the crash is not obscured by a record/replay error.
+static mozilla::Atomic<bool, mozilla::SequentiallyConsistent> sCrashing(false);
+
+MFBT_API MOZ_COLD MOZ_NEVER_INLINE MOZ_FORMAT_PRINTF(1, 2) const
+    char* MOZ_CrashPrintf(const char* aFormat, ...) {
+  if (!sCrashing.compareExchange(false, true)) {
+    // In the unlikely event of a race condition, skip
+    // setting the crash reason and just crash safely.
+    MOZ_RELEASE_ASSERT(false);
+  }
+  va_list aArgs;
+  va_start(aArgs, aFormat);
+  int ret = VsprintfLiteral(sPrintfCrashReason, aFormat, aArgs);
+  va_end(aArgs);
+  MOZ_RELEASE_ASSERT(
+      ret >= 0 && size_t(ret) < sPrintfCrashReasonSize,
+      "Could not write the explanation string to the supplied buffer!");
+  return sPrintfCrashReason;
+}
+
+MOZ_END_EXTERN_C
+
+MFBT_API MOZ_NORETURN MOZ_COLD void mozilla::detail::InvalidArrayIndex_CRASH(
+    size_t aIndex, size_t aLength) {
+  MOZ_CRASH_UNSAFE_PRINTF("ElementAt(aIndex = %zu, aLength = %zu)", aIndex,
+                          aLength);
+}
diff --git a/mfbt/Assertions.h b/mfbt/Assertions.h
new file mode 100644
index 0000000000..634d340579
--- /dev/null
+++ b/mfbt/Assertions.h
@@ -0,0 +1,709 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of runtime and static assertion macros for C and C++. */
+
+#ifndef mozilla_Assertions_h
+#define mozilla_Assertions_h
+
+#if (defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)) && \
+ !defined(__wasi__)
+# define MOZ_DUMP_ASSERTION_STACK
+#endif
+#if defined(XP_WIN) && (defined(DEBUG) || defined(FUZZING))
+# define MOZ_BUFFER_STDERR
+#endif
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/Fuzzing.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MacroArgs.h"
+#include "mozilla/StaticAnalysisFunctions.h"
+#include "mozilla/Types.h"
+#ifdef MOZ_DUMP_ASSERTION_STACK
+# include "mozilla/StackWalk.h"
+#endif
+
+/*
+ * The crash reason set by MOZ_CRASH_ANNOTATE is consumed by the crash reporter
+ * if present. It is declared here (and defined in Assertions.cpp) to make it
+ * available to all code, even libraries that don't link with the crash reporter
+ * directly.
+ */
+MOZ_BEGIN_EXTERN_C
+extern MFBT_DATA const char* gMozCrashReason;
+MOZ_END_EXTERN_C
+
+#if defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)
+static inline void AnnotateMozCrashReason(const char* reason) {
+  gMozCrashReason = reason;
+  // See bug 1681846, on 32-bit Android ARM the compiler removes the store to
+  // gMozCrashReason if this barrier is not present.
+  asm volatile("" ::: "memory");
+}
+# define MOZ_CRASH_ANNOTATE(...) AnnotateMozCrashReason(__VA_ARGS__)
+#else
+# define MOZ_CRASH_ANNOTATE(...) \
+ do { /* nothing */ \
+ } while (false)
+#endif
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+/*
+ * TerminateProcess and GetCurrentProcess are defined in <winbase.h>, which
+ * further depends on <windef.h>. We hardcode these few definitions manually
+ * because those headers clutter the global namespace with a significant
+ * number of undesired macros and symbols.
+ */
+MOZ_BEGIN_EXTERN_C
+__declspec(dllimport) int __stdcall TerminateProcess(void* hProcess,
+                                                     unsigned int uExitCode);
+__declspec(dllimport) void* __stdcall GetCurrentProcess(void);
+MOZ_END_EXTERN_C
+#elif defined(__wasi__)
+/*
+ * On Wasm/WASI platforms, we just call __builtin_trap().
+ */
+#else
+# include <signal.h>
+#endif
+#ifdef ANDROID
+# include <android/log.h>
+#endif
+
+MOZ_BEGIN_EXTERN_C
+
+#if defined(ANDROID) && defined(MOZ_DUMP_ASSERTION_STACK)
+MOZ_MAYBE_UNUSED static void MOZ_ReportAssertionFailurePrintFrame(
+    const char* aBuf) {
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_Assert", "%s\n", aBuf);
+}
+#endif
+
+/*
+ * Prints |aStr| as an assertion failure (using aFilename and aLine as the
+ * location of the assertion) to the standard debug-output channel.
+ *
+ * Usually you should use MOZ_ASSERT or MOZ_CRASH instead of this method. This
+ * method is primarily for internal use in this header, and only secondarily
+ * for use in implementing release-build assertions.
+ */
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NEVER_INLINE void
+MOZ_ReportAssertionFailure(const char* aStr, const char* aFilename,
+                           int aLine) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS {
+  MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_ASSERT", aFilename, aLine, aStr);
+#ifdef ANDROID
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_Assert",
+                      "Assertion failure: %s, at %s:%d\n", aStr, aFilename,
+                      aLine);
+# if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStackWithWriter(MOZ_ReportAssertionFailurePrintFrame, CallerPC(),
+                            /* aMaxFrames */ 0);
+# endif
+#else
+# if defined(MOZ_BUFFER_STDERR)
+  char msg[1024] = "";
+  snprintf(msg, sizeof(msg) - 1, "Assertion failure: %s, at %s:%d\n", aStr,
+           aFilename, aLine);
+  fputs(msg, stderr);
+# else
+  fprintf(stderr, "Assertion failure: %s, at %s:%d\n", aStr, aFilename, aLine);
+# endif
+# if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStack(stderr, CallerPC(), /* aMaxFrames */ 0);
+# endif
+  fflush(stderr);
+#endif
+}
+
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NEVER_INLINE void MOZ_ReportCrash(
+    const char* aStr, const char* aFilename,
+    int aLine) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS {
+#ifdef ANDROID
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_CRASH",
+                      "Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
+#else
+# if defined(MOZ_BUFFER_STDERR)
+  char msg[1024] = "";
+  snprintf(msg, sizeof(msg) - 1, "Hit MOZ_CRASH(%s) at %s:%d\n", aStr,
+           aFilename, aLine);
+  fputs(msg, stderr);
+# else
+  fprintf(stderr, "Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
+# endif
+# if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStack(stderr, CallerPC(), /* aMaxFrames */ 0);
+# endif
+  fflush(stderr);
+#endif
+}
+
+/*
+ * MOZ_ASSUME_UNREACHABLE_MARKER() expands to an expression which states that
+ * it is undefined behavior for execution to reach this point. No guarantees
+ * are made about what will happen if this is reached at runtime. Most code
+ * should use MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE because it has extra
+ * asserts.
+ */
+#if defined(__clang__) || defined(__GNUC__)
+# define MOZ_ASSUME_UNREACHABLE_MARKER() __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define MOZ_ASSUME_UNREACHABLE_MARKER() __assume(0)
+#else
+# ifdef __cplusplus
+# define MOZ_ASSUME_UNREACHABLE_MARKER() ::abort()
+# else
+# define MOZ_ASSUME_UNREACHABLE_MARKER() abort()
+# endif
+#endif
+
+/**
+ * MOZ_REALLY_CRASH is used in the implementation of MOZ_CRASH(). You should
+ * call MOZ_CRASH instead.
+ */
+#if defined(_MSC_VER)
+/*
+ * On MSVC use the __debugbreak compiler intrinsic, which produces an inline
+ * (not nested in a system function) breakpoint. This distinctively invokes
+ * Breakpad without requiring system library symbols on all stack-processing
+ * machines, as a nested breakpoint would require.
+ *
+ * We use __LINE__ to prevent the compiler from folding multiple crash sites
+ * together, which would make crash reports hard to understand.
+ *
+ * We use TerminateProcess with the exit code aborting would generate
+ * because we don't want to invoke atexit handlers, destructors, library
+ * unload handlers, and so on when our process might be in a compromised
+ * state.
+ *
+ * We don't use abort() because it'd cause Windows to annoyingly pop up the
+ * process error dialog multiple times. See bug 345118 and bug 426163.
+ *
+ * (Technically these are Windows requirements, not MSVC requirements. But
+ * practically you need MSVC for debugging, and we only ship builds created
+ * by MSVC, so doing it this way reduces complexity.)
+ */
+
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NORETURN MOZ_NEVER_INLINE void
+MOZ_NoReturn(int aLine) {
+  *((volatile int*)NULL) = aLine;
+  TerminateProcess(GetCurrentProcess(), 3);
+  MOZ_ASSUME_UNREACHABLE_MARKER();
+}
+
+# define MOZ_REALLY_CRASH(line) \
+   do {                        \
+     __debugbreak();           \
+     MOZ_NoReturn(line);       \
+   } while (false)
+
+#elif __wasi__
+
+# define MOZ_REALLY_CRASH(line) __builtin_trap()
+
+#else
+
+/*
+ * MOZ_CRASH_WRITE_ADDR is the address to be used when performing a forced
+ * crash. NULL is preferred; however, if for some reason NULL cannot be used,
+ * this makes it possible to choose another value.
+ *
+ * In the case of UBSan certain checks, bounds specifically, cause the compiler
+ * to emit the 'ud2' instruction when storing to 0x0. This causes forced
+ * crashes to manifest as ILL (at an arbitrary address) instead of the expected
+ * SEGV at 0x0.
+ */
+# ifdef MOZ_UBSAN
+# define MOZ_CRASH_WRITE_ADDR 0x1
+# else
+# define MOZ_CRASH_WRITE_ADDR NULL
+# endif
+
+# ifdef __cplusplus
+#  define MOZ_REALLY_CRASH(line)                                  \
+    do {                                                          \
+      *((volatile int*)MOZ_CRASH_WRITE_ADDR) = line; /* NOLINT */ \
+      MOZ_NOMERGE ::abort();                                      \
+    } while (false)
+# else
+#  define MOZ_REALLY_CRASH(line)                                  \
+    do {                                                          \
+      *((volatile int*)MOZ_CRASH_WRITE_ADDR) = line; /* NOLINT */ \
+      MOZ_NOMERGE abort();                                        \
+    } while (false)
+# endif
+#endif
+
+/*
+ * MOZ_CRASH([explanation-string]) crashes the program, plain and simple, in a
+ * Breakpad-compatible way, in both debug and release builds.
+ *
+ * MOZ_CRASH is a good solution for "handling" failure cases when you're
+ * unwilling or unable to handle them more cleanly -- for OOM, for likely memory
+ * corruption, and so on. It's also a good solution if you need safe behavior
+ * in release builds as well as debug builds. But if the failure is one that
+ * should be debugged and fixed, MOZ_ASSERT is generally preferable.
+ *
+ * The optional explanation-string, if provided, must be a string literal
+ * explaining why we're crashing. This argument is intended for use with
+ * MOZ_CRASH() calls whose rationale is non-obvious; don't use it if it's
+ * obvious why we're crashing.
+ *
+ * If we're a DEBUG build and we crash at a MOZ_CRASH which provides an
+ * explanation-string, we print the string to stderr. Otherwise, we don't
+ * print anything; this is because we want MOZ_CRASH to be 100% safe in release
+ * builds, and it's hard to print to stderr safely when memory might have been
+ * corrupted.
+ */
+#if !(defined(DEBUG) || defined(FUZZING))
+# define MOZ_CRASH(...)                                                      \
+   do {                                                                      \
+     MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \
+     MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")");                       \
+     MOZ_REALLY_CRASH(__LINE__);                                             \
+   } while (false)
+#else
+# define MOZ_CRASH(...)                                                      \
+   do {                                                                      \
+     MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \
+     MOZ_ReportCrash("" __VA_ARGS__, __FILE__, __LINE__);                    \
+     MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")");                       \
+     MOZ_REALLY_CRASH(__LINE__);                                             \
+   } while (false)
+#endif
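+
+/*
+ * Illustrative usage (editor's sketch; mempool is a hypothetical pointer):
+ *
+ *   if (!mempool) {
+ *     MOZ_CRASH("out of memory while initializing the pool");
+ *   }
+ */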
+
+/*
+ * MOZ_CRASH_UNSAFE(explanation-string) can be used if the explanation string
+ * cannot be a string literal (but no other processing needs to be done on it).
+ * A regular MOZ_CRASH() is preferred wherever possible, as passing arbitrary
+ * strings from a potentially compromised process is not without risk. If the
+ * string being passed is the result of a printf-style function, consider using
+ * MOZ_CRASH_UNSAFE_PRINTF instead.
+ *
+ * @note This macro causes data collection because crash strings are annotated
+ * to crash-stats and are publicly visible. Firefox data stewards must do data
+ * review on usages of this macro.
+ */
+static MOZ_ALWAYS_INLINE_EVEN_DEBUG MOZ_COLD MOZ_NORETURN void MOZ_Crash(
+    const char* aFilename, int aLine, const char* aReason) {
+  MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", aFilename, aLine, aReason);
+#if defined(DEBUG) || defined(FUZZING)
+  MOZ_ReportCrash(aReason, aFilename, aLine);
+#endif
+  MOZ_CRASH_ANNOTATE(aReason);
+  MOZ_REALLY_CRASH(aLine);
+}
+#define MOZ_CRASH_UNSAFE(reason) MOZ_Crash(__FILE__, __LINE__, reason)
+
+static const size_t sPrintfMaxArgs = 4;
+static const size_t sPrintfCrashReasonSize = 1024;
+
+MFBT_API MOZ_COLD MOZ_NEVER_INLINE MOZ_FORMAT_PRINTF(1, 2) const
+ char* MOZ_CrashPrintf(const char* aFormat, ...);
+
+/*
+ * MOZ_CRASH_UNSAFE_PRINTF(format, arg1 [, args]) can be used when more
+ * information is desired than a string literal can supply. The caller provides
+ * a printf-style format string, which must be a string literal and between
+ * 1 and 4 additional arguments. A regular MOZ_CRASH() is preferred wherever
+ * possible, as passing arbitrary strings to printf from a potentially
+ * compromised process is not without risk.
+ *
+ * @note This macro causes data collection because crash strings are annotated
+ * to crash-stats and are publicly visible. Firefox data stewards must do data
+ * review on usages of this macro.
+ */
+#define MOZ_CRASH_UNSAFE_PRINTF(format, ...)                                \
+  do {                                                                      \
+    static_assert(MOZ_ARG_COUNT(__VA_ARGS__) > 0,                           \
+                  "Did you forget arguments to MOZ_CRASH_UNSAFE_PRINTF? "   \
+                  "Or maybe you want MOZ_CRASH instead?");                  \
+    static_assert(MOZ_ARG_COUNT(__VA_ARGS__) <= sPrintfMaxArgs,             \
+                  "Only up to 4 additional arguments are allowed!");        \
+    static_assert(sizeof(format) <= sPrintfCrashReasonSize,                 \
+                  "The supplied format string is too long!");               \
+    MOZ_Crash(__FILE__, __LINE__, MOZ_CrashPrintf("" format, __VA_ARGS__)); \
+  } while (false)
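+
+/*
+ * Illustrative usage (editor's sketch; aState is a hypothetical variable):
+ *
+ *   if (!decoder.Init()) {
+ *     MOZ_CRASH_UNSAFE_PRINTF("decoder init failed: state=%d", aState);
+ *   }
+ */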
+
+MOZ_END_EXTERN_C
+
+/*
+ * MOZ_ASSERT(expr [, explanation-string]) asserts that |expr| must be truthy in
+ * debug builds. If it is, execution continues. Otherwise, an error message
+ * including the expression and the explanation-string (if provided) is printed,
+ * an attempt is made to invoke any existing debugger, and execution halts.
+ * MOZ_ASSERT is fatal: no recovery is possible. Do not assert a condition
+ * which can correctly be falsy.
+ *
+ * The optional explanation-string, if provided, must be a string literal
+ * explaining the assertion. It is intended for use with assertions whose
+ * correctness or rationale is non-obvious, and for assertions where the "real"
+ * condition being tested is best described prosaically. Don't provide an
+ * explanation if it's not actually helpful.
+ *
+ *   // No explanation needed: pointer arguments often must not be NULL.
+ *   MOZ_ASSERT(arg);
+ *
+ *   // An explanation can be helpful to explain exactly how we know an
+ *   // assertion is valid.
+ *   MOZ_ASSERT(state == WAITING_FOR_RESPONSE,
+ *              "given that <thingA> and <thingB>, we must have...");
+ *
+ *   // Or it might disambiguate multiple identical (save for their location)
+ *   // assertions of the same expression.
+ *   MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ *              "we already set [[PrimitiveThis]] for this Boolean object");
+ *   MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ *              "we already set [[PrimitiveThis]] for this String object");
+ *
+ * MOZ_ASSERT has no effect in non-debug builds. It is designed to catch bugs
+ * *only* during debugging, not "in the field". If you want the latter, use
+ * MOZ_RELEASE_ASSERT, which applies to non-debug builds as well.
+ *
+ * MOZ_DIAGNOSTIC_ASSERT works like MOZ_RELEASE_ASSERT in Nightly and early
+ * Beta, and like MOZ_ASSERT in late Beta and Release - use this when a
+ * condition is
+ * potentially rare enough to require real user testing to hit, but is not
+ * security-sensitive. This can cause user pain, so use it sparingly. If a
+ * MOZ_DIAGNOSTIC_ASSERT is firing, it should promptly be converted to a
+ * MOZ_ASSERT while the failure is being investigated, rather than letting users
+ * suffer.
+ *
+ * MOZ_DIAGNOSTIC_ASSERT_ENABLED is defined when MOZ_DIAGNOSTIC_ASSERT is like
+ * MOZ_RELEASE_ASSERT rather than MOZ_ASSERT.
+ */
+
+/*
+ * Implement MOZ_VALIDATE_ASSERT_CONDITION_TYPE, which is used to guard against
+ * accidentally passing something unintended in lieu of an assertion condition.
+ */
+
+#ifdef __cplusplus
+# include <type_traits>
+namespace mozilla {
+namespace detail {
+
+template <typename T>
+struct AssertionConditionType {
+ using ValueT = std::remove_reference_t<T>;
+ static_assert(!std::is_array_v<ValueT>,
+ "Expected boolean assertion condition, got an array or a "
+ "string!");
+ static_assert(!std::is_function_v<ValueT>,
+ "Expected boolean assertion condition, got a function! Did "
+ "you intend to call that function?");
+ static_assert(!std::is_floating_point_v<ValueT>,
+ "It's often a bad idea to assert that a floating-point number "
+ "is nonzero, because such assertions tend to intermittently "
+ "fail. Shouldn't your code gracefully handle this case instead "
+ "of asserting? Anyway, if you really want to do that, write an "
+ "explicit boolean condition, like !!x or x!=0.");
+
+ static const bool isValid = true;
+};
+
+} // namespace detail
+} // namespace mozilla
+# define MOZ_VALIDATE_ASSERT_CONDITION_TYPE(x) \
+ static_assert( \
+ mozilla::detail::AssertionConditionType<decltype(x)>::isValid, \
+ "invalid assertion condition")
+#else
+# define MOZ_VALIDATE_ASSERT_CONDITION_TYPE(x)
+#endif
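+
+/*
+ * For illustration, each of these would be rejected at compile time by the
+ * static_asserts above (the identifiers are hypothetical):
+ *
+ *   MOZ_ASSERT("should not get here");  // array: a string literal is not a
+ *                                       // boolean condition
+ *   MOZ_ASSERT(IsInitialized);          // function: the call's () is missing
+ *   MOZ_ASSERT(mOpacity);               // floating-point: write an explicit
+ *                                       // condition like mOpacity != 0.0f
+ */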
+
+#if defined(DEBUG) || defined(MOZ_ASAN)
+# define MOZ_REPORT_ASSERTION_FAILURE(...) \
+ MOZ_ReportAssertionFailure(__VA_ARGS__)
+#else
+# define MOZ_REPORT_ASSERTION_FAILURE(...) \
+ do { /* nothing */ \
+ } while (false)
+#endif
+
+/* First the single-argument form. */
+#define MOZ_ASSERT_HELPER1(kind, expr) \
+ do { \
+ MOZ_VALIDATE_ASSERT_CONDITION_TYPE(expr); \
+ if (MOZ_UNLIKELY(!MOZ_CHECK_ASSERT_ASSIGNMENT(expr))) { \
+ MOZ_FUZZING_HANDLE_CRASH_EVENT2(kind, #expr); \
+ MOZ_REPORT_ASSERTION_FAILURE(#expr, __FILE__, __LINE__); \
+ MOZ_CRASH_ANNOTATE(kind "(" #expr ")"); \
+ MOZ_REALLY_CRASH(__LINE__); \
+ } \
+ } while (false)
+/* Now the two-argument form. */
+#define MOZ_ASSERT_HELPER2(kind, expr, explain) \
+ do { \
+ MOZ_VALIDATE_ASSERT_CONDITION_TYPE(expr); \
+ if (MOZ_UNLIKELY(!MOZ_CHECK_ASSERT_ASSIGNMENT(expr))) { \
+ MOZ_FUZZING_HANDLE_CRASH_EVENT2(kind, #expr); \
+ MOZ_REPORT_ASSERTION_FAILURE(#expr " (" explain ")", __FILE__, \
+ __LINE__); \
+ MOZ_CRASH_ANNOTATE(kind "(" #expr ") (" explain ")"); \
+ MOZ_REALLY_CRASH(__LINE__); \
+ } \
+ } while (false)
+
+#define MOZ_ASSERT_GLUE(a, b) a b
+#define MOZ_RELEASE_ASSERT(...) \
+ MOZ_ASSERT_GLUE( \
+ MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \
+ ("MOZ_RELEASE_ASSERT", __VA_ARGS__))
+
+#ifdef DEBUG
+# define MOZ_ASSERT(...) \
+ MOZ_ASSERT_GLUE( \
+ MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \
+ ("MOZ_ASSERT", __VA_ARGS__))
+#else
+# define MOZ_ASSERT(...) \
+ do { \
+ } while (false)
+#endif /* DEBUG */
+
+#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
+# define MOZ_DIAGNOSTIC_ASSERT(...) \
+ MOZ_ASSERT_GLUE( \
+ MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \
+ ("MOZ_DIAGNOSTIC_ASSERT", __VA_ARGS__))
+#else
+# define MOZ_DIAGNOSTIC_ASSERT(...) \
+ do { \
+ } while (false)
+#endif
+
+/*
+ * MOZ_ASSERT_DEBUG_OR_FUZZING is like MOZ_ASSERT, but is also enabled in
+ * builds that are non-DEBUG but FUZZING. This is useful for checks that are
+ * too expensive for Nightly in general but still indicate potentially
+ * critical bugs.
+ * In fuzzing builds, the assert is rewritten as a diagnostic assert, because
+ * we already use diagnostic asserts in other sensitive places and fuzzing
+ * automation is set to act on them under all circumstances.
+ */
+#ifdef FUZZING
+# define MOZ_ASSERT_DEBUG_OR_FUZZING(...) MOZ_DIAGNOSTIC_ASSERT(__VA_ARGS__)
+#else
+# define MOZ_ASSERT_DEBUG_OR_FUZZING(...) MOZ_ASSERT(__VA_ARGS__)
+#endif
+
+/*
+ * MOZ_ASSERT_IF(cond1, cond2) is equivalent to MOZ_ASSERT(cond2) if cond1 is
+ * true.
+ *
+ * MOZ_ASSERT_IF(isPrime(num), num == 2 || isOdd(num));
+ *
+ * As with MOZ_ASSERT, MOZ_ASSERT_IF has effect only in debug builds. It is
+ * designed to catch bugs during debugging, not "in the field".
+ */
+#ifdef DEBUG
+# define MOZ_ASSERT_IF(cond, expr) \
+ do { \
+ if (cond) { \
+ MOZ_ASSERT(expr); \
+ } \
+ } while (false)
+#else
+# define MOZ_ASSERT_IF(cond, expr) \
+ do { \
+ } while (false)
+#endif
+
+/*
+ * MOZ_DIAGNOSTIC_ASSERT_IF is like MOZ_ASSERT_IF, but using
+ * MOZ_DIAGNOSTIC_ASSERT as the underlying assert.
+ *
+ * See the block comment for MOZ_DIAGNOSTIC_ASSERT above for more details on how
+ * diagnostic assertions work and how to use them.
+ */
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+# define MOZ_DIAGNOSTIC_ASSERT_IF(cond, expr) \
+ do { \
+ if (cond) { \
+ MOZ_DIAGNOSTIC_ASSERT(expr); \
+ } \
+ } while (false)
+#else
+# define MOZ_DIAGNOSTIC_ASSERT_IF(cond, expr) \
+ do { \
+ } while (false)
+#endif
+
+/*
+ * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE([reason]) tells the compiler that it
+ * can assume that the macro call cannot be reached during execution. This lets
+ * the compiler generate better-optimized code under some circumstances, at the
+ * expense of the program's behavior being undefined if control reaches the
+ * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE.
+ *
+ * In Gecko, you probably should not use this macro outside of performance- or
+ * size-critical code, because it's unsafe. If you don't care about code size
+ * or performance, you should probably use MOZ_ASSERT or MOZ_CRASH.
+ *
+ * SpiderMonkey is a different beast, and there it's acceptable to use
+ * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE more widely.
+ *
+ * Note that MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE is noreturn, so it's valid
+ * not to return a value following a MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE
+ * call.
+ *
+ * Example usage:
+ *
+ * enum ValueType {
+ * VALUE_STRING,
+ * VALUE_INT,
+ * VALUE_FLOAT
+ * };
+ *
+ * int ptrToInt(ValueType type, void* value) {
+ * // We know for sure that type is either INT or FLOAT, and we want this
+ * // code to run as quickly as possible.
+ * switch (type) {
+ * case VALUE_INT:
+ * return *(int*) value;
+ * case VALUE_FLOAT:
+ * return (int) *(float*) value;
+ * default:
+ * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected ValueType");
+ * }
+ * }
+ */
+
+/*
+ * An unconditional assert in debug builds, for (assumed) unreachable code
+ * paths that instead take a safe return path in release builds rather than
+ * crashing.
+ */
+#define MOZ_ASSERT_UNREACHABLE(reason) \
+ MOZ_ASSERT(false, "MOZ_ASSERT_UNREACHABLE: " reason)
+
+#define MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(reason) \
+ do { \
+ MOZ_ASSERT_UNREACHABLE(reason); \
+ MOZ_ASSUME_UNREACHABLE_MARKER(); \
+ } while (false)
+
+/**
+ * MOZ_FALLTHROUGH_ASSERT is an annotation to suppress compiler warnings about
+ * switch cases that MOZ_ASSERT(false) (or its alias MOZ_ASSERT_UNREACHABLE) in
+ * debug builds, but intentionally fall through in release builds to handle
+ * unexpected values.
+ *
+ * Why do we need MOZ_FALLTHROUGH_ASSERT in addition to [[fallthrough]]? In
+ * release builds, the MOZ_ASSERT(false) will expand to `do { } while (false)`,
+ * requiring a [[fallthrough]] annotation to suppress a -Wimplicit-fallthrough
+ * warning. In debug builds, the MOZ_ASSERT(false) will expand to something like
+ * `if (true) { MOZ_CRASH(); }` and the [[fallthrough]] annotation will cause
+ * a -Wunreachable-code warning. The MOZ_FALLTHROUGH_ASSERT macro breaks this
+ * warning stalemate.
+ *
+ * // Example before MOZ_FALLTHROUGH_ASSERT:
+ * switch (foo) {
+ * default:
+ * // This case wants to assert in debug builds, fall through in release.
+ * MOZ_ASSERT(false); // -Wimplicit-fallthrough warning in release builds!
+ * [[fallthrough]]; // but -Wunreachable-code warning in debug builds!
+ * case 5:
+ * return 5;
+ * }
+ *
+ * // Example with MOZ_FALLTHROUGH_ASSERT:
+ * switch (foo) {
+ * default:
+ * // This case asserts in debug builds, falls through in release.
+ * MOZ_FALLTHROUGH_ASSERT("Unexpected foo value?!");
+ * case 5:
+ * return 5;
+ * }
+ */
+#ifdef DEBUG
+# define MOZ_FALLTHROUGH_ASSERT(...) \
+ MOZ_CRASH("MOZ_FALLTHROUGH_ASSERT: " __VA_ARGS__)
+#else
+# define MOZ_FALLTHROUGH_ASSERT(...) [[fallthrough]]
+#endif
+
+/*
+ * MOZ_ALWAYS_TRUE(expr) and friends always evaluate the provided expression,
+ * in debug and release builds alike. Then, in debug builds and in Nightly and
+ * early beta builds, the value of the expression is asserted (to be true,
+ * false, isOk(), or isErr(), depending on the variant) using
+ * MOZ_DIAGNOSTIC_ASSERT.
+ */
+#define MOZ_ALWAYS_TRUE(expr) \
+ do { \
+ if (MOZ_LIKELY(expr)) { \
+ /* Silence [[nodiscard]]. */ \
+ } else { \
+ MOZ_DIAGNOSTIC_ASSERT(false, #expr); \
+ } \
+ } while (false)
+
+#define MOZ_ALWAYS_FALSE(expr) MOZ_ALWAYS_TRUE(!(expr))
+#define MOZ_ALWAYS_OK(expr) MOZ_ALWAYS_TRUE((expr).isOk())
+#define MOZ_ALWAYS_ERR(expr) MOZ_ALWAYS_TRUE((expr).isErr())
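+
+/*
+ * A minimal usage sketch (the hashtable and its Put() method are
+ * hypothetical):
+ *
+ *   // Put() is [[nodiscard]]; we want it evaluated in release builds too,
+ *   // and its success asserted wherever diagnostic asserts are enabled.
+ *   MOZ_ALWAYS_TRUE(mTable.Put(aKey, aValue));
+ */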
+
+/*
+ * These are disabled when fuzzing
+ */
+#ifdef FUZZING
+# define MOZ_CRASH_UNLESS_FUZZING(...) \
+ do { \
+ } while (0)
+# define MOZ_ASSERT_UNLESS_FUZZING(...) \
+ do { \
+ } while (0)
+#else
+# define MOZ_CRASH_UNLESS_FUZZING(...) MOZ_CRASH(__VA_ARGS__)
+# define MOZ_ASSERT_UNLESS_FUZZING(...) MOZ_ASSERT(__VA_ARGS__)
+#endif
+
+#undef MOZ_BUFFER_STDERR
+#undef MOZ_CRASH_CRASHREPORT
+#undef MOZ_DUMP_ASSERTION_STACK
+
+/*
+ * This is only used by Array and nsTArray classes, therefore it is not
+ * required when included from C code.
+ */
+#ifdef __cplusplus
+namespace mozilla::detail {
+MFBT_API MOZ_NORETURN MOZ_COLD void InvalidArrayIndex_CRASH(size_t aIndex,
+ size_t aLength);
+} // namespace mozilla::detail
+#endif // __cplusplus
+
+/*
+ * Provide a fake default value to be used when a value is required but none
+ * can sensibly be provided without adding undefined behavior or security
+ * issues.
+ *
+ * This function asserts and aborts if it is ever executed.
+ *
+ * Example usage:
+ *
+ * class Trooper {
+ * const Droid& lookFor;
+ *   Trooper()
+ *       : lookFor(MakeCompilerAssumeUnreachableFakeValue<const Droid&>()) {
+ *     // The class might be instantiated due to an existing caller,
+ *     // but this never happens in practice.
+ * }
+ * };
+ *
+ */
+#ifdef __cplusplus
+namespace mozilla {
+template <typename T>
+static inline T MakeCompilerAssumeUnreachableFakeValue() {
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE();
+}
+} // namespace mozilla
+#endif // __cplusplus
+
+#endif /* mozilla_Assertions_h */
diff --git a/mfbt/AtomicBitfields.h b/mfbt/AtomicBitfields.h
new file mode 100644
index 0000000000..c61dc4df46
--- /dev/null
+++ b/mfbt/AtomicBitfields.h
@@ -0,0 +1,468 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AtomicBitfields_h
+#define mozilla_AtomicBitfields_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MacroArgs.h"
+#include "mozilla/MacroForEach.h"
+
+#include <limits>
+#include <stdint.h>
+#include <type_traits>
+
+#ifdef __wasi__
+# include "mozilla/WasiAtomic.h"
+#else
+# include <atomic>
+#endif // __wasi__
+
+namespace mozilla {
+
+// Creates a series of atomic bitfields.
+//
+// |aBitfields| is the name of the underlying storage for the bitfields.
+// |aBitFieldsSize| is the size of the underlying storage (8, 16, 32, or 64).
+//
+// Bitfields are specified as a triplet of (type, name, size), which mirrors
+// the way you declare native C++ bitfields (bool mMyField1: 1). Trailing
+// commas are not supported in the list of bitfields.
+//
+// Signed integer types are not supported by this macro, to avoid dealing with
+// packing/unpacking the sign bit and C++'s general messiness around signed
+// integer representations not being fully defined.
+//
+// You cannot request a single field that's the size of the entire bitfield
+// storage. Just use a normal atomic integer!
+//
+//
+// ========================== SEMANTICS AND SAFETY ============================
+//
+// All fields are default-initialized to 0.
+//
+// In debug builds, storing a value that is too large for its field's bits to
+// hold will trigger an assertion. In release builds, the value will just be
+// masked off.
+//
+// If you request anything unsupported by this macro it should result in
+// a compile-time error (either a static assert or just weird macro errors).
+// For instance, this macro will statically prevent using more bits than
+// |aBitFieldsSize|, so specifying the size is just to prevent accidentally
+// making the storage bigger.
+//
+// Each field will get a Load$Name and Store$Name method which will atomically
+// load and store the requested value with a Sequentially Consistent memory
+// order (to be on the safe side). Storing a field requires a compare-exchange,
+// so a thread may get stalled if there's a lot of contention on the bitfields.
+//
+//
+// ============================== MOTIVATION ==================================
+//
+// You might be wondering: why would I need atomic bitfields? Well as it turns
+// out, bitfields and concurrency mess a lot of people up!
+//
+// CPUs don't have operations to write to a handful of bits -- they generally
+// only have the precision of a byte. So when you use C++'s native bitfields,
+// the compiler generates code to mask and shift the values in for you. This
+// means writing to a single field will actually overwrite all the other
+// bitfields that are packed in with it!
+//
+// In single-threaded code this is fine; the old values are loaded and written
+// back by the compiler's generated code. But in concurrent code, it means
+// that accessing two different fields can be an unexpected Data Race (which is
+// Undefined Behavior!).
+//
+// By using MOZ_ATOMIC_BITFIELDS, you protect yourself from these Data Races,
+// and don't have to worry about writes getting lost.
+//
+//
+// ================================ EXAMPLE ===================================
+//
+// #include "mozilla/AtomicBitfields.h"
+// #include <stdint.h>
+//
+//
+// struct MyType {
+// MOZ_ATOMIC_BITFIELDS(mAtomicFields, 8, (
+// (bool, IsDownloaded, 1),
+// (uint32_t, SomeData, 2),
+// (uint8_t, OtherData, 5)
+// ))
+//
+// int32_t aNormalInteger;
+//
+// explicit MyType(uint32_t aSomeData): aNormalInteger(7) {
+// StoreSomeData(aSomeData);
+// // Other bitfields were already default initialized to 0/false
+// }
+// };
+//
+//
+// int main() {
+// MyType val(3);
+//
+// if (!val.LoadIsDownloaded()) {
+// val.StoreOtherData(2);
+// val.StoreIsDownloaded(true);
+// }
+// }
+//
+//
+// ============================== GENERATED ===================================
+//
+// This macro is a real mess to read because, well, it's a macro. So for the
+// sake of anyone who has to review or modify its internals, here's a rough
+// sketch of what the above example would expand to:
+//
+// struct MyType {
+// // The actual storage of the bitfields, initialized to 0.
+// std::atomic_uint8_t mAtomicFields{0};
+//
+// // How many bits were actually used (in this case, all of them).
+// static const size_t mAtomicFields_USED_BITS = 8;
+//
+// // The offset values for each field.
+// static const size_t mAtomicFieldsIsDownloaded = 0;
+// static const size_t mAtomicFieldsSomeData = 1;
+// static const size_t mAtomicFieldsOtherData = 3;
+//
+// // Quick safety guard to prevent capacity overflow.
+// static_assert(mAtomicFields_USED_BITS <= 8);
+//
+// // Asserts that fields are reasonable.
+// static_assert(8>1, "mAtomicFields: MOZ_ATOMIC_BITFIELDS field too big");
+// static_assert(std::is_unsigned<bool>(), "mAtomicFields:
+// MOZ_ATOMIC_BITFIELDS doesn't support signed payloads");
+// // ...and so on
+//
+// // Load/Store methods for all the fields.
+//
+// bool LoadIsDownloaded() { ... }
+// void StoreIsDownloaded(bool aValue) { ... }
+//
+// uint32_t LoadSomeData() { ... }
+// void StoreSomeData(uint32_t aValue) { ... }
+//
+// uint8_t LoadOtherData() { ... }
+// void StoreOtherData(uint8_t aValue) { ... }
+//
+//
+// // Remainder of the struct body continues normally.
+// int32_t aNormalInteger;
+// explicit MyType(uint32_t aSomeData): aNormalInteger(7) {
+// StoreSomeData(aSomeData);
+// // Other bitfields were already default initialized to 0/false.
+// }
+// }
+//
+// Also, if you're wondering why there are so many MOZ_CONCATs: it's because
+// the preprocessor sometimes gets confused if we use ## on certain arguments.
+// MOZ_CONCAT reliably kept the preprocessor happy; sorry it's so ugly!
+//
+//
+// ==================== FIXMES / FUTURE WORK ==================================
+//
+// * It would be nice if LoadField could be IsField for booleans.
+//
+// * For the case of setting something to all 1's or 0's, we can use
+// |fetch_or| or |fetch_and| instead of |compare_exchange_weak|. Is this
+// worth providing? (Possibly for 1-bit boolean fields?)
+//
+// * Try harder to hide the atomic/enum/array internals from
+// the outer struct?
+//
+#define MOZ_ATOMIC_BITFIELDS(aBitfields, aBitfieldsSize, aFields) \
+ std::atomic_uint##aBitfieldsSize##_t aBitfields{0}; \
+ \
+ static const size_t MOZ_CONCAT(aBitfields, _USED_BITS) = \
+ MOZ_FOR_EACH_SEPARATED(MOZ_ATOMIC_BITFIELDS_FIELD_SIZE, (+), (), \
+ aFields); \
+ \
+ MOZ_ROLL_EACH(MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER1, (aBitfields, ), aFields) \
+ \
+ static_assert(MOZ_CONCAT(aBitfields, _USED_BITS) <= aBitfieldsSize, \
+ #aBitfields ": Maximum bits (" #aBitfieldsSize \
+ ") exceeded for MOZ_ATOMIC_BITFIELDS instance"); \
+ \
+ MOZ_FOR_EACH(MOZ_ATOMIC_BITFIELDS_FIELD_HELPER, \
+ (aBitfields, aBitfieldsSize, ), aFields)
+
+// Just a helper to unpack the head of the list.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER1(aBitfields, aFields) \
+ MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER2(aBitfields, MOZ_ARG_1 aFields, aFields);
+
+// Just a helper to unpack the name and call the real function.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER2(aBitfields, aField, aFields) \
+ MOZ_ATOMIC_BITFIELDS_OFFSET(aBitfields, MOZ_ARG_2 aField, aFields)
+
+// To compute the offset of a field, we sum up the sizes of that field and all
+// the fields after it, and subtract that from the total sum. We do this to
+// swap the rolling sum that |MOZ_ROLL_EACH| gets us from descending to
+// ascending.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET(aBitfields, aFieldName, aFields) \
+ static const size_t MOZ_CONCAT(aBitfields, aFieldName) = \
+ MOZ_CONCAT(aBitfields, _USED_BITS) - \
+ (MOZ_FOR_EACH_SEPARATED(MOZ_ATOMIC_BITFIELDS_FIELD_SIZE, (+), (), \
+ aFields));
+
+// Just a more clearly named way of unpacking the size.
+#define MOZ_ATOMIC_BITFIELDS_FIELD_SIZE(aArgs) MOZ_ARG_3 aArgs
+
+// Just a helper to unpack the tuple and call the real function.
+#define MOZ_ATOMIC_BITFIELDS_FIELD_HELPER(aBitfields, aBitfieldsSize, aArgs) \
+ MOZ_ATOMIC_BITFIELDS_FIELD(aBitfields, aBitfieldsSize, MOZ_ARG_1 aArgs, \
+ MOZ_ARG_2 aArgs, MOZ_ARG_3 aArgs)
+
+// We need to disable this with coverity because it doesn't like checking that
+// booleans are < 2 (because they always are).
+#ifdef __COVERITY__
+# define MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize)
+#else
+# define MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize) \
+ MOZ_ASSERT(((uint64_t)aValue) < (1ull << aFieldSize), \
+ "Stored value exceeded capacity of bitfield!")
+#endif
+
+// Generates the Load and Store methods for each field.
+//
+// Some comments here because inline macro comments are a pain in the neck:
+//
+// Most of the locals are forward declared to minimize messy macroified
+// type declaration. Also a lot of locals are used to try to make things
+// a little more clear, while also avoiding integer promotion issues.
+// This is why some locals are literally just copying a value we already have:
+// to force it to the right size.
+//
+// There's an annoying overflow case where a bitfields instance has a field
+// that is the same size as the bitfields. Rather than trying to handle that,
+// we just static_assert against it.
+//
+//
+// BITMATH EXPLAINED:
+//
+// For |Load$Name|:
+//
+// mask = ((1 << fieldSize) - 1) << offset
+//
+// If you subtract 1 from a value with 1 bit set you get all 1's below that bit.
+// This is perfect for ANDing out |fieldSize| bits. We shift by |offset| to get
+// it in the right place.
+//
+// value = (aBitfields.load() & mask) >> offset
+//
+// This sets every bit we're not interested in to 0. Shifting the result by
+// |offset| converts the value back to its native format, ready to be cast
+// up to an integer type.
+//
+//
+// For |Store$Name|:
+//
+// packedValue = (resizedValue << offset) & mask
+//
+// This converts a native value to the packed format. If the value is in bounds,
+// the AND will do nothing. If it's out of bounds (not checked in release),
+// then it will cause the value to wrap around by modulo 2^aFieldSize, just like
+// a normal uint.
+//
+// clearedValue = oldValue & ~mask;
+//
+// This clears the bits where our field is stored on our bitfield storage by
+// ANDing it with an inverted (NOTed) mask.
+//
+// newValue = clearedValue | packedValue;
+//
+// Once we have |packedValue| and |clearedValue| they just need to be ORed
+// together to merge the new field value with the old values of all the other
+// fields.
+//
+// This last step is done in a while loop because someone else can modify
+// the bits before we have a chance to. If we didn't guard against this,
+// our write would undo the write the other thread did. |compare_exchange_weak|
+// is specifically designed to handle this. We give it what we expect the
+// current value to be, and what we want it to be. If someone else modifies
+// the bitfields before us, then we will reload the value and try again.
+//
+// Note that |compare_exchange_weak| writes back the actual value to the
+// "expected" argument (it's passed by-reference), so we don't need to do
+// another load in the body of the loop when we fail to write our result.
+#define MOZ_ATOMIC_BITFIELDS_FIELD(aBitfields, aBitfieldsSize, aFieldType, \
+ aFieldName, aFieldSize) \
+ static_assert(aBitfieldsSize > aFieldSize, \
+ #aBitfields ": MOZ_ATOMIC_BITFIELDS field too big"); \
+ static_assert(std::is_unsigned<aFieldType>(), #aBitfields \
+ ": MOZ_ATOMIC_BITFIELDS doesn't support signed payloads"); \
+ \
+ aFieldType MOZ_CONCAT(Load, aFieldName)() const { \
+ uint##aBitfieldsSize##_t fieldSize, mask, masked, value; \
+ size_t offset = MOZ_CONCAT(aBitfields, aFieldName); \
+ fieldSize = aFieldSize; \
+ mask = ((1ull << fieldSize) - 1ull) << offset; \
+ masked = aBitfields.load() & mask; \
+ value = (masked >> offset); \
+ return value; \
+ } \
+ \
+ void MOZ_CONCAT(Store, aFieldName)(aFieldType aValue) { \
+ MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize); \
+ uint##aBitfieldsSize##_t fieldSize, mask, resizedValue, packedValue, \
+ oldValue, clearedValue, newValue; \
+ size_t offset = MOZ_CONCAT(aBitfields, aFieldName); \
+ fieldSize = aFieldSize; \
+ mask = ((1ull << fieldSize) - 1ull) << offset; \
+ resizedValue = aValue; \
+ packedValue = (resizedValue << offset) & mask; \
+ oldValue = aBitfields.load(); \
+ do { \
+ clearedValue = oldValue & ~mask; \
+ newValue = clearedValue | packedValue; \
+ } while (!aBitfields.compare_exchange_weak(oldValue, newValue)); \
+ }
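+
+// A worked instance of the bitmath above (values are illustrative): for a
+// field with aFieldSize = 2 at offset = 1 in an 8-bit bitfields instance,
+//
+//   mask = ((1 << 2) - 1) << 1 = 0b00000110
+//
+// Loading from storage 0b01010110 yields (0b01010110 & mask) >> 1 = 0b11 = 3.
+// Storing the value 2 packs it as (2 << 1) & mask = 0b100, clears the old
+// bits with (oldValue & ~mask), and ORs the packed bits back in via the
+// compare-exchange loop.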
+
+// OK SO THIS IS A GROSS HACK. GCC 10.2 (and below) has a bug[1] where it
+// doesn't allow a static array to reference itself in its initializer, so we
+// need to create a hacky way to produce a rolling sum of all the offsets.
+//
+// To do this, we make a tweaked version of |MOZ_FOR_EACH| which instead of
+// passing just one argument to |aMacro| it passes the remaining values of
+// |aArgs|.
+//
+// This allows us to expand an input (a, b, c, d) quadratically to:
+//
+// int sum1 = a + b + c + d;
+// int sum2 = b + c + d;
+// int sum3 = c + d;
+// int sum4 = d;
+//
+// So all of this is a copy-paste of |MOZ_FOR_EACH| except the definition
+// of |MOZ_FOR_EACH_HELPER| no longer extracts an argument with |MOZ_ARG_1|.
+// Also this is restricted to 32 arguments just to reduce footprint a little.
+//
+// If the GCC bug is ever fixed, then this hack can be removed, and we can
+// use the non-quadratic version that was originally written[2]. In case
+// that link dies, a brief summary of that implementation:
+//
+// * Associate each field with an index by creating an `enum class` with
+//   entries for each field (an existing Gecko pattern).
+//
+// * Calculate offsets with a constexpr static array whose initializer
+//   self-referentially adds the contents of the previous index to compute
+//   the current one.
+//
+// * Index into this array with the enum.
+//
+// [1]: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97234
+// [2]: https://phabricator.services.mozilla.com/D91622?id=346499
+#define MOZ_ROLL_EACH_EXPAND_HELPER(...) __VA_ARGS__
+#define MOZ_ROLL_EACH_GLUE(a, b) a b
+#define MOZ_ROLL_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) \
+ MOZ_ROLL_EACH_GLUE(MOZ_PASTE_PREFIX_AND_ARG_COUNT( \
+ MOZ_ROLL_EACH_, MOZ_ROLL_EACH_EXPAND_HELPER aArgs), \
+ (aMacro, aSeparator, aFixedArgs, aArgs))
+#define MOZ_ROLL_EACH(aMacro, aFixedArgs, aArgs) \
+ MOZ_ROLL_EACH_SEPARATED(aMacro, (), aFixedArgs, aArgs)
+
+#define MOZ_ROLL_EACH_HELPER_GLUE(a, b) a b
+#define MOZ_ROLL_EACH_HELPER(aMacro, aFixedArgs, aArgs) \
+ MOZ_ROLL_EACH_HELPER_GLUE(aMacro, \
+ (MOZ_ROLL_EACH_EXPAND_HELPER aFixedArgs aArgs))
+
+#define MOZ_ROLL_EACH_0(m, s, fa, a)
+#define MOZ_ROLL_EACH_1(m, s, fa, a) MOZ_ROLL_EACH_HELPER(m, fa, a)
+#define MOZ_ROLL_EACH_2(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_1(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_3(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_2(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_4(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_3(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_5(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_4(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_6(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_5(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_7(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_6(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_8(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_7(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_9(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_8(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_10(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_9(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_11(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_10(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_12(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_11(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_13(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_12(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_14(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_13(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_15(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_14(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_16(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_15(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_17(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_16(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_18(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_17(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_19(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_18(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_20(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_19(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_21(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_20(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_22(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_21(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_23(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_22(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_24(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_23(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_25(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_24(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_26(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_25(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_27(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_26(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_28(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_27(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_29(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_28(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_30(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_29(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_31(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_30(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_32(m, s, fa, a) \
+ MOZ_ROLL_EACH_HELPER(m, fa, a) \
+ MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_31(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+} // namespace mozilla
+#endif /* mozilla_AtomicBitfields_h */
diff --git a/mfbt/Atomics.h b/mfbt/Atomics.h
new file mode 100644
index 0000000000..e5da5c07a9
--- /dev/null
+++ b/mfbt/Atomics.h
@@ -0,0 +1,520 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements (almost always) lock-free atomic operations. The operations here
+ * are a subset of that which can be found in C++11's <atomic> header, with a
+ * different API to enforce consistent memory ordering constraints.
+ *
+ * Anyone caught using |volatile| for inter-thread memory safety needs to be
+ * sent a copy of this header and the C++11 standard.
+ */
+
+#ifndef mozilla_Atomics_h
+#define mozilla_Atomics_h
+
+#include "mozilla/Attributes.h"
+
+#ifdef __wasi__
+# include "mozilla/WasiAtomic.h"
+#else
+# include <atomic>
+#endif // __wasi__
+
+#include <stddef.h> // For ptrdiff_t
+#include <stdint.h>
+#include <type_traits>
+
+namespace mozilla {
+
+/**
+ * An enum of memory ordering possibilities for atomics.
+ *
+ * Memory ordering is the observable state of distinct values in memory.
+ * (It's a separate concept from atomicity, which concerns whether an
+ * operation can ever be observed in an intermediate state. Don't
+ * conflate the two!) Given a sequence of operations in source code on
+ * memory, it is *not* always the case that, at all times and on all
+ * cores, those operations will appear to have occurred in that exact
+ * sequence. First, the compiler might reorder that sequence, if it
+ * thinks another ordering will be more efficient. Second, the CPU may
+ * not expose so consistent a view of memory. CPUs will often perform
+ * their own instruction reordering, above and beyond that performed by
+ * the compiler. And each core has its own memory caches, and accesses
+ * (reads and writes both) to "memory" may only resolve to out-of-date
+ * cache entries -- not to the "most recently" performed operation in
+ * some global sense. Any access to a value that may be used by
+ * multiple threads, potentially across multiple cores, must therefore
+ * have a memory ordering imposed on it, for all code on all
+ * threads/cores to have a sufficiently coherent worldview.
+ *
+ * http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync and
+ * http://en.cppreference.com/w/cpp/atomic/memory_order go into more
+ * detail on all this, including examples of how each mode works.
+ *
+ * Note that for simplicity and practicality, not all of the modes in
+ * C++11 are supported. The missing C++11 modes are either subsumed by
+ * the modes we provide below, or not relevant for the CPUs we support
+ * in Gecko. These three modes are confusing enough as it is!
+ */
+enum MemoryOrdering {
+ /*
+ * Relaxed ordering is the simplest memory ordering: none at all.
+ * When the result of a write is observed, nothing may be inferred
+ * about other memory. Writes ostensibly performed "before" on the
+ * writing thread may not yet be visible. Writes performed "after" on
+ * the writing thread may already be visible, if the compiler or CPU
+ * reordered them. (The latter can happen if reads and/or writes get
+ * held up in per-processor caches.) Relaxed ordering means
+ * operations can always use cached values (as long as the actual
+ * updates to atomic values actually occur, correctly, eventually), so
+ * it's usually the fastest sort of atomic access. For this reason,
+ * *it's also the most dangerous kind of access*.
+ *
+ * Relaxed ordering is good for things like process-wide statistics
+ * counters that don't need to be consistent with anything else, so
+ * long as updates themselves are atomic. (And so long as any
+ * observations of that value can tolerate being out-of-date -- if you
+ * need some sort of up-to-date value, you need some sort of other
+ * synchronizing operation.) It's *not* good for locks, mutexes,
+ * reference counts, etc. that mediate access to other memory, or must
+ * be observably consistent with other memory.
+ *
+ * x86 architectures don't take advantage of the optimization
+ * opportunities that relaxed ordering permits. Thus it's possible
+ * that using relaxed ordering will "work" on x86 but fail elsewhere
+ * (ARM, say, which *does* implement non-sequentially-consistent
+ * relaxed ordering semantics). Be extra-careful using relaxed
+ * ordering if you can't easily test non-x86 architectures!
+ */
+ Relaxed,
+
+ /*
+ * When an atomic value is updated with ReleaseAcquire ordering, and
+ * that new value is observed with ReleaseAcquire ordering, prior
+ * writes (atomic or not) are also observable. What ReleaseAcquire
+ * *doesn't* give you is any observable ordering guarantees for
+ * ReleaseAcquire-ordered operations on different objects. For
+ * example, if there are two cores that each perform ReleaseAcquire
+ * operations on separate objects, each core may or may not observe
+ * the operations made by the other core. The only way the cores can
+ * be synchronized with ReleaseAcquire is if they both
+ * ReleaseAcquire-access the same object. This implies that you can't
+ * necessarily describe some global total ordering of ReleaseAcquire
+ * operations.
+ *
+ * ReleaseAcquire ordering is good for (as the name implies) atomic
+ * operations on values controlling ownership of things: reference
+ * counts, mutexes, and the like. However, if you are thinking about
+ * using these to implement your own locks or mutexes, you should take
+ * a good, hard look at actual lock or mutex primitives first.
+ */
+ ReleaseAcquire,
+
+ /*
+ * When an atomic value is updated with SequentiallyConsistent
+ * ordering, all writes are observable when the update is observed, just
+ * as with ReleaseAcquire ordering. But, furthermore, a global total
+ * ordering of SequentiallyConsistent operations *can* be described.
+ * For example, if two cores perform SequentiallyConsistent operations
+ * on separate objects, one core will observably perform its update
+ * (and all previous operations will have completed), then the other
+ * core will observably perform its update (and all previous
+ * operations will have completed). (Although those previous
+ * operations aren't themselves ordered -- they could be intermixed,
+ * or ordered if they occur on atomic values with ordering
+ * requirements.) SequentiallyConsistent is the *simplest and safest*
+ * ordering of atomic operations -- it's always as if one operation
+ * happens, then another, then another, in some order -- and every
+ * core observes updates to happen in that single order. Because it
+ * has the most synchronization requirements, operations ordered this
+ * way also tend to be slowest.
+ *
+ * SequentiallyConsistent ordering can be desirable when multiple
+ * threads observe objects, and they all have to agree on the
+ * observable order of changes to them. People expect
+ * SequentiallyConsistent ordering, even if they shouldn't, when
+ * writing code, atomic or otherwise. SequentiallyConsistent is also
+ * the ordering of choice when designing lockless data structures. If
+ * you don't know what order to use, use this one.
+ */
+ SequentiallyConsistent,
+};
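+
+/*
+ * A brief sketch of the tradeoffs described above (the variables are
+ * illustrative):
+ *
+ *   // A statistics counter nothing else synchronizes against can be Relaxed.
+ *   mozilla::Atomic<uint64_t, mozilla::Relaxed> sEventCount;
+ *
+ *   // A flag that publishes other writes needs at least ReleaseAcquire.
+ *   mozilla::Atomic<bool, mozilla::ReleaseAcquire> sDataReady;
+ *
+ *   // When in doubt, take the default (SequentiallyConsistent).
+ *   mozilla::Atomic<uint32_t> sState;
+ */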
+
+namespace detail {
+
+/*
+ * We provide CompareExchangeFailureOrder to work around a bug in some
+ * versions of GCC's <atomic> header. See bug 898491.
+ */
+template <MemoryOrdering Order>
+struct AtomicOrderConstraints;
+
+template <>
+struct AtomicOrderConstraints<Relaxed> {
+ static const std::memory_order AtomicRMWOrder = std::memory_order_relaxed;
+ static const std::memory_order LoadOrder = std::memory_order_relaxed;
+ static const std::memory_order StoreOrder = std::memory_order_relaxed;
+ static const std::memory_order CompareExchangeFailureOrder =
+ std::memory_order_relaxed;
+};
+
+template <>
+struct AtomicOrderConstraints<ReleaseAcquire> {
+ static const std::memory_order AtomicRMWOrder = std::memory_order_acq_rel;
+ static const std::memory_order LoadOrder = std::memory_order_acquire;
+ static const std::memory_order StoreOrder = std::memory_order_release;
+ static const std::memory_order CompareExchangeFailureOrder =
+ std::memory_order_acquire;
+};
+
+template <>
+struct AtomicOrderConstraints<SequentiallyConsistent> {
+ static const std::memory_order AtomicRMWOrder = std::memory_order_seq_cst;
+ static const std::memory_order LoadOrder = std::memory_order_seq_cst;
+ static const std::memory_order StoreOrder = std::memory_order_seq_cst;
+ static const std::memory_order CompareExchangeFailureOrder =
+ std::memory_order_seq_cst;
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicBase {
+ typedef std::atomic<T> ValueType;
+ typedef AtomicOrderConstraints<Order> OrderedOp;
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicMemoryOps : public IntrinsicBase<T, Order> {
+ typedef IntrinsicBase<T, Order> Base;
+
+ static T load(const typename Base::ValueType& aPtr) {
+ return aPtr.load(Base::OrderedOp::LoadOrder);
+ }
+
+ static void store(typename Base::ValueType& aPtr, T aVal) {
+ aPtr.store(aVal, Base::OrderedOp::StoreOrder);
+ }
+
+ static T exchange(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.exchange(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+
+ static bool compareExchange(typename Base::ValueType& aPtr, T aOldVal,
+ T aNewVal) {
+ return aPtr.compare_exchange_strong(
+ aOldVal, aNewVal, Base::OrderedOp::AtomicRMWOrder,
+ Base::OrderedOp::CompareExchangeFailureOrder);
+ }
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicAddSub : public IntrinsicBase<T, Order> {
+ typedef IntrinsicBase<T, Order> Base;
+
+ static T add(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+
+ static T sub(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicAddSub<T*, Order> : public IntrinsicBase<T*, Order> {
+ typedef IntrinsicBase<T*, Order> Base;
+
+ static T* add(typename Base::ValueType& aPtr, ptrdiff_t aVal) {
+ return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+
+ static T* sub(typename Base::ValueType& aPtr, ptrdiff_t aVal) {
+ return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+};
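+
+// Note (for illustration): as with ordinary pointer arithmetic, fetch_add on
+// a std::atomic<T*> advances the stored pointer by aVal * sizeof(T) bytes,
+// not by aVal bytes.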
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicIncDec : public IntrinsicAddSub<T, Order> {
+ typedef IntrinsicBase<T, Order> Base;
+
+ static T inc(typename Base::ValueType& aPtr) {
+ return IntrinsicAddSub<T, Order>::add(aPtr, 1);
+ }
+
+ static T dec(typename Base::ValueType& aPtr) {
+ return IntrinsicAddSub<T, Order>::sub(aPtr, 1);
+ }
+};
+
+template <typename T, MemoryOrdering Order>
+struct AtomicIntrinsics : public IntrinsicMemoryOps<T, Order>,
+ public IntrinsicIncDec<T, Order> {
+ typedef IntrinsicBase<T, Order> Base;
+
+ static T or_(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.fetch_or(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+
+ static T xor_(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.fetch_xor(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+
+ static T and_(typename Base::ValueType& aPtr, T aVal) {
+ return aPtr.fetch_and(aVal, Base::OrderedOp::AtomicRMWOrder);
+ }
+};
+
+template <typename T, MemoryOrdering Order>
+struct AtomicIntrinsics<T*, Order> : public IntrinsicMemoryOps<T*, Order>,
+ public IntrinsicIncDec<T*, Order> {};
+
+template <typename T>
+struct ToStorageTypeArgument {
+ static constexpr T convert(T aT) { return aT; }
+};
+
+template <typename T, MemoryOrdering Order>
+class AtomicBase {
+ static_assert(sizeof(T) == 4 || sizeof(T) == 8,
+ "mozilla/Atomics.h only supports 32-bit and 64-bit types");
+
+ protected:
+ typedef typename detail::AtomicIntrinsics<T, Order> Intrinsics;
+ typedef typename Intrinsics::ValueType ValueType;
+ ValueType mValue;
+
+ public:
+ constexpr AtomicBase() : mValue() {}
+ explicit constexpr AtomicBase(T aInit)
+ : mValue(ToStorageTypeArgument<T>::convert(aInit)) {}
+
+ // Note: we can't provide operator T() here because Atomic<bool> inherits
+  // from AtomicBase with T=uint32_t and not T=bool. If we implemented
+ // operator T() here, it would cause errors when comparing Atomic<bool> with
+ // a regular bool.
+
+ T operator=(T aVal) {
+ Intrinsics::store(mValue, aVal);
+ return aVal;
+ }
+
+ /**
+ * Performs an atomic swap operation. aVal is stored and the previous
+ * value of this variable is returned.
+ */
+ T exchange(T aVal) { return Intrinsics::exchange(mValue, aVal); }
+
+ /**
+ * Performs an atomic compare-and-swap operation and returns true if it
+ * succeeded. This is equivalent to atomically doing
+ *
+ * if (mValue == aOldValue) {
+ * mValue = aNewValue;
+ * return true;
+ * } else {
+ * return false;
+ * }
+ */
+ bool compareExchange(T aOldValue, T aNewValue) {
+ return Intrinsics::compareExchange(mValue, aOldValue, aNewValue);
+ }
+
+ private:
+ AtomicBase(const AtomicBase& aCopy) = delete;
+};
+
+template <typename T, MemoryOrdering Order>
+class AtomicBaseIncDec : public AtomicBase<T, Order> {
+ typedef typename detail::AtomicBase<T, Order> Base;
+
+ public:
+ constexpr AtomicBaseIncDec() : Base() {}
+ explicit constexpr AtomicBaseIncDec(T aInit) : Base(aInit) {}
+
+ using Base::operator=;
+
+ operator T() const { return Base::Intrinsics::load(Base::mValue); }
+ T operator++(int) { return Base::Intrinsics::inc(Base::mValue); }
+ T operator--(int) { return Base::Intrinsics::dec(Base::mValue); }
+ T operator++() { return Base::Intrinsics::inc(Base::mValue) + 1; }
+ T operator--() { return Base::Intrinsics::dec(Base::mValue) - 1; }
+
+ private:
+ AtomicBaseIncDec(const AtomicBaseIncDec& aCopy) = delete;
+};
+
+} // namespace detail
+
+/**
+ * A wrapper for a type that enforces that all memory accesses are atomic.
+ *
+ * In general, where a variable |T foo| exists, |Atomic<T> foo| can be used in
+ * its place. Implementations for integral and pointer types are provided
+ * below.
+ *
+ * Atomic accesses are sequentially consistent by default. You should
+ * use the default unless you are tall enough to ride the
+ * memory-ordering roller coaster (if you're not sure, you aren't) and
+ * you have a compelling reason to do otherwise.
+ *
+ * There is one exception to the case of atomic memory accesses: providing an
+ * initial value of the atomic value is not guaranteed to be atomic. This is a
+ * deliberate design choice that enables static atomic variables to be declared
+ * without introducing extra static constructors.
+ */
+template <typename T, MemoryOrdering Order = SequentiallyConsistent,
+ typename Enable = void>
+class Atomic;
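+
+/*
+ * A minimal usage sketch (the counter and its callers are hypothetical):
+ *
+ *   static mozilla::Atomic<uint32_t> sLiveObjects(0);
+ *
+ *   void OnCreate() { sLiveObjects++; }
+ *   void OnDestroy() {
+ *     MOZ_ASSERT(sLiveObjects > 0);
+ *     sLiveObjects--;
+ *   }
+ */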
+
+/**
+ * Atomic<T> implementation for integral types.
+ *
+ * In addition to atomic store and load operations, compound assignment and
+ * increment/decrement operators are implemented which perform the
+ * corresponding read-modify-write operation atomically. Finally, an atomic
+ * swap method is provided.
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<
+ T, Order,
+ std::enable_if_t<std::is_integral_v<T> && !std::is_same_v<T, bool>>>
+ : public detail::AtomicBaseIncDec<T, Order> {
+ typedef typename detail::AtomicBaseIncDec<T, Order> Base;
+
+ public:
+ constexpr Atomic() : Base() {}
+ explicit constexpr Atomic(T aInit) : Base(aInit) {}
+
+ using Base::operator=;
+
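+  // The underlying fetch_* intrinsics return the *old* value, while these
+  // compound operators (like their builtin counterparts) must return the
+  // *new* value; hence the extra arithmetic after each intrinsic call.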
+ T operator+=(T aDelta) {
+ return Base::Intrinsics::add(Base::mValue, aDelta) + aDelta;
+ }
+
+ T operator-=(T aDelta) {
+ return Base::Intrinsics::sub(Base::mValue, aDelta) - aDelta;
+ }
+
+ T operator|=(T aVal) {
+ return Base::Intrinsics::or_(Base::mValue, aVal) | aVal;
+ }
+
+ T operator^=(T aVal) {
+ return Base::Intrinsics::xor_(Base::mValue, aVal) ^ aVal;
+ }
+
+ T operator&=(T aVal) {
+ return Base::Intrinsics::and_(Base::mValue, aVal) & aVal;
+ }
+
+ private:
+ Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic<T> implementation for pointer types.
+ *
+ * An atomic compare-and-swap primitive for pointer variables is provided, as
+ * are atomic increment and decrement operators. Also provided are the compound
+ * assignment operators for addition and subtraction. Atomic swap (via
+ * exchange()) is included as well.
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<T*, Order> : public detail::AtomicBaseIncDec<T*, Order> {
+ typedef typename detail::AtomicBaseIncDec<T*, Order> Base;
+
+ public:
+ constexpr Atomic() : Base() {}
+ explicit constexpr Atomic(T* aInit) : Base(aInit) {}
+
+ using Base::operator=;
+
+ T* operator+=(ptrdiff_t aDelta) {
+ return Base::Intrinsics::add(Base::mValue, aDelta) + aDelta;
+ }
+
+ T* operator-=(ptrdiff_t aDelta) {
+ return Base::Intrinsics::sub(Base::mValue, aDelta) - aDelta;
+ }
+
+ private:
+ Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic<T> implementation for enum types.
+ *
+ * Atomic store and load operations and an atomic swap method are provided.
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<T, Order, std::enable_if_t<std::is_enum_v<T>>>
+ : public detail::AtomicBase<T, Order> {
+ typedef typename detail::AtomicBase<T, Order> Base;
+
+ public:
+ constexpr Atomic() : Base() {}
+ explicit constexpr Atomic(T aInit) : Base(aInit) {}
+
+ operator T() const { return T(Base::Intrinsics::load(Base::mValue)); }
+
+ using Base::operator=;
+
+ private:
+ Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic<T> implementation for boolean types.
+ *
+ * Atomic store and load operations and an atomic swap method are provided.
+ *
+ * Note:
+ *
+ * - sizeof(Atomic<bool>) != sizeof(bool) for some implementations of
+ * bool and/or some implementations of std::atomic. This is allowed in
+ * [atomic.types.generic]p9.
+ *
+ * - It's not obvious whether the 8-bit atomic functions on Windows are always
+ * inlined or not. If they are not inlined, the corresponding functions in the
+ * runtime library are not available on Windows XP. This is why we implement
+ * Atomic<bool> with an underlying type of uint32_t.
+ */
+template <MemoryOrdering Order>
+class Atomic<bool, Order> : protected detail::AtomicBase<uint32_t, Order> {
+ typedef typename detail::AtomicBase<uint32_t, Order> Base;
+
+ public:
+ constexpr Atomic() : Base() {}
+ explicit constexpr Atomic(bool aInit) : Base(aInit) {}
+
+ // We provide boolean wrappers for the underlying AtomicBase methods.
+ MOZ_IMPLICIT operator bool() const {
+ return Base::Intrinsics::load(Base::mValue);
+ }
+
+ bool operator=(bool aVal) { return Base::operator=(aVal); }
+
+ bool exchange(bool aVal) { return Base::exchange(aVal); }
+
+ bool compareExchange(bool aOldValue, bool aNewValue) {
+ return Base::compareExchange(aOldValue, aNewValue);
+ }
+
+ private:
+ Atomic(Atomic& aOther) = delete;
+};
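+
+/*
+ * A minimal usage sketch (the flag and function are hypothetical):
+ *
+ *   static mozilla::Atomic<bool> sShutdownStarted(false);
+ *
+ *   void BeginShutdown() {
+ *     if (sShutdownStarted.compareExchange(false, true)) {
+ *       // Exactly one caller wins the race and runs shutdown once.
+ *     }
+ *   }
+ */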
+
+} // namespace mozilla
+
+namespace std {
+
+// If you want to atomically swap two atomic values, use exchange().
+template <typename T, mozilla::MemoryOrdering Order>
+void swap(mozilla::Atomic<T, Order>&, mozilla::Atomic<T, Order>&) = delete;
+
+} // namespace std
+
+#endif /* mozilla_Atomics_h */
diff --git a/mfbt/Attributes.h b/mfbt/Attributes.h
new file mode 100644
index 0000000000..b4b0316a3a
--- /dev/null
+++ b/mfbt/Attributes.h
@@ -0,0 +1,1034 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of various class and method modifier attributes. */
+
+#ifndef mozilla_Attributes_h
+#define mozilla_Attributes_h
+
+#include "mozilla/Compiler.h"
+
+/*
+ * MOZ_ALWAYS_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must be inlined, even if the compiler thinks
+ * otherwise. This is only a (much) stronger version of the inline hint:
+ * compilers are not guaranteed to respect it (although they're much more likely
+ * to do so).
+ *
+ * The MOZ_ALWAYS_INLINE_EVEN_DEBUG macro is yet stronger. It tells the
+ * compiler to inline even in DEBUG builds. It should be used very rarely.
+ */
+#if defined(_MSC_VER)
+# define MOZ_ALWAYS_INLINE_EVEN_DEBUG __forceinline
+#elif defined(__GNUC__)
+# define MOZ_ALWAYS_INLINE_EVEN_DEBUG __attribute__((always_inline)) inline
+#else
+# define MOZ_ALWAYS_INLINE_EVEN_DEBUG inline
+#endif
+
+#if !defined(DEBUG)
+# define MOZ_ALWAYS_INLINE MOZ_ALWAYS_INLINE_EVEN_DEBUG
+#elif defined(_MSC_VER) && !defined(__cplusplus)
+# define MOZ_ALWAYS_INLINE __inline
+#else
+# define MOZ_ALWAYS_INLINE inline
+#endif
+
+#if defined(_MSC_VER)
+/*
+ * g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
+ * without warnings (functionality used by the macros below). These modes are
+ * detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or, more
+ * standardly, by checking whether __cplusplus has a C++11 or greater value.
+ * Current versions of g++ do not correctly set __cplusplus, so we check both
+ * for forward compatibility.
+ */
+# define MOZ_HAVE_NEVER_INLINE __declspec(noinline)
+# define MOZ_HAVE_NORETURN __declspec(noreturn)
+#elif defined(__clang__)
+/*
+ * Per Clang documentation, "Note that marketing version numbers should not
+ * be used to check for language features, as different vendors use different
+ * numbering schemes. Instead, use the feature checking macros."
+ */
+# ifndef __has_extension
+# define __has_extension \
+ __has_feature /* compatibility, for older versions of clang */
+# endif
+# if __has_attribute(noinline)
+# define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+# endif
+# if __has_attribute(noreturn)
+# define MOZ_HAVE_NORETURN __attribute__((noreturn))
+# endif
+#elif defined(__GNUC__)
+# define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+# define MOZ_HAVE_NORETURN __attribute__((noreturn))
+# define MOZ_HAVE_NORETURN_PTR __attribute__((noreturn))
+#endif
+
+#if defined(__clang__)
+# if __has_attribute(no_stack_protector)
+# define MOZ_HAVE_NO_STACK_PROTECTOR __attribute__((no_stack_protector))
+# endif
+#elif defined(__GNUC__)
+# define MOZ_HAVE_NO_STACK_PROTECTOR __attribute__((no_stack_protector))
+#endif
+
+/*
+ * When built with the clang analyzer (a.k.a. scan-build), define
+ * MOZ_HAVE_ANALYZER_NORETURN, which is used to mark some false positives.
+ */
+#ifdef __clang_analyzer__
+# if __has_extension(attribute_analyzer_noreturn)
+# define MOZ_HAVE_ANALYZER_NORETURN __attribute__((analyzer_noreturn))
+# endif
+#endif
+
+#if defined(__GNUC__) || \
+ (defined(__clang__) && __has_attribute(no_profile_instrument_function))
+# define MOZ_NOPROFILE __attribute__((no_profile_instrument_function))
+#else
+# define MOZ_NOPROFILE
+#endif
+
+#if defined(__GNUC__) || \
+ (defined(__clang__) && __has_attribute(no_instrument_function))
+# define MOZ_NOINSTRUMENT __attribute__((no_instrument_function))
+#else
+# define MOZ_NOINSTRUMENT
+#endif
+
+/*
+ * MOZ_NAKED tells the compiler that the function only contains assembly and
+ * that it should not try to inject code that may mess with the assembly in it.
+ *
+ * See https://github.com/llvm/llvm-project/issues/74573 for the interaction
+ * between naked and no_profile_instrument_function.
+ */
+#define MOZ_NAKED __attribute__((naked)) MOZ_NOPROFILE MOZ_NOINSTRUMENT
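+
+/*
+ * A minimal sketch (the target symbol is hypothetical, and attribute support
+ * varies by compiler and architecture):
+ *
+ *   MOZ_NAKED void TrampolineToHandler() {
+ *     asm volatile("jmp RealHandler");  // the body is assembly only
+ *   }
+ */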
+
+/**
+ * Per clang's documentation:
+ *
+ * If a statement is marked nomerge and contains call expressions, those call
+ * expressions inside the statement will not be merged during optimization. This
+ * attribute can be used to prevent the optimizer from obscuring the source
+ * location of certain calls.
+ *
+ * This is useful to have clearer information on assertion failures.
+ */
+#if defined(__clang__) && __has_attribute(nomerge)
+# define MOZ_NOMERGE __attribute__((nomerge))
+#else
+# define MOZ_NOMERGE
+#endif
+
+/*
+ * MOZ_NEVER_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must never be inlined, even if the compiler would
+ * otherwise choose to inline the method. Compilers aren't absolutely
+ * guaranteed to support this, but most do.
+ */
+#if defined(MOZ_HAVE_NEVER_INLINE)
+# define MOZ_NEVER_INLINE MOZ_HAVE_NEVER_INLINE
+#else
+# define MOZ_NEVER_INLINE /* no support */
+#endif
+
+/*
+ * MOZ_NEVER_INLINE_DEBUG is a macro which expands to MOZ_NEVER_INLINE
+ * in debug builds, and nothing in opt builds.
+ */
+#if defined(DEBUG)
+# define MOZ_NEVER_INLINE_DEBUG MOZ_NEVER_INLINE
+#else
+# define MOZ_NEVER_INLINE_DEBUG /* don't inline in opt builds */
+#endif
+/*
+ * MOZ_NORETURN, specified at the start of a function declaration, indicates
+ * that the given function does not return. (The function definition does not
+ * need to be annotated.)
+ *
+ * MOZ_NORETURN void abort(const char* msg);
+ *
+ * This modifier permits the compiler to optimize code assuming a call to such a
+ * function will never return. It also enables the compiler to avoid spurious
+ * warnings about not initializing variables, or about any other seemingly-dodgy
+ * operations performed after the function returns.
+ *
+ * There are two variants. The GCC version of NORETURN may be applied to a
+ * function pointer, while for MSVC it may not.
+ *
+ * This modifier does not affect the corresponding function's linking behavior.
+ */
+#if defined(MOZ_HAVE_NORETURN)
+# define MOZ_NORETURN MOZ_HAVE_NORETURN
+#else
+# define MOZ_NORETURN /* no support */
+#endif
+#if defined(MOZ_HAVE_NORETURN_PTR)
+# define MOZ_NORETURN_PTR MOZ_HAVE_NORETURN_PTR
+#else
+# define MOZ_NORETURN_PTR /* no support */
+#endif
+
+/**
+ * MOZ_COLD tells the compiler that a function is "cold", meaning infrequently
+ * executed. This may lead it to optimize for size more aggressively than speed,
+ * or to allocate the body of the function in a distant part of the text segment
+ * to help keep it from taking up unnecessary icache when it isn't in use.
+ *
+ * Place this attribute at the very beginning of a function definition. For
+ * example, write
+ *
+ * MOZ_COLD int foo();
+ *
+ * or
+ *
+ * MOZ_COLD int foo() { return 42; }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_COLD __attribute__((cold))
+#else
+# define MOZ_COLD
+#endif
+
+/**
+ * MOZ_NONNULL tells the compiler that some of the arguments to a function are
+ * known to be non-null. The arguments are a list of 1-based argument indexes
+ * identifying arguments which are known to be non-null.
+ *
+ * Place this attribute at the very beginning of a function definition. For
+ * example, write
+ *
+ * MOZ_NONNULL(1, 2) int foo(char *p, char *q);
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_NONNULL(...) __attribute__((nonnull(__VA_ARGS__)))
+#else
+# define MOZ_NONNULL(...)
+#endif
+
+/**
+ * MOZ_NONNULL_RETURN tells the compiler that the function's return value is
+ * guaranteed to be a non-null pointer, which may enable the compiler to
+ * optimize better at call sites.
+ *
+ * Place this attribute at the end of a function declaration. For example,
+ *
+ * char* foo(char *p, char *q) MOZ_NONNULL_RETURN;
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_NONNULL_RETURN __attribute__((returns_nonnull))
+#else
+# define MOZ_NONNULL_RETURN
+#endif
+
+/*
+ * MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS, specified at the end of a function
+ * declaration, indicates that for the purposes of static analysis, this
+ * function does not return. (The function definition does not need to be
+ * annotated.)
+ *
+ *   void MOZ_ReportCrash(const char* s, const char* file, int ln)
+ *     MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS;
+ *
+ * Some static analyzers, like scan-build from clang, can use this information
+ * to eliminate false positives. From the upstream documentation of scan-build:
+ * "This attribute is useful for annotating assertion handlers that actually
+ * can return, but for the purpose of using the analyzer we want to pretend
+ * that such functions do not return."
+ */
+#if defined(MOZ_HAVE_ANALYZER_NORETURN)
+# define MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS MOZ_HAVE_ANALYZER_NORETURN
+#else
+# define MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS /* no support */
+#endif
+
+/*
+ * MOZ_ASAN_IGNORE is a macro to tell AddressSanitizer (a compile-time
+ * instrumentation shipped with Clang and GCC) to not instrument the annotated
+ * function. Furthermore, it will prevent the compiler from inlining the
+ * function because inlining currently breaks the blocklisting mechanism of
+ * AddressSanitizer.
+ */
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+# define MOZ_HAVE_ASAN_IGNORE
+# endif
+#elif defined(__GNUC__)
+# if defined(__SANITIZE_ADDRESS__)
+# define MOZ_HAVE_ASAN_IGNORE
+# endif
+#endif
+
+#if defined(MOZ_HAVE_ASAN_IGNORE)
+# define MOZ_ASAN_IGNORE MOZ_NEVER_INLINE __attribute__((no_sanitize_address))
+#else
+# define MOZ_ASAN_IGNORE /* nothing */
+#endif
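+
+// Usage sketch (illustrative; the function name is hypothetical). A routine
+// that deliberately reads past the end of an allocation, such as a
+// word-at-a-time scanner, can opt out of ASan instrumentation:
+//
+//   MOZ_ASAN_IGNORE size_t WordAlignedScan(const char* aBuf, size_t aLen);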
+
+/*
+ * MOZ_TSAN_IGNORE is a macro to tell ThreadSanitizer (a compile-time
+ * instrumentation shipped with Clang) to not instrument the annotated function.
+ * Furthermore, it will prevent the compiler from inlining the function because
+ * inlining currently breaks the blocklisting mechanism of ThreadSanitizer.
+ */
+#if defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define MOZ_TSAN_IGNORE MOZ_NEVER_INLINE __attribute__((no_sanitize_thread))
+# else
+# define MOZ_TSAN_IGNORE /* nothing */
+# endif
+#else
+# define MOZ_TSAN_IGNORE /* nothing */
+#endif
+
+#if defined(__has_attribute)
+# if __has_attribute(no_sanitize)
+# define MOZ_HAVE_NO_SANITIZE_ATTR
+# endif
+#endif
+
+#ifdef __clang__
+# ifdef MOZ_HAVE_NO_SANITIZE_ATTR
+# define MOZ_HAVE_UNSIGNED_OVERFLOW_SANITIZE_ATTR
+# define MOZ_HAVE_SIGNED_OVERFLOW_SANITIZE_ATTR
+# endif
+#endif
+
+/*
+ * MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW disables *un*signed integer overflow
+ * checking on the function it annotates, in builds configured to perform it.
+ * (Currently this is only Clang using -fsanitize=unsigned-integer-overflow, or
+ * via --enable-unsigned-overflow-sanitizer in Mozilla's build system.) It has
+ * no effect in other builds.
+ *
+ * Place this attribute at the very beginning of a function declaration.
+ *
+ * Unsigned integer overflow isn't *necessarily* a bug. It's well-defined in
+ * C/C++, and code may reasonably depend upon it. For example,
+ *
+ * MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline bool
+ * IsDecimal(char aChar)
+ * {
+ * // For chars less than '0', unsigned integer underflow occurs, to a value
+ * // much greater than 10, so the overall test is false.
+ * // For chars greater than '0', no overflow occurs, and only '0' to '9'
+ * // pass the overall test.
+ * return static_cast<unsigned int>(aChar) - '0' < 10;
+ * }
+ *
+ * But even well-defined unsigned overflow often causes bugs when it occurs, so
+ * it should be restricted to functions annotated with this attribute.
+ *
+ * The compiler instrumentation to detect unsigned integer overflow has costs
+ * both at compile time and at runtime. Functions that are repeatedly inlined
+ * at compile time will also implicitly inline the necessary instrumentation,
+ * increasing compile time. Similarly, frequently-executed functions that
+ * require large amounts of instrumentation will also notice significant runtime
+ * slowdown to execute that instrumentation. Use this attribute to eliminate
+ * those costs -- but only after carefully verifying that no overflow can occur.
+ */
+#ifdef MOZ_HAVE_UNSIGNED_OVERFLOW_SANITIZE_ATTR
+# define MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW \
+ __attribute__((no_sanitize("unsigned-integer-overflow")))
+#else
+# define MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW /* nothing */
+#endif
+
+/*
+ * MOZ_NO_SANITIZE_SIGNED_OVERFLOW disables *signed* integer overflow checking
+ * on the function it annotates, in builds configured to perform it. (Currently
+ * this is only Clang using -fsanitize=signed-integer-overflow, or via
+ * --enable-signed-overflow-sanitizer in Mozilla's build system. GCC support
+ * will probably be added in the future.) It has no effect in other builds.
+ *
+ * Place this attribute at the very beginning of a function declaration.
+ *
+ * Signed integer overflow is undefined behavior in C/C++: *anything* can happen
+ * when it occurs. *Maybe* wraparound behavior will occur, but maybe also the
+ * compiler will assume no overflow happens and will adversely optimize the rest
+ * of your code. Code that contains signed integer overflow needs to be fixed.
+ *
+ * The compiler instrumentation to detect signed integer overflow has costs both
+ * at compile time and at runtime. Functions that are repeatedly inlined at
+ * compile time will also implicitly inline the necessary instrumentation,
+ * increasing compile time. Similarly, frequently-executed functions that
+ * require large amounts of instrumentation will also notice significant runtime
+ * slowdown to execute that instrumentation. Use this attribute to eliminate
+ * those costs -- but only after carefully verifying that no overflow can occur.
+ */
+#ifdef MOZ_HAVE_SIGNED_OVERFLOW_SANITIZE_ATTR
+# define MOZ_NO_SANITIZE_SIGNED_OVERFLOW \
+ __attribute__((no_sanitize("signed-integer-overflow")))
+#else
+# define MOZ_NO_SANITIZE_SIGNED_OVERFLOW /* nothing */
+#endif
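+
+// Usage sketch (illustrative; a hypothetical legacy hash whose wraparound is
+// known, tolerated, and tracked for eventual removal):
+//
+//   MOZ_NO_SANITIZE_SIGNED_OVERFLOW inline int
+//   LegacyMix(int aState, int aByte) {
+//     return aState * 31 + aByte;  // may wrap; deliberately unsanitized
+//   }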
+
+#undef MOZ_HAVE_NO_SANITIZE_ATTR
+
+/**
+ * MOZ_ALLOCATOR tells the compiler that the function it marks returns either a
+ * "fresh", "pointer-free" block of memory, or nullptr. "Fresh" means that the
+ * block is not pointed to by any other reachable pointer in the program.
+ * "Pointer-free" means that the block contains no pointers to any valid object
+ * in the program. It may be initialized with other (non-pointer) values.
+ *
+ * Placing this attribute on appropriate functions helps GCC analyze pointer
+ * aliasing more accurately in their callers.
+ *
+ * GCC warns if a caller ignores the value returned by a function marked with
+ * MOZ_ALLOCATOR: it is hard to imagine cases where dropping the value returned
+ * by a function that meets the criteria above would be intentional.
+ *
+ * Place this attribute after the argument list and 'this' qualifiers of a
+ * function definition. For example, write
+ *
+ * void *my_allocator(size_t) MOZ_ALLOCATOR;
+ *
+ * or
+ *
+ * void *my_allocator(size_t bytes) MOZ_ALLOCATOR { ... }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_ALLOCATOR __attribute__((malloc, warn_unused_result))
+# define MOZ_INFALLIBLE_ALLOCATOR \
+ __attribute__((malloc, warn_unused_result, returns_nonnull))
+#else
+# define MOZ_ALLOCATOR
+# define MOZ_INFALLIBLE_ALLOCATOR
+#endif
+
+/**
+ * MOZ_MAYBE_UNUSED suppresses compiler warnings about functions that are
+ * never called (in this build configuration, at least).
+ *
+ * Place this attribute at the very beginning of a function declaration. For
+ * example, write
+ *
+ * MOZ_MAYBE_UNUSED int foo();
+ *
+ * or
+ *
+ * MOZ_MAYBE_UNUSED int foo() { return 42; }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_MAYBE_UNUSED __attribute__((__unused__))
+#elif defined(_MSC_VER)
+# define MOZ_MAYBE_UNUSED __pragma(warning(suppress : 4505))
+#else
+# define MOZ_MAYBE_UNUSED
+#endif
+
+/*
+ * MOZ_NO_STACK_PROTECTOR, specified at the start of a function declaration,
+ * indicates that the given function should *NOT* be instrumented to detect
+ * stack buffer overflows at runtime. (The function definition does not need to
+ * be annotated.)
+ *
+ * MOZ_NO_STACK_PROTECTOR int foo();
+ *
+ * Detecting stack buffer overflows at runtime is a security feature. This
+ * modifier should thus only be used on functions which are provably exempt of
+ * stack buffer overflows, for example because they do not use stack buffers.
+ *
+ * This modifier does not affect the corresponding function's linking behavior.
+ */
+#if defined(MOZ_HAVE_NO_STACK_PROTECTOR)
+# define MOZ_NO_STACK_PROTECTOR MOZ_HAVE_NO_STACK_PROTECTOR
+#else
+# define MOZ_NO_STACK_PROTECTOR /* no support */
+#endif
+
+/**
+ * MOZ_LIFETIME_BOUND indicates that objects that are referred to by that
+ * parameter may also be referred to by the return value of the annotated
+ * function (or, for a parameter of a constructor, by the value of the
+ * constructed object).
+ * See: https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+ */
+#if defined(__clang__) && defined(__has_cpp_attribute)
+# if __has_cpp_attribute(clang::lifetimebound)
+# define MOZ_LIFETIME_BOUND [[clang::lifetimebound]]
+# else
+# define MOZ_LIFETIME_BOUND /* nothing */
+# endif
+#else
+# define MOZ_LIFETIME_BOUND /* nothing */
+#endif
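+
+// Usage sketch (illustrative declaration): the returned reference may refer
+// to either argument, so both parameters carry the annotation:
+//
+//   const int& Min(const int& aX MOZ_LIFETIME_BOUND,
+//                  const int& aY MOZ_LIFETIME_BOUND);
+//
+// This lets clang warn about e.g. |const int& r = Min(1, 2);|, where both
+// temporaries die at the end of the statement.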
+
+#ifdef __cplusplus
+
+/**
+ * C++11 lets unions contain members that have non-trivial special member
+ * functions (default/copy/move constructor, copy/move assignment operator,
+ * destructor) if the user defines the corresponding functions on the union.
+ * (Such user-defined functions must rely on external knowledge about which arm
+ * is active to be safe. Be extra-careful defining these functions!)
+ *
+ * MSVC unfortunately warns/errors for this bog-standard C++11 pattern. Use
+ * these macro-guards around such member functions to disable the warnings:
+ *
+ * union U
+ * {
+ * std::string s;
+ * int x;
+ *
+ * MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ *
+ * // |U| must have a user-defined default constructor because |std::string|
+ * // has a non-trivial default constructor.
+ * U() ... { ... }
+ *
+ * // |U| must have a user-defined destructor because |std::string| has a
+ * // non-trivial destructor.
+ * ~U() { ... }
+ *
+ * MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ * };
+ */
+# if defined(_MSC_VER)
+# define MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS \
+ __pragma(warning(push)) __pragma(warning(disable : 4582)) \
+ __pragma(warning(disable : 4583))
+# define MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS __pragma(warning(pop))
+# else
+# define MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS /* nothing */
+# define MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS /* nothing */
+# endif
+
+/*
+ * The following macros are attributes that support the static analysis plugin
+ * included with Mozilla, and will be implemented (when such support is enabled)
+ * as C++11 attributes. Since such attributes are legal pretty much everywhere
+ * and have subtly different semantics depending on their placement, the
+ * following is a guide on where to place the attributes.
+ *
+ * Attributes that apply to a struct or class precede the name of the class:
+ * (Note that this is different from the placement of final for classes!)
+ *
+ * class MOZ_CLASS_ATTRIBUTE SomeClass {};
+ *
+ * Attributes that apply to functions follow the parentheses and const
+ * qualifiers but precede final, override and the function body:
+ *
+ * void DeclaredFunction() MOZ_FUNCTION_ATTRIBUTE;
+ * void SomeFunction() MOZ_FUNCTION_ATTRIBUTE {}
+ * void PureFunction() const MOZ_FUNCTION_ATTRIBUTE = 0;
+ * void OverriddenFunction() MOZ_FUNCTION_ATTRIBUTE override;
+ *
+ * Attributes that apply to variables or parameters follow the variable's name:
+ *
+ * int variable MOZ_VARIABLE_ATTRIBUTE;
+ *
+ * Attributes that apply to types follow the type name:
+ *
+ * typedef int MOZ_TYPE_ATTRIBUTE MagicInt;
+ * int MOZ_TYPE_ATTRIBUTE someVariable;
+ * int* MOZ_TYPE_ATTRIBUTE magicPtrInt;
+ * int MOZ_TYPE_ATTRIBUTE* ptrToMagicInt;
+ *
+ * Attributes that apply to statements precede the statement:
+ *
+ * MOZ_IF_ATTRIBUTE if (x == 0)
+ * MOZ_DO_ATTRIBUTE do { } while (0);
+ *
+ * Attributes that apply to labels precede the label:
+ *
+ * MOZ_LABEL_ATTRIBUTE target:
+ * goto target;
+ * MOZ_CASE_ATTRIBUTE case 5:
+ * MOZ_DEFAULT_ATTRIBUTE default:
+ *
+ * The static analyses that are performed by the plugin are as follows:
+ *
+ * MOZ_CAN_RUN_SCRIPT: Applies to functions which can run script. Callers of
+ * this function must also be marked as MOZ_CAN_RUN_SCRIPT, and all refcounted
+ * arguments must be strongly held in the caller. Note that MOZ_CAN_RUN_SCRIPT
+ * should only be applied to function declarations, not definitions. If you
+ * need to apply it to a definition (eg because both are generated by a macro)
+ * use MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION. (A usage sketch follows this comment
+ * block.)
+ *
+ * MOZ_CAN_RUN_SCRIPT can be applied to XPIDL-generated declarations by
+ * annotating the method or attribute as [can_run_script] in the .idl file.
+ *
+ * MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION: Same as MOZ_CAN_RUN_SCRIPT, but usable on
+ * a definition. If the declaration is in a header file, users of that header
+ * file may not see the annotation.
+ * MOZ_CAN_RUN_SCRIPT_BOUNDARY: Applies to functions which need to call
+ * MOZ_CAN_RUN_SCRIPT functions, but should not themselves be considered
+ * MOZ_CAN_RUN_SCRIPT. This should generally be avoided but can be used in
+ * two cases:
+ * 1) As a temporary measure to limit the scope of changes when adding
+ * MOZ_CAN_RUN_SCRIPT. Such a use must be accompanied by a follow-up bug
+ * to replace the MOZ_CAN_RUN_SCRIPT_BOUNDARY with MOZ_CAN_RUN_SCRIPT and
+ * a comment linking to that bug.
+ * 2) If we can reason that the MOZ_CAN_RUN_SCRIPT callees of the function
+ * do not in fact run script (for example, because their behavior depends
+ * on arguments and we pass the arguments that don't allow script
+ * execution). Such a use must be accompanied by a comment that explains
+ * why it's OK to have the MOZ_CAN_RUN_SCRIPT_BOUNDARY, as well as
+ * comments in the callee pointing out that if its behavior changes the
+ * caller might need adjusting. And perhaps also a followup bug to
+ * refactor things so the "script" and "no script" codepaths do not share
+ * a chokepoint.
+ * Importantly, any use MUST be accompanied by a comment explaining why it's
+ * there, and should ideally have an action plan for getting rid of the
+ * MOZ_CAN_RUN_SCRIPT_BOUNDARY annotation.
+ * MOZ_MUST_OVERRIDE: Applies to all C++ member functions. All immediate
+ * subclasses must provide an exact override of this method; if a subclass
+ * does not override this method, the compiler will emit an error. This
+ * attribute is not limited to virtual methods, so if it is applied to a
+ * nonvirtual method and the subclass does not provide an equivalent
+ * definition, the compiler will emit an error.
+ * MOZ_STATIC_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live in static memory, so it is a compile-time error to use
+ * it, or an array of such objects, as the type of a variable declaration, or
+ * as a temporary object, or as the type of a new expression (unless
+ * placement new is being used). If a member of another class uses this
+ * class, or if another class inherits from this class, then it is considered
+ * to be a static class as well, although this attribute need not be provided
+ * in such cases.
+ * MOZ_STATIC_LOCAL_CLASS: Applies to all classes. Any class with this
+ * annotation is expected to be a static local variable, so it is
+ * a compile-time error to use it, or an array of such objects, as a
+ * temporary object, or as the type of a new expression. If another class
+ * inherits from this class then it is considered to be a static local
+ * class as well, although this attribute need not be provided in such cases.
+ * It is also a compile-time error for any class with this annotation to have
+ * a non-trivial destructor.
+ * MOZ_STACK_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live on the stack, so it is a compile-time error to use it, or
+ * an array of such objects, as a global or static variable, or as the type of
+ * a new expression (unless placement new is being used). If a member of
+ * another class uses this class, or if another class inherits from this
+ * class, then it is considered to be a stack class as well, although this
+ * attribute need not be provided in such cases.
+ * MOZ_NONHEAP_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live on the stack or in static storage, so it is a compile-time
+ * error to use it, or an array of such objects, as the type of a new
+ * expression. If a member of another class uses this class, or if another
+ * class inherits from this class, then it is considered to be a non-heap
+ * class as well, although this attribute need not be provided in such cases.
+ * MOZ_HEAP_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live on the heap, so it is a compile-time error to use it, or
+ * an array of such objects, as the type of a variable declaration, or as a
+ * temporary object. If a member of another class uses this class, or if
+ * another class inherits from this class, then it is considered to be a heap
+ * class as well, although this attribute need not be provided in such cases.
+ * MOZ_NON_TEMPORARY_CLASS: Applies to all classes. Any class with this
+ * annotation is expected not to live in a temporary. If a member of another
+ * class uses this class or if another class inherits from this class, then it
+ * is considered to be a non-temporary class as well, although this attribute
+ * need not be provided in such cases.
+ * MOZ_TEMPORARY_CLASS: Applies to all classes. Any class with this annotation
+ * is expected to only live in a temporary. If another class inherits from
+ * this class, then it is considered to be a temporary class as well, although
+ * this attribute need not be provided in such cases.
+ * MOZ_RAII: Applies to all classes. Any class with this annotation is assumed
+ * to be a RAII guard, which is expected to live on the stack in an automatic
+ * allocation. It is prohibited from being allocated in a temporary, static
+ * storage, or on the heap. This is a combination of MOZ_STACK_CLASS and
+ * MOZ_NON_TEMPORARY_CLASS.
+ * MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS: Applies to all classes that are
+ * intended to prevent introducing static initializers. This attribute
+ * currently makes it a compile-time error to instantiate these classes
+ * anywhere other than at the global scope, or as a static member of a class.
+ * In non-debug mode, it also prohibits non-trivial constructors and
+ * destructors.
+ * MOZ_TRIVIAL_CTOR_DTOR: Applies to all classes that must have both a trivial
+ * or constexpr constructor and a trivial destructor. Setting this attribute
+ * on a class makes it a compile-time error for that class to get a
+ * non-trivial constructor or destructor for any reason.
+ * MOZ_ALLOW_TEMPORARY: Applies to constructors. This indicates that using the
+ * constructor is allowed in temporary expressions, if it would have otherwise
+ * been forbidden by the type being a MOZ_NON_TEMPORARY_CLASS. Useful for
+ * constructors like Maybe(Nothing).
+ * MOZ_HEAP_ALLOCATOR: Applies to any function. This indicates that the return
+ * value is allocated on the heap; as a result, such allocations are checked
+ * during MOZ_STACK_CLASS and MOZ_NONHEAP_CLASS annotation checking.
+ * MOZ_IMPLICIT: Applies to constructors. Implicit conversion constructors
+ * are disallowed by default unless they are marked as MOZ_IMPLICIT. This
+ * attribute must be used for constructors which intend to provide implicit
+ * conversions.
+ * MOZ_IS_REFPTR: Applies to class declarations of ref pointer to mark them as
+ * such for use with static-analysis.
+ * A ref pointer is an object wrapping a pointer and automatically taking care
+ * of its refcounting upon construction/destruction/transfer of ownership.
+ * This annotation implies MOZ_IS_SMARTPTR_TO_REFCOUNTED.
+ * MOZ_IS_SMARTPTR_TO_REFCOUNTED: Applies to class declarations of smart
+ * pointers to ref counted classes to mark them as such for use with
+ * static-analysis.
+ * MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT: Applies to functions. Makes it a compile
+ * time error to pass arithmetic expressions involving variables to the
+ * function.
+ * MOZ_OWNING_REF: Applies to declarations of pointers to reference counted
+ * types. This attribute tells the compiler that the raw pointer is a strong
+ * reference, where ownership through methods such as AddRef and Release is
+ * managed manually. This can make the compiler ignore these pointers when
+ * validating the usage of pointers otherwise.
+ *
+ * Example uses include owned pointers inside of unions, and pointers stored
+ * in POD types where using a smart pointer class would make the object
+ * non-POD.
+ * MOZ_NON_OWNING_REF: Applies to declarations of pointers to reference counted
+ * types. This attribute tells the compiler that the raw pointer is a weak
+ * reference, which is ensured to be valid by a guarantee that the reference
+ * will be nulled before the pointer becomes invalid. This can make the
+ * compiler ignore these pointers when validating the usage of pointers
+ * otherwise.
+ *
+ * Examples include an mOwner pointer, which is nulled by the owning class's
+ * destructor, and is null-checked before dereferencing.
+ * MOZ_UNSAFE_REF: Applies to declarations of pointers to reference counted
+ * types. Occasionally there are non-owning references which are valid, but
+ * do not take the form of a MOZ_NON_OWNING_REF. Their safety may be
+ * dependent on the behaviour of API consumers. The string argument passed
+ * to this macro documents the safety conditions. This can make the compiler
+ * ignore these pointers when validating the usage of pointers elsewhere.
+ *
+ * Examples include an nsAtom* member which is known at compile time to point
+ * to a static atom which is valid throughout the lifetime of the program, or
+ * an API which stores a pointer, but doesn't take ownership over it, instead
+ * requiring the API consumer to correctly null the value before it becomes
+ * invalid.
+ *
+ * Use of this annotation is discouraged when a strong reference or one of
+ * the above two annotations can be used instead.
+ * MOZ_NO_ADDREF_RELEASE_ON_RETURN: Applies to function declarations. Makes it
+ * a compile time error to call AddRef or Release on the return value of a
+ * function. This is intended to be used with operator->() of our smart
+ * pointer classes to ensure that the refcount of an object wrapped in a
+ * smart pointer is not manipulated directly.
+ * MOZ_NEEDS_NO_VTABLE_TYPE: Applies to template class declarations. Makes it
+ * a compile time error to instantiate this template with a type parameter
+ * which has a VTable.
+ * MOZ_NON_MEMMOVABLE: Applies to class declarations for types that are not safe
+ * to be moved in memory using memmove().
+ * MOZ_NEEDS_MEMMOVABLE_TYPE: Applies to template class declarations where the
+ * template arguments are required to be safe to move in memory using
+ * memmove(). Passing MOZ_NON_MEMMOVABLE types to these templates is a
+ * compile time error.
+ * MOZ_NEEDS_MEMMOVABLE_MEMBERS: Applies to class declarations where each member
+ * must be safe to move in memory using memmove(). MOZ_NON_MEMMOVABLE types
+ * used in members of these classes are compile time errors.
+ * MOZ_NO_DANGLING_ON_TEMPORARIES: Applies to method declarations which return
+ * a pointer that is freed when the destructor of the class is called. This
+ * prevents these methods from being called on temporaries of the class,
+ * reducing risks of use-after-free.
+ * This attribute cannot be applied to && methods.
+ * In some cases, adding a deleted &&-qualified overload is too restrictive as
+ * this method should still be callable as a non-escaping argument to another
+ * function. This annotation can be used in those cases.
+ * MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS: Applies to template class
+ * declarations where an instance of the template should be considered, for
+ * static analysis purposes, to inherit any type annotations (such as
+ * MOZ_STACK_CLASS) from its template arguments.
+ * MOZ_INIT_OUTSIDE_CTOR: Applies to class member declarations. Occasionally
+ * there are class members that are not initialized in the constructor,
+ * but logic elsewhere in the class ensures they are initialized prior to use.
+ * Using this attribute on a member disables the check that this member must
+ * be initialized in constructors via list-initialization, in the constructor
+ * body, or via functions called from the constructor body.
+ * MOZ_IS_CLASS_INIT: Applies to class method declarations. Occasionally the
+ * constructor doesn't initialize all of the member variables and another
+ * function is used to initialize the rest. This marker is used to make the
+ * static analysis tool aware that the marked function is part of the
+ * initialization process and to include the marked function in the scan
+ * mechanism that determines which member variables still remain
+ * uninitialized.
+ * MOZ_NON_PARAM: Applies to types. Makes it a compile-time error to use the
+ * type as a function parameter except via pointer or reference.
+ * MOZ_NON_AUTOABLE: Applies to class declarations. Makes it a compile time
+ * error to use `auto` in place of this type in variable declarations. This
+ * is intended to be used with types which are intended to be implicitly
+ * constructed into other types before being assigned to variables.
+ * MOZ_REQUIRED_BASE_METHOD: Applies to virtual class method declarations.
+ * Sometimes derived classes override methods that still need to invoke the
+ * base-class version. This marker indicates that the marked method must
+ * be called by any method that overrides it.
+ * MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG: Applies to method declarations.
+ * Callers of the annotated method must return from that function within the
+ * calling block using an explicit `return` statement if the "this" value for
+ * the call is a parameter of the caller. Only calls to Constructors,
+ * references to local and member variables, and calls to functions or
+ * methods marked as MOZ_MAY_CALL_AFTER_MUST_RETURN may be made after the
+ * MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG call.
+ * MOZ_MAY_CALL_AFTER_MUST_RETURN: Applies to function or method declarations.
+ * Calls to these methods may be made in functions after calls to a
+ * MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG method.
+ * MOZ_UNANNOTATED/MOZ_ANNOTATED: Applies to Mutexes/Monitors and variations on
+ * them. MOZ_UNANNOTATED indicates that the Mutex/Monitor/etc hasn't been
+ * examined and annotated using macros from mfbt/ThreadSafety --
+ * MOZ_GUARDED_BY()/REQUIRES()/etc. MOZ_ANNOTATED is used in rare cases to
+ * indicate that it has been looked at, but it did not need any
+ * MOZ_GUARDED_BY()/REQUIRES()/etc (and thus static analysis knows it can
+ * ignore this Mutex/Monitor/etc)
+ */
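+
+// Usage sketch for MOZ_CAN_RUN_SCRIPT (illustrative names throughout): a
+// function that may trigger script execution is annotated, its callers are
+// annotated as well, and refcounted arguments are held strongly across the
+// call:
+//
+//   MOZ_CAN_RUN_SCRIPT void DispatchClick(Element* aTarget);
+//
+//   MOZ_CAN_RUN_SCRIPT void Caller() {
+//     RefPtr<Element> target = GetTarget();  // strong ref across the call
+//     DispatchClick(target);
+//   }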
+
+// gcc emits a nuisance warning -Wignored-attributes because attributes do not
+// affect mangled names, and therefore template arguments do not propagate
+// their attributes. It is rare that this would affect anything in practice,
+// and most compilers are silent about it. Similarly, -Wattributes complains
+// about attributes being ignored during template instantiation.
+//
+// Be conservative and only suppress the warning when running in a
+// configuration where it would be emitted, namely when compiling with the
+// XGILL_PLUGIN for the rooting hazard analysis (which runs under gcc.) If we
+// end up wanting these attributes in general GCC builds, change this to
+// something like
+//
+// #if defined(__GNUC__) && ! defined(__clang__)
+//
+# ifdef XGILL_PLUGIN
+# pragma GCC diagnostic ignored "-Wignored-attributes"
+# pragma GCC diagnostic ignored "-Wattributes"
+# endif
+
+# if defined(MOZ_CLANG_PLUGIN) || defined(XGILL_PLUGIN)
+# define MOZ_CAN_RUN_SCRIPT __attribute__((annotate("moz_can_run_script")))
+# define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION \
+ __attribute__((annotate("moz_can_run_script"))) \
+ __attribute__((annotate("moz_can_run_script_for_definition")))
+# define MOZ_CAN_RUN_SCRIPT_BOUNDARY \
+ __attribute__((annotate("moz_can_run_script_boundary")))
+# define MOZ_MUST_OVERRIDE __attribute__((annotate("moz_must_override")))
+# define MOZ_STATIC_CLASS __attribute__((annotate("moz_global_class")))
+# define MOZ_STATIC_LOCAL_CLASS \
+ __attribute__((annotate("moz_static_local_class"))) \
+ __attribute__((annotate("moz_trivial_dtor")))
+# define MOZ_STACK_CLASS __attribute__((annotate("moz_stack_class")))
+# define MOZ_NONHEAP_CLASS __attribute__((annotate("moz_nonheap_class")))
+# define MOZ_HEAP_CLASS __attribute__((annotate("moz_heap_class")))
+# define MOZ_NON_TEMPORARY_CLASS \
+ __attribute__((annotate("moz_non_temporary_class")))
+# define MOZ_TEMPORARY_CLASS __attribute__((annotate("moz_temporary_class")))
+# define MOZ_TRIVIAL_CTOR_DTOR \
+ __attribute__((annotate("moz_trivial_ctor_dtor")))
+# define MOZ_ALLOW_TEMPORARY __attribute__((annotate("moz_allow_temporary")))
+# ifdef DEBUG
+/* in debug builds, these classes do have non-trivial constructors. */
+# define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS \
+ __attribute__((annotate("moz_global_class")))
+# else
+# define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS \
+ __attribute__((annotate("moz_global_class"))) MOZ_TRIVIAL_CTOR_DTOR
+# endif
+# define MOZ_IMPLICIT __attribute__((annotate("moz_implicit")))
+# define MOZ_IS_SMARTPTR_TO_REFCOUNTED \
+ __attribute__((annotate("moz_is_smartptr_to_refcounted")))
+# define MOZ_IS_REFPTR MOZ_IS_SMARTPTR_TO_REFCOUNTED
+# define MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT \
+ __attribute__((annotate("moz_no_arith_expr_in_arg")))
+# define MOZ_OWNING_REF __attribute__((annotate("moz_owning_ref")))
+# define MOZ_NON_OWNING_REF __attribute__((annotate("moz_non_owning_ref")))
+# define MOZ_UNSAFE_REF(reason) __attribute__((annotate("moz_unsafe_ref")))
+# define MOZ_NO_ADDREF_RELEASE_ON_RETURN \
+ __attribute__((annotate("moz_no_addref_release_on_return")))
+# define MOZ_NEEDS_NO_VTABLE_TYPE \
+ __attribute__((annotate("moz_needs_no_vtable_type")))
+# define MOZ_NON_MEMMOVABLE __attribute__((annotate("moz_non_memmovable")))
+# define MOZ_NEEDS_MEMMOVABLE_TYPE \
+ __attribute__((annotate("moz_needs_memmovable_type")))
+# define MOZ_NEEDS_MEMMOVABLE_MEMBERS \
+ __attribute__((annotate("moz_needs_memmovable_members")))
+# define MOZ_NO_DANGLING_ON_TEMPORARIES \
+ __attribute__((annotate("moz_no_dangling_on_temporaries")))
+# define MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS \
+ __attribute__(( \
+ annotate("moz_inherit_type_annotations_from_template_args")))
+# define MOZ_NON_AUTOABLE __attribute__((annotate("moz_non_autoable")))
+# define MOZ_INIT_OUTSIDE_CTOR
+# define MOZ_IS_CLASS_INIT
+# define MOZ_NON_PARAM __attribute__((annotate("moz_non_param")))
+# define MOZ_REQUIRED_BASE_METHOD \
+ __attribute__((annotate("moz_required_base_method")))
+# define MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG \
+ __attribute__((annotate("moz_must_return_from_caller_if_this_is_arg")))
+# define MOZ_MAY_CALL_AFTER_MUST_RETURN \
+ __attribute__((annotate("moz_may_call_after_must_return")))
+# define MOZ_KNOWN_LIVE __attribute__((annotate("moz_known_live")))
+# ifndef XGILL_PLUGIN
+# define MOZ_UNANNOTATED __attribute__((annotate("moz_unannotated")))
+# define MOZ_ANNOTATED __attribute__((annotate("moz_annotated")))
+# else
+# define MOZ_UNANNOTATED /* nothing */
+# define MOZ_ANNOTATED /* nothing */
+# endif
+
+/*
+ * It turns out that clang warns about "void func() __attribute__((...)) {}"
+ * (-Wgcc-compat), so use pragmas to disable the warning.
+ */
+# ifdef __clang__
+# define MOZ_HEAP_ALLOCATOR \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+ __attribute__((annotate("moz_heap_allocator"))) \
+ _Pragma("clang diagnostic pop")
+# else
+# define MOZ_HEAP_ALLOCATOR __attribute__((annotate("moz_heap_allocator")))
+# endif
+# else
+# define MOZ_CAN_RUN_SCRIPT /* nothing */
+# define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION /* nothing */
+# define MOZ_CAN_RUN_SCRIPT_BOUNDARY /* nothing */
+# define MOZ_MUST_OVERRIDE /* nothing */
+# define MOZ_STATIC_CLASS /* nothing */
+# define MOZ_STATIC_LOCAL_CLASS /* nothing */
+# define MOZ_STACK_CLASS /* nothing */
+# define MOZ_NONHEAP_CLASS /* nothing */
+# define MOZ_HEAP_CLASS /* nothing */
+# define MOZ_NON_TEMPORARY_CLASS /* nothing */
+# define MOZ_TEMPORARY_CLASS /* nothing */
+# define MOZ_TRIVIAL_CTOR_DTOR /* nothing */
+# define MOZ_ALLOW_TEMPORARY /* nothing */
+# define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS /* nothing */
+# define MOZ_IMPLICIT /* nothing */
+# define MOZ_IS_SMARTPTR_TO_REFCOUNTED /* nothing */
+# define MOZ_IS_REFPTR /* nothing */
+# define MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT /* nothing */
+# define MOZ_HEAP_ALLOCATOR /* nothing */
+# define MOZ_OWNING_REF /* nothing */
+# define MOZ_NON_OWNING_REF /* nothing */
+# define MOZ_UNSAFE_REF(reason) /* nothing */
+# define MOZ_NO_ADDREF_RELEASE_ON_RETURN /* nothing */
+# define MOZ_NEEDS_NO_VTABLE_TYPE /* nothing */
+# define MOZ_NON_MEMMOVABLE /* nothing */
+# define MOZ_NEEDS_MEMMOVABLE_TYPE /* nothing */
+# define MOZ_NEEDS_MEMMOVABLE_MEMBERS /* nothing */
+# define MOZ_NO_DANGLING_ON_TEMPORARIES /* nothing */
+# define MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS /* nothing */
+# define MOZ_INIT_OUTSIDE_CTOR /* nothing */
+# define MOZ_IS_CLASS_INIT /* nothing */
+# define MOZ_NON_PARAM /* nothing */
+# define MOZ_NON_AUTOABLE /* nothing */
+# define MOZ_REQUIRED_BASE_METHOD /* nothing */
+# define MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG /* nothing */
+# define MOZ_MAY_CALL_AFTER_MUST_RETURN /* nothing */
+# define MOZ_KNOWN_LIVE /* nothing */
+# define MOZ_UNANNOTATED /* nothing */
+# define MOZ_ANNOTATED /* nothing */
+# endif /* defined(MOZ_CLANG_PLUGIN) || defined(XGILL_PLUGIN) */
+
+# define MOZ_RAII MOZ_NON_TEMPORARY_CLASS MOZ_STACK_CLASS
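+
+// Usage sketch (illustrative class; Mutex stands in for any lockable type):
+// a typical RAII guard, which the analysis requires to live on the stack and
+// never in a temporary:
+//
+//   class MOZ_RAII AutoLock {
+//    public:
+//     explicit AutoLock(Mutex& aMutex) : mMutex(aMutex) { mMutex.Lock(); }
+//     ~AutoLock() { mMutex.Unlock(); }
+//
+//    private:
+//     Mutex& mMutex;
+//   };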
+
+// XGILL_PLUGIN is used for the GC rooting hazard analysis, which compiles with
+// gcc. gcc has different rules governing __attribute__((...)) placement, so
+// some attributes will error out when used in the source code where clang
+// expects them to be. Remove the problematic annotations when needed.
+//
+// The placement of C++11 [[...]] attributes is more flexible and defined by a
+// spec, so it would be nice to switch to those for the problematic
+// cases. Unfortunately, the official spec provides *no* way to annotate a
+// lambda function, which is one source of the difficulty here. It appears that
+// this will be fixed in C++23: https://github.com/cplusplus/papers/issues/882
+
+# ifdef XGILL_PLUGIN
+
+# undef MOZ_MUST_OVERRIDE
+# undef MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION
+# undef MOZ_CAN_RUN_SCRIPT
+# undef MOZ_CAN_RUN_SCRIPT_BOUNDARY
+# define MOZ_MUST_OVERRIDE /* nothing */
+# define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION /* nothing */
+# define MOZ_CAN_RUN_SCRIPT /* nothing */
+# define MOZ_CAN_RUN_SCRIPT_BOUNDARY /* nothing */
+
+# endif
+
+#endif /* __cplusplus */
+
+/**
+ * Printf style formats. MOZ_FORMAT_PRINTF and MOZ_FORMAT_WPRINTF can be used
+ * to annotate a function or method that is "printf/wprintf-like"; this will let
+ * (some) compilers check that the arguments match the template string.
+ *
+ * Each macro takes two arguments. The first argument is the argument
+ * number of the template string. The second argument is the argument
+ * number of the '...' argument holding the arguments.
+ *
+ * Argument numbers start at 1. Note that the implicit "this"
+ * argument of a non-static member function counts as an argument.
+ *
+ * So, for a simple case like:
+ * void print_something (int whatever, const char *fmt, ...);
+ * The corresponding annotation would be
+ * MOZ_FORMAT_PRINTF(2, 3)
+ * However, if "print_something" were a non-static member function,
+ * then the annotation would be:
+ * MOZ_FORMAT_PRINTF(3, 4)
+ *
+ * The second argument should be 0 for vprintf-like functions; that
+ * is, those taking a va_list argument.
+ *
+ * Note that the checking is limited to standards-conforming
+ * printf-likes, and in particular this should not be used for
+ * PR_snprintf and friends, which are "printf-like" but which assign
+ * different meanings to the various formats.
+ *
+ * MinGW requires special handling due to different format specifiers
+ * on different platforms. The macro __MINGW_PRINTF_FORMAT maps to
+ * either gnu_printf or ms_printf depending on where we are compiling
+ * to avoid warnings on format specifiers that are legal.
+ *
+ * At time of writing MinGW has no wide equivalent to __MINGW_PRINTF_FORMAT;
+ * therefore __MINGW_WPRINTF_FORMAT has been implemented following the same
+ * pattern seen in MinGW's source.
+ */
+#ifdef __MINGW32__
+# define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck) \
+ __attribute__((format(__MINGW_PRINTF_FORMAT, stringIndex, firstToCheck)))
+# ifndef __MINGW_WPRINTF_FORMAT
+# if defined(__clang__)
+# define __MINGW_WPRINTF_FORMAT wprintf
+# elif defined(_UCRT) || __USE_MINGW_ANSI_STDIO
+# define __MINGW_WPRINTF_FORMAT gnu_wprintf
+# else
+# define __MINGW_WPRINTF_FORMAT ms_wprintf
+# endif
+# endif
+# define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck) \
+ __attribute__((format(__MINGW_WPRINTF_FORMAT, stringIndex, firstToCheck)))
+#elif __GNUC__ || __clang__
+# define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck) \
+ __attribute__((format(printf, stringIndex, firstToCheck)))
+# define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck) \
+ __attribute__((format(wprintf, stringIndex, firstToCheck)))
+#else
+# define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck)
+# define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck)
+#endif
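+
+// Usage sketch (illustrative declarations): argument numbering starts at 1,
+// and the vprintf-like variant passes 0 as the second argument:
+//
+//   MOZ_FORMAT_PRINTF(2, 3) void LogF(int aLevel, const char* aFmt, ...);
+//   MOZ_FORMAT_PRINTF(2, 0) void VLogF(int aLevel, const char* aFmt,
+//                                      va_list aArgs);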
+
+/**
+ * To manually declare an XPCOM ABI-compatible virtual function, the following
+ * macros can be used to handle the non-standard ABI used on Windows for COM
+ * compatibility. E.g.:
+ *
+ * virtual ReturnType MOZ_XPCOM_ABI foo();
+ */
+#if defined(XP_WIN)
+# define MOZ_XPCOM_ABI __stdcall
+#else
+# define MOZ_XPCOM_ABI
+#endif
+
+/**
+ * MSVC / clang-cl don't optimize empty bases correctly unless we explicitly
+ * tell them to; see:
+ *
+ * https://stackoverflow.com/questions/12701469/why-is-the-empty-base-class-optimization-ebo-is-not-working-in-msvc
+ * https://devblogs.microsoft.com/cppblog/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+ */
+#if defined(_MSC_VER)
+# define MOZ_EMPTY_BASES __declspec(empty_bases)
+#else
+# define MOZ_EMPTY_BASES
+#endif
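+
+// Usage sketch (illustrative): without the annotation, MSVC may reserve
+// storage for each empty base in multiple-inheritance layouts:
+//
+//   struct EmptyA {};
+//   struct EmptyB {};
+//   struct MOZ_EMPTY_BASES Packed : EmptyA, EmptyB {
+//     void* mPtr;  // with the annotation, sizeof(Packed) should equal
+//                  // sizeof(void*)
+//   };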
+
+#endif /* mozilla_Attributes_h */
diff --git a/mfbt/BinarySearch.h b/mfbt/BinarySearch.h
new file mode 100644
index 0000000000..f3aeac30a0
--- /dev/null
+++ b/mfbt/BinarySearch.h
@@ -0,0 +1,247 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BinarySearch_h
+#define mozilla_BinarySearch_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <utility>
+
+namespace mozilla {
+
+/*
+ * The BinarySearch() algorithm searches the given container |aContainer| over
+ * the sorted index range [aBegin, aEnd) for an index |i| where
+ * |aContainer[i] == aTarget|.
+ * If such an index |i| is found, BinarySearch returns |true| and the index is
+ * returned via the outparam |aMatchOrInsertionPoint|. If no index is found,
+ * BinarySearch returns |false| and the outparam returns the first index in
+ * [aBegin, aEnd] where |aTarget| can be inserted to maintain sorted order.
+ *
+ * Example:
+ *
+ * Vector<int> sortedInts = ...
+ *
+ * size_t match;
+ * if (BinarySearch(sortedInts, 0, sortedInts.length(), 13, &match)) {
+ * printf("found 13 at %lu\n", match);
+ * }
+ *
+ * The BinarySearchIf() version behaves similarly, but takes |aComparator|, a
+ * functor to compare the values with, instead of a value to find.
+ * That functor should take one argument - the value to compare - and return an
+ * |int| with the comparison result:
+ *
+ * * 0, if the argument is equal to,
+ * * less than 0, if the argument is greater than,
+ * * greater than 0, if the argument is less than
+ *
+ * the value.
+ *
+ * Example:
+ *
+ * struct Comparator {
+ * int operator()(int aVal) const {
+ * if (mTarget < aVal) { return -1; }
+ * if (mTarget > aVal) { return 1; }
+ * return 0;
+ * }
+ * explicit Comparator(int aTarget) : mTarget(aTarget) {}
+ * const int mTarget;
+ * };
+ *
+ * Vector<int> sortedInts = ...
+ *
+ * size_t match;
+ * if (BinarySearchIf(sortedInts, 0, sortedInts.length(), Comparator(13),
+ *                    &match)) {
+ *   printf("found 13 at %zu\n", match);
+ * }
+ *
+ */
+
+template <typename Container, typename Comparator>
+bool BinarySearchIf(const Container& aContainer, size_t aBegin, size_t aEnd,
+ const Comparator& aCompare,
+ size_t* aMatchOrInsertionPoint) {
+ MOZ_ASSERT(aBegin <= aEnd);
+
+ size_t low = aBegin;
+ size_t high = aEnd;
+ while (high != low) {
+ size_t middle = low + (high - low) / 2;
+
+ // Allow any intermediate type so long as it provides a suitable ordering
+ // relation.
+ const int result = aCompare(aContainer[middle]);
+
+ if (result == 0) {
+ *aMatchOrInsertionPoint = middle;
+ return true;
+ }
+
+ if (result < 0) {
+ high = middle;
+ } else {
+ low = middle + 1;
+ }
+ }
+
+ *aMatchOrInsertionPoint = low;
+ return false;
+}
+
+namespace detail {
+
+template <class T>
+class BinarySearchDefaultComparator {
+ public:
+ explicit BinarySearchDefaultComparator(const T& aTarget) : mTarget(aTarget) {}
+
+ template <class U>
+ int operator()(const U& aVal) const {
+ if (mTarget == aVal) {
+ return 0;
+ }
+
+ if (mTarget < aVal) {
+ return -1;
+ }
+
+ return 1;
+ }
+
+ private:
+ const T& mTarget;
+};
+
+} // namespace detail
+
+template <typename Container, typename T>
+bool BinarySearch(const Container& aContainer, size_t aBegin, size_t aEnd,
+ T aTarget, size_t* aMatchOrInsertionPoint) {
+ return BinarySearchIf(aContainer, aBegin, aEnd,
+ detail::BinarySearchDefaultComparator<T>(aTarget),
+ aMatchOrInsertionPoint);
+}
+
+/*
+ * LowerBound(), UpperBound(), and EqualRange() are equivalent to
+ * std::lower_bound(), std::upper_bound(), and std::equal_range() respectively.
+ *
+ * LowerBound() returns an index pointing to the first element in the range
+ * in which each element is considered *not less than* the given value passed
+ * via |aCompare|, or the length of |aContainer| if no such element is found.
+ *
+ * UpperBound() returns an index pointing to the first element in the range
+ * in which each element is considered *greater than* the given value passed
+ * via |aCompare|, or the length of |aContainer| if no such element is found.
+ *
+ * EqualRange() returns a range [first, second) containing all elements that
+ * are considered equivalent to the given value via |aCompare|. If you need
+ * only the first or the last index of the range, LowerBound() or UpperBound(),
+ * which are slightly faster than EqualRange(), should suffice.
+ *
+ * Example (another example is given in TestBinarySearch.cpp):
+ *
+ * Vector<const char*> sortedStrings = ...
+ *
+ * struct Comparator {
+ * const nsACString& mStr;
+ * explicit Comparator(const nsACString& aStr) : mStr(aStr) {}
+ * int32_t operator()(const char* aVal) const {
+ * return Compare(mStr, nsDependentCString(aVal));
+ * }
+ * };
+ *
+ * auto bounds = EqualRange(sortedStrings, 0, sortedStrings.length(),
+ * Comparator("needle I'm looking for"_ns));
+ * printf("Found the range [%zd %zd)\n", bounds.first(), bounds.second());
+ *
+ */
+template <typename Container, typename Comparator>
+size_t LowerBound(const Container& aContainer, size_t aBegin, size_t aEnd,
+ const Comparator& aCompare) {
+ MOZ_ASSERT(aBegin <= aEnd);
+
+ size_t low = aBegin;
+ size_t high = aEnd;
+ while (high != low) {
+ size_t middle = low + (high - low) / 2;
+
+ // Allow any intermediate type so long as it provides a suitable ordering
+ // relation.
+ const int result = aCompare(aContainer[middle]);
+
+ // The range returned by LowerBound does include elements
+ // equivalent to the given value, i.e. aCompare(element) == 0.
+ if (result <= 0) {
+ high = middle;
+ } else {
+ low = middle + 1;
+ }
+ }
+
+ return low;
+}
+
+template <typename Container, typename Comparator>
+size_t UpperBound(const Container& aContainer, size_t aBegin, size_t aEnd,
+ const Comparator& aCompare) {
+ MOZ_ASSERT(aBegin <= aEnd);
+
+ size_t low = aBegin;
+ size_t high = aEnd;
+ while (high != low) {
+ size_t middle = low + (high - low) / 2;
+
+ // Allow any intermediate type so long as it provides a suitable ordering
+ // relation.
+ const int result = aCompare(aContainer[middle]);
+
+ // The range returned by UpperBound does NOT include elements
+ // equivalent to the given value, i.e. aCompare(element) == 0.
+ if (result < 0) {
+ high = middle;
+ } else {
+ low = middle + 1;
+ }
+ }
+
+ return high;
+}
+
+template <typename Container, typename Comparator>
+std::pair<size_t, size_t> EqualRange(const Container& aContainer, size_t aBegin,
+ size_t aEnd, const Comparator& aCompare) {
+ MOZ_ASSERT(aBegin <= aEnd);
+
+ size_t low = aBegin;
+ size_t high = aEnd;
+ while (high != low) {
+ size_t middle = low + (high - low) / 2;
+
+ // Allow any intermediate type so long as it provides a suitable ordering
+ // relation.
+ const int result = aCompare(aContainer[middle]);
+
+ if (result < 0) {
+ high = middle;
+ } else if (result > 0) {
+ low = middle + 1;
+ } else {
+ return {LowerBound(aContainer, low, middle, aCompare),
+ UpperBound(aContainer, middle + 1, high, aCompare)};
+ }
+ }
+
+ return {low, high};
+}
+
+} // namespace mozilla
+
+#endif // mozilla_BinarySearch_h
diff --git a/mfbt/BitSet.h b/mfbt/BitSet.h
new file mode 100644
index 0000000000..7c03fb87ce
--- /dev/null
+++ b/mfbt/BitSet.h
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BitSet_h
+#define mozilla_BitSet_h
+
+#include "mozilla/Array.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Span.h"
+
+namespace mozilla {
+
+/**
+ * An object like std::bitset but which provides access to the underlying
+ * storage.
+ *
+ * The limited API is due to expedience only; feel free to flesh out any
+ * std::bitset-like members.
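+ *
+ * Usage sketch (illustrative):
+ *
+ *   BitSet<100> bits;      // 100 bits, zero-initialized
+ *   bits[31] = true;       // set a bit through the Reference proxy
+ *   if (bits.Test(31)) {
+ *     // ...
+ *   }
+ *   bits.Flip();           // invert every bit; padding bits stay clear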
+ */
+template <size_t N, typename Word = size_t>
+class BitSet {
+ static_assert(std::is_unsigned_v<Word>,
+ "The Word type must be an unsigned integral type");
+
+ private:
+ static constexpr size_t kBitsPerWord = 8 * sizeof(Word);
+ static constexpr size_t kNumWords = (N + kBitsPerWord - 1) / kBitsPerWord;
+ static constexpr size_t kPaddingBits = (kNumWords * kBitsPerWord) - N;
+ static constexpr Word kPaddingMask = Word(-1) >> kPaddingBits;
+
+ // The zeroth bit in the bitset is the least significant bit of mStorage[0].
+ Array<Word, kNumWords> mStorage;
+
+ constexpr void ResetPaddingBits() {
+ if constexpr (kPaddingBits != 0) {
+ mStorage[kNumWords - 1] &= kPaddingMask;
+ }
+ }
+
+ public:
+ class Reference {
+ public:
+ Reference(BitSet<N, Word>& aBitSet, size_t aPos)
+ : mBitSet(aBitSet), mPos(aPos) {}
+
+ Reference& operator=(bool aValue) {
+ auto bit = Word(1) << (mPos % kBitsPerWord);
+ auto& word = mBitSet.mStorage[mPos / kBitsPerWord];
+ word = (word & ~bit) | (aValue ? bit : 0);
+ return *this;
+ }
+
+ MOZ_IMPLICIT operator bool() const { return mBitSet.Test(mPos); }
+
+ private:
+ BitSet<N, Word>& mBitSet;
+ size_t mPos;
+ };
+
+ constexpr BitSet() : mStorage() {}
+
+ BitSet(const BitSet& aOther) { *this = aOther; }
+
+ BitSet& operator=(const BitSet& aOther) {
+ PodCopy(mStorage.begin(), aOther.mStorage.begin(), kNumWords);
+ return *this;
+ }
+
+ explicit BitSet(Span<Word, kNumWords> aStorage) {
+ PodCopy(mStorage.begin(), aStorage.Elements(), kNumWords);
+ }
+
+ static constexpr size_t Size() { return N; }
+
+ constexpr bool Test(size_t aPos) const {
+ MOZ_ASSERT(aPos < N);
+ return mStorage[aPos / kBitsPerWord] & (Word(1) << (aPos % kBitsPerWord));
+ }
+
+ constexpr bool IsEmpty() const {
+ for (const Word& word : mStorage) {
+ if (word) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ explicit constexpr operator bool() { return !IsEmpty(); }
+
+ constexpr bool operator[](size_t aPos) const { return Test(aPos); }
+
+ Reference operator[](size_t aPos) {
+ MOZ_ASSERT(aPos < N);
+ return {*this, aPos};
+ }
+
+ BitSet operator|(const BitSet<N, Word>& aOther) {
+ BitSet result = *this;
+ result |= aOther;
+ return result;
+ }
+
+ BitSet& operator|=(const BitSet<N, Word>& aOther) {
+ for (size_t i = 0; i < ArrayLength(mStorage); i++) {
+ mStorage[i] |= aOther.mStorage[i];
+ }
+ return *this;
+ }
+
+ BitSet operator~() const {
+ BitSet result = *this;
+ result.Flip();
+ return result;
+ }
+
+ BitSet& operator&=(const BitSet<N, Word>& aOther) {
+ for (size_t i = 0; i < ArrayLength(mStorage); i++) {
+ mStorage[i] &= aOther.mStorage[i];
+ }
+ return *this;
+ }
+
+ BitSet operator&(const BitSet<N, Word>& aOther) const {
+ BitSet result = *this;
+ result &= aOther;
+ return result;
+ }
+
+ bool operator==(const BitSet<N, Word>& aOther) const {
+ return mStorage == aOther.mStorage;
+ }
+
+ size_t Count() const {
+ size_t count = 0;
+
+ for (const Word& word : mStorage) {
+ if constexpr (kBitsPerWord > 32) {
+ count += CountPopulation64(word);
+ } else {
+ count += CountPopulation32(word);
+ }
+ }
+
+ return count;
+ }
+
+ // Set all bits to false.
+ void ResetAll() { PodArrayZero(mStorage); }
+
+ // Set all bits to true.
+ void SetAll() {
+ memset(mStorage.begin(), 0xff, kNumWords * sizeof(Word));
+ ResetPaddingBits();
+ }
+
+ void Flip() {
+ for (Word& word : mStorage) {
+ word = ~word;
+ }
+
+ ResetPaddingBits();
+ }
+
+ Span<Word> Storage() { return mStorage; }
+
+ Span<const Word> Storage() const { return mStorage; }
+};
+
+} // namespace mozilla
+
+#endif // mozilla_BitSet_h
diff --git a/mfbt/BloomFilter.h b/mfbt/BloomFilter.h
new file mode 100644
index 0000000000..08882c4d63
--- /dev/null
+++ b/mfbt/BloomFilter.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Bloom filter implementations (bit and counting). These allow consumers to
+ * do fast probabilistic "is item X in set Y?" testing which will
+ * never answer "no" when the correct answer is "yes" (but might
+ * incorrectly answer "yes" when the correct answer is "no").
+ */
+
+#ifndef mozilla_BloomFilter_h
+#define mozilla_BloomFilter_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Likely.h"
+
+#include <stdint.h>
+#include <string.h>
+
+namespace mozilla {
+
+/*
+ * This class implements a classic Bloom filter as described at
+ * <http://en.wikipedia.org/wiki/Bloom_filter>. This allows quick
+ * probabilistic answers to the question "is object X in set Y?" where the
+ * contents of Y might not be time-invariant. The probabilistic nature of the
+ * test means that sometimes the answer will be "yes" when it should be "no".
+ * If the answer is "no", then X is guaranteed not to be in Y.
+ *
+ * The filter is parametrized on KeySize, which is the size of the key
+ * generated by each of hash functions used by the filter, in bits,
+ * and the type of object T being added and removed. T must implement
+ * a |uint32_t hash() const| method which returns a uint32_t hash key
+ * that will be used to generate the two separate hash functions for
+ * the Bloom filter. This hash key MUST be well-distributed for good
+ * results! KeySize is not allowed to be larger than 16.
+ *
+ * The filter uses exactly 2**KeySize bits (2**(KeySize-3) bytes) of memory.
+ * From now on we will refer to the memory used by the filter as M.
+ *
+ * The expected rate of incorrect "yes" answers depends on M and on
+ * the number N of objects in set Y. As long as N is small compared
+ * to M, the rate of such answers is expected to be approximately
+ * 4*(N/M)**2 for this filter. In practice, if Y has a few hundred
+ * elements then using a KeySize of 12 gives a reasonably low
+ * incorrect answer rate. A KeySize of 12 has the additional benefit
+ * of using exactly one page for the filter in typical hardware
+ * configurations.
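+ *
+ * Worked example (approximate): with KeySize == 12 the filter has
+ * M == 2**12 == 4096 slots, so holding N == 300 elements gives an expected
+ * incorrect-"yes" rate of roughly 4*(300/4096)**2, i.e. about 2%.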
+ */
+template <unsigned KeySize, class T>
+class BitBloomFilter {
+ /*
+ * A Bloom filter with one bit per slot. For now we assume
+ * that having two hash functions is enough, but we may revisit that
+ * decision later.
+ *
+ * The filter uses an array with 2**KeySize entries.
+ *
+ * Assuming a well-distributed hash function, a Bloom filter with
+ * array size M containing N elements and
+ * using k hash functions has expected false positive rate exactly
+ *
+ * $ (1 - (1 - 1/M)^{kN})^k $
+ *
+ * because each array slot has a
+ *
+ * $ (1 - 1/M)^{kN} $
+ *
+ * chance of being 0, and the expected false positive rate is the
+ * probability that all of the k hash functions will hit a nonzero
+ * slot.
+ *
+ * For reasonable assumptions (M large, kN large, which should both
+ * hold if we're worried about false positives) about M and kN this
+ * becomes approximately
+ *
+ * $$ (1 - \exp(-kN/M))^k $$
+ *
+ * For our special case of k == 2, that's $(1 - \exp(-2N/M))^2$,
+ * or in other words
+ *
+ * $$ N/M = -0.5 * \ln(1 - \sqrt(r)) $$
+ *
+ * where r is the false positive rate. This can be used to compute
+ * the desired KeySize for a given load N and false positive rate r.
+ *
+ * If N/M is assumed small, then the false positive rate can
+ * further be approximated as 4*N^2/M^2. So increasing KeySize by
+ * 1, which doubles M, reduces the false positive rate by about a
+ * factor of 4, and a false positive rate of 1% corresponds to
+ * about M/N == 20.
+ *
+ * What this means in practice is that for a few hundred keys using a
+ * KeySize of 12 gives false positive rates on the order of 0.25-4%.
+ *
+ * Similarly, using a KeySize of 10 would lead to a 4% false
+ * positive rate for N == 100 and to quite bad false positive
+ * rates for larger N.
+ */
+ public:
+ BitBloomFilter() {
+ static_assert(KeySize >= 3, "KeySize too small");
+ static_assert(KeySize <= kKeyShift, "KeySize too big");
+
+ // XXX: Should we have a custom operator new using calloc instead and
+ // require that we're allocated via the operator?
+ clear();
+ }
+
+ /*
+ * Clear the filter. This should be done before reusing it.
+ */
+ void clear();
+
+ /*
+ * Add an item to the filter.
+ */
+ void add(const T* aValue);
+
+ /*
+ * Check whether the filter might contain an item. This can
+ * sometimes return true even if the item is not in the filter,
+ * but will never return false for items that are actually in the
+ * filter.
+ */
+ bool mightContain(const T* aValue) const;
+
+ /*
+ * Methods for add/contain when we already have a hash computed
+ */
+ void add(uint32_t aHash);
+ bool mightContain(uint32_t aHash) const;
+
+ private:
+ static const size_t kArraySize = (1 << (KeySize - 3));
+ static const uint32_t kKeyMask = (1 << KeySize) - 1;
+ static const uint32_t kKeyShift = 16;
+
+ static uint32_t hash1(uint32_t aHash) { return aHash & kKeyMask; }
+ static uint32_t hash2(uint32_t aHash) {
+ return (aHash >> kKeyShift) & kKeyMask;
+ }
+
+ bool getSlot(uint32_t aHash) const {
+ uint32_t index = aHash / 8;
+ uint8_t shift = aHash % 8;
+ uint8_t mask = 1 << shift;
+ return !!(mBits[index] & mask);
+ }
+
+ void setSlot(uint32_t aHash) {
+ uint32_t index = aHash / 8;
+ uint8_t shift = aHash % 8;
+ uint8_t bit = 1 << shift;
+ mBits[index] |= bit;
+ }
+
+ bool getFirstSlot(uint32_t aHash) const { return getSlot(hash1(aHash)); }
+ bool getSecondSlot(uint32_t aHash) const { return getSlot(hash2(aHash)); }
+
+ void setFirstSlot(uint32_t aHash) { setSlot(hash1(aHash)); }
+ void setSecondSlot(uint32_t aHash) { setSlot(hash2(aHash)); }
+
+ uint8_t mBits[kArraySize];
+};
+
+template <unsigned KeySize, class T>
+inline void BitBloomFilter<KeySize, T>::clear() {
+ memset(mBits, 0, kArraySize);
+}
+
+template <unsigned KeySize, class T>
+inline void BitBloomFilter<KeySize, T>::add(uint32_t aHash) {
+ setFirstSlot(aHash);
+ setSecondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void BitBloomFilter<KeySize, T>::add(const T* aValue) {
+ uint32_t hash = aValue->hash();
+ return add(hash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool BitBloomFilter<KeySize, T>::mightContain(
+ uint32_t aHash) const {
+ // Check that all the slots for this hash contain something
+ return getFirstSlot(aHash) && getSecondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool BitBloomFilter<KeySize, T>::mightContain(
+ const T* aValue) const {
+ uint32_t hash = aValue->hash();
+ return mightContain(hash);
+}
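+
+/*
+ * Example usage (an illustrative sketch; |Item| and its hash() are
+ * hypothetical):
+ *
+ *   struct Item {
+ *     uint32_t mKey;
+ *     uint32_t hash() const { return HashGeneric(mKey); }
+ *   };
+ *
+ *   BitBloomFilter<12, Item> filter;
+ *   Item item{42};
+ *   filter.add(&item);
+ *   MOZ_ASSERT(filter.mightContain(&item));  // never a false negative
+ */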
+
+/*
+ * This class implements a counting Bloom filter as described at
+ * <http://en.wikipedia.org/wiki/Bloom_filter#Counting_filters>, with
+ * 8-bit counters.
+ *
+ * Compared to `BitBloomFilter`, this class supports the 'remove' operation.
+ *
+ * The filter uses exactly 2**KeySize bytes of memory.
+ *
+ * Other characteristics are the same as BitBloomFilter.
+ */
+template <unsigned KeySize, class T>
+class CountingBloomFilter {
+ public:
+ CountingBloomFilter() {
+ static_assert(KeySize <= kKeyShift, "KeySize too big");
+
+ clear();
+ }
+
+ /*
+ * Clear the filter. This should be done before reusing it, because
+ * just removing all items doesn't clear counters that hit the upper
+ * bound.
+ */
+ void clear();
+
+ /*
+ * Add an item to the filter.
+ */
+ void add(const T* aValue);
+
+ /*
+ * Remove an item from the filter.
+ */
+ void remove(const T* aValue);
+
+ /*
+ * Check whether the filter might contain an item. This can
+ * sometimes return true even if the item is not in the filter,
+ * but will never return false for items that are actually in the
+ * filter.
+ */
+ bool mightContain(const T* aValue) const;
+
+ /*
+ * Methods for add/remove/contain when we already have a hash computed
+ */
+ void add(uint32_t aHash);
+ void remove(uint32_t aHash);
+ bool mightContain(uint32_t aHash) const;
+
+ private:
+ static const size_t kArraySize = (1 << KeySize);
+ static const uint32_t kKeyMask = (1 << KeySize) - 1;
+ static const uint32_t kKeyShift = 16;
+
+ static uint32_t hash1(uint32_t aHash) { return aHash & kKeyMask; }
+ static uint32_t hash2(uint32_t aHash) {
+ return (aHash >> kKeyShift) & kKeyMask;
+ }
+
+ uint8_t& firstSlot(uint32_t aHash) { return mCounters[hash1(aHash)]; }
+ uint8_t& secondSlot(uint32_t aHash) { return mCounters[hash2(aHash)]; }
+
+ const uint8_t& firstSlot(uint32_t aHash) const {
+ return mCounters[hash1(aHash)];
+ }
+ const uint8_t& secondSlot(uint32_t aHash) const {
+ return mCounters[hash2(aHash)];
+ }
+
+ static bool full(const uint8_t& aSlot) { return aSlot == UINT8_MAX; }
+
+ uint8_t mCounters[kArraySize];
+};
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::clear() {
+ memset(mCounters, 0, kArraySize);
+}
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::add(uint32_t aHash) {
+ uint8_t& slot1 = firstSlot(aHash);
+ if (MOZ_LIKELY(!full(slot1))) {
+ ++slot1;
+ }
+ uint8_t& slot2 = secondSlot(aHash);
+ if (MOZ_LIKELY(!full(slot2))) {
+ ++slot2;
+ }
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void CountingBloomFilter<KeySize, T>::add(const T* aValue) {
+ uint32_t hash = aValue->hash();
+ return add(hash);
+}
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::remove(uint32_t aHash) {
+ // If the slots are saturated, we don't know whether we bumped them when
+ // this item was added, so just leave them full.
+ uint8_t& slot1 = firstSlot(aHash);
+ if (MOZ_LIKELY(!full(slot1))) {
+ --slot1;
+ }
+ uint8_t& slot2 = secondSlot(aHash);
+ if (MOZ_LIKELY(!full(slot2))) {
+ --slot2;
+ }
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void CountingBloomFilter<KeySize, T>::remove(
+ const T* aValue) {
+ uint32_t hash = aValue->hash();
+ remove(hash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool CountingBloomFilter<KeySize, T>::mightContain(
+ uint32_t aHash) const {
+ // Check that all the slots for this hash contain something
+ return firstSlot(aHash) && secondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool CountingBloomFilter<KeySize, T>::mightContain(
+ const T* aValue) const {
+ uint32_t hash = aValue->hash();
+ return mightContain(hash);
+}
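+
+/*
+ * Example usage (an illustrative sketch; |Item| is the same hypothetical
+ * type as in the BitBloomFilter example above):
+ *
+ *   CountingBloomFilter<12, Item> filter;
+ *   Item item{42};
+ *   filter.add(&item);
+ *   MOZ_ASSERT(filter.mightContain(&item));  // never a false negative
+ *   filter.remove(&item);
+ *   // mightContain(&item) is now false unless other items (or a
+ *   // saturated counter) still occupy both of its slots.
+ */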
+
+} // namespace mozilla
+
+#endif /* mozilla_BloomFilter_h */
diff --git a/mfbt/Buffer.h b/mfbt/Buffer.h
new file mode 100644
index 0000000000..c4e0a4be92
--- /dev/null
+++ b/mfbt/Buffer.h
@@ -0,0 +1,197 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Buffer_h
+#define mozilla_Buffer_h
+
+#include <cstddef>
+#include <iterator>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Span.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+namespace mozilla {
+
+/**
+ * A move-only type that wraps a mozilla::UniquePtr<T[]> and the length of
+ * the T[].
+ *
+ * Unlike mozilla::Array, the length is a run-time property.
+ * Unlike mozilla::Vector and nsTArray, it does not have a capacity and
+ * associated growth functionality.
+ * Unlike mozilla::Span, mozilla::Buffer owns the allocation it points to.
+ */
+template <typename T>
+class Buffer final {
+ private:
+ mozilla::UniquePtr<T[]> mData;
+ size_t mLength;
+
+ public:
+ Buffer(const Buffer<T>& aOther) = delete;
+ Buffer<T>& operator=(const Buffer<T>& aOther) = delete;
+
+ /**
+ * Construct a zero-length Buffer (without actually pointing to a heap
+ * allocation).
+ */
+ Buffer() : mData(nullptr), mLength(0) {}
+
+ /**
+ * Construct from raw parts.
+ *
+ * aLength must not be greater than the actual length of the buffer pointed
+ * to by aData.
+ */
+ Buffer(mozilla::UniquePtr<T[]>&& aData, size_t aLength)
+ : mData(std::move(aData)), mLength(aLength) {}
+
+ /**
+ * Move constructor. Sets the moved-from Buffer to zero-length
+ * state.
+ */
+ Buffer(Buffer<T>&& aOther)
+ : mData(std::move(aOther.mData)), mLength(aOther.mLength) {
+ aOther.mLength = 0;
+ }
+
+ /**
+ * Move assignment. Sets the moved-from Buffer to zero-length
+ * state.
+ */
+ Buffer<T>& operator=(Buffer<T>&& aOther) {
+ mData = std::move(aOther.mData);
+ mLength = aOther.mLength;
+ aOther.mLength = 0;
+ return *this;
+ }
+
+ /**
+ * Construct by copying the elements of a Span.
+ *
+ * Allocates the internal buffer infallibly. Use CopyFrom for fallible
+ * allocation.
+ */
+ explicit Buffer(mozilla::Span<const T> aSpan)
+ : mData(mozilla::MakeUniqueForOverwrite<T[]>(aSpan.Length())),
+ mLength(aSpan.Length()) {
+ std::copy(aSpan.cbegin(), aSpan.cend(), mData.get());
+ }
+
+ /**
+ * Create a new Buffer by copying the elements of a Span.
+ *
+ * Allocates the internal buffer fallibly.
+ */
+ static mozilla::Maybe<Buffer<T>> CopyFrom(mozilla::Span<const T> aSpan) {
+ if (aSpan.IsEmpty()) {
+ return Some(Buffer());
+ }
+
+ auto data = mozilla::MakeUniqueForOverwriteFallible<T[]>(aSpan.Length());
+ if (!data) {
+ return mozilla::Nothing();
+ }
+ std::copy(aSpan.cbegin(), aSpan.cend(), data.get());
+ return mozilla::Some(Buffer(std::move(data), aSpan.Length()));
+ }
+
+ /**
+ * Construct a buffer of requested length.
+ *
+ * The contents will be initialized or uninitialized according
+ * to the behavior of mozilla::MakeUnique<T[]>(aLength) for T.
+ *
+ * Allocates the internal buffer infallibly. Use Alloc for fallible
+ * allocation.
+ */
+ explicit Buffer(size_t aLength)
+ : mData(mozilla::MakeUnique<T[]>(aLength)), mLength(aLength) {}
+
+ /**
+ * Create a new Buffer with an internal buffer of requested length.
+ *
+ * The contents will be initialized or uninitialized according to the
+ * behavior of mozilla::MakeUnique<T[]>(aLength) for T.
+ *
+ * Allocates the internal buffer fallibly.
+ */
+ static mozilla::Maybe<Buffer<T>> Alloc(size_t aLength) {
+ auto data = mozilla::MakeUniqueFallible<T[]>(aLength);
+ if (!data) {
+ return mozilla::Nothing();
+ }
+ return mozilla::Some(Buffer(std::move(data), aLength));
+ }
+
+ /**
+ * Create a new Buffer with an internal buffer of requested length.
+ *
+ * This uses MakeUniqueForOverwriteFallible, so the contents will be
+ * default-initialized.
+ *
+ * Allocates the internal buffer fallibly.
+ */
+ static Maybe<Buffer<T>> AllocForOverwrite(size_t aLength) {
+ auto data = MakeUniqueForOverwriteFallible<T[]>(aLength);
+ if (!data) {
+ return Nothing();
+ }
+ return Some(Buffer(std::move(data), aLength));
+ }
+
+ auto AsSpan() const { return mozilla::Span<const T>{mData.get(), mLength}; }
+ auto AsWritableSpan() { return mozilla::Span<T>{mData.get(), mLength}; }
+ operator mozilla::Span<const T>() const { return AsSpan(); }
+ operator mozilla::Span<T>() { return AsWritableSpan(); }
+
+ /**
+ * Guarantees a non-null and aligned pointer
+ * even for the zero-length case.
+ */
+ T* Elements() { return AsWritableSpan().Elements(); }
+ size_t Length() const { return mLength; }
+
+ T& operator[](size_t aIndex) {
+ MOZ_ASSERT(aIndex < mLength);
+ return mData.get()[aIndex];
+ }
+
+ const T& operator[](size_t aIndex) const {
+ MOZ_ASSERT(aIndex < mLength);
+ return mData.get()[aIndex];
+ }
+
+ typedef T* iterator;
+ typedef const T* const_iterator;
+ typedef std::reverse_iterator<T*> reverse_iterator;
+ typedef std::reverse_iterator<const T*> const_reverse_iterator;
+
+ // Methods for range-based for loops.
+ iterator begin() { return mData.get(); }
+ const_iterator begin() const { return mData.get(); }
+ const_iterator cbegin() const { return begin(); }
+ iterator end() { return mData.get() + mLength; }
+ const_iterator end() const { return mData.get() + mLength; }
+ const_iterator cend() const { return end(); }
+
+ // Methods for reverse iterating.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+ const_reverse_iterator crend() const { return rend(); }
+};
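+
+/*
+ * Example usage (an illustrative sketch):
+ *
+ *   mozilla::Buffer<int> buf(4);  // four value-initialized ints
+ *   for (int& v : buf) {
+ *     v = 7;
+ *   }
+ *   mozilla::Maybe<mozilla::Buffer<int>> copy =
+ *       mozilla::Buffer<int>::CopyFrom(buf.AsSpan());
+ *   MOZ_ASSERT(copy.isSome() && copy->Length() == 4);
+ */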
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Buffer_h */
diff --git a/mfbt/BufferList.h b/mfbt/BufferList.h
new file mode 100644
index 0000000000..5556abf700
--- /dev/null
+++ b/mfbt/BufferList.h
@@ -0,0 +1,605 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BufferList_h
+#define mozilla_BufferList_h
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <numeric>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Vector.h"
+
+// BufferList represents a sequence of buffers of data. A BufferList can choose
+// to own its buffers or not. The class handles writing to the buffers,
+// iterating over them, and reading data out. Unlike SegmentedVector, the
+// buffers may be of unequal size. Like SegmentedVector, BufferList is a nice
+// way to avoid large contiguous allocations (which can trigger OOMs).
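+//
+// A minimal usage sketch (illustrative; |data| and |dataLen| are
+// hypothetical):
+//
+//   BufferList<InfallibleAllocPolicy> list(0, 0, 4096);
+//   MOZ_ALWAYS_TRUE(list.WriteBytes(data, dataLen));
+//
+//   auto iter = list.Iter();
+//   char out[64];
+//   size_t toRead = std::min<size_t>(sizeof(out), list.Size());
+//   MOZ_ALWAYS_TRUE(list.ReadBytes(iter, out, toRead));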
+
+class InfallibleAllocPolicy;
+
+namespace mozilla {
+
+template <typename AllocPolicy>
+class BufferList : private AllocPolicy {
+ // Each buffer in a BufferList has a size and a capacity. The first mSize
+ // bytes are initialized and the remaining |mCapacity - mSize| bytes are free.
+ struct Segment {
+ char* mData;
+ size_t mSize;
+ size_t mCapacity;
+
+ Segment(char* aData, size_t aSize, size_t aCapacity)
+ : mData(aData), mSize(aSize), mCapacity(aCapacity) {}
+
+ Segment(const Segment&) = delete;
+ Segment& operator=(const Segment&) = delete;
+
+ Segment(Segment&&) = default;
+ Segment& operator=(Segment&&) = default;
+
+ char* Start() const { return mData; }
+ char* End() const { return mData + mSize; }
+ };
+
+ template <typename OtherAllocPolicy>
+ friend class BufferList;
+
+ public:
+ // For the convenience of callers, all segments are required to be a multiple
+ // of 8 bytes in capacity. Also, every buffer except the last one is required
+ // to be full (i.e., size == capacity). Therefore, a byte at offset N within
+ // the BufferList and stored in memory at an address A will satisfy
+ // (N % Align == A % Align) if Align == 2, 4, or 8.
+ static const size_t kSegmentAlignment = 8;
+
+ // Allocate a BufferList. The BufferList will free all its buffers when it is
+ // destroyed. If an infallible allocator is used, an initial buffer of size
+ // aInitialSize and capacity aInitialCapacity is allocated automatically. This
+ // data will be contiguous and can be accessed via |Start()|. If a fallible
+ // alloc policy is used, aInitialSize must be 0, and the fallible |Init()|
+ // method may be called instead. Subsequent buffers will be allocated with
+ // capacity aStandardCapacity.
+ BufferList(size_t aInitialSize, size_t aInitialCapacity,
+ size_t aStandardCapacity, AllocPolicy aAP = AllocPolicy())
+ : AllocPolicy(aAP),
+ mOwning(true),
+ mSegments(aAP),
+ mSize(0),
+ mStandardCapacity(aStandardCapacity) {
+ MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
+ MOZ_ASSERT(aStandardCapacity % kSegmentAlignment == 0);
+
+ if (aInitialCapacity) {
+ MOZ_ASSERT((aInitialSize == 0 ||
+ std::is_same_v<AllocPolicy, InfallibleAllocPolicy>),
+ "BufferList may only be constructed with an initial size when "
+ "using an infallible alloc policy");
+
+ AllocateSegment(aInitialSize, aInitialCapacity);
+ }
+ }
+
+ BufferList(const BufferList& aOther) = delete;
+
+ BufferList(BufferList&& aOther)
+ : mOwning(aOther.mOwning),
+ mSegments(std::move(aOther.mSegments)),
+ mSize(aOther.mSize),
+ mStandardCapacity(aOther.mStandardCapacity) {
+ aOther.mSegments.clear();
+ aOther.mSize = 0;
+ }
+
+ BufferList& operator=(const BufferList& aOther) = delete;
+
+ BufferList& operator=(BufferList&& aOther) {
+ Clear();
+
+ mOwning = aOther.mOwning;
+ mSegments = std::move(aOther.mSegments);
+ mSize = aOther.mSize;
+ aOther.mSegments.clear();
+ aOther.mSize = 0;
+ return *this;
+ }
+
+ ~BufferList() { Clear(); }
+
+ // Initializes the BufferList with a segment of the given size and capacity.
+ // May only be called once, before any segments have been allocated.
+ bool Init(size_t aInitialSize, size_t aInitialCapacity) {
+ MOZ_ASSERT(mSegments.empty());
+ MOZ_ASSERT(aInitialCapacity != 0);
+ MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
+
+ return AllocateSegment(aInitialSize, aInitialCapacity);
+ }
+
+ bool CopyFrom(const BufferList& aOther) {
+ MOZ_ASSERT(mOwning);
+
+ Clear();
+
+ // We don't make an exact copy of aOther. Instead, create a single segment
+ // with enough space to hold all data in aOther.
+ if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) &
+ ~(kSegmentAlignment - 1))) {
+ return false;
+ }
+
+ size_t offset = 0;
+ for (const Segment& segment : aOther.mSegments) {
+ memcpy(Start() + offset, segment.mData, segment.mSize);
+ offset += segment.mSize;
+ }
+ MOZ_ASSERT(offset == mSize);
+
+ return true;
+ }
+
+ // Returns the sum of the sizes of all the buffers.
+ size_t Size() const { return mSize; }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
+ size_t size = mSegments.sizeOfExcludingThis(aMallocSizeOf);
+ for (Segment& segment : mSegments) {
+ size += aMallocSizeOf(segment.Start());
+ }
+ return size;
+ }
+
+ void Clear() {
+ if (mOwning) {
+ for (Segment& segment : mSegments) {
+ this->free_(segment.mData, segment.mCapacity);
+ }
+ }
+ mSegments.clear();
+
+ mSize = 0;
+ }
+
+ // Iterates over bytes in the segments. You can advance it by as many bytes as
+ // you choose.
+ class IterImpl {
+ // Invariants:
+ // (0) mSegment <= bufferList.mSegments.length()
+ // (1) mData <= mDataEnd
+ // (2) If mSegment is not the last segment, mData < mDataEnd
+ uintptr_t mSegment{0};
+ char* mData{nullptr};
+ char* mDataEnd{nullptr};
+ size_t mAbsoluteOffset{0};
+
+ friend class BufferList;
+
+ public:
+ explicit IterImpl(const BufferList& aBuffers) {
+ if (!aBuffers.mSegments.empty()) {
+ mData = aBuffers.mSegments[0].Start();
+ mDataEnd = aBuffers.mSegments[0].End();
+ }
+ }
+
+ // Returns a pointer to the raw data. It is valid to access up to
+ // RemainingInSegment bytes of this buffer.
+ char* Data() const {
+ MOZ_RELEASE_ASSERT(!Done());
+ return mData;
+ }
+
+ bool operator==(const IterImpl& other) const {
+ return mAbsoluteOffset == other.mAbsoluteOffset;
+ }
+ bool operator!=(const IterImpl& other) const { return !(*this == other); }
+
+ // Returns true if the memory in the range [Data(), Data() + aBytes) is all
+ // part of one contiguous buffer.
+ bool HasRoomFor(size_t aBytes) const {
+ return RemainingInSegment() >= aBytes;
+ }
+
+ // Returns the largest value aBytes for which HasRoomFor(aBytes) will be
+ // true.
+ size_t RemainingInSegment() const {
+ MOZ_RELEASE_ASSERT(mData <= mDataEnd);
+ return mDataEnd - mData;
+ }
+
+ // Returns true if there are at least aBytes bytes remaining in the
+ // BufferList after this iterator.
+ bool HasBytesAvailable(const BufferList& aBuffers, size_t aBytes) const {
+ return TotalBytesAvailable(aBuffers) >= aBytes;
+ }
+
+ // Returns the largest value `aBytes` for which HasBytesAvailable(aBytes)
+ // will be true.
+ size_t TotalBytesAvailable(const BufferList& aBuffers) const {
+ return aBuffers.mSize - mAbsoluteOffset;
+ }
+
+ // Advances the iterator by aBytes bytes. aBytes must not exceed
+ // RemainingInSegment(). If advancing by aBytes takes the iterator to the
+ // end of a buffer, it will be moved to the beginning of the next buffer
+ // unless it is the last buffer.
+ void Advance(const BufferList& aBuffers, size_t aBytes) {
+ const Segment& segment = aBuffers.mSegments[mSegment];
+ MOZ_RELEASE_ASSERT(segment.Start() <= mData);
+ MOZ_RELEASE_ASSERT(mData <= mDataEnd);
+ MOZ_RELEASE_ASSERT(mDataEnd == segment.End());
+
+ MOZ_RELEASE_ASSERT(HasRoomFor(aBytes));
+ mData += aBytes;
+ mAbsoluteOffset += aBytes;
+
+ if (mData == mDataEnd && mSegment + 1 < aBuffers.mSegments.length()) {
+ mSegment++;
+ const Segment& nextSegment = aBuffers.mSegments[mSegment];
+ mData = nextSegment.Start();
+ mDataEnd = nextSegment.End();
+ MOZ_RELEASE_ASSERT(mData < mDataEnd);
+ }
+ }
+
+ // Advance the iterator by aBytes, possibly crossing segments. This function
+ // returns false if it runs out of buffers to advance through. Otherwise it
+ // returns true.
+ bool AdvanceAcrossSegments(const BufferList& aBuffers, size_t aBytes) {
+ // If we don't need to cross segments, we can directly use `Advance` to
+ // get to our destination.
+ if (MOZ_LIKELY(aBytes <= RemainingInSegment())) {
+ Advance(aBuffers, aBytes);
+ return true;
+ }
+
+ // Check if we have enough bytes to scan this far forward.
+ if (!HasBytesAvailable(aBuffers, aBytes)) {
+ return false;
+ }
+
+ // Compare the distance to our target offset from the end of the
+ // BufferList to the distance from the start of our next segment.
+ // Depending on which is closer, we'll advance either forwards or
+ // backwards.
+ size_t targetOffset = mAbsoluteOffset + aBytes;
+ size_t fromEnd = aBuffers.mSize - targetOffset;
+ if (aBytes - RemainingInSegment() < fromEnd) {
+ // Advance through the buffer list until we reach the desired absolute
+ // offset.
+ while (mAbsoluteOffset < targetOffset) {
+ Advance(aBuffers, std::min(targetOffset - mAbsoluteOffset,
+ RemainingInSegment()));
+ }
+ MOZ_ASSERT(mAbsoluteOffset == targetOffset);
+ return true;
+ }
+
+ // Otherwise, scan starting from the end of the BufferList. We advance
+ // backwards from the final segment until we find the segment to end in.
+ //
+ // If we end on a segment boundary, make sure to place the cursor at the
+ // beginning of the next segment.
+ mSegment = aBuffers.mSegments.length() - 1;
+ while (fromEnd > aBuffers.mSegments[mSegment].mSize) {
+ fromEnd -= aBuffers.mSegments[mSegment].mSize;
+ mSegment--;
+ }
+ mDataEnd = aBuffers.mSegments[mSegment].End();
+ mData = mDataEnd - fromEnd;
+ mAbsoluteOffset = targetOffset;
+ MOZ_ASSERT_IF(Done(), mSegment == aBuffers.mSegments.length() - 1);
+ MOZ_ASSERT_IF(Done(), mAbsoluteOffset == aBuffers.mSize);
+ return true;
+ }
+
+ // Returns true when the iterator reaches the end of the BufferList.
+ bool Done() const { return mData == mDataEnd; }
+
+ // The absolute offset of this iterator within the BufferList.
+ size_t AbsoluteOffset() const { return mAbsoluteOffset; }
+
+ private:
+ bool IsIn(const BufferList& aBuffers) const {
+ return mSegment < aBuffers.mSegments.length() &&
+ mData >= aBuffers.mSegments[mSegment].mData &&
+ mData < aBuffers.mSegments[mSegment].End();
+ }
+ };
+
+ // Special convenience method that returns Iter().Data().
+ char* Start() {
+ MOZ_RELEASE_ASSERT(!mSegments.empty());
+ return mSegments[0].mData;
+ }
+ const char* Start() const { return mSegments[0].mData; }
+
+ IterImpl Iter() const { return IterImpl(*this); }
+
+ // Copies aSize bytes from aData into the BufferList. The storage for these
+ // bytes may be split across multiple buffers. Size() is increased by aSize.
+ [[nodiscard]] inline bool WriteBytes(const char* aData, size_t aSize);
+
+ // Allocates a buffer of at most |aMaxSize| bytes and, if successful, returns
+ // that buffer and places its size in |aSize|. If unsuccessful, returns null
+ // and leaves |aSize| undefined.
+ inline char* AllocateBytes(size_t aMaxSize, size_t* aSize);
+
+ // Copies possibly non-contiguous byte range starting at aIter into
+ // aData. aIter is advanced by aSize bytes. Returns false if it runs out of
+ // data before aSize.
+ inline bool ReadBytes(IterImpl& aIter, char* aData, size_t aSize) const;
+
+ // Return a new BufferList that shares storage with this BufferList. The new
+ // BufferList is read-only. It allows iteration over aSize bytes starting at
+ // aIter. Borrow can fail, in which case *aSuccess will be false upon
+ // return. The borrowed BufferList can use a different AllocPolicy than the
+ // original one. However, it is not responsible for freeing buffers, so the
+ // AllocPolicy is only used for the buffer vector.
+ template <typename BorrowingAllocPolicy>
+ BufferList<BorrowingAllocPolicy> Borrow(
+ IterImpl& aIter, size_t aSize, bool* aSuccess,
+ BorrowingAllocPolicy aAP = BorrowingAllocPolicy()) const;
+
+ // Return a new BufferList and move storage from this BufferList to it. The
+ // new BufferList owns the buffers. Move can fail, in which case *aSuccess
+ // will be false upon return. The new BufferList can use a different
+ // AllocPolicy than the original one. The new OtherAllocPolicy is responsible
+ // for freeing buffers, so the OtherAllocPolicy must use a freeing method
+ // compatible with the original one.
+ template <typename OtherAllocPolicy>
+ BufferList<OtherAllocPolicy> MoveFallible(
+ bool* aSuccess, OtherAllocPolicy aAP = OtherAllocPolicy());
+
+ // Return the number of bytes from 'start' to 'end', two iterators within
+ // this BufferList.
+ size_t RangeLength(const IterImpl& start, const IterImpl& end) const {
+ MOZ_ASSERT(start.IsIn(*this) && end.IsIn(*this));
+ return end.mAbsoluteOffset - start.mAbsoluteOffset;
+ }
+
+ // This takes ownership of the data
+ [[nodiscard]] bool WriteBytesZeroCopy(char* aData, size_t aSize,
+ size_t aCapacity) {
+ MOZ_ASSERT(mOwning);
+ MOZ_ASSERT(aSize <= aCapacity);
+
+ // Don't create zero-length segments; that can cause problems for
+ // consumers of the data (bug 1595453).
+ if (aSize == 0) {
+ this->free_(aData, aCapacity);
+ return true;
+ }
+
+ if (!mSegments.append(Segment(aData, aSize, aCapacity))) {
+ this->free_(aData, aCapacity);
+ return false;
+ }
+ mSize += aSize;
+ return true;
+ }
+
+ // Truncate this BufferList at the given iterator location, discarding all
+ // data after this point. After this call, all other iterators will be
+ // invalidated, and the passed-in iterator will be "Done".
+ //
+ // Returns the number of bytes discarded by this truncation.
+ size_t Truncate(IterImpl& aIter);
+
+ private:
+ explicit BufferList(AllocPolicy aAP)
+ : AllocPolicy(aAP), mOwning(false), mSize(0), mStandardCapacity(0) {}
+
+ char* AllocateSegment(size_t aSize, size_t aCapacity) {
+ MOZ_RELEASE_ASSERT(mOwning);
+ MOZ_ASSERT(aCapacity != 0);
+ MOZ_ASSERT(aSize <= aCapacity);
+
+ char* data = this->template pod_malloc<char>(aCapacity);
+ if (!data) {
+ return nullptr;
+ }
+ if (!mSegments.append(Segment(data, aSize, aCapacity))) {
+ this->free_(data, aCapacity);
+ return nullptr;
+ }
+ mSize += aSize;
+ return data;
+ }
+
+ void AssertConsistentSize() const {
+#ifdef DEBUG
+ size_t realSize = 0;
+ for (const auto& segment : mSegments) {
+ realSize += segment.mSize;
+ }
+ MOZ_ASSERT(realSize == mSize, "cached size value is inconsistent!");
+#endif
+ }
+
+ bool mOwning;
+ Vector<Segment, 1, AllocPolicy> mSegments;
+ size_t mSize;
+ size_t mStandardCapacity;
+};
+
+template <typename AllocPolicy>
+[[nodiscard]] bool BufferList<AllocPolicy>::WriteBytes(const char* aData,
+ size_t aSize) {
+ MOZ_RELEASE_ASSERT(mOwning);
+ MOZ_RELEASE_ASSERT(mStandardCapacity);
+
+ size_t copied = 0;
+ while (copied < aSize) {
+ size_t toCopy;
+ char* data = AllocateBytes(aSize - copied, &toCopy);
+ if (!data) {
+ return false;
+ }
+ memcpy(data, aData + copied, toCopy);
+ copied += toCopy;
+ }
+
+ return true;
+}
+
+template <typename AllocPolicy>
+char* BufferList<AllocPolicy>::AllocateBytes(size_t aMaxSize, size_t* aSize) {
+ MOZ_RELEASE_ASSERT(mOwning);
+ MOZ_RELEASE_ASSERT(mStandardCapacity);
+
+ if (!mSegments.empty()) {
+ Segment& lastSegment = mSegments.back();
+
+ size_t capacity = lastSegment.mCapacity - lastSegment.mSize;
+ if (capacity) {
+ size_t size = std::min(aMaxSize, capacity);
+ char* data = lastSegment.mData + lastSegment.mSize;
+
+ lastSegment.mSize += size;
+ mSize += size;
+
+ *aSize = size;
+ return data;
+ }
+ }
+
+ size_t size = std::min(aMaxSize, mStandardCapacity);
+ char* data = AllocateSegment(size, mStandardCapacity);
+ if (data) {
+ *aSize = size;
+ }
+ return data;
+}
+
+template <typename AllocPolicy>
+bool BufferList<AllocPolicy>::ReadBytes(IterImpl& aIter, char* aData,
+ size_t aSize) const {
+ size_t copied = 0;
+ size_t remaining = aSize;
+ while (remaining) {
+ size_t toCopy = std::min(aIter.RemainingInSegment(), remaining);
+ if (!toCopy) {
+ // We've run out of data in the last segment.
+ return false;
+ }
+ memcpy(aData + copied, aIter.Data(), toCopy);
+ copied += toCopy;
+ remaining -= toCopy;
+
+ aIter.Advance(*this, toCopy);
+ }
+
+ return true;
+}
+
+template <typename AllocPolicy>
+template <typename BorrowingAllocPolicy>
+BufferList<BorrowingAllocPolicy> BufferList<AllocPolicy>::Borrow(
+ IterImpl& aIter, size_t aSize, bool* aSuccess,
+ BorrowingAllocPolicy aAP) const {
+ BufferList<BorrowingAllocPolicy> result(aAP);
+
+ size_t size = aSize;
+ while (size) {
+ size_t toAdvance = std::min(size, aIter.RemainingInSegment());
+
+ if (!toAdvance || !result.mSegments.append(
+ typename BufferList<BorrowingAllocPolicy>::Segment(
+ aIter.mData, toAdvance, toAdvance))) {
+ *aSuccess = false;
+ return result;
+ }
+ aIter.Advance(*this, toAdvance);
+ size -= toAdvance;
+ }
+
+ result.mSize = aSize;
+ *aSuccess = true;
+ return result;
+}
+
+template <typename AllocPolicy>
+template <typename OtherAllocPolicy>
+BufferList<OtherAllocPolicy> BufferList<AllocPolicy>::MoveFallible(
+ bool* aSuccess, OtherAllocPolicy aAP) {
+ BufferList<OtherAllocPolicy> result(0, 0, mStandardCapacity, aAP);
+
+ IterImpl iter = Iter();
+ while (!iter.Done()) {
+ size_t toAdvance = iter.RemainingInSegment();
+
+ if (!toAdvance ||
+ !result.mSegments.append(typename BufferList<OtherAllocPolicy>::Segment(
+ iter.mData, toAdvance, toAdvance))) {
+ *aSuccess = false;
+ result.mSegments.clear();
+ return result;
+ }
+ iter.Advance(*this, toAdvance);
+ }
+
+ result.mSize = mSize;
+ mSegments.clear();
+ mSize = 0;
+ *aSuccess = true;
+ return result;
+}
+
+template <typename AllocPolicy>
+size_t BufferList<AllocPolicy>::Truncate(IterImpl& aIter) {
+ MOZ_ASSERT(aIter.IsIn(*this) || aIter.Done());
+ if (aIter.Done()) {
+ return 0;
+ }
+
+ size_t prevSize = mSize;
+
+ // Remove any segments after the iterator's current segment.
+ while (mSegments.length() > aIter.mSegment + 1) {
+ Segment& toFree = mSegments.back();
+ mSize -= toFree.mSize;
+ if (mOwning) {
+ this->free_(toFree.mData, toFree.mCapacity);
+ }
+ mSegments.popBack();
+ }
+
+ // The last segment is now aIter's current segment. Truncate or remove it.
+ Segment& seg = mSegments.back();
+ MOZ_ASSERT(aIter.mDataEnd == seg.End());
+ mSize -= aIter.RemainingInSegment();
+ seg.mSize -= aIter.RemainingInSegment();
+ if (!seg.mSize) {
+ if (mOwning) {
+ this->free_(seg.mData, seg.mCapacity);
+ }
+ mSegments.popBack();
+ }
+
+ // Correct `aIter` to point to the new end of the BufferList.
+ if (mSegments.empty()) {
+ MOZ_ASSERT(mSize == 0);
+ aIter.mSegment = 0;
+ aIter.mData = aIter.mDataEnd = nullptr;
+ } else {
+ aIter.mSegment = mSegments.length() - 1;
+ aIter.mData = aIter.mDataEnd = mSegments.back().End();
+ }
+ MOZ_ASSERT(aIter.Done());
+
+ AssertConsistentSize();
+ return prevSize - mSize;
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_BufferList_h */
diff --git a/mfbt/Casting.h b/mfbt/Casting.h
new file mode 100644
index 0000000000..ebb0e8bc51
--- /dev/null
+++ b/mfbt/Casting.h
@@ -0,0 +1,229 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Cast operations to supplement the built-in casting operations. */
+
+#ifndef mozilla_Casting_h
+#define mozilla_Casting_h
+
+#include "mozilla/Assertions.h"
+
+#include <cstring>
+#include <type_traits>
+#include <limits>
+#include <cmath>
+
+namespace mozilla {
+
+/**
+ * Sets the outparam value of type |To| with the same underlying bit pattern of
+ * |aFrom|.
+ *
+ * |To| and |From| must be types of the same size; be careful of cross-platform
+ * size differences, or this might fail to compile on some but not all
+ * platforms.
+ *
+ * There is also a variant that returns the value directly. In most cases, the
+ * two variants should be identical. However, in the specific case of x86
+ * chips, the behavior differs: returning floating-point values directly is done
+ * through the x87 stack, and x87 loads and stores turn signaling NaNs into
+ * quiet NaNs... silently. Returning floating-point values via outparam,
+ * however, is done entirely within the SSE registers when SSE2 floating-point
+ * is enabled in the compiler, which has semantics-preserving behavior you would
+ * expect.
+ *
+ * If preserving the distinction between signaling NaNs and quiet NaNs is
+ * important to you, you should use the outparam version. In all other cases,
+ * you should use the direct return version.
+ */
+template <typename To, typename From>
+inline void BitwiseCast(const From aFrom, To* aResult) {
+ static_assert(sizeof(From) == sizeof(To),
+ "To and From must have the same size");
+
+ // We could maybe downgrade these to std::is_trivially_copyable, but the
+ // various STLs we use don't all provide it.
+ static_assert(std::is_trivial<From>::value,
+ "shouldn't bitwise-copy a type having non-trivial "
+ "initialization");
+ static_assert(std::is_trivial<To>::value,
+ "shouldn't bitwise-copy a type having non-trivial "
+ "initialization");
+
+ std::memcpy(static_cast<void*>(aResult), static_cast<const void*>(&aFrom),
+ sizeof(From));
+}
+
+template <typename To, typename From>
+inline To BitwiseCast(const From aFrom) {
+ To temp;
+ BitwiseCast<To, From>(aFrom, &temp);
+ return temp;
+}
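+
+/*
+ * For instance (an illustrative sketch), to examine a double's bit pattern:
+ *
+ *   uint64_t bits = BitwiseCast<uint64_t>(1.0);
+ *   // bits == 0x3FF0000000000000 on IEEE-754 platforms
+ */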
+
+namespace detail {
+
+template <typename T>
+constexpr int64_t safe_integer() {
+ static_assert(std::is_floating_point_v<T>);
+ return std::pow(2, std::numeric_limits<T>::digits);
+}
+
+template <typename T>
+constexpr uint64_t safe_integer_unsigned() {
+ static_assert(std::is_floating_point_v<T>);
+ return std::pow(2, std::numeric_limits<T>::digits);
+}
+
+// This is working around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81676,
+// fixed in gcc-10
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+template <typename In, typename Out>
+bool IsInBounds(In aIn) {
+ constexpr bool inSigned = std::is_signed_v<In>;
+ constexpr bool outSigned = std::is_signed_v<Out>;
+ constexpr bool bothSigned = inSigned && outSigned;
+ constexpr bool bothUnsigned = !inSigned && !outSigned;
+ constexpr bool inFloat = std::is_floating_point_v<In>;
+ constexpr bool outFloat = std::is_floating_point_v<Out>;
+ constexpr bool bothFloat = inFloat && outFloat;
+ constexpr bool noneFloat = !inFloat && !outFloat;
+ constexpr Out outMax = std::numeric_limits<Out>::max();
+ constexpr Out outMin = std::numeric_limits<Out>::lowest();
+
+ // This selects the widest of two types, and is used to cast throughout.
+ using select_widest = std::conditional_t<(sizeof(In) > sizeof(Out)), In, Out>;
+
+ if constexpr (bothFloat) {
+ if (aIn > select_widest(outMax) || aIn < select_widest(outMin)) {
+ return false;
+ }
+ }
+ // Normal casting applies, the floating point number is floored.
+ if constexpr (inFloat && !outFloat) {
+ static_assert(sizeof(aIn) <= sizeof(int64_t));
+ // Check if the input floating point is larger than the output bounds. This
+ // catches situations where the input is a float larger than the max of the
+ // output type.
+ if (aIn < static_cast<double>(outMin) ||
+ aIn > static_cast<double>(outMax)) {
+ return false;
+ }
+ // At this point we know that the input can be converted to an integer.
+ // Check if it's larger than the bounds of the target integer.
+ if (outSigned) {
+ int64_t asInteger = static_cast<int64_t>(aIn);
+ if (asInteger < outMin || asInteger > outMax) {
+ return false;
+ }
+ } else {
+ uint64_t asInteger = static_cast<uint64_t>(aIn);
+ if (asInteger > outMax) {
+ return false;
+ }
+ }
+ }
+
+ // Checks if the integer is representable exactly as a floating point value of
+ // a specific width.
+ if constexpr (!inFloat && outFloat) {
+ if constexpr (inSigned) {
+ if (aIn < -safe_integer<Out>() || aIn > safe_integer<Out>()) {
+ return false;
+ }
+ } else {
+ if (aIn >= safe_integer_unsigned<Out>()) {
+ return false;
+ }
+ }
+ }
+
+ if constexpr (noneFloat) {
+ if constexpr (bothUnsigned) {
+ if (aIn > select_widest(outMax)) {
+ return false;
+ }
+ }
+ if constexpr (bothSigned) {
+ if (aIn > select_widest(outMax) || aIn < select_widest(outMin)) {
+ return false;
+ }
+ }
+ if constexpr (inSigned && !outSigned) {
+ if (aIn < 0 || std::make_unsigned_t<In>(aIn) > outMax) {
+ return false;
+ }
+ }
+ if constexpr (!inSigned && outSigned) {
+ if (aIn > select_widest(outMax)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+#pragma GCC diagnostic pop
+
+} // namespace detail
+
+/**
+ * Cast a value of type |From| to a value of type |To|, asserting that the cast
+ * will be a safe cast per C++ (that is, that the value is in the range of
+ * values representable by the type |To|).
+ * In particular, this will fail if an integer cannot be represented exactly
+ * as a floating point value, because it's too large.
+ */
+template <typename To, typename From>
+inline To AssertedCast(const From aFrom) {
+ static_assert(std::is_arithmetic_v<To> && std::is_arithmetic_v<From>);
+ MOZ_ASSERT((detail::IsInBounds<From, To>(aFrom)));
+ return static_cast<To>(aFrom);
+}
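+
+/*
+ * For example (an illustrative sketch):
+ *
+ *   int32_t ok = AssertedCast<int32_t>(int64_t(5));  // in range, fine
+ *   int8_t bad = AssertedCast<int8_t>(1000);  // asserts in debug builds
+ */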
+
+/**
+ * Cast a value of numeric type |From| to a value of numeric type |To|, release
+ * asserting that the cast will be a safe cast per C++ (that is, that the
+ * value is in the range of values representable by the type |To|).
+ * In particular, this will fail if an integer cannot be represented exactly
+ * as a floating point value, because it's too large.
+ */
+template <typename To, typename From>
+inline To ReleaseAssertedCast(const From aFrom) {
+ static_assert(std::is_arithmetic_v<To> && std::is_arithmetic_v<From>);
+ MOZ_RELEASE_ASSERT((detail::IsInBounds<From, To>(aFrom)));
+ return static_cast<To>(aFrom);
+}
+
+namespace detail {
+
+template <typename From>
+class LazyAssertedCastT final {
+ const From mVal;
+
+ public:
+ explicit LazyAssertedCastT(const From val) : mVal(val) {}
+
+ template <typename To>
+ operator To() const {
+ return AssertedCast<To>(mVal);
+ }
+};
+
+} // namespace detail
+
+/**
+ * Like AssertedCast, but infers |To| for AssertedCast lazily based on usage.
+ * > uint8_t foo = LazyAssertedCast(1000); // boom
+ */
+template <typename From>
+inline auto LazyAssertedCast(const From val) {
+ return detail::LazyAssertedCastT<From>(val);
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_Casting_h */
diff --git a/mfbt/ChaosMode.cpp b/mfbt/ChaosMode.cpp
new file mode 100644
index 0000000000..d090e8a37e
--- /dev/null
+++ b/mfbt/ChaosMode.cpp
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ChaosMode.h"
+
+namespace mozilla {
+
+namespace detail {
+
+Atomic<uint32_t, SequentiallyConsistent> gChaosModeCounter(0);
+ChaosFeature gChaosFeatures = None;
+
+} /* namespace detail */
+} /* namespace mozilla */
diff --git a/mfbt/ChaosMode.h b/mfbt/ChaosMode.h
new file mode 100644
index 0000000000..faf7acddf3
--- /dev/null
+++ b/mfbt/ChaosMode.h
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ChaosMode_h
+#define mozilla_ChaosMode_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumSet.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+namespace mozilla {
+
+enum ChaosFeature {
+ None = 0x0,
+ // Altering thread scheduling.
+ ThreadScheduling = 0x1,
+ // Altering network request scheduling.
+ NetworkScheduling = 0x2,
+ // Altering timer scheduling.
+ TimerScheduling = 0x4,
+ // Read and write less-than-requested amounts.
+ IOAmounts = 0x8,
+ // Iterate over hash tables in random order.
+ HashTableIteration = 0x10,
+ // Randomly refuse to use cached version of image (when allowed by spec).
+ ImageCache = 0x20,
+ // Delay dispatching threads to encourage dispatched tasks to run.
+ TaskDispatching = 0x40,
+ // Delay task running to encourage sending threads to run.
+ TaskRunning = 0x80,
+ Any = 0xffffffff,
+};
+
+namespace detail {
+extern MFBT_DATA Atomic<uint32_t, SequentiallyConsistent> gChaosModeCounter;
+extern MFBT_DATA ChaosFeature gChaosFeatures;
+} // namespace detail
+
+/**
+ * When "chaos mode" is activated, code that makes implicitly nondeterministic
+ * choices is encouraged to make random and extreme choices, to test more
+ * code paths and uncover bugs.
+ */
+class ChaosMode {
+ public:
+ static void SetChaosFeature(ChaosFeature aChaosFeature) {
+ detail::gChaosFeatures = aChaosFeature;
+ }
+
+ static bool isActive(ChaosFeature aFeature) {
+ if (detail::gChaosModeCounter > 0) {
+ return true;
+ }
+ return detail::gChaosFeatures & aFeature;
+ }
+
+ /**
+ * Increase the chaos mode activation level. An equivalent number of
+ * calls to leaveChaosMode must be made in order to restore the original
+ * chaos mode state. If the activation level is nonzero all chaos mode
+ * features are activated.
+ */
+ static void enterChaosMode() { detail::gChaosModeCounter++; }
+
+ /**
+ * Decrease the chaos mode activation level. See enterChaosMode().
+ */
+ static void leaveChaosMode() {
+ MOZ_ASSERT(detail::gChaosModeCounter > 0);
+ detail::gChaosModeCounter--;
+ }
+
+ /**
+ * Returns a somewhat (but not uniformly) random uint32_t < aBound.
+ * Not to be used for anything except ChaosMode, since it's not very random.
+ */
+ static uint32_t randomUint32LessThan(uint32_t aBound) {
+ MOZ_ASSERT(aBound != 0);
+ return uint32_t(rand()) % aBound;
+ }
+};
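+
+/*
+ * Typical use (an illustrative sketch; the feature and the delay bound are
+ * arbitrary):
+ *
+ *   if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ *     delayMs += ChaosMode::randomUint32LessThan(100);
+ *   }
+ */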
+
+} /* namespace mozilla */
+
+#endif /* mozilla_ChaosMode_h */
diff --git a/mfbt/Char16.h b/mfbt/Char16.h
new file mode 100644
index 0000000000..7856880830
--- /dev/null
+++ b/mfbt/Char16.h
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implements a UTF-16 character type. */
+
+#ifndef mozilla_Char16_h
+#define mozilla_Char16_h
+
+#ifdef __cplusplus
+
+/*
+ * C++11 introduces a char16_t type and support for UTF-16 string and character
+ * literals. C++11's char16_t is a distinct builtin type. Technically, char16_t
+ * is a 16-bit code unit of a Unicode code point, not a "character".
+ */
+
+# ifdef WIN32
+# define MOZ_USE_CHAR16_WRAPPER
+# include <cstdint>
+# include "mozilla/Attributes.h"
+/**
+ * The Win32 API extensively uses wchar_t, which per the spec is a builtin
+ * type separate from char16_t. That was not the case for MSVC prior to
+ * MSVC 2015, but other compilers follow the spec. We want to mix wchar_t and
+ * char16_t on Windows builds. This class is supposed to make it easier. It
+ * stores char16_t const pointer, but provides implicit casts for wchar_t as
+ * well. On other platforms, we simply use
+ * |typedef const char16_t* char16ptr_t|. Here, we want to make the class
+ * as similar to this typedef as possible, including providing some casts
+ * that are allowed by the typedef.
+ */
+class char16ptr_t {
+ private:
+ const char16_t* mPtr;
+ static_assert(sizeof(char16_t) == sizeof(wchar_t),
+ "char16_t and wchar_t sizes differ");
+
+ public:
+ constexpr MOZ_IMPLICIT char16ptr_t(const char16_t* aPtr) : mPtr(aPtr) {}
+ MOZ_IMPLICIT char16ptr_t(const wchar_t* aPtr)
+ : mPtr(reinterpret_cast<const char16_t*>(aPtr)) {}
+
+ /* Without this, nullptr assignment would be ambiguous. */
+ constexpr MOZ_IMPLICIT char16ptr_t(decltype(nullptr)) : mPtr(nullptr) {}
+
+ constexpr operator const char16_t*() const { return mPtr; }
+ operator const wchar_t*() const {
+ return reinterpret_cast<const wchar_t*>(mPtr);
+ }
+
+ operator wchar_t*() {
+ return const_cast<wchar_t*>(reinterpret_cast<const wchar_t*>(mPtr));
+ }
+
+ constexpr operator const void*() const { return mPtr; }
+ constexpr explicit operator bool() const { return mPtr != nullptr; }
+
+ explicit operator int() const { return reinterpret_cast<intptr_t>(mPtr); }
+ explicit operator unsigned int() const {
+ return reinterpret_cast<uintptr_t>(mPtr);
+ }
+ explicit operator long() const { return reinterpret_cast<intptr_t>(mPtr); }
+ explicit operator unsigned long() const {
+ return reinterpret_cast<uintptr_t>(mPtr);
+ }
+ explicit operator long long() const {
+ return reinterpret_cast<intptr_t>(mPtr);
+ }
+ explicit operator unsigned long long() const {
+ return reinterpret_cast<uintptr_t>(mPtr);
+ }
+
+ /**
+ * Some Windows API calls accept BYTE* but require that data actually be
+ * WCHAR*. Supporting this requires explicit operators to support the
+ * requisite explicit casts.
+ */
+ explicit operator const char*() const {
+ return reinterpret_cast<const char*>(mPtr);
+ }
+ explicit operator const unsigned char*() const {
+ return reinterpret_cast<const unsigned char*>(mPtr);
+ }
+ explicit operator unsigned char*() const {
+ return const_cast<unsigned char*>(
+ reinterpret_cast<const unsigned char*>(mPtr));
+ }
+ explicit operator void*() const { return const_cast<char16_t*>(mPtr); }
+
+ /* Some operators used on pointers. */
+ char16_t operator[](size_t aIndex) const { return mPtr[aIndex]; }
+ bool operator==(const char16ptr_t& aOther) const {
+ return mPtr == aOther.mPtr;
+ }
+ bool operator==(decltype(nullptr)) const { return mPtr == nullptr; }
+ bool operator!=(const char16ptr_t& aOther) const {
+ return mPtr != aOther.mPtr;
+ }
+ bool operator!=(decltype(nullptr)) const { return mPtr != nullptr; }
+ char16ptr_t operator+(int aValue) const { return char16ptr_t(mPtr + aValue); }
+ char16ptr_t operator+(unsigned int aValue) const {
+ return char16ptr_t(mPtr + aValue);
+ }
+ char16ptr_t operator+(long aValue) const {
+ return char16ptr_t(mPtr + aValue);
+ }
+ char16ptr_t operator+(unsigned long aValue) const {
+ return char16ptr_t(mPtr + aValue);
+ }
+ char16ptr_t operator+(long long aValue) const {
+ return char16ptr_t(mPtr + aValue);
+ }
+ char16ptr_t operator+(unsigned long long aValue) const {
+ return char16ptr_t(mPtr + aValue);
+ }
+ ptrdiff_t operator-(const char16ptr_t& aOther) const {
+ return mPtr - aOther.mPtr;
+ }
+};
+
+inline decltype((char*)0 - (char*)0) operator-(const char16_t* aX,
+ const char16ptr_t aY) {
+ return aX - static_cast<const char16_t*>(aY);
+}
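+
+/*
+ * Example (an illustrative sketch, Windows-only): a char16_t string literal
+ * can be handed to an API that expects a wide-character string:
+ *
+ *   char16ptr_t p = u"hello";
+ *   const wchar_t* w = p;  // implicit conversion, no cast needed
+ */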
+
+# else
+
+typedef const char16_t* char16ptr_t;
+
+# endif
+
+static_assert(sizeof(char16_t) == 2, "Is char16_t type 16 bits?");
+static_assert(char16_t(-1) > char16_t(0), "Is char16_t type unsigned?");
+static_assert(sizeof(u'A') == 2, "Is unicode char literal 16 bits?");
+static_assert(sizeof(u""[0]) == 2, "Is unicode string char 16 bits?");
+
+#endif
+
+#endif /* mozilla_Char16_h */
diff --git a/mfbt/CheckedInt.h b/mfbt/CheckedInt.h
new file mode 100644
index 0000000000..d784376d8c
--- /dev/null
+++ b/mfbt/CheckedInt.h
@@ -0,0 +1,804 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Provides checked integers, detecting integer overflow and divide-by-0. */
+
+#ifndef mozilla_CheckedInt_h
+#define mozilla_CheckedInt_h
+
+#include <stdint.h>
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/IntegerTypeTraits.h"
+#include <limits>
+#include <type_traits>
+
+#define MOZILLA_CHECKEDINT_COMPARABLE_VERSION(major, minor, patch) \
+ (major << 16 | minor << 8 | patch)
+
+// Probe for builtin math overflow support. Disabled for 32-bit builds for now
+// since "gcc -m32" claims to support these but its implementation is buggy.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82274
+// Also disabled for clang before version 7 (resp. Xcode clang 10.0.1): while
+// clang 5 and 6 have a working __builtin_add_overflow, it is not constexpr.
+#if defined(HAVE_64BIT_BUILD)
+# if defined(__has_builtin) && \
+ (!defined(__clang_major__) || \
+ (!defined(__apple_build_version__) && __clang_major__ >= 7) || \
+ (defined(__apple_build_version__) && \
+ MOZILLA_CHECKEDINT_COMPARABLE_VERSION( \
+ __clang_major__, __clang_minor__, __clang_patchlevel__) >= \
+ MOZILLA_CHECKEDINT_COMPARABLE_VERSION(10, 0, 1)))
+# define MOZ_HAS_BUILTIN_OP_OVERFLOW (__has_builtin(__builtin_add_overflow))
+# elif defined(__GNUC__)
+// (clang also defines __GNUC__ but it supports __has_builtin since at least
+// v3.1 (released in 2012) so it won't get here.)
+# define MOZ_HAS_BUILTIN_OP_OVERFLOW (__GNUC__ >= 5)
+# else
+# define MOZ_HAS_BUILTIN_OP_OVERFLOW (0)
+# endif
+#else
+# define MOZ_HAS_BUILTIN_OP_OVERFLOW (0)
+#endif
+
+#undef MOZILLA_CHECKEDINT_COMPARABLE_VERSION
+
+namespace mozilla {
+
+template <typename T>
+class CheckedInt;
+
+namespace detail {
+
+/*
+ * Step 1: manually record supported types
+ *
+ * What's nontrivial here is that there are different families of integer
+ * types: basic integer types and stdint types. It is merrily undefined which
+ * types from one family may be just typedefs for a type from another family.
+ *
+ * For example, on GCC 4.6, aside from the basic integer types, the only
+ * other type that isn't just a typedef for one of them is int8_t.
+ */
+
+struct UnsupportedType {};
+
+template <typename IntegerType>
+struct IsSupportedPass2 {
+ static const bool value = false;
+};
+
+template <typename IntegerType>
+struct IsSupported {
+ static const bool value = IsSupportedPass2<IntegerType>::value;
+};
+
+template <>
+struct IsSupported<int8_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint8_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<int16_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint16_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<int32_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint32_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<int64_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint64_t> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<char> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<signed char> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned char> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<short> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned short> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<int> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned int> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<long> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned long> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<long long> {
+ static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned long long> {
+ static const bool value = true;
+};
+
+/*
+ * Step 2: Implement the actual validity checks.
+ *
+ * Ideas taken from IntegerLib, code different.
+ */
+
+template <typename IntegerType, size_t Size = sizeof(IntegerType)>
+struct TwiceBiggerType {
+ typedef typename detail::StdintTypeForSizeAndSignedness<
+ sizeof(IntegerType) * 2, std::is_signed_v<IntegerType>>::Type Type;
+};
+
+template <typename IntegerType>
+struct TwiceBiggerType<IntegerType, 8> {
+ typedef UnsupportedType Type;
+};
+
+template <typename T>
+constexpr bool HasSignBit(T aX) {
+ // In C++, right bit shifts on negative values are undefined by the standard.
+ // Notice that signed-to-unsigned conversions are always well-defined in the
+ // standard, as the value is congruent modulo 2**n, as expected. By contrast,
+ // unsigned-to-signed is only well-defined if the value is representable.
+ return bool(std::make_unsigned_t<T>(aX) >> PositionOfSignBit<T>::value);
+}
+
+// Bitwise ops may return a larger type, so it's good to use this inline
+// helper guaranteeing that the result is really of type T.
+template <typename T>
+constexpr T BinaryComplement(T aX) {
+ return ~aX;
+}
+
+template <typename T, typename U, bool IsTSigned = std::is_signed_v<T>,
+ bool IsUSigned = std::is_signed_v<U>>
+struct DoesRangeContainRange {};
+
+template <typename T, typename U, bool Signedness>
+struct DoesRangeContainRange<T, U, Signedness, Signedness> {
+ static const bool value = sizeof(T) >= sizeof(U);
+};
+
+template <typename T, typename U>
+struct DoesRangeContainRange<T, U, true, false> {
+ static const bool value = sizeof(T) > sizeof(U);
+};
+
+template <typename T, typename U>
+struct DoesRangeContainRange<T, U, false, true> {
+ static const bool value = false;
+};
+
+template <typename T, typename U, bool IsTSigned = std::is_signed_v<T>,
+ bool IsUSigned = std::is_signed_v<U>,
+ bool DoesTRangeContainURange = DoesRangeContainRange<T, U>::value>
+struct IsInRangeImpl {};
+
+template <typename T, typename U, bool IsTSigned, bool IsUSigned>
+struct IsInRangeImpl<T, U, IsTSigned, IsUSigned, true> {
+ static constexpr bool run(U) { return true; }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, true, true, false> {
+ static constexpr bool run(U aX) {
+ return aX <= std::numeric_limits<T>::max() &&
+ aX >= std::numeric_limits<T>::min();
+ }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, false, false, false> {
+ static constexpr bool run(U aX) {
+ return aX <= std::numeric_limits<T>::max();
+ }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, true, false, false> {
+ static constexpr bool run(U aX) {
+ return sizeof(T) > sizeof(U) || aX <= U(std::numeric_limits<T>::max());
+ }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, false, true, false> {
+ static constexpr bool run(U aX) {
+ return sizeof(T) >= sizeof(U)
+ ? aX >= 0
+ : aX >= 0 && aX <= U(std::numeric_limits<T>::max());
+ }
+};
+
+template <typename T, typename U>
+constexpr bool IsInRange(U aX) {
+ return IsInRangeImpl<T, U>::run(aX);
+}
+
+template <typename T>
+constexpr bool IsAddValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+ T dummy;
+ return !__builtin_add_overflow(aX, aY, &dummy);
+#else
+ // Addition is valid if the sign of aX+aY is equal to either that of aX or
+ // that of aY. Since the value of aX+aY is undefined if we have a signed
+ // type, we compute it using the unsigned type of the same size. Beware!
+ // These bitwise operations can return a larger integer type, if T was a
+ // small type like int8_t, so we explicitly cast to T.
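+ //
+ // For the unsigned case, BinaryComplement(aX) equals the maximum value
+ // of T minus aX, so "BinaryComplement(aX) >= aY" is exactly
+ // "aX + aY <= max", i.e. the addition cannot wrap.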
+
+ std::make_unsigned_t<T> ux = aX;
+ std::make_unsigned_t<T> uy = aY;
+ std::make_unsigned_t<T> result = ux + uy;
+ return std::is_signed_v<T>
+ ? HasSignBit(BinaryComplement(T((result ^ aX) & (result ^ aY))))
+ : BinaryComplement(aX) >= aY;
+#endif
+}
+
+template <typename T>
+constexpr bool IsSubValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+ T dummy;
+ return !__builtin_sub_overflow(aX, aY, &dummy);
+#else
+ // Subtraction is valid if either aX and aY have same sign, or aX-aY and aX
+ // have same sign. Since the value of aX-aY is undefined if we have a signed
+ // type, we compute it using the unsigned type of the same size.
+ std::make_unsigned_t<T> ux = aX;
+ std::make_unsigned_t<T> uy = aY;
+ std::make_unsigned_t<T> result = ux - uy;
+
+ return std::is_signed_v<T>
+ ? HasSignBit(BinaryComplement(T((result ^ aX) & (aX ^ aY))))
+ : aX >= aY;
+#endif
+}
+
+template <typename T, bool IsTSigned = std::is_signed_v<T>,
+ bool TwiceBiggerTypeIsSupported =
+ IsSupported<typename TwiceBiggerType<T>::Type>::value>
+struct IsMulValidImpl {};
+
+template <typename T, bool IsTSigned>
+struct IsMulValidImpl<T, IsTSigned, true> {
+ static constexpr bool run(T aX, T aY) {
+ typedef typename TwiceBiggerType<T>::Type TwiceBiggerType;
+ TwiceBiggerType product = TwiceBiggerType(aX) * TwiceBiggerType(aY);
+ return IsInRange<T>(product);
+ }
+};
+
+template <typename T>
+struct IsMulValidImpl<T, true, false> {
+ static constexpr bool run(T aX, T aY) {
+ const T max = std::numeric_limits<T>::max();
+ const T min = std::numeric_limits<T>::min();
+
+ if (aX == 0 || aY == 0) {
+ return true;
+ }
+ if (aX > 0) {
+ return aY > 0 ? aX <= max / aY : aY >= min / aX;
+ }
+
+ // If we reach this point, we know that aX < 0.
+ return aY > 0 ? aX >= min / aY : aY >= max / aX;
+ }
+};
+
+template <typename T>
+struct IsMulValidImpl<T, false, false> {
+ static constexpr bool run(T aX, T aY) {
+ return aY == 0 || aX <= std::numeric_limits<T>::max() / aY;
+ }
+};
+
+template <typename T>
+constexpr bool IsMulValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+ T dummy;
+ return !__builtin_mul_overflow(aX, aY, &dummy);
+#else
+ return IsMulValidImpl<T>::run(aX, aY);
+#endif
+}
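+
+// Illustrative worked example for the division-based branch (not part of
+// the original source; int8_t is used only to keep the numbers small --
+// in practice int8_t takes the twice-bigger-type branch): run(-17, 8) is
+// false because -17 < min / 8 == -16, while run(-16, 8) is true since
+// -16 * 8 == -128 fits exactly.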
+
+template <typename T>
+constexpr bool IsDivValid(T aX, T aY) {
+ // Keep in mind that in the signed case, min/-1 is invalid because
+ // abs(min)>max.
+ return aY != 0 && !(std::is_signed_v<T> &&
+ aX == std::numeric_limits<T>::min() && aY == T(-1));
+}
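+
+// For example (illustrative, not part of the original source),
+// IsDivValid(std::numeric_limits<int32_t>::min(), -1) is false: the
+// mathematical result, 2^31, exceeds the int32_t maximum of 2^31 - 1.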
+
+template <typename T, bool IsTSigned = std::is_signed_v<T>>
+struct IsModValidImpl;
+
+template <typename T>
+constexpr bool IsModValid(T aX, T aY) {
+ return IsModValidImpl<T>::run(aX, aY);
+}
+
+/*
+ * Mod is pretty simple.
+ * For now, let's just use the ANSI C definition:
+ * If aX or aY are negative, the results are implementation-defined.
+ * Consider these invalid.
+ * Undefined for aY=0.
+ * The result will never exceed either aX or aY.
+ *
+ * Checking that aX>=0 is a warning when T is unsigned.
+ */
+
+template <typename T>
+struct IsModValidImpl<T, false> {
+ static constexpr bool run(T aX, T aY) { return aY >= 1; }
+};
+
+template <typename T>
+struct IsModValidImpl<T, true> {
+ static constexpr bool run(T aX, T aY) {
+ if (aX < 0) {
+ return false;
+ }
+ return aY >= 1;
+ }
+};
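+
+// For example (illustrative, not part of the original source):
+// IsModValid(-5, 3) and IsModValid(5, 0) are both false, while
+// IsModValid(5, 3) is true.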
+
+template <typename T, bool IsSigned = std::is_signed_v<T>>
+struct NegateImpl;
+
+template <typename T>
+struct NegateImpl<T, false> {
+ static constexpr CheckedInt<T> negate(const CheckedInt<T>& aVal) {
+ // Handle negation separately for signed/unsigned, for simpler code and to
+ // avoid an MSVC warning negating an unsigned value.
+ static_assert(detail::IsInRange<T>(0), "Integer type can't represent 0");
+ return CheckedInt<T>(T(0), aVal.isValid() && aVal.mValue == 0);
+ }
+};
+
+template <typename T>
+struct NegateImpl<T, true> {
+ static constexpr CheckedInt<T> negate(const CheckedInt<T>& aVal) {
+ // Watch out for the min-value, which (with twos-complement) can't be
+ // negated as -min-value is then (max-value + 1).
+ if (!aVal.isValid() || aVal.mValue == std::numeric_limits<T>::min()) {
+ return CheckedInt<T>(aVal.mValue, false);
+ }
+ /* For some T, arithmetic ops automatically promote to a wider type, so
+ * explicitly do the narrowing cast here. The narrowing cast is valid because
+ * we did the check for min value above. */
+ return CheckedInt<T>(T(-aVal.mValue), true);
+ }
+};
+
+} // namespace detail
+
+/*
+ * Step 3: Now define the CheckedInt class.
+ */
+
+/**
+ * @class CheckedInt
+ * @brief Integer wrapper class checking for integer overflow and other errors
+ * @param T the integer type to wrap. Can be any type among the following:
+ * - any basic integer type such as |int|
+ * - any stdint type such as |int8_t|
+ *
+ * This class implements guarded integer arithmetic. Do a computation, check
+ * that isValid() returns true, you then have a guarantee that no problem, such
+ * as integer overflow, happened during this computation, and you can call
+ * value() to get the plain integer value.
+ *
+ * The arithmetic operators in this class are guaranteed not to raise a signal
+ * (e.g. in case of a division by zero).
+ *
+ * For example, suppose that you want to implement a function that computes
+ * (aX+aY)/aZ, that doesn't crash if aZ==0, and that reports on error (divide by
+ * zero or integer overflow). You could code it as follows:
+ @code
+ bool computeXPlusYOverZ(int aX, int aY, int aZ, int* aResult)
+ {
+ CheckedInt<int> checkedResult = (CheckedInt<int>(aX) + aY) / aZ;
+ if (checkedResult.isValid()) {
+ *aResult = checkedResult.value();
+ return true;
+ } else {
+ return false;
+ }
+ }
+ @endcode
+ *
+ * Implicit conversion from plain integers to checked integers is allowed. The
+ * plain integer is checked to be in range before being cast to the
+ * destination type. This means that the following lines all compile, and the
+ * resulting CheckedInts are correctly detected as valid or invalid:
+ * @code
+ // 1 is of type int, is found to be in range for uint8_t, x is valid
+ CheckedInt<uint8_t> x(1);
+ // -1 is of type int, is found not to be in range for uint8_t, x is invalid
+ CheckedInt<uint8_t> x(-1);
+ // -1 is of type int, is found to be in range for int8_t, x is valid
+ CheckedInt<int8_t> x(-1);
+ // 1000 is of type int16_t, is found not to be in range for int8_t,
+ // x is invalid
+ CheckedInt<int8_t> x(int16_t(1000));
+ // 3123456789 is of type uint32_t, is found not to be in range for int32_t,
+ // x is invalid
+ CheckedInt<int32_t> x(uint32_t(3123456789));
+ * @endcode
+ * Implicit conversion from checked integers to plain integers is not
+ * allowed. As shown in the above example, to get the value of a checked
+ * integer as a normal integer, call value().
+ *
+ * Arithmetic operations between checked and plain integers are allowed; the
+ * result type is the type of the checked integer.
+ *
+ * Checked integers of different types cannot be used in the same arithmetic
+ * expression.
+ *
+ * There are convenience typedefs for all stdint types, of the following form
+ * (these are just 2 examples):
+ @code
+ typedef CheckedInt<int32_t> CheckedInt32;
+ typedef CheckedInt<uint16_t> CheckedUint16;
+ @endcode
+ */
+template <typename T>
+class CheckedInt {
+ protected:
+ T mValue;
+ bool mIsValid;
+
+ template <typename U>
+ constexpr CheckedInt(U aValue, bool aIsValid)
+ : mValue(aValue), mIsValid(aIsValid) {
+ static_assert(std::is_same_v<T, U>,
+ "this constructor must accept only T values");
+ static_assert(detail::IsSupported<T>::value,
+ "This type is not supported by CheckedInt");
+ }
+
+ friend struct detail::NegateImpl<T>;
+
+ public:
+ /**
+ * Constructs a checked integer with given @a value. The checked integer is
+ * initialized as valid or invalid depending on whether the @a value
+ * is in range.
+ *
+ * This constructor is not explicit. Instead, the type of its argument is a
+ * separate template parameter, ensuring that no conversion is performed
+ * before this constructor is actually called. As explained in the above
+ * documentation for class CheckedInt, this constructor checks that its
+ * argument is valid.
+ */
+ template <typename U>
+ MOZ_IMPLICIT MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT constexpr CheckedInt(U aValue)
+ : mValue(T(aValue)), mIsValid(detail::IsInRange<T>(aValue)) {
+ static_assert(
+ detail::IsSupported<T>::value && detail::IsSupported<U>::value,
+ "This type is not supported by CheckedInt");
+ }
+
+ template <typename U>
+ friend class CheckedInt;
+
+ template <typename U>
+ constexpr CheckedInt<U> toChecked() const {
+ CheckedInt<U> ret(mValue);
+ ret.mIsValid = ret.mIsValid && mIsValid;
+ return ret;
+ }
+
+ /** Constructs a valid checked integer with initial value 0 */
+ constexpr CheckedInt() : mValue(T(0)), mIsValid(true) {
+ static_assert(detail::IsSupported<T>::value,
+ "This type is not supported by CheckedInt");
+ static_assert(detail::IsInRange<T>(0), "Integer type can't represent 0");
+ }
+
+ /** @returns the actual value */
+ constexpr T value() const {
+ MOZ_DIAGNOSTIC_ASSERT(
+ mIsValid,
+ "Invalid checked integer (division by zero or integer overflow)");
+ return mValue;
+ }
+
+ /**
+ * @returns true if the checked integer is valid, i.e. is not the result
+ * of an invalid operation or of an operation involving an invalid checked
+ * integer
+ */
+ constexpr bool isValid() const { return mIsValid; }
+
+ template <typename U>
+ friend constexpr CheckedInt<U> operator+(const CheckedInt<U>& aLhs,
+ const CheckedInt<U>& aRhs);
+ template <typename U>
+ constexpr CheckedInt& operator+=(U aRhs);
+ constexpr CheckedInt& operator+=(const CheckedInt<T>& aRhs);
+
+ template <typename U>
+ friend constexpr CheckedInt<U> operator-(const CheckedInt<U>& aLhs,
+ const CheckedInt<U>& aRhs);
+ template <typename U>
+ constexpr CheckedInt& operator-=(U aRhs);
+ constexpr CheckedInt& operator-=(const CheckedInt<T>& aRhs);
+
+ template <typename U>
+ friend constexpr CheckedInt<U> operator*(const CheckedInt<U>& aLhs,
+ const CheckedInt<U>& aRhs);
+ template <typename U>
+ constexpr CheckedInt& operator*=(U aRhs);
+ constexpr CheckedInt& operator*=(const CheckedInt<T>& aRhs);
+
+ template <typename U>
+ friend constexpr CheckedInt<U> operator/(const CheckedInt<U>& aLhs,
+ const CheckedInt<U>& aRhs);
+ template <typename U>
+ constexpr CheckedInt& operator/=(U aRhs);
+ constexpr CheckedInt& operator/=(const CheckedInt<T>& aRhs);
+
+ template <typename U>
+ friend constexpr CheckedInt<U> operator%(const CheckedInt<U>& aLhs,
+ const CheckedInt<U>& aRhs);
+ template <typename U>
+ constexpr CheckedInt& operator%=(U aRhs);
+ constexpr CheckedInt& operator%=(const CheckedInt<T>& aRhs);
+
+ constexpr CheckedInt operator-() const {
+ return detail::NegateImpl<T>::negate(*this);
+ }
+
+ /**
+ * @returns true if the left and right hand sides are valid
+ * and have the same value.
+ *
+ * Note that these semantics are the reason why we don't offer
+ * an operator!=. Indeed, we'd want a!=b to be equivalent to !(a==b)
+ * but that would mean that whenever a or b is invalid, a!=b
+ * is always true, which would be very confusing.
+ *
+ * For similar reasons, operators <, >, <=, >= would be very tricky to
+ * specify, so we just avoid offering them.
+ *
+ * Notice that these == semantics are made more reasonable by these facts:
+ * 1. a==b implies equality at the raw data level
+ * (the converse is false, as a==b is never true among invalids)
+ * 2. This is similar to the behavior of IEEE floats, where a==b
+ * means that a and b have the same value *and* neither is NaN.
+ */
+ constexpr bool operator==(const CheckedInt& aOther) const {
+ return mIsValid && aOther.mIsValid && mValue == aOther.mValue;
+ }
+
+ /** prefix ++ */
+ constexpr CheckedInt& operator++() {
+ *this += 1;
+ return *this;
+ }
+
+ /** postfix ++ */
+ constexpr CheckedInt operator++(int) {
+ CheckedInt tmp = *this;
+ *this += 1;
+ return tmp;
+ }
+
+ /** prefix -- */
+ constexpr CheckedInt& operator--() {
+ *this -= 1;
+ return *this;
+ }
+
+ /** postfix -- */
+ constexpr CheckedInt operator--(int) {
+ CheckedInt tmp = *this;
+ *this -= 1;
+ return tmp;
+ }
+
+ private:
+ /**
+ * The !=, <, <=, >, >= operators are disabled:
+ * see the comment on operator==.
+ */
+ template <typename U>
+ bool operator!=(U aOther) const = delete;
+ template <typename U>
+ bool operator<(U aOther) const = delete;
+ template <typename U>
+ bool operator<=(U aOther) const = delete;
+ template <typename U>
+ bool operator>(U aOther) const = delete;
+ template <typename U>
+ bool operator>=(U aOther) const = delete;
+};
+
+#define MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(NAME, OP) \
+ template <typename T> \
+ constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs, \
+ const CheckedInt<T>& aRhs) { \
+ if (!detail::Is##NAME##Valid(aLhs.mValue, aRhs.mValue)) { \
+ static_assert(detail::IsInRange<T>(0), \
+ "Integer type can't represent 0"); \
+ return CheckedInt<T>(T(0), false); \
+ } \
+ /* For some T, arithmetic ops automatically promote to a wider type, so \
+ * explicitly do the narrowing cast here. The narrowing cast is valid \
+ * because we did the "Is##NAME##Valid" check above. */ \
+ return CheckedInt<T>(T(aLhs.mValue OP aRhs.mValue), \
+ aLhs.mIsValid && aRhs.mIsValid); \
+ }
+
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+# define MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(NAME, OP, FUN) \
+ template <typename T> \
+ constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs, \
+ const CheckedInt<T>& aRhs) { \
+ auto result = T{}; \
+ if (FUN(aLhs.mValue, aRhs.mValue, &result)) { \
+ static_assert(detail::IsInRange<T>(0), \
+ "Integer type can't represent 0"); \
+ return CheckedInt<T>(T(0), false); \
+ } \
+ return CheckedInt<T>(result, aLhs.mIsValid && aRhs.mIsValid); \
+ }
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Add, +, __builtin_add_overflow)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Sub, -, __builtin_sub_overflow)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Mul, *, __builtin_mul_overflow)
+# undef MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2
+#else
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Add, +)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Sub, -)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Mul, *)
+#endif
+
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Div, /)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Mod, %)
+#undef MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR
+
+// Implement castToCheckedInt<T>(x), making sure that
+// - it allows x to be either a CheckedInt<T> or any integer type
+// that can be casted to T
+// - if x is already a CheckedInt<T>, we just return a reference to it,
+// instead of copying it (optimization)
+
+namespace detail {
+
+template <typename T, typename U>
+struct CastToCheckedIntImpl {
+ typedef CheckedInt<T> ReturnType;
+ static constexpr CheckedInt<T> run(U aU) { return aU; }
+};
+
+template <typename T>
+struct CastToCheckedIntImpl<T, CheckedInt<T>> {
+ typedef const CheckedInt<T>& ReturnType;
+ static constexpr const CheckedInt<T>& run(const CheckedInt<T>& aU) {
+ return aU;
+ }
+};
+
+} // namespace detail
+
+template <typename T, typename U>
+constexpr typename detail::CastToCheckedIntImpl<T, U>::ReturnType
+castToCheckedInt(U aU) {
+ static_assert(detail::IsSupported<T>::value && detail::IsSupported<U>::value,
+ "This type is not supported by CheckedInt");
+ return detail::CastToCheckedIntImpl<T, U>::run(aU);
+}
+
+#define MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(OP, COMPOUND_OP) \
+ template <typename T> \
+ template <typename U> \
+ constexpr CheckedInt<T>& CheckedInt<T>::operator COMPOUND_OP(U aRhs) { \
+ *this = *this OP castToCheckedInt<T>(aRhs); \
+ return *this; \
+ } \
+ template <typename T> \
+ constexpr CheckedInt<T>& CheckedInt<T>::operator COMPOUND_OP( \
+ const CheckedInt<T>& aRhs) { \
+ *this = *this OP aRhs; \
+ return *this; \
+ } \
+ template <typename T, typename U> \
+ constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs, U aRhs) { \
+ return aLhs OP castToCheckedInt<T>(aRhs); \
+ } \
+ template <typename T, typename U> \
+ constexpr CheckedInt<T> operator OP(U aLhs, const CheckedInt<T>& aRhs) { \
+ return castToCheckedInt<T>(aLhs) OP aRhs; \
+ }
+
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(+, +=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(*, *=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(-, -=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(/, /=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(%, %=)
+
+#undef MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS
+
+template <typename T, typename U>
+constexpr bool operator==(const CheckedInt<T>& aLhs, U aRhs) {
+ return aLhs == castToCheckedInt<T>(aRhs);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(U aLhs, const CheckedInt<T>& aRhs) {
+ return castToCheckedInt<T>(aLhs) == aRhs;
+}
+
+// Convenience typedefs.
+typedef CheckedInt<int8_t> CheckedInt8;
+typedef CheckedInt<uint8_t> CheckedUint8;
+typedef CheckedInt<int16_t> CheckedInt16;
+typedef CheckedInt<uint16_t> CheckedUint16;
+typedef CheckedInt<int32_t> CheckedInt32;
+typedef CheckedInt<uint32_t> CheckedUint32;
+typedef CheckedInt<int64_t> CheckedInt64;
+typedef CheckedInt<uint64_t> CheckedUint64;
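+
+// Example usage (an illustrative sketch, not part of the original header;
+// |aCount|, |Elem| and AllocateBuffer() are hypothetical):
+//
+//   CheckedUint32 bytes = CheckedUint32(aCount) * sizeof(Elem) + 64;
+//   if (!bytes.isValid()) {
+//     return false;  // the multiplication or addition overflowed
+//   }
+//   AllocateBuffer(bytes.value());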
+
+} // namespace mozilla
+
+#endif /* mozilla_CheckedInt_h */
diff --git a/mfbt/CompactPair.h b/mfbt/CompactPair.h
new file mode 100644
index 0000000000..fa810dc0af
--- /dev/null
+++ b/mfbt/CompactPair.h
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A class holding a pair of objects that tries to conserve storage space. */
+
+#ifndef mozilla_CompactPair_h
+#define mozilla_CompactPair_h
+
+#include <type_traits>
+#include <tuple>
+#include <utility>
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+namespace detail {
+
+enum StorageType { AsBase, AsMember };
+
+// Conserve storage using the Empty Base Optimization -- the fact that empty
+// base classes take up no space -- when one or the other class is stateless
+// and can be used as a base class.
+//
+// The extra conditions on storage for B are necessary so that CompactPairHelper
+// won't ambiguously inherit from either A or B, such that one or the other base
+// class would be inaccessible.
+template <typename A, typename B,
+ detail::StorageType =
+ std::is_empty_v<A> ? detail::AsBase : detail::AsMember,
+ detail::StorageType = std::is_empty_v<B> &&
+ !std::is_base_of<A, B>::value &&
+ !std::is_base_of<B, A>::value
+ ? detail::AsBase
+ : detail::AsMember>
+struct CompactPairHelper;
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, AsMember, AsMember> {
+ protected:
+ template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+ std::size_t... BIndexes>
+ constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+ std::tuple<BArgs...>& aBTuple,
+ std::index_sequence<AIndexes...>,
+ std::index_sequence<BIndexes...>)
+ : mFirstA(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+ mSecondB(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...) {}
+
+ public:
+ template <typename AArg, typename BArg>
+ constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+ : mFirstA(std::forward<AArg>(aA)), mSecondB(std::forward<BArg>(aB)) {}
+
+ constexpr A& first() { return mFirstA; }
+ constexpr const A& first() const { return mFirstA; }
+ constexpr B& second() { return mSecondB; }
+ constexpr const B& second() const { return mSecondB; }
+
+ void swap(CompactPairHelper& aOther) {
+ std::swap(mFirstA, aOther.mFirstA);
+ std::swap(mSecondB, aOther.mSecondB);
+ }
+
+ private:
+ A mFirstA;
+ B mSecondB;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, AsMember, AsBase> : private B {
+ protected:
+ template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+ std::size_t... BIndexes>
+ constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+ std::tuple<BArgs...>& aBTuple,
+ std::index_sequence<AIndexes...>,
+ std::index_sequence<BIndexes...>)
+ : B(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...),
+ mFirstA(std::forward<AArgs>(std::get<AIndexes>(aATuple))...) {}
+
+ public:
+ template <typename AArg, typename BArg>
+ constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+ : B(std::forward<BArg>(aB)), mFirstA(std::forward<AArg>(aA)) {}
+
+ constexpr A& first() { return mFirstA; }
+ constexpr const A& first() const { return mFirstA; }
+ constexpr B& second() { return *this; }
+ constexpr const B& second() const { return *this; }
+
+ void swap(CompactPairHelper& aOther) {
+ std::swap(mFirstA, aOther.mFirstA);
+ std::swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+ }
+
+ private:
+ A mFirstA;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, AsBase, AsMember> : private A {
+ protected:
+ template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+ std::size_t... BIndexes>
+ constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+ std::tuple<BArgs...>& aBTuple,
+ std::index_sequence<AIndexes...>,
+ std::index_sequence<BIndexes...>)
+ : A(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+ mSecondB(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...) {}
+
+ public:
+ template <typename AArg, typename BArg>
+ constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+ : A(std::forward<AArg>(aA)), mSecondB(std::forward<BArg>(aB)) {}
+
+ constexpr A& first() { return *this; }
+ constexpr const A& first() const { return *this; }
+ constexpr B& second() { return mSecondB; }
+ constexpr const B& second() const { return mSecondB; }
+
+ void swap(CompactPairHelper& aOther) {
+ std::swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+ std::swap(mSecondB, aOther.mSecondB);
+ }
+
+ private:
+ B mSecondB;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, AsBase, AsBase> : private A, private B {
+ protected:
+ template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+ std::size_t... BIndexes>
+ constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+ std::tuple<BArgs...>& aBTuple,
+ std::index_sequence<AIndexes...>,
+ std::index_sequence<BIndexes...>)
+ : A(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+ B(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...) {}
+
+ public:
+ template <typename AArg, typename BArg>
+ constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+ : A(std::forward<AArg>(aA)), B(std::forward<BArg>(aB)) {}
+
+ constexpr A& first() { return static_cast<A&>(*this); }
+ constexpr const A& first() const { return static_cast<const A&>(*this); }
+ constexpr B& second() { return static_cast<B&>(*this); }
+ constexpr const B& second() const { return static_cast<const B&>(*this); }
+
+ void swap(CompactPairHelper& aOther) {
+ std::swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+ std::swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+ }
+};
+
+} // namespace detail
+
+/**
+ * CompactPair is the logical concatenation of an instance of A with an
+ * instance of B. Space is conserved when possible. Neither A nor B may be a
+ * final class.
+ *
+ * In general, if space conservation is not critical, it is preferable to use
+ * std::pair.
+ *
+ * It's typically clearer to have individual A and B member fields. Unless you
+ * want the space-conserving qualities of CompactPair, you're probably better
+ * off not using this!
+ *
+ * No guarantees are provided about the memory layout of A and B, the order of
+ * initialization or destruction of A and B, and so on. (This is approximately
+ * required to optimize space usage.) The first/second names are merely
+ * conceptual!
+ */
+template <typename A, typename B>
+struct CompactPair : private detail::CompactPairHelper<A, B> {
+ typedef typename detail::CompactPairHelper<A, B> Base;
+
+ using Base::Base;
+
+ template <typename... AArgs, typename... BArgs>
+ constexpr CompactPair(std::piecewise_construct_t, std::tuple<AArgs...> aFirst,
+ std::tuple<BArgs...> aSecond)
+ : Base(aFirst, aSecond, std::index_sequence_for<AArgs...>(),
+ std::index_sequence_for<BArgs...>()) {}
+
+ CompactPair(CompactPair&& aOther) = default;
+ CompactPair(const CompactPair& aOther) = default;
+
+ CompactPair& operator=(CompactPair&& aOther) = default;
+ CompactPair& operator=(const CompactPair& aOther) = default;
+
+ /** The A instance. */
+ using Base::first;
+ /** The B instance. */
+ using Base::second;
+
+ /** Swap this pair with another pair. */
+ void swap(CompactPair& aOther) { Base::swap(aOther); }
+};
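+
+// Illustrative sketch (not part of the original header): with a stateless
+// class such as the hypothetical EmptyDeleter below, the EBO lets the pair
+// occupy no more space than its non-empty member:
+//
+//   struct EmptyDeleter {};
+//   static_assert(sizeof(mozilla::CompactPair<int*, EmptyDeleter>) ==
+//                     sizeof(int*),
+//                 "the empty member contributes no storage");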
+
+/**
+ * MakeCompactPair allows you to construct a CompactPair instance using type
+ * inference. A call like this:
+ *
+ * MakeCompactPair(Foo(), Bar())
+ *
+ * will return a CompactPair<Foo, Bar>.
+ */
+template <typename A, typename B>
+CompactPair<std::remove_cv_t<std::remove_reference_t<A>>,
+ std::remove_cv_t<std::remove_reference_t<B>>>
+MakeCompactPair(A&& aA, B&& aB) {
+ return CompactPair<std::remove_cv_t<std::remove_reference_t<A>>,
+ std::remove_cv_t<std::remove_reference_t<B>>>(
+ std::forward<A>(aA), std::forward<B>(aB));
+}
+
+/**
+ * CompactPair equality comparison
+ */
+template <typename A, typename B>
+bool operator==(const CompactPair<A, B>& aLhs, const CompactPair<A, B>& aRhs) {
+ return aLhs.first() == aRhs.first() && aLhs.second() == aRhs.second();
+}
+
+} // namespace mozilla
+
+namespace std {
+
+template <typename A, typename B>
+void swap(mozilla::CompactPair<A, B>& aX, mozilla::CompactPair<A, B>& aY) {
+ aX.swap(aY);
+}
+
+} // namespace std
+
+#endif /* mozilla_CompactPair_h */
diff --git a/mfbt/Compiler.h b/mfbt/Compiler.h
new file mode 100644
index 0000000000..96c276186d
--- /dev/null
+++ b/mfbt/Compiler.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various compiler checks. */
+
+#ifndef mozilla_Compiler_h
+#define mozilla_Compiler_h
+
+#define MOZ_IS_GCC 0
+
+#if !defined(__clang__) && defined(__GNUC__)
+
+# undef MOZ_IS_GCC
+# define MOZ_IS_GCC 1
+/*
+ * These macros should simplify gcc version checking. For example, to check
+ * for gcc 4.7.1 or later, check `#if MOZ_GCC_VERSION_AT_LEAST(4, 7, 1)`.
+ */
+# define MOZ_GCC_VERSION_AT_LEAST(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
+ ((major) * 10000 + (minor) * 100 + (patchlevel)))
+# define MOZ_GCC_VERSION_AT_MOST(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) <= \
+ ((major) * 10000 + (minor) * 100 + (patchlevel)))
+# if !MOZ_GCC_VERSION_AT_LEAST(6, 1, 0)
+# error "mfbt (and Gecko) require at least gcc 6.1 to build."
+# endif
+
+#endif
+
+#endif /* mozilla_Compiler_h */
diff --git a/mfbt/Compression.cpp b/mfbt/Compression.cpp
new file mode 100644
index 0000000000..b0c3db6980
--- /dev/null
+++ b/mfbt/Compression.cpp
@@ -0,0 +1,182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Compression.h"
+#include "mozilla/CheckedInt.h"
+
+// Without including <string>, MSVC 2015 complains about e.g. the impossibility
+// to convert `const void* const` to `void*` when calling memchr from
+// corecrt_memory.h.
+#include <string>
+
+#include "lz4/lz4.h"
+#include "lz4/lz4frame.h"
+
+using namespace mozilla;
+using namespace mozilla::Compression;
+
+/* Our wrappers */
+
+size_t LZ4::compress(const char* aSource, size_t aInputSize, char* aDest) {
+ CheckedInt<int> inputSizeChecked = aInputSize;
+ MOZ_ASSERT(inputSizeChecked.isValid());
+ return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(),
+ LZ4_compressBound(inputSizeChecked.value()));
+}
+
+size_t LZ4::compressLimitedOutput(const char* aSource, size_t aInputSize,
+ char* aDest, size_t aMaxOutputSize) {
+ CheckedInt<int> inputSizeChecked = aInputSize;
+ MOZ_ASSERT(inputSizeChecked.isValid());
+ CheckedInt<int> maxOutputSizeChecked = aMaxOutputSize;
+ MOZ_ASSERT(maxOutputSizeChecked.isValid());
+ return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(),
+ maxOutputSizeChecked.value());
+}
+
+bool LZ4::decompress(const char* aSource, size_t aInputSize, char* aDest,
+ size_t aMaxOutputSize, size_t* aOutputSize) {
+ CheckedInt<int> maxOutputSizeChecked = aMaxOutputSize;
+ MOZ_ASSERT(maxOutputSizeChecked.isValid());
+ CheckedInt<int> inputSizeChecked = aInputSize;
+ MOZ_ASSERT(inputSizeChecked.isValid());
+
+ int ret = LZ4_decompress_safe(aSource, aDest, inputSizeChecked.value(),
+ maxOutputSizeChecked.value());
+ if (ret >= 0) {
+ *aOutputSize = ret;
+ return true;
+ }
+
+ *aOutputSize = 0;
+ return false;
+}
+
+bool LZ4::decompressPartial(const char* aSource, size_t aInputSize, char* aDest,
+ size_t aMaxOutputSize, size_t* aOutputSize) {
+ CheckedInt<int> maxOutputSizeChecked = aMaxOutputSize;
+ MOZ_ASSERT(maxOutputSizeChecked.isValid());
+ CheckedInt<int> inputSizeChecked = aInputSize;
+ MOZ_ASSERT(inputSizeChecked.isValid());
+
+ int ret = LZ4_decompress_safe_partial(
+ aSource, aDest, inputSizeChecked.value(), maxOutputSizeChecked.value(),
+ maxOutputSizeChecked.value());
+ if (ret >= 0) {
+ *aOutputSize = ret;
+ return true;
+ }
+
+ *aOutputSize = 0;
+ return false;
+}
+
+LZ4FrameCompressionContext::LZ4FrameCompressionContext(int aCompressionLevel,
+ size_t aMaxSrcSize,
+ bool aChecksum,
+ bool aStableSrc)
+ : mContext(nullptr),
+ mCompressionLevel(aCompressionLevel),
+ mGenerateChecksum(aChecksum),
+ mStableSrc(aStableSrc),
+ mMaxSrcSize(aMaxSrcSize),
+ mWriteBufLen(0) {
+ LZ4F_contentChecksum_t checksum =
+ mGenerateChecksum ? LZ4F_contentChecksumEnabled : LZ4F_noContentChecksum;
+ LZ4F_preferences_t prefs = {
+ {
+ LZ4F_max256KB,
+ LZ4F_blockLinked,
+ checksum,
+ },
+ mCompressionLevel,
+ };
+ mWriteBufLen = LZ4F_compressBound(mMaxSrcSize, &prefs);
+ LZ4F_errorCode_t err = LZ4F_createCompressionContext(&mContext, LZ4F_VERSION);
+ MOZ_RELEASE_ASSERT(!LZ4F_isError(err));
+}
+
+LZ4FrameCompressionContext::~LZ4FrameCompressionContext() {
+ LZ4F_freeCompressionContext(mContext);
+}
+
+Result<Span<const char>, size_t> LZ4FrameCompressionContext::BeginCompressing(
+ Span<char> aWriteBuffer) {
+ mWriteBuffer = aWriteBuffer;
+ LZ4F_contentChecksum_t checksum =
+ mGenerateChecksum ? LZ4F_contentChecksumEnabled : LZ4F_noContentChecksum;
+ LZ4F_preferences_t prefs = {
+ {
+ LZ4F_max256KB,
+ LZ4F_blockLinked,
+ checksum,
+ },
+ mCompressionLevel,
+ };
+ size_t headerSize = LZ4F_compressBegin(mContext, mWriteBuffer.Elements(),
+ mWriteBufLen, &prefs);
+ if (LZ4F_isError(headerSize)) {
+ return Err(headerSize);
+ }
+
+ return Span{static_cast<const char*>(mWriteBuffer.Elements()), headerSize};
+}
+
+Result<Span<const char>, size_t>
+LZ4FrameCompressionContext::ContinueCompressing(Span<const char> aInput) {
+ LZ4F_compressOptions_t opts = {};
+ opts.stableSrc = (uint32_t)mStableSrc;
+ size_t outputSize =
+ LZ4F_compressUpdate(mContext, mWriteBuffer.Elements(), mWriteBufLen,
+ aInput.Elements(), aInput.Length(), &opts);
+ if (LZ4F_isError(outputSize)) {
+ return Err(outputSize);
+ }
+
+ return Span{static_cast<const char*>(mWriteBuffer.Elements()), outputSize};
+}
+
+Result<Span<const char>, size_t> LZ4FrameCompressionContext::EndCompressing() {
+ size_t outputSize =
+ LZ4F_compressEnd(mContext, mWriteBuffer.Elements(), mWriteBufLen,
+ /* options */ nullptr);
+ if (LZ4F_isError(outputSize)) {
+ return Err(outputSize);
+ }
+
+ return Span{static_cast<const char*>(mWriteBuffer.Elements()), outputSize};
+}
+
+LZ4FrameDecompressionContext::LZ4FrameDecompressionContext(bool aStableDest)
+ : mContext(nullptr), mStableDest(aStableDest) {
+ LZ4F_errorCode_t err =
+ LZ4F_createDecompressionContext(&mContext, LZ4F_VERSION);
+ MOZ_RELEASE_ASSERT(!LZ4F_isError(err));
+}
+
+LZ4FrameDecompressionContext::~LZ4FrameDecompressionContext() {
+ LZ4F_freeDecompressionContext(mContext);
+}
+
+Result<LZ4FrameDecompressionResult, size_t>
+LZ4FrameDecompressionContext::Decompress(Span<char> aOutput,
+ Span<const char> aInput) {
+ LZ4F_decompressOptions_t opts = {};
+ opts.stableDst = (uint32_t)mStableDest;
+ size_t outBytes = aOutput.Length();
+ size_t inBytes = aInput.Length();
+ size_t result = LZ4F_decompress(mContext, aOutput.Elements(), &outBytes,
+ aInput.Elements(), &inBytes, &opts);
+ if (LZ4F_isError(result)) {
+ return Err(result);
+ }
+
+ LZ4FrameDecompressionResult decompressionResult = {};
+ decompressionResult.mFinished = !result;
+ decompressionResult.mSizeRead = inBytes;
+ decompressionResult.mSizeWritten = outBytes;
+ return decompressionResult;
+}
diff --git a/mfbt/Compression.h b/mfbt/Compression.h
new file mode 100644
index 0000000000..d9f787c0b4
--- /dev/null
+++ b/mfbt/Compression.h
@@ -0,0 +1,218 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various simple compression/decompression functions. */
+
+#ifndef mozilla_Compression_h_
+#define mozilla_Compression_h_
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+#include "mozilla/ResultVariant.h"
+#include "mozilla/Span.h"
+#include "mozilla/UniquePtr.h"
+
+struct LZ4F_cctx_s; // compression context
+struct LZ4F_dctx_s; // decompression context
+
+namespace mozilla {
+namespace Compression {
+
+/**
+ * LZ4 is a very fast byte-wise compression algorithm.
+ *
+ * Compared to Google's Snappy it is faster to compress and decompress and
+ * generally produces output of about the same size.
+ *
+ * Compared to zlib it compresses at about 10x the speed, decompresses at about
+ * 4x the speed and produces output of about 1.5x the size.
+ */
+
+class LZ4 {
+ public:
+ /**
+ * Compresses |aInputSize| bytes from |aSource| into |aDest|. Destination
+ * buffer must be already allocated, and must be sized to handle worst cases
+ * situations (input data not compressible). Worst case size evaluation is
+ * provided by function maxCompressedSize()
+ *
+ * @param aInputSize is the input size. Max supported value is ~1.9GB
+ * @return the number of bytes written in buffer |aDest|
+ */
+ static MFBT_API size_t compress(const char* aSource, size_t aInputSize,
+ char* aDest);
+
+ /**
+ * Compress |aInputSize| bytes from |aSource| into an output buffer
+ * |aDest| of maximum size |aMaxOutputSize|. If it cannot achieve it,
+ * compression will stop, and the result of the function will be zero.
+ * |aDest| will still be written to, but since the number of input
+ * bytes consumed is not returned, the result is not usable.
+ *
+ * This function never writes outside of provided output buffer.
+ *
+ * @param aInputSize is the input size. Max supported value is ~1.9GB
+ * @param aMaxOutputSize is the size of the destination buffer (which must
+ * be already allocated)
+ * @return the number of bytes written in buffer |aDest| or 0 if the
+ * compression fails
+ */
+ static MFBT_API size_t compressLimitedOutput(const char* aSource,
+ size_t aInputSize, char* aDest,
+ size_t aMaxOutputSize);
+
+ /**
+ * If the source stream is malformed, the function will stop decoding
+ * and return false.
+ *
+ * This function never writes beyond aDest + aMaxOutputSize, and is
+ * therefore protected against malicious data packets.
+ *
+ * Note: Destination buffer must be already allocated. This version is
+ * slightly slower than the decompress without the aMaxOutputSize.
+ *
+ * @param aInputSize is the length of the input compressed data
+ * @param aMaxOutputSize is the size of the destination buffer (which must be
+ * already allocated)
+ * @param aOutputSize the actual number of bytes decoded in the destination
+ * buffer (necessarily <= aMaxOutputSize)
+ * @return true on success, false on failure
+ */
+ [[nodiscard]] static MFBT_API bool decompress(const char* aSource,
+ size_t aInputSize, char* aDest,
+ size_t aMaxOutputSize,
+ size_t* aOutputSize);
+
+ /**
+ * If the source stream is malformed, the function will stop decoding
+ * and return false.
+ *
+ * This function never writes beyond aDest + aMaxOutputSize, and is
+ * therefore protected against malicious data packets. It also ignores
+ * unconsumed input upon reaching aMaxOutputSize and can therefore be used
+ * for partial decompression.
+ *
+ * Note: Destination buffer must be already allocated. This version is
+ * slightly slower than the decompress without the aMaxOutputSize.
+ *
+ * @param aInputSize is the length of the input compressed data
+ * @param aMaxOutputSize is the size of the destination buffer (which must be
+ * already allocated)
+ * @param aOutputSize the actual number of bytes decoded in the destination
+ * buffer (necessarily <= aMaxOutputSize)
+ * @return true on success, false on failure
+ */
+ [[nodiscard]] static MFBT_API bool decompressPartial(const char* aSource,
+ size_t aInputSize,
+ char* aDest,
+ size_t aMaxOutputSize,
+ size_t* aOutputSize);
+
+ /*
+ * Provides the maximum size that LZ4 may output in a "worst case"
+ * scenario (input data not compressible), primarily useful for memory
+ * allocation of the output buffer.
+ * Note: this function is limited by the "int" range (2^31-1).
+ *
+ * @param aInputSize is the input size. Max supported value is ~1.9GB
+ * @return maximum output size in a "worst case" scenario
+ */
+ static inline size_t maxCompressedSize(size_t aInputSize) {
+ size_t max = (aInputSize + (aInputSize / 255) + 16);
+ MOZ_ASSERT(max > aInputSize);
+ return max;
+ }
+};
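+
+// Example round trip (an illustrative sketch, not part of the original
+// header; |src| and |srcLen| are hypothetical):
+//
+//   size_t bound = LZ4::maxCompressedSize(srcLen);
+//   auto packed = MakeUnique<char[]>(bound);
+//   size_t packedLen = LZ4::compress(src, srcLen, packed.get());
+//   auto unpacked = MakeUnique<char[]>(srcLen);
+//   size_t unpackedLen;
+//   if (!LZ4::decompress(packed.get(), packedLen, unpacked.get(), srcLen,
+//                        &unpackedLen)) {
+//     return false;  // malformed stream
+//   }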
+
+/**
+ * Context for LZ4 Frame-based streaming compression. Use this if you
+ * want to incrementally compress something or if you want to compress
+ * something such that another application can read it.
+ */
+class LZ4FrameCompressionContext final {
+ public:
+ MFBT_API LZ4FrameCompressionContext(int aCompressionLevel, size_t aMaxSrcSize,
+ bool aChecksum, bool aStableSrc = false);
+
+ MFBT_API ~LZ4FrameCompressionContext();
+
+ size_t GetRequiredWriteBufferLength() { return mWriteBufLen; }
+
+ /**
+ * Begin streaming frame-based compression.
+ *
+ * @return a Result with a Span containing the frame header, or an lz4 error
+ * code (size_t).
+ */
+ MFBT_API Result<Span<const char>, size_t> BeginCompressing(
+ Span<char> aWriteBuffer);
+
+ /**
+ * Continue streaming frame-based compression with the provided input.
+ *
+ * @param aInput input buffer to be compressed.
+ * @return a Result with a Span containing compressed output, or an lz4 error
+ * code (size_t).
+ */
+ MFBT_API Result<Span<const char>, size_t> ContinueCompressing(
+ Span<const char> aInput);
+
+ /**
+ * Finalize streaming frame-based compression with the provided input.
+ *
+ * @return a Result with a Span containing compressed output and the frame
+ * footer, or an lz4 error code (size_t).
+ */
+ MFBT_API Result<Span<const char>, size_t> EndCompressing();
+
+ private:
+ LZ4F_cctx_s* mContext;
+ int mCompressionLevel;
+ bool mGenerateChecksum;
+ bool mStableSrc;
+ size_t mMaxSrcSize;
+ size_t mWriteBufLen;
+ Span<char> mWriteBuffer;
+};
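+
+// Illustrative streaming sketch (not part of the original header; |output|,
+// |chunk| and |maxChunkSize| are hypothetical):
+//
+//   LZ4FrameCompressionContext ctx(/* aCompressionLevel */ 1, maxChunkSize,
+//                                  /* aChecksum */ true);
+//   auto buf = MakeUnique<char[]>(ctx.GetRequiredWriteBufferLength());
+//   Span<char> writeBuf(buf.get(), ctx.GetRequiredWriteBufferLength());
+//   auto header = ctx.BeginCompressing(writeBuf);  // frame header
+//   // Append header.unwrap() to |output|; then, for each input chunk:
+//   auto block = ctx.ContinueCompressing(chunk);
+//   // Append block.unwrap() to |output|; finally write the frame footer:
+//   auto footer = ctx.EndCompressing();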
+
+struct LZ4FrameDecompressionResult {
+ size_t mSizeRead;
+ size_t mSizeWritten;
+ bool mFinished;
+};
+
+/**
+ * Context for LZ4 Frame-based streaming decompression. Use this if you
+ * want to decompress something compressed by LZ4FrameCompressionContext
+ * or by another application.
+ */
+class LZ4FrameDecompressionContext final {
+ public:
+ explicit MFBT_API LZ4FrameDecompressionContext(bool aStableDest = false);
+ MFBT_API ~LZ4FrameDecompressionContext();
+
+ /**
+ * Decompress a buffer/part of a buffer compressed with
+ * LZ4FrameCompressionContext or another application.
+ *
+ * @param aOutput output buffer to write results into.
+ * @param aInput input buffer to be decompressed.
+ * @return a Result with information on bytes read/written and whether we
+ * completely decompressed the input into the output, or an lz4 error code
+ * (size_t).
+ */
+ MFBT_API Result<LZ4FrameDecompressionResult, size_t> Decompress(
+ Span<char> aOutput, Span<const char> aInput);
+
+ private:
+ LZ4F_dctx_s* mContext;
+ bool mStableDest;
+};
+
+} /* namespace Compression */
+} /* namespace mozilla */
+
+#endif /* mozilla_Compression_h_ */
diff --git a/mfbt/DbgMacro.h b/mfbt/DbgMacro.h
new file mode 100644
index 0000000000..3247b993c0
--- /dev/null
+++ b/mfbt/DbgMacro.h
@@ -0,0 +1,206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_DbgMacro_h
+#define mozilla_DbgMacro_h
+
+/* a MOZ_DBG macro that outputs a wrapped value to stderr then returns it */
+
+#include "mozilla/MacroForEach.h"
+#include "mozilla/Span.h"
+
+#include <stdio.h>
+#include <sstream>
+
+template <typename T>
+class nsTSubstring;
+
+#ifdef ANDROID
+# include <android/log.h>
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+// Predicate to check whether T can be inserted into an ostream.
+template <typename T, typename = decltype(std::declval<std::ostream&>()
+ << std::declval<T>())>
+std::true_type supports_os_test(const T&);
+std::false_type supports_os_test(...);
+
+template <typename T>
+using supports_os = decltype(supports_os_test(std::declval<T>()));
+
+} // namespace detail
+
+// Helper function to write a value to an ostream.
+//
+// This handles pointer values where the type being pointed to supports being
+// inserted into an ostream, and we write out the value being pointed to in
+// addition to the pointer value.
+template <typename T>
+auto DebugValue(std::ostream& aOut, T* aValue)
+ -> std::enable_if_t<mozilla::detail::supports_os<T>::value, std::ostream&> {
+ if (aValue) {
+ aOut << *aValue << " @ " << aValue;
+ } else {
+ aOut << "null";
+ }
+ return aOut;
+}
+
+// Helper function to write a value to an ostream.
+//
+// This handles all pointer types that cannot be dereferenced and inserted into
+// an ostream.
+template <typename T>
+auto DebugValue(std::ostream& aOut, T* aValue)
+ -> std::enable_if_t<!mozilla::detail::supports_os<T>::value,
+ std::ostream&> {
+ return aOut << aValue;
+}
+
+// Helper function to write a value to an ostream.
+//
+// This handles XPCOM string types.
+template <typename T>
+auto DebugValue(std::ostream& aOut, const T& aValue)
+ -> std::enable_if_t<std::is_base_of<nsTSubstring<char>, T>::value ||
+ std::is_base_of<nsTSubstring<char16_t>, T>::value,
+ std::ostream&> {
+ return aOut << '"' << aValue << '"';
+}
+
+// Helper function to write a value to an ostream.
+//
+// This handles all other types.
+template <typename T>
+auto DebugValue(std::ostream& aOut, const T& aValue)
+ -> std::enable_if_t<!std::is_base_of<nsTSubstring<char>, T>::value &&
+ !std::is_base_of<nsTSubstring<char16_t>, T>::value,
+ std::ostream&> {
+ return aOut << aValue;
+}
+
+namespace detail {
+
+// Helper function template for MOZ_DBG.
+template <typename T>
+auto&& MozDbg(const char* aFile, int aLine, const char* aExpression,
+ T&& aValue) {
+ std::ostringstream s;
+ s << "[MozDbg] [" << aFile << ':' << aLine << "] " << aExpression << " = ";
+ mozilla::DebugValue(s, std::forward<T>(aValue));
+ s << '\n';
+#ifdef ANDROID
+ __android_log_print(ANDROID_LOG_INFO, "Gecko", "%s", s.str().c_str());
+#else
+ fputs(s.str().c_str(), stderr);
+#endif
+ return std::forward<T>(aValue);
+}
+
+} // namespace detail
+
+} // namespace mozilla
+
+template <class ElementType, size_t Extent>
+std::ostream& operator<<(std::ostream& aOut,
+ const mozilla::Span<ElementType, Extent>& aSpan) {
+ aOut << '[';
+ if (!aSpan.IsEmpty()) {
+ aOut << aSpan[0];
+ for (size_t i = 1; i < aSpan.Length(); ++i) {
+ aOut << ", " << aSpan[i];
+ }
+ }
+ return aOut << ']';
+}
+
+// Don't define this for char[], since operator<<(ostream&, char*) is already
+// defined.
+template <typename T, size_t N,
+ typename = std::enable_if_t<!std::is_same<T, char>::value>>
+std::ostream& operator<<(std::ostream& aOut, const T (&aArray)[N]) {
+ return aOut << mozilla::Span(aArray);
+}
+
+// MOZ_DBG is a macro like the Rust dbg!() macro -- it will print out the
+// expression passed to it to stderr and then return the value. It is not
+// available in MOZILLA_OFFICIAL builds, so you shouldn't land any uses of it in
+// the tree.
+//
+// It should work for any type T that has an operator<<(std::ostream&, const T&)
+// defined for it.
+//
+// Note 1: Using MOZ_DBG may cause copies to be made of temporary values:
+//
+// struct A {
+// A(int);
+// A(const A&);
+//
+// int x;
+// };
+//
+// void f(A);
+//
+// f(A{1}); // may (and, in C++17, will) elide the creation of a temporary
+// // for A{1} and instead initialize the function argument
+// // directly using the A(int) constructor
+//
+// f(MOZ_DBG(A{1})); // will create and return a temporary for A{1}, which
+// // then will be passed to the A(const A&) copy
+// // constructor to initialize f's argument
+//
+// Note 2: MOZ_DBG cannot be used to wrap a prvalue that is being used to
+// initialize an object if its type has no move constructor:
+//
+// struct B {
+// B() = default;
+// B(B&&) = delete;
+// };
+//
+// B b1 = B(); // fine, initializes b1 directly
+//
+// B b2 = MOZ_DBG(B()); // compile error: MOZ_DBG needs to materialize a
+// // temporary for B() so it can be passed to
+// // operator<<, but that temporary is returned from
+// // MOZ_DBG as an rvalue reference and so wants to
+// // invoke B's move constructor to initialize b2
+#ifndef MOZILLA_OFFICIAL
+# define MOZ_DBG(...) \
+ mozilla::detail::MozDbg(__FILE__, __LINE__, #__VA_ARGS__, __VA_ARGS__)
+#endif
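+
+// For example (illustrative; |width| and |height| are hypothetical):
+//
+//   int area = MOZ_DBG(width * height);
+//
+// prints something like "[MozDbg] [foo.cpp:42] width * height = 12" to
+// stderr and then assigns 12 to |area|.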
+
+// Helper macro for MOZ_DEFINE_DBG.
+#define MOZ_DBG_FIELD(name_) << #name_ << " = " << aValue.name_
+
+// Macro to define an operator<<(ostream&) for a struct or class that displays
+// the type name and the values of the specified member variables. Must be
+// called inside the struct or class.
+//
+// For example:
+//
+// struct Point {
+// float x;
+// float y;
+//
+// MOZ_DEFINE_DBG(Point, x, y)
+// };
+//
+// generates an operator<< that outputs strings like
+// "Point { x = 1.0, y = 2.0 }".
+#define MOZ_DEFINE_DBG(type_, ...) \
+ friend std::ostream& operator<<(std::ostream& aOut, const type_& aValue) { \
+ return aOut << #type_ \
+ << (MOZ_ARG_COUNT(__VA_ARGS__) == 0 ? "" : " { ") \
+ MOZ_FOR_EACH_SEPARATED(MOZ_DBG_FIELD, (<< ", "), (), \
+ (__VA_ARGS__)) \
+ << (MOZ_ARG_COUNT(__VA_ARGS__) == 0 ? "" : " }"); \
+ }
+
+#endif // mozilla_DbgMacro_h
diff --git a/mfbt/DebugOnly.h b/mfbt/DebugOnly.h
new file mode 100644
index 0000000000..0441685735
--- /dev/null
+++ b/mfbt/DebugOnly.h
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Provides DebugOnly, a type for variables used only in debug builds (i.e. by
+ * assertions).
+ */
+
+#ifndef mozilla_DebugOnly_h
+#define mozilla_DebugOnly_h
+
+#include "mozilla/Attributes.h"
+
+#include <utility>
+
+namespace mozilla {
+
+/**
+ * DebugOnly contains a value of type T, but only in debug builds. In release
+ * builds, it does not contain a value. This helper is intended to be used with
+ * MOZ_ASSERT()-style macros, allowing one to write:
+ *
+ * DebugOnly<bool> check = func();
+ * MOZ_ASSERT(check);
+ *
+ * more concisely than declaring |check| conditional on #ifdef DEBUG.
+ *
+ * DebugOnly instances can only be coerced to T in debug builds. In release
+ * builds they don't have a value, so type coercion is not well defined.
+ *
+ * NOTE: DebugOnly instances still take up one byte of space, plus padding, even
+ * in optimized, non-DEBUG builds (see bug 1253094 comment 37 for more info).
+ * For this reason the class is MOZ_STACK_CLASS to prevent consumers using
+ * DebugOnly for struct/class members and unwittingly inflating the size of
+ * their objects in release builds.
+ */
+template <typename T>
+class MOZ_STACK_CLASS DebugOnly {
+ public:
+#ifdef DEBUG
+ T value;
+
+ DebugOnly() = default;
+ MOZ_IMPLICIT DebugOnly(T&& aOther) : value(std::move(aOther)) {}
+ MOZ_IMPLICIT DebugOnly(const T& aOther) : value(aOther) {}
+ DebugOnly(const DebugOnly& aOther) : value(aOther.value) {}
+ DebugOnly& operator=(const T& aRhs) {
+ value = aRhs;
+ return *this;
+ }
+ DebugOnly& operator=(T&& aRhs) {
+ value = std::move(aRhs);
+ return *this;
+ }
+
+ void operator++(int) { value++; }
+ void operator--(int) { value--; }
+
+ // Do not define operator+=(), etc. here. These will coerce via the
+ // implicit cast and built-in operators. Defining explicit methods here
+ // will create ambiguity the compiler can't deal with.
+
+ T* operator&() { return &value; }
+
+ operator T&() { return value; }
+ operator const T&() const { return value; }
+
+ T& operator->() { return value; }
+ const T& operator->() const { return value; }
+
+ const T& inspect() const { return value; }
+
+#else
+ DebugOnly() = default;
+ MOZ_IMPLICIT DebugOnly(const T&) {}
+ DebugOnly(const DebugOnly&) {}
+ DebugOnly& operator=(const T&) { return *this; }
+ MOZ_IMPLICIT DebugOnly(T&&) {}
+ DebugOnly& operator=(T&&) { return *this; }
+ void operator++(int) {}
+ void operator--(int) {}
+ DebugOnly& operator+=(const T&) { return *this; }
+ DebugOnly& operator-=(const T&) { return *this; }
+ DebugOnly& operator&=(const T&) { return *this; }
+ DebugOnly& operator|=(const T&) { return *this; }
+ DebugOnly& operator^=(const T&) { return *this; }
+#endif
+
+ /*
+ * DebugOnly must always have a user-defined destructor or else it will
+ * generate "unused variable" warnings, exactly what it's intended
+ * to avoid!
+ */
+ ~DebugOnly() {}
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_DebugOnly_h */
diff --git a/mfbt/DefineEnum.h b/mfbt/DefineEnum.h
new file mode 100644
index 0000000000..afcff10e52
--- /dev/null
+++ b/mfbt/DefineEnum.h
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Poor man's reflection for enumerations. */
+
+#ifndef mozilla_DefineEnum_h
+#define mozilla_DefineEnum_h
+
+#include <stddef.h> // for size_t
+
+#include "mozilla/MacroArgs.h" // for MOZ_ARG_COUNT
+#include "mozilla/MacroForEach.h" // for MOZ_FOR_EACH
+
+/**
+ * MOZ_UNWRAP_ARGS is a helper macro that unwraps a list of comma-separated
+ * items enclosed in parentheses, to yield just the items.
+ *
+ * Usage: |MOZ_UNWRAP_ARGS foo| (note the absence of parentheses in the
+ * invocation), where |foo| is a parenthesis-enclosed list.
+ * For example, if |foo| is |(3, 4, 5)|, then the expansion is just |3, 4, 5|.
+ */
+#define MOZ_UNWRAP_ARGS(...) __VA_ARGS__
+
+/**
+ * MOZ_DEFINE_ENUM(aEnumName, aEnumerators) is a macro that allows
+ * simultaneously defining an enumeration named |aEnumName|, and a constant
+ * that stores the number of enumerators it has.
+ *
+ * The motivation is to allow the enumeration to evolve over time without
+ * either having to manually keep such a constant up to date, or having to
+ * add a special "sentinel" enumerator for this purpose. (While adding a
+ * "sentinel" enumerator is trivial, it causes headaches with "switch"
+ * statements. We often try to write "switch" statements whose cases exhaust
+ * the enumerators and don't have a "default" case, so that if a new
+ * enumerator is added and we forget to handle it in the "switch", the
+ * compiler points it out. But this means we need to explicitly handle the
+ * sentinel in every "switch".)
+ *
+ * |aEnumerators| is expected to be a comma-separated list of enumerators,
+ * enclosed in parentheses. The enumerators may NOT have associated
+ * initializers (an attempt to have one will result in a compiler error).
+ * This ensures that the enumerator values are in the range [0, N), where N
+ * is the number of enumerators.
+ *
+ * The list of enumerators cannot contain a trailing comma. This is a
+ * limitation of MOZ_FOR_EACH, which we use in the implementation; if
+ * MOZ_FOR_EACH supported trailing commas, we could too.
+ *
+ * The generated constant has the name "k" + |aEnumName| + "Count", and type
+ * "size_t". The enumeration and the constant are both defined in the scope
+ * in which the macro is invoked.
+ *
+ * For convenience, a constant of the enumeration type named
+ * "kHighest" + |aEnumName| is also defined, whose value is the highest
+ * valid enumerator, assuming the enumerators have contiguous values starting
+ * from 0.
+ *
+ * Invocation of the macro may be followed by a semicolon, if one prefers a
+ * more declaration-like syntax.
+ *
+ * Example invocation:
+ * MOZ_DEFINE_ENUM(MyEnum, (Foo, Bar, Baz));
+ *
+ * This expands to:
+ * enum MyEnum { Foo, Bar, Baz };
+ * constexpr size_t kMyEnumCount = 3;
+ * constexpr MyEnum kHighestMyEnum = MyEnum(kMyEnumCount - 1);
+ * // some static_asserts to ensure the values are in the range [0, 3)
+ *
+ * The macro also has several variants:
+ *
+ * - A |_CLASS| variant, which generates an |enum class| instead of
+ * a plain enum.
+ *
+ * - A |_WITH_BASE| variant which generates an enum with a specified
+ * underlying ("base") type, which is provided as an additional
+ * argument in second position.
+ *
+ * - An |_AT_CLASS_SCOPE| variant, designed for enumerations defined
+ * at class scope. For these, the generated constants are static,
+ * and have names prefixed with "s" instead of "k" as per
+ * naming convention.
+ *
+ * (and combinations of these).
+ */
+
+/*
+ * A helper macro for asserting that an enumerator does not have an initializer.
+ *
+ * The static_assert and the comparison are just scaffolding; the important
+ * part is forming the expression |aEnumName::aEnumeratorDecl|.
+ *
+ * If |aEnumeratorDecl| is just the enumerator name without an initializer,
+ * this expression compiles fine. However, if |aEnumeratorDecl| includes an
+ * initializer, as in |eEnumerator = initializer|, then this will fail to
+ * compile in expression context, since |eEnumerator| is not an lvalue.
+ *
+ * (The static_assert itself should always pass in the absence of the above
+ * error, since turning on a bit can only increase an integer value. It just
+ * provides a place to put the expression we want to form.)
+ */
+
+#define MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER(aEnumName, aEnumeratorDecl) \
+ static_assert( \
+ int(aEnumName::aEnumeratorDecl) <= \
+ (int(aEnumName::aEnumeratorDecl) | 1), \
+ "MOZ_DEFINE_ENUM does not allow enumerators to have initializers");
+
+#define MOZ_DEFINE_ENUM_IMPL(aEnumName, aClassSpec, aBaseSpec, aEnumerators) \
+ enum aClassSpec aEnumName aBaseSpec{MOZ_UNWRAP_ARGS aEnumerators}; \
+ constexpr size_t k##aEnumName##Count = MOZ_ARG_COUNT aEnumerators; \
+ constexpr aEnumName kHighest##aEnumName = \
+ aEnumName(k##aEnumName##Count - 1); \
+ MOZ_FOR_EACH(MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER, (aEnumName, ), \
+ aEnumerators)
+
+#define MOZ_DEFINE_ENUM(aEnumName, aEnumerators) \
+ MOZ_DEFINE_ENUM_IMPL(aEnumName, , , aEnumerators)
+
+#define MOZ_DEFINE_ENUM_WITH_BASE(aEnumName, aBaseName, aEnumerators) \
+ MOZ_DEFINE_ENUM_IMPL(aEnumName, , : aBaseName, aEnumerators)
+
+#define MOZ_DEFINE_ENUM_CLASS(aEnumName, aEnumerators) \
+ MOZ_DEFINE_ENUM_IMPL(aEnumName, class, , aEnumerators)
+
+#define MOZ_DEFINE_ENUM_CLASS_WITH_BASE(aEnumName, aBaseName, aEnumerators) \
+ MOZ_DEFINE_ENUM_IMPL(aEnumName, class, : aBaseName, aEnumerators)
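+
+// For example (an illustrative sketch, not part of the original header):
+//
+//   MOZ_DEFINE_ENUM_CLASS_WITH_BASE(Channel, uint8_t, (Red, Green, Blue));
+//   static_assert(kChannelCount == 3);
+//   static_assert(kHighestChannel == Channel::Blue);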
+
+#define MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, aClassSpec, aBaseSpec, \
+ aEnumerators) \
+ enum aClassSpec aEnumName aBaseSpec{MOZ_UNWRAP_ARGS aEnumerators}; \
+ constexpr static size_t s##aEnumName##Count = MOZ_ARG_COUNT aEnumerators; \
+ constexpr static aEnumName sHighest##aEnumName = \
+ aEnumName(s##aEnumName##Count - 1); \
+ MOZ_FOR_EACH(MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER, (aEnumName, ), \
+ aEnumerators)
+
+#define MOZ_DEFINE_ENUM_AT_CLASS_SCOPE(aEnumName, aEnumerators) \
+ MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, , , aEnumerators)
+
+#define MOZ_DEFINE_ENUM_WITH_BASE_AT_CLASS_SCOPE(aEnumName, aBaseName, \
+ aEnumerators) \
+ MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, , : aBaseName, aEnumerators)
+
+#define MOZ_DEFINE_ENUM_CLASS_AT_CLASS_SCOPE(aEnumName, aEnumerators) \
+ MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, class, , aEnumerators)
+
+#define MOZ_DEFINE_ENUM_CLASS_WITH_BASE_AT_CLASS_SCOPE(aEnumName, aBaseName, \
+ aEnumerators) \
+ MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, class, \
+ : aBaseName, aEnumerators)
+
+#endif // mozilla_DefineEnum_h
diff --git a/mfbt/DoublyLinkedList.h b/mfbt/DoublyLinkedList.h
new file mode 100644
index 0000000000..df178440d2
--- /dev/null
+++ b/mfbt/DoublyLinkedList.h
@@ -0,0 +1,578 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/** A doubly-linked list with flexible next/prev naming. */
+
+#ifndef mozilla_DoublyLinkedList_h
+#define mozilla_DoublyLinkedList_h
+
+#include <algorithm>
+#include <iosfwd>
+#include <iterator>
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+
+/**
+ * Where mozilla::LinkedList strives for ease of use above all other
+ * considerations, mozilla::DoublyLinkedList strives for flexibility. The
+ * following are things that can be done with mozilla::DoublyLinkedList that
+ * cannot be done with mozilla::LinkedList:
+ *
+ * * Arbitrary next/prev placement and naming. With the tools provided here,
+ * the next and previous pointers can be at the end of the structure, in a
+ * sub-structure, stored with a tag, in a union, wherever, as long as you
+ * can look them up and set them on demand.
+ * * Can be used without deriving from a new base and, thus, does not require
+ * use of constructors.
+ *
+ * Example:
+ *
+ * class Observer : public DoublyLinkedListElement<Observer>
+ * {
+ * public:
+ * void observe(char* aTopic) { ... }
+ * };
+ *
+ * class ObserverContainer
+ * {
+ * private:
+ * DoublyLinkedList<Observer> mList;
+ *
+ * public:
+ * void addObserver(Observer* aObserver)
+ * {
+ * // Will assert if |aObserver| is part of another list.
+ * mList.pushBack(aObserver);
+ * }
+ *
+ * void removeObserver(Observer* aObserver)
+ * {
+ *     // Will assert if |aObserver| is not part of |mList|.
+ * mList.remove(aObserver);
+ * }
+ *
+ * void notifyObservers(char* aTopic)
+ * {
+ * for (Observer* o : mList) {
+ * o->observe(aTopic);
+ * }
+ * }
+ * };
+ */
+
+namespace mozilla {
+
+/**
+ * Deriving from this will allow T to be inserted into and removed from a
+ * DoublyLinkedList.
+ */
+template <typename T>
+class DoublyLinkedListElement {
+ template <typename U, typename E>
+ friend class DoublyLinkedList;
+ friend T;
+ T* mNext;
+ T* mPrev;
+
+ public:
+ DoublyLinkedListElement() : mNext(nullptr), mPrev(nullptr) {}
+};
+
+/**
+ * Provides access to a DoublyLinkedListElement within T.
+ *
+ * The default implementation of this template works for types that derive
+ * from DoublyLinkedListElement, but one can specialize for their class so
+ * that some appropriate DoublyLinkedListElement reference is returned.
+ *
+ * For more complex cases (multiple DoublyLinkedListElements, for example),
+ * one can define their own trait class and use that as ElementAccess for
+ * DoublyLinkedList. See TestDoublyLinkedList.cpp for an example.
+ */
+template <typename T>
+struct GetDoublyLinkedListElement {
+ static_assert(std::is_base_of<DoublyLinkedListElement<T>, T>::value,
+ "You need your own specialization of GetDoublyLinkedListElement"
+ " or use a separate Trait.");
+ static DoublyLinkedListElement<T>& Get(T* aThis) { return *aThis; }
+};
+
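+/*
+ * A minimal sketch of a custom ElementAccess trait (type names are
+ * hypothetical): for a type that embeds its link element as a member rather
+ * than deriving from DoublyLinkedListElement, one might write:
+ *
+ *   struct Node {
+ *     int mValue;
+ *     mozilla::DoublyLinkedListElement<Node> mLink;
+ *   };
+ *
+ *   struct NodeLinkAccess {
+ *     static mozilla::DoublyLinkedListElement<Node>& Get(Node* aThis) {
+ *       return aThis->mLink;
+ *     }
+ *   };
+ *
+ *   using NodeList = mozilla::DoublyLinkedList<Node, NodeLinkAccess>;
+ */
+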
+/**
+ * A doubly linked list. |T| is the type of element stored in this list. |T|
+ * must contain or have access to unique next and previous element pointers.
+ * The template argument |ElementAccess| provides code to tell this list how to
+ * get a reference to a DoublyLinkedListElement that may reside anywhere.
+ */
+template <typename T, typename ElementAccess = GetDoublyLinkedListElement<T>>
+class DoublyLinkedList final {
+ T* mHead;
+ T* mTail;
+
+ /**
+   * Checks that either the list is empty and both mHead and mTail are
+   * nullptr, or the list has entries and both mHead and mTail are non-null.
+ */
+ bool isStateValid() const { return (mHead != nullptr) == (mTail != nullptr); }
+
+ bool ElementNotInList(T* aElm) {
+ if (!ElementAccess::Get(aElm).mNext && !ElementAccess::Get(aElm).mPrev) {
+      // Both mNext and mPrev being nullptr can mean two things:
+ // - the element is not in the list.
+ // - the element is the first and only element in the list.
+ // So check for the latter.
+ return mHead != aElm;
+ }
+ return false;
+ }
+
+ public:
+ DoublyLinkedList() : mHead(nullptr), mTail(nullptr) {}
+
+ class Iterator final {
+ T* mCurrent;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = T;
+ using difference_type = std::ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+
+ Iterator() : mCurrent(nullptr) {}
+ explicit Iterator(T* aCurrent) : mCurrent(aCurrent) {}
+
+ T& operator*() const { return *mCurrent; }
+ T* operator->() const { return mCurrent; }
+
+ Iterator& operator++() {
+ mCurrent = mCurrent ? ElementAccess::Get(mCurrent).mNext : nullptr;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ Iterator result = *this;
+ ++(*this);
+ return result;
+ }
+
+ Iterator& operator--() {
+ mCurrent = ElementAccess::Get(mCurrent).mPrev;
+ return *this;
+ }
+
+ Iterator operator--(int) {
+ Iterator result = *this;
+ --(*this);
+ return result;
+ }
+
+ bool operator!=(const Iterator& aOther) const {
+ return mCurrent != aOther.mCurrent;
+ }
+
+ bool operator==(const Iterator& aOther) const {
+ return mCurrent == aOther.mCurrent;
+ }
+
+ explicit operator bool() const { return mCurrent; }
+ };
+
+ Iterator begin() { return Iterator(mHead); }
+ const Iterator begin() const { return Iterator(mHead); }
+ const Iterator cbegin() const { return Iterator(mHead); }
+
+ Iterator end() { return Iterator(); }
+ const Iterator end() const { return Iterator(); }
+ const Iterator cend() const { return Iterator(); }
+
+ /**
+ * Returns true if the list contains no elements.
+ */
+ bool isEmpty() const {
+ MOZ_ASSERT(isStateValid());
+ return mHead == nullptr;
+ }
+
+ /**
+ * Inserts aElm into the list at the head position. |aElm| must not already
+ * be in a list.
+ */
+ void pushFront(T* aElm) {
+ MOZ_ASSERT(aElm);
+ MOZ_ASSERT(ElementNotInList(aElm));
+ MOZ_ASSERT(isStateValid());
+
+ ElementAccess::Get(aElm).mNext = mHead;
+ if (mHead) {
+ MOZ_ASSERT(!ElementAccess::Get(mHead).mPrev);
+ ElementAccess::Get(mHead).mPrev = aElm;
+ }
+
+ mHead = aElm;
+ if (!mTail) {
+ mTail = aElm;
+ }
+ }
+
+ /**
+ * Remove the head of the list and return it. Calling this on an empty list
+ * will assert.
+ */
+ T* popFront() {
+ MOZ_ASSERT(!isEmpty());
+ MOZ_ASSERT(isStateValid());
+
+ T* result = mHead;
+ mHead = result ? ElementAccess::Get(result).mNext : nullptr;
+ if (mHead) {
+ ElementAccess::Get(mHead).mPrev = nullptr;
+ }
+
+ if (mTail == result) {
+ mTail = nullptr;
+ }
+
+ if (result) {
+ ElementAccess::Get(result).mNext = nullptr;
+ ElementAccess::Get(result).mPrev = nullptr;
+ }
+
+ return result;
+ }
+
+ /**
+ * Inserts aElm into the list at the tail position. |aElm| must not already
+ * be in a list.
+ */
+ void pushBack(T* aElm) {
+ MOZ_ASSERT(aElm);
+ MOZ_ASSERT(ElementNotInList(aElm));
+ MOZ_ASSERT(isStateValid());
+
+ ElementAccess::Get(aElm).mNext = nullptr;
+ ElementAccess::Get(aElm).mPrev = mTail;
+ if (mTail) {
+ MOZ_ASSERT(!ElementAccess::Get(mTail).mNext);
+ ElementAccess::Get(mTail).mNext = aElm;
+ }
+
+ mTail = aElm;
+ if (!mHead) {
+ mHead = aElm;
+ }
+ }
+
+ /**
+ * Remove the tail of the list and return it. Calling this on an empty list
+ * will assert.
+ */
+ T* popBack() {
+ MOZ_ASSERT(!isEmpty());
+ MOZ_ASSERT(isStateValid());
+
+ T* result = mTail;
+ mTail = result ? ElementAccess::Get(result).mPrev : nullptr;
+ if (mTail) {
+ ElementAccess::Get(mTail).mNext = nullptr;
+ }
+
+ if (mHead == result) {
+ mHead = nullptr;
+ }
+
+ if (result) {
+ ElementAccess::Get(result).mNext = nullptr;
+ ElementAccess::Get(result).mPrev = nullptr;
+ }
+
+ return result;
+ }
+
+ /**
+ * Insert the given |aElm| *before* |aIter|.
+ */
+ void insertBefore(const Iterator& aIter, T* aElm) {
+ MOZ_ASSERT(aElm);
+ MOZ_ASSERT(ElementNotInList(aElm));
+ MOZ_ASSERT(isStateValid());
+
+ if (!aIter) {
+ return pushBack(aElm);
+ } else if (aIter == begin()) {
+ return pushFront(aElm);
+ }
+
+ T* after = &(*aIter);
+ T* before = ElementAccess::Get(after).mPrev;
+ MOZ_ASSERT(before);
+
+ ElementAccess::Get(before).mNext = aElm;
+ ElementAccess::Get(aElm).mPrev = before;
+ ElementAccess::Get(aElm).mNext = after;
+ ElementAccess::Get(after).mPrev = aElm;
+ }
+
+ /**
+ * Removes the given element from the list. The element must be in this list.
+ */
+ void remove(T* aElm) {
+ MOZ_ASSERT(aElm);
+ MOZ_ASSERT(ElementAccess::Get(aElm).mNext ||
+ ElementAccess::Get(aElm).mPrev ||
+ (aElm == mHead && aElm == mTail),
+ "Attempted to remove element not in this list");
+
+ if (T* prev = ElementAccess::Get(aElm).mPrev) {
+ ElementAccess::Get(prev).mNext = ElementAccess::Get(aElm).mNext;
+ } else {
+ MOZ_ASSERT(mHead == aElm);
+ mHead = ElementAccess::Get(aElm).mNext;
+ }
+
+ if (T* next = ElementAccess::Get(aElm).mNext) {
+ ElementAccess::Get(next).mPrev = ElementAccess::Get(aElm).mPrev;
+ } else {
+ MOZ_ASSERT(mTail == aElm);
+ mTail = ElementAccess::Get(aElm).mPrev;
+ }
+
+ ElementAccess::Get(aElm).mNext = nullptr;
+ ElementAccess::Get(aElm).mPrev = nullptr;
+ }
+
+ /**
+   * Returns an iterator referencing the first element whose value matches
+   * the given element according to operator==, or end() if there is none.
+ */
+ Iterator find(const T& aElm) { return std::find(begin(), end(), aElm); }
+
+ /**
+ * Returns whether the given element is in the list. Note that this uses
+ * T::operator==, not pointer comparison.
+ */
+ bool contains(const T& aElm) { return find(aElm) != Iterator(); }
+
+ /**
+   * Returns whether the given element might be in this list. To keep the
+   * check fast, this assumes the element is either in this list or in no
+   * list at all; an element that is part of another list may produce a
+   * false positive.
+ */
+ bool ElementProbablyInList(T* aElm) {
+ if (isEmpty()) {
+ return false;
+ }
+ return !ElementNotInList(aElm);
+ }
+};
+
+/**
+ * @brief Doubly-linked list that allows insertion/removal during iteration.
+ *
+ * This class uses mozilla::DoublyLinkedList internally and keeps track of
+ * created iterator instances by putting them on a simple stack-allocated list
+ * (compare nsTAutoObserverArray). This allows insertion and removal
+ * operations to adjust those iterators, keeping them valid during iteration.
+ */
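+/*
+ * A brief usage sketch, reusing the Observer type from the example above
+ * (the predicate is hypothetical): elements may be removed mid-iteration.
+ *
+ *   SafeDoublyLinkedList<Observer> list;
+ *   // ... populate |list| ...
+ *   for (auto it = list.begin(); it != list.end(); ++it) {
+ *     if (shouldRemove(&*it)) {
+ *       list.remove(&*it);  // |it| is adjusted; ++it still advances safely.
+ *     }
+ *   }
+ */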
+template <typename T, typename ElementAccess = GetDoublyLinkedListElement<T>>
+class SafeDoublyLinkedList {
+ public:
+ /**
+ * @brief Iterator class for SafeDoublyLinkedList.
+ *
+ * The iterator contains two iterators of the underlying list:
+ * - mCurrent points to the current list element of the iterator.
+ * - mNext points to the next element of the list.
+ *
+ * When removing an element from the list, mCurrent and mNext may
+ * be adjusted:
+ * - If mCurrent is the element to be deleted, it is set to empty. mNext can
+ * still be used to advance to the next element.
+ * - If mNext is the element to be deleted, it is set to its next element
+ * (or to empty if mNext is the last element of the list).
+ */
+ class SafeIterator {
+ using BaseIterator = typename DoublyLinkedList<T, ElementAccess>::Iterator;
+ friend class SafeDoublyLinkedList<T, ElementAccess>;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = T;
+ using difference_type = std::ptrdiff_t;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+
+ SafeIterator() = default;
+ SafeIterator(SafeIterator const& aOther)
+ : SafeIterator(aOther.mCurrent, aOther.mList) {}
+
+ SafeIterator(BaseIterator aBaseIter,
+ SafeDoublyLinkedList<T, ElementAccess>* aList)
+ : mCurrent(aBaseIter),
+ mNext(aBaseIter ? ++aBaseIter : BaseIterator()),
+ mList(aList) {
+ if (mList) {
+ mNextIterator = mList->mIter;
+ mList->mIter = this;
+ }
+ }
+ ~SafeIterator() {
+ if (mList) {
+ MOZ_ASSERT(mList->mIter == this,
+ "Iterators must currently be destroyed in opposite order "
+ "from the construction order. It is suggested that you "
+ "simply put them on the stack");
+ mList->mIter = mNextIterator;
+ }
+ }
+
+ SafeIterator& operator++() {
+ mCurrent = mNext;
+ if (mNext) {
+ ++mNext;
+ }
+ return *this;
+ }
+
+ pointer operator->() { return &*mCurrent; }
+ const_pointer operator->() const { return &*mCurrent; }
+ reference operator*() { return *mCurrent; }
+ const_reference operator*() const { return *mCurrent; }
+
+ pointer current() { return mCurrent ? &*mCurrent : nullptr; }
+ const_pointer current() const { return mCurrent ? &*mCurrent : nullptr; }
+
+ explicit operator bool() const { return bool(mCurrent); }
+ bool operator==(SafeIterator const& other) const {
+ return mCurrent == other.mCurrent;
+ }
+ bool operator!=(SafeIterator const& other) const {
+ return mCurrent != other.mCurrent;
+ }
+
+ BaseIterator& next() { return mNext; } // mainly needed for unittests.
+ private:
+ /**
+     * Base list iterator pointing to the current list element of the
+     * iteration. If the element mCurrent points to gets removed, mCurrent
+     * will be set to empty; mNext keeps the iterator valid.
+ */
+ BaseIterator mCurrent{nullptr};
+ /**
+ * Base list iterator pointing to the next list element of the iteration.
+     * If the element mCurrent points to gets removed, mNext stays valid.
+     * If the element mNext points to gets removed, mNext advances, keeping
+     * this iterator valid.
+ */
+ BaseIterator mNext{nullptr};
+
+ /**
+ * Next element in the stack-allocated list of iterators stored in the
+     * SafeDoublyLinkedList object.
+ */
+ SafeIterator* mNextIterator{nullptr};
+ SafeDoublyLinkedList<T, ElementAccess>* mList{nullptr};
+
+ void setNext(T* aElm) { mNext = BaseIterator(aElm); }
+ void setCurrent(T* aElm) { mCurrent = BaseIterator(aElm); }
+ };
+
+ private:
+ using BaseListType = DoublyLinkedList<T, ElementAccess>;
+ friend class SafeIterator;
+
+ public:
+ SafeDoublyLinkedList() = default;
+
+ bool isEmpty() const { return mList.isEmpty(); }
+ bool contains(T* aElm) {
+ for (auto iter = mList.begin(); iter != mList.end(); ++iter) {
+ if (&*iter == aElm) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ SafeIterator begin() { return SafeIterator(mList.begin(), this); }
+ SafeIterator begin() const { return SafeIterator(mList.begin(), this); }
+ SafeIterator cbegin() const { return begin(); }
+
+ SafeIterator end() { return SafeIterator(); }
+ SafeIterator end() const { return SafeIterator(); }
+ SafeIterator cend() const { return SafeIterator(); }
+
+ void pushFront(T* aElm) { mList.pushFront(aElm); }
+
+ void pushBack(T* aElm) {
+ mList.pushBack(aElm);
+ auto* iter = mIter;
+ while (iter) {
+ if (!iter->mNext) {
+ iter->setNext(aElm);
+ }
+ iter = iter->mNextIterator;
+ }
+ }
+
+ T* popFront() {
+ T* firstElm = mList.popFront();
+ auto* iter = mIter;
+ while (iter) {
+ if (iter->current() == firstElm) {
+ iter->setCurrent(nullptr);
+ }
+ iter = iter->mNextIterator;
+ }
+
+ return firstElm;
+ }
+
+ T* popBack() {
+ T* lastElm = mList.popBack();
+ auto* iter = mIter;
+ while (iter) {
+ if (iter->current() == lastElm) {
+ iter->setCurrent(nullptr);
+ } else if (iter->mNext && &*(iter->mNext) == lastElm) {
+ iter->setNext(nullptr);
+ }
+ iter = iter->mNextIterator;
+ }
+
+ return lastElm;
+ }
+
+ void remove(T* aElm) {
+ if (!mList.ElementProbablyInList(aElm)) {
+ return;
+ }
+ auto* iter = mIter;
+ while (iter) {
+ if (iter->mNext && &*(iter->mNext) == aElm) {
+ ++(iter->mNext);
+ }
+ if (iter->current() == aElm) {
+ iter->setCurrent(nullptr);
+ }
+ iter = iter->mNextIterator;
+ }
+
+ mList.remove(aElm);
+ }
+
+ private:
+ BaseListType mList;
+ SafeIterator* mIter{nullptr};
+};
+
+} // namespace mozilla
+
+#endif // mozilla_DoublyLinkedList_h
diff --git a/mfbt/EndianUtils.h b/mfbt/EndianUtils.h
new file mode 100644
index 0000000000..b6f3e2c315
--- /dev/null
+++ b/mfbt/EndianUtils.h
@@ -0,0 +1,611 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Functions for reading and writing integers in various endiannesses. */
+
+/*
+ * The classes LittleEndian and BigEndian expose static methods for
+ * reading and writing 16-, 32-, and 64-bit signed and unsigned integers
+ * in their respective endianness. The addresses read from or written
+ * to may be misaligned (although misaligned accesses may incur
+ * architecture-specific performance costs). The naming scheme is:
+ *
+ * {Little,Big}Endian::{read,write}{Uint,Int}<bitsize>
+ *
+ * For instance, LittleEndian::readInt32 will read a 32-bit signed
+ * integer from memory in little endian format. Similarly,
+ * BigEndian::writeUint16 will write a 16-bit unsigned integer to memory
+ * in big-endian format.
+ *
+ * The class NativeEndian exposes methods for conversion of existing
+ * data to and from the native endianness. These methods are intended
+ * for cases where data needs to be transferred, serialized, etc.
+ * swap{To,From}{Little,Big}Endian byteswap a single value if necessary.
+ * Bulk conversion functions are also provided which optimize the
+ * no-conversion-needed case:
+ *
+ * - copyAndSwap{To,From}{Little,Big}Endian;
+ * - swap{To,From}{Little,Big}EndianInPlace.
+ *
+ * The *From* variants are intended to be used for reading data and the
+ * *To* variants for writing data.
+ *
+ * Methods on NativeEndian work with integer data of any type.
+ * Floating-point data is not supported.
+ *
+ * For clarity in networking code, "Network" may be used as a synonym
+ * for "Big" in any of the above methods or class names.
+ *
+ * As an example, reading a file format header whose fields are stored
+ * in big-endian format might look like:
+ *
+ * class ExampleHeader
+ * {
+ * private:
+ * uint32_t mMagic;
+ * uint32_t mLength;
+ * uint32_t mTotalRecords;
+ * uint64_t mChecksum;
+ *
+ * public:
+ * ExampleHeader(const void* data)
+ * {
+ * const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ * mMagic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ * mLength = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ * mTotalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ * mChecksum = BigEndian::readUint64(ptr);
+ * }
+ * ...
+ * };
+ */
+
+#ifndef mozilla_EndianUtils_h
+#define mozilla_EndianUtils_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/DebugOnly.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#if defined(_MSC_VER)
+# include <stdlib.h>
+# pragma intrinsic(_byteswap_ushort)
+# pragma intrinsic(_byteswap_ulong)
+# pragma intrinsic(_byteswap_uint64)
+#endif
+
+/*
+ * Our supported compilers provide architecture-independent macros for this.
+ * Yes, there are more than two values for __BYTE_ORDER__.
+ */
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ defined(__ORDER_BIG_ENDIAN__)
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define MOZ_LITTLE_ENDIAN() 1
+# define MOZ_BIG_ENDIAN() 0
+# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define MOZ_LITTLE_ENDIAN() 0
+# define MOZ_BIG_ENDIAN() 1
+# else
+# error "Can't handle mixed-endian architectures"
+# endif
+#else
+# error "Don't know how to determine endianness"
+#endif
+
+#if defined(__clang__)
+# if __has_builtin(__builtin_bswap16)
+# define MOZ_HAVE_BUILTIN_BYTESWAP16 __builtin_bswap16
+# endif
+#elif defined(__GNUC__)
+# define MOZ_HAVE_BUILTIN_BYTESWAP16 __builtin_bswap16
+#elif defined(_MSC_VER)
+# define MOZ_HAVE_BUILTIN_BYTESWAP16 _byteswap_ushort
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+/*
+ * We need wrappers here because free functions with default template
+ * arguments and/or partial specialization of function templates are not
+ * supported by all the compilers we use.
+ */
+template <typename T, size_t Size = sizeof(T)>
+struct Swapper;
+
+template <typename T>
+struct Swapper<T, 2> {
+ static T swap(T aValue) {
+#if defined(MOZ_HAVE_BUILTIN_BYTESWAP16)
+ return MOZ_HAVE_BUILTIN_BYTESWAP16(aValue);
+#else
+ return T(((aValue & 0x00ff) << 8) | ((aValue & 0xff00) >> 8));
+#endif
+ }
+};
+
+template <typename T>
+struct Swapper<T, 4> {
+ static T swap(T aValue) {
+#if defined(__clang__) || defined(__GNUC__)
+ return T(__builtin_bswap32(aValue));
+#elif defined(_MSC_VER)
+ return T(_byteswap_ulong(aValue));
+#else
+ return T(((aValue & 0x000000ffU) << 24) | ((aValue & 0x0000ff00U) << 8) |
+ ((aValue & 0x00ff0000U) >> 8) | ((aValue & 0xff000000U) >> 24));
+#endif
+ }
+};
+
+template <typename T>
+struct Swapper<T, 8> {
+ static inline T swap(T aValue) {
+#if defined(__clang__) || defined(__GNUC__)
+ return T(__builtin_bswap64(aValue));
+#elif defined(_MSC_VER)
+ return T(_byteswap_uint64(aValue));
+#else
+ return T(((aValue & 0x00000000000000ffULL) << 56) |
+ ((aValue & 0x000000000000ff00ULL) << 40) |
+ ((aValue & 0x0000000000ff0000ULL) << 24) |
+ ((aValue & 0x00000000ff000000ULL) << 8) |
+ ((aValue & 0x000000ff00000000ULL) >> 8) |
+ ((aValue & 0x0000ff0000000000ULL) >> 24) |
+ ((aValue & 0x00ff000000000000ULL) >> 40) |
+ ((aValue & 0xff00000000000000ULL) >> 56));
+#endif
+ }
+};
+
+enum Endianness { Little, Big };
+
+#if MOZ_BIG_ENDIAN()
+# define MOZ_NATIVE_ENDIANNESS detail::Big
+#else
+# define MOZ_NATIVE_ENDIANNESS detail::Little
+#endif
+
+class EndianUtils {
+ /**
+ * Assert that the memory regions [aDest, aDest+aCount) and
+   * [aSrc, aSrc+aCount) do not overlap. aCount is given in bytes.
+ */
+ static void assertNoOverlap(const void* aDest, const void* aSrc,
+ size_t aCount) {
+ DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(aDest);
+ DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+ MOZ_ASSERT(
+ (byteDestPtr <= byteSrcPtr && byteDestPtr + aCount <= byteSrcPtr) ||
+ (byteSrcPtr <= byteDestPtr && byteSrcPtr + aCount <= byteDestPtr));
+ }
+
+ template <typename T>
+ static void assertAligned(T* aPtr) {
+ MOZ_ASSERT((uintptr_t(aPtr) % sizeof(T)) == 0, "Unaligned pointer!");
+ }
+
+ protected:
+ /**
+ * Return |aValue| converted from SourceEndian encoding to DestEndian
+ * encoding.
+ */
+ template <Endianness SourceEndian, Endianness DestEndian, typename T>
+ static inline T maybeSwap(T aValue) {
+ if (SourceEndian == DestEndian) {
+ return aValue;
+ }
+ return Swapper<T>::swap(aValue);
+ }
+
+ /**
+ * Convert |aCount| elements at |aPtr| from SourceEndian encoding to
+ * DestEndian encoding.
+ */
+ template <Endianness SourceEndian, Endianness DestEndian, typename T>
+ static inline void maybeSwapInPlace(T* aPtr, size_t aCount) {
+ assertAligned(aPtr);
+
+ if (SourceEndian == DestEndian) {
+ return;
+ }
+ for (size_t i = 0; i < aCount; i++) {
+ aPtr[i] = Swapper<T>::swap(aPtr[i]);
+ }
+ }
+
+ /**
+ * Write |aCount| elements to the unaligned address |aDest| in DestEndian
+ * format, using elements found at |aSrc| in SourceEndian format.
+ */
+ template <Endianness SourceEndian, Endianness DestEndian, typename T>
+ static void copyAndSwapTo(void* aDest, const T* aSrc, size_t aCount) {
+ assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+ assertAligned(aSrc);
+
+ if (SourceEndian == DestEndian) {
+ memcpy(aDest, aSrc, aCount * sizeof(T));
+ return;
+ }
+
+ uint8_t* byteDestPtr = static_cast<uint8_t*>(aDest);
+ for (size_t i = 0; i < aCount; ++i) {
+ union {
+ T mVal;
+ uint8_t mBuffer[sizeof(T)];
+ } u;
+ u.mVal = maybeSwap<SourceEndian, DestEndian>(aSrc[i]);
+ memcpy(byteDestPtr, u.mBuffer, sizeof(T));
+ byteDestPtr += sizeof(T);
+ }
+ }
+
+ /**
+ * Write |aCount| elements to |aDest| in DestEndian format, using elements
+ * found at the unaligned address |aSrc| in SourceEndian format.
+ */
+ template <Endianness SourceEndian, Endianness DestEndian, typename T>
+ static void copyAndSwapFrom(T* aDest, const void* aSrc, size_t aCount) {
+ assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+ assertAligned(aDest);
+
+ if (SourceEndian == DestEndian) {
+ memcpy(aDest, aSrc, aCount * sizeof(T));
+ return;
+ }
+
+ const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+ for (size_t i = 0; i < aCount; ++i) {
+ union {
+ T mVal;
+ uint8_t mBuffer[sizeof(T)];
+ } u;
+ memcpy(u.mBuffer, byteSrcPtr, sizeof(T));
+ aDest[i] = maybeSwap<SourceEndian, DestEndian>(u.mVal);
+ byteSrcPtr += sizeof(T);
+ }
+ }
+};
+
+template <Endianness ThisEndian>
+class Endian : private EndianUtils {
+ protected:
+ /** Read a uint16_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static uint16_t readUint16(const void* aPtr) {
+ return read<uint16_t>(aPtr);
+ }
+
+ /** Read a uint32_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static uint32_t readUint32(const void* aPtr) {
+ return read<uint32_t>(aPtr);
+ }
+
+ /** Read a uint64_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static uint64_t readUint64(const void* aPtr) {
+ return read<uint64_t>(aPtr);
+ }
+
+ /** Read a uintptr_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static uintptr_t readUintptr(const void* aPtr) {
+ return read<uintptr_t>(aPtr);
+ }
+
+ /** Read an int16_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static int16_t readInt16(const void* aPtr) {
+ return read<int16_t>(aPtr);
+ }
+
+ /** Read an int32_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static int32_t readInt32(const void* aPtr) {
+    return read<int32_t>(aPtr);
+ }
+
+ /** Read an int64_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static int64_t readInt64(const void* aPtr) {
+ return read<int64_t>(aPtr);
+ }
+
+ /** Read an intptr_t in ThisEndian endianness from |aPtr| and return it. */
+ [[nodiscard]] static intptr_t readIntptr(const void* aPtr) {
+ return read<intptr_t>(aPtr);
+ }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeUint16(void* aPtr, uint16_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeUint32(void* aPtr, uint32_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeUint64(void* aPtr, uint64_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeUintptr(void* aPtr, uintptr_t aValue) {
+ write(aPtr, aValue);
+ }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeInt16(void* aPtr, int16_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeInt32(void* aPtr, int32_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeInt64(void* aPtr, int64_t aValue) { write(aPtr, aValue); }
+
+ /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+ static void writeIntptr(void* aPtr, intptr_t aValue) { write(aPtr, aValue); }
+
+ /*
+ * Converts a value of type T to little-endian format.
+ *
+ * This function is intended for cases where you have data in your
+ * native-endian format and you need it to appear in little-endian
+ * format for transmission.
+ */
+ template <typename T>
+ [[nodiscard]] static T swapToLittleEndian(T aValue) {
+ return maybeSwap<ThisEndian, Little>(aValue);
+ }
+
+ /*
+ * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+ * them to little-endian format if ThisEndian is Big. |aSrc| as a typed
+ * pointer must be aligned; |aDest| need not be.
+ *
+ * As with memcpy, |aDest| and |aSrc| must not overlap.
+ */
+ template <typename T>
+ static void copyAndSwapToLittleEndian(void* aDest, const T* aSrc,
+ size_t aCount) {
+ copyAndSwapTo<ThisEndian, Little>(aDest, aSrc, aCount);
+ }
+
+ /*
+ * Likewise, but converts values in place.
+ */
+ template <typename T>
+ static void swapToLittleEndianInPlace(T* aPtr, size_t aCount) {
+ maybeSwapInPlace<ThisEndian, Little>(aPtr, aCount);
+ }
+
+ /*
+ * Converts a value of type T to big-endian format.
+ */
+ template <typename T>
+ [[nodiscard]] static T swapToBigEndian(T aValue) {
+ return maybeSwap<ThisEndian, Big>(aValue);
+ }
+
+ /*
+ * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+ * them to big-endian format if ThisEndian is Little. |aSrc| as a typed
+ * pointer must be aligned; |aDest| need not be.
+ *
+ * As with memcpy, |aDest| and |aSrc| must not overlap.
+ */
+ template <typename T>
+ static void copyAndSwapToBigEndian(void* aDest, const T* aSrc,
+ size_t aCount) {
+ copyAndSwapTo<ThisEndian, Big>(aDest, aSrc, aCount);
+ }
+
+ /*
+ * Likewise, but converts values in place.
+ */
+ template <typename T>
+ static void swapToBigEndianInPlace(T* aPtr, size_t aCount) {
+ maybeSwapInPlace<ThisEndian, Big>(aPtr, aCount);
+ }
+
+ /*
+ * Synonyms for the big-endian functions, for better readability
+ * in network code.
+ */
+
+ template <typename T>
+ [[nodiscard]] static T swapToNetworkOrder(T aValue) {
+ return swapToBigEndian(aValue);
+ }
+
+ template <typename T>
+ static void copyAndSwapToNetworkOrder(void* aDest, const T* aSrc,
+ size_t aCount) {
+ copyAndSwapToBigEndian(aDest, aSrc, aCount);
+ }
+
+ template <typename T>
+ static void swapToNetworkOrderInPlace(T* aPtr, size_t aCount) {
+ swapToBigEndianInPlace(aPtr, aCount);
+ }
+
+ /*
+ * Converts a value of type T from little-endian format.
+ */
+ template <typename T>
+ [[nodiscard]] static T swapFromLittleEndian(T aValue) {
+ return maybeSwap<Little, ThisEndian>(aValue);
+ }
+
+ /*
+ * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+ * them to little-endian format if ThisEndian is Big. |aDest| as a typed
+ * pointer must be aligned; |aSrc| need not be.
+ *
+ * As with memcpy, |aDest| and |aSrc| must not overlap.
+ */
+ template <typename T>
+ static void copyAndSwapFromLittleEndian(T* aDest, const void* aSrc,
+ size_t aCount) {
+ copyAndSwapFrom<Little, ThisEndian>(aDest, aSrc, aCount);
+ }
+
+ /*
+ * Likewise, but converts values in place.
+ */
+ template <typename T>
+ static void swapFromLittleEndianInPlace(T* aPtr, size_t aCount) {
+ maybeSwapInPlace<Little, ThisEndian>(aPtr, aCount);
+ }
+
+ /*
+ * Converts a value of type T from big-endian format.
+ */
+ template <typename T>
+ [[nodiscard]] static T swapFromBigEndian(T aValue) {
+ return maybeSwap<Big, ThisEndian>(aValue);
+ }
+
+ /*
+ * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+ * them to big-endian format if ThisEndian is Little. |aDest| as a typed
+ * pointer must be aligned; |aSrc| need not be.
+ *
+ * As with memcpy, |aDest| and |aSrc| must not overlap.
+ */
+ template <typename T>
+ static void copyAndSwapFromBigEndian(T* aDest, const void* aSrc,
+ size_t aCount) {
+ copyAndSwapFrom<Big, ThisEndian>(aDest, aSrc, aCount);
+ }
+
+ /*
+ * Likewise, but converts values in place.
+ */
+ template <typename T>
+ static void swapFromBigEndianInPlace(T* aPtr, size_t aCount) {
+ maybeSwapInPlace<Big, ThisEndian>(aPtr, aCount);
+ }
+
+ /*
+ * Synonyms for the big-endian functions, for better readability
+ * in network code.
+ */
+ template <typename T>
+ [[nodiscard]] static T swapFromNetworkOrder(T aValue) {
+ return swapFromBigEndian(aValue);
+ }
+
+ template <typename T>
+ static void copyAndSwapFromNetworkOrder(T* aDest, const void* aSrc,
+ size_t aCount) {
+ copyAndSwapFromBigEndian(aDest, aSrc, aCount);
+ }
+
+ template <typename T>
+ static void swapFromNetworkOrderInPlace(T* aPtr, size_t aCount) {
+ swapFromBigEndianInPlace(aPtr, aCount);
+ }
+
+ private:
+ /**
+ * Read a value of type T, encoded in endianness ThisEndian from |aPtr|.
+ * Return that value encoded in native endianness.
+ */
+ template <typename T>
+ static T read(const void* aPtr) {
+ union {
+ T mVal;
+ uint8_t mBuffer[sizeof(T)];
+ } u;
+ memcpy(u.mBuffer, aPtr, sizeof(T));
+ return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.mVal);
+ }
+
+ /**
+ * Write a value of type T, in native endianness, to |aPtr|, in ThisEndian
+ * endianness.
+ */
+ template <typename T>
+ static void write(void* aPtr, T aValue) {
+ T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(aValue);
+ memcpy(aPtr, &tmp, sizeof(T));
+ }
+
+ Endian() = delete;
+  Endian(const Endian& aOther) = delete;
+ void operator=(const Endian& aOther) = delete;
+};
+
+template <Endianness ThisEndian>
+class EndianReadWrite : public Endian<ThisEndian> {
+ private:
+ typedef Endian<ThisEndian> super;
+
+ public:
+ using super::readInt16;
+ using super::readInt32;
+ using super::readInt64;
+ using super::readIntptr;
+ using super::readUint16;
+ using super::readUint32;
+ using super::readUint64;
+ using super::readUintptr;
+ using super::writeInt16;
+ using super::writeInt32;
+ using super::writeInt64;
+ using super::writeIntptr;
+ using super::writeUint16;
+ using super::writeUint32;
+ using super::writeUint64;
+ using super::writeUintptr;
+};
+
+} /* namespace detail */
+
+class LittleEndian final : public detail::EndianReadWrite<detail::Little> {};
+
+class BigEndian final : public detail::EndianReadWrite<detail::Big> {};
+
+typedef BigEndian NetworkEndian;
+
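+/*
+ * For example (a sketch; |buf| and |port| are hypothetical), writing a
+ * 16-bit port number for transmission and reading it back:
+ *
+ *   uint8_t buf[2];
+ *   NetworkEndian::writeUint16(buf, port);
+ *   uint16_t received = NetworkEndian::readUint16(buf);
+ */
+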
+class NativeEndian final : public detail::Endian<MOZ_NATIVE_ENDIANNESS> {
+ private:
+ typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
+
+ public:
+ /*
+ * These functions are intended for cases where you have data in your
+ * native-endian format and you need the data to appear in the appropriate
+ * endianness for transmission, serialization, etc.
+ */
+ using super::copyAndSwapToBigEndian;
+ using super::copyAndSwapToLittleEndian;
+ using super::copyAndSwapToNetworkOrder;
+ using super::swapToBigEndian;
+ using super::swapToBigEndianInPlace;
+ using super::swapToLittleEndian;
+ using super::swapToLittleEndianInPlace;
+ using super::swapToNetworkOrder;
+ using super::swapToNetworkOrderInPlace;
+
+ /*
+ * These functions are intended for cases where you have data in the
+ * given endianness (e.g. reading from disk or a file-format) and you
+ * need the data to appear in native-endian format for processing.
+ */
+ using super::copyAndSwapFromBigEndian;
+ using super::copyAndSwapFromLittleEndian;
+ using super::copyAndSwapFromNetworkOrder;
+ using super::swapFromBigEndian;
+ using super::swapFromBigEndianInPlace;
+ using super::swapFromLittleEndian;
+ using super::swapFromLittleEndianInPlace;
+ using super::swapFromNetworkOrder;
+ using super::swapFromNetworkOrderInPlace;
+};
+
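+/*
+ * A brief sketch of bulk conversion (array names are hypothetical):
+ * serializing an array of uint32_t in little-endian order regardless of the
+ * host endianness:
+ *
+ *   uint32_t values[4] = {1, 2, 3, 4};
+ *   uint8_t wire[sizeof(values)];
+ *   NativeEndian::copyAndSwapToLittleEndian(wire, values, 4);
+ */
+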
+#undef MOZ_NATIVE_ENDIANNESS
+
+} /* namespace mozilla */
+
+#endif /* mozilla_EndianUtils_h */
diff --git a/mfbt/EnumSet.h b/mfbt/EnumSet.h
new file mode 100644
index 0000000000..712e03d3f3
--- /dev/null
+++ b/mfbt/EnumSet.h
@@ -0,0 +1,350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A set abstraction for enumeration values. */
+
+#ifndef mozilla_EnumSet_h
+#define mozilla_EnumSet_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <initializer_list>
+#include <type_traits>
+
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * EnumSet<T, U> is a set of values defined by an enumeration. It is
+ * implemented as a bit field of type U, with one bit per enum value. It works
+ * both for enum and enum class types. EnumSet also works with U being a
+ * BitSet.
+ */
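+/*
+ * A brief usage sketch (the enum is hypothetical):
+ *
+ *   enum class Color { Red, Green, Blue };
+ *   EnumSet<Color> warm(Color::Red, Color::Green);
+ *   warm += Color::Blue;
+ *   MOZ_ASSERT(warm.contains(Color::Green));
+ *   warm -= Color::Red;
+ *   for (Color c : warm) {
+ *     // Visits Color::Green, then Color::Blue.
+ *   }
+ */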
+template <typename T, typename Serialized = typename std::make_unsigned<
+ typename std::underlying_type<T>::type>::type>
+class EnumSet {
+ public:
+ using valueType = T;
+ using serializedType = Serialized;
+
+ constexpr EnumSet() : mBitField() {}
+
+ constexpr MOZ_IMPLICIT EnumSet(T aEnum) : mBitField(BitFor(aEnum)) {}
+
+ constexpr EnumSet(T aEnum1, T aEnum2)
+ : mBitField(BitFor(aEnum1) | BitFor(aEnum2)) {}
+
+ constexpr EnumSet(T aEnum1, T aEnum2, T aEnum3)
+ : mBitField(BitFor(aEnum1) | BitFor(aEnum2) | BitFor(aEnum3)) {}
+
+ constexpr EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
+ : mBitField(BitFor(aEnum1) | BitFor(aEnum2) | BitFor(aEnum3) |
+ BitFor(aEnum4)) {}
+
+ constexpr MOZ_IMPLICIT EnumSet(std::initializer_list<T> list) : mBitField() {
+ for (auto value : list) {
+ (*this) += value;
+ }
+ }
+
+#ifdef DEBUG
+ constexpr EnumSet(const EnumSet& aEnumSet) : mBitField(aEnumSet.mBitField) {}
+
+ constexpr EnumSet& operator=(const EnumSet& aEnumSet) {
+ mBitField = aEnumSet.mBitField;
+ IncVersion();
+ return *this;
+ }
+#endif
+
+ /**
+ * Add an element
+ */
+ constexpr void operator+=(T aEnum) {
+ IncVersion();
+ mBitField |= BitFor(aEnum);
+ }
+
+ /**
+ * Add an element
+ */
+ constexpr EnumSet operator+(T aEnum) const {
+ EnumSet result(*this);
+ result += aEnum;
+ return result;
+ }
+
+ /**
+ * Union
+ */
+ void operator+=(const EnumSet& aEnumSet) {
+ IncVersion();
+ mBitField |= aEnumSet.mBitField;
+ }
+
+ /**
+ * Union
+ */
+ EnumSet operator+(const EnumSet& aEnumSet) const {
+ EnumSet result(*this);
+ result += aEnumSet;
+ return result;
+ }
+
+ /**
+ * Remove an element
+ */
+ void operator-=(T aEnum) {
+ IncVersion();
+ mBitField &= ~(BitFor(aEnum));
+ }
+
+ /**
+ * Remove an element
+ */
+ EnumSet operator-(T aEnum) const {
+ EnumSet result(*this);
+ result -= aEnum;
+ return result;
+ }
+
+ /**
+ * Remove a set of elements
+ */
+ void operator-=(const EnumSet& aEnumSet) {
+ IncVersion();
+ mBitField &= ~(aEnumSet.mBitField);
+ }
+
+ /**
+ * Remove a set of elements
+ */
+ EnumSet operator-(const EnumSet& aEnumSet) const {
+ EnumSet result(*this);
+ result -= aEnumSet;
+ return result;
+ }
+
+ /**
+ * Clear
+ */
+ void clear() {
+ IncVersion();
+ mBitField = Serialized();
+ }
+
+ /**
+ * Intersection
+ */
+ void operator&=(const EnumSet& aEnumSet) {
+ IncVersion();
+ mBitField &= aEnumSet.mBitField;
+ }
+
+ /**
+ * Intersection
+ */
+ EnumSet operator&(const EnumSet& aEnumSet) const {
+ EnumSet result(*this);
+ result &= aEnumSet;
+ return result;
+ }
+
+ /**
+ * Equality
+ */
+ bool operator==(const EnumSet& aEnumSet) const {
+ return mBitField == aEnumSet.mBitField;
+ }
+
+ /**
+ * Equality
+ */
+ bool operator==(T aEnum) const { return mBitField == BitFor(aEnum); }
+
+ /**
+ * Not equal
+ */
+ bool operator!=(const EnumSet& aEnumSet) const {
+ return !operator==(aEnumSet);
+ }
+
+ /**
+ * Not equal
+ */
+ bool operator!=(T aEnum) const { return !operator==(aEnum); }
+
+ /**
+   * Test if an element is contained in the set.
+ */
+ bool contains(T aEnum) const { return HasBitFor(aEnum); }
+
+ /**
+ * Test if a set is contained in the set.
+ */
+ bool contains(const EnumSet& aEnumSet) const {
+ return (mBitField & aEnumSet.mBitField) == aEnumSet.mBitField;
+ }
+
+ /**
+ * Return the number of elements in the set.
+ */
+ size_t size() const {
+ if constexpr (std::is_unsigned_v<Serialized>) {
+ if constexpr (kMaxBits > 32) {
+ return CountPopulation64(mBitField);
+ } else {
+ return CountPopulation32(mBitField);
+ }
+ } else {
+ return mBitField.Count();
+ }
+ }
+
+ bool isEmpty() const {
+ if constexpr (std::is_unsigned_v<Serialized>) {
+ return mBitField == 0;
+ } else {
+ return mBitField.IsEmpty();
+ }
+ }
+
+ Serialized serialize() const { return mBitField; }
+
+ void deserialize(Serialized aValue) {
+ IncVersion();
+ mBitField = aValue;
+ }
+
+ class ConstIterator {
+ const EnumSet* mSet;
+ size_t mPos;
+#ifdef DEBUG
+ uint64_t mVersion;
+#endif
+
+ void checkVersion() const {
+ // Check that the set has not been modified while being iterated.
+ MOZ_ASSERT_IF(mSet, mSet->mVersion == mVersion);
+ }
+
+ public:
+ ConstIterator(const EnumSet& aSet, size_t aPos) : mSet(&aSet), mPos(aPos) {
+#ifdef DEBUG
+ mVersion = mSet->mVersion;
+#endif
+ MOZ_ASSERT(aPos <= kMaxBits);
+ if (aPos != kMaxBits && !mSet->HasBitAt(mPos)) {
+ ++*this;
+ }
+ }
+
+ ConstIterator(const ConstIterator& aOther)
+ : mSet(aOther.mSet), mPos(aOther.mPos) {
+#ifdef DEBUG
+ mVersion = aOther.mVersion;
+ checkVersion();
+#endif
+ }
+
+ ConstIterator(ConstIterator&& aOther)
+ : mSet(aOther.mSet), mPos(aOther.mPos) {
+#ifdef DEBUG
+ mVersion = aOther.mVersion;
+ checkVersion();
+#endif
+ aOther.mSet = nullptr;
+ }
+
+ ~ConstIterator() { checkVersion(); }
+
+ bool operator==(const ConstIterator& other) const {
+ MOZ_ASSERT(mSet == other.mSet);
+ checkVersion();
+ return mPos == other.mPos;
+ }
+
+ bool operator!=(const ConstIterator& other) const {
+ return !(*this == other);
+ }
+
+ T operator*() const {
+ MOZ_ASSERT(mSet);
+ MOZ_ASSERT(mPos < kMaxBits);
+ MOZ_ASSERT(mSet->HasBitAt(mPos));
+ checkVersion();
+ return T(mPos);
+ }
+
+ ConstIterator& operator++() {
+ MOZ_ASSERT(mSet);
+ MOZ_ASSERT(mPos < kMaxBits);
+ checkVersion();
+ do {
+ mPos++;
+ } while (mPos < kMaxBits && !mSet->HasBitAt(mPos));
+ return *this;
+ }
+ };
+
+ ConstIterator begin() const { return ConstIterator(*this, 0); }
+
+ ConstIterator end() const { return ConstIterator(*this, kMaxBits); }
+
+ private:
+ constexpr static Serialized BitFor(T aEnum) {
+ const auto pos = static_cast<size_t>(aEnum);
+ return BitAt(pos);
+ }
+
+ constexpr static Serialized BitAt(size_t aPos) {
+ MOZ_DIAGNOSTIC_ASSERT(aPos < kMaxBits);
+ if constexpr (std::is_unsigned_v<Serialized>) {
+ return static_cast<Serialized>(Serialized{1} << aPos);
+ } else {
+ Serialized bitField;
+ bitField[aPos] = true;
+ return bitField;
+ }
+ }
+
+ constexpr bool HasBitFor(T aEnum) const {
+ const auto pos = static_cast<size_t>(aEnum);
+ return HasBitAt(pos);
+ }
+
+ constexpr bool HasBitAt(size_t aPos) const {
+ return static_cast<bool>(mBitField & BitAt(aPos));
+ }
+
+ constexpr void IncVersion() {
+#ifdef DEBUG
+ mVersion++;
+#endif
+ }
+
+ static constexpr size_t MaxBits() {
+ if constexpr (std::is_unsigned_v<Serialized>) {
+ return sizeof(Serialized) * 8;
+ } else {
+ return Serialized::Size();
+ }
+ }
+
+ static constexpr size_t kMaxBits = MaxBits();
+
+ Serialized mBitField;
+
+#ifdef DEBUG
+ uint64_t mVersion = 0;
+#endif
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_EnumSet_h_*/
diff --git a/mfbt/EnumTypeTraits.h b/mfbt/EnumTypeTraits.h
new file mode 100644
index 0000000000..528e1db8a7
--- /dev/null
+++ b/mfbt/EnumTypeTraits.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Type traits for enums. */
+
+#ifndef mozilla_EnumTypeTraits_h
+#define mozilla_EnumTypeTraits_h
+
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <size_t EnumSize, bool EnumSigned, size_t StorageSize,
+ bool StorageSigned>
+struct EnumFitsWithinHelper;
+
+// Signed enum, signed storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, true, StorageSize, true>
+ : public std::integral_constant<bool, (EnumSize <= StorageSize)> {};
+
+// Signed enum, unsigned storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, true, StorageSize, false>
+ : public std::integral_constant<bool, false> {};
+
+// Unsigned enum, signed storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, false, StorageSize, true>
+ : public std::integral_constant<bool, (EnumSize * 2 <= StorageSize)> {};
+
+// Unsigned enum, unsigned storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, false, StorageSize, false>
+ : public std::integral_constant<bool, (EnumSize <= StorageSize)> {};
+
+} // namespace detail
+
+/*
+ * Type trait that determines whether the enum type T can fit within the
+ * integral type Storage without data loss. This trait should be used with
+ * caution with an enum type whose underlying type has not been explicitly
+ * specified: for such enums, the C++ implementation is free to choose a type
+ * no smaller than int whose range encompasses all possible values of the enum.
+ * So for an enum with only small non-negative values, the underlying type may
+ * be either int or unsigned int, depending on the whims of the implementation.
+ */
+template <typename T, typename Storage>
+struct EnumTypeFitsWithin
+ : public detail::EnumFitsWithinHelper<
+ sizeof(T),
+ std::is_signed<typename std::underlying_type<T>::type>::value,
+ sizeof(Storage), std::is_signed<Storage>::value> {
+ static_assert(std::is_enum<T>::value, "must provide an enum type");
+ static_assert(std::is_integral<Storage>::value,
+ "must provide an integral type");
+};
+
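+/*
+ * For illustration (the enum is hypothetical): given
+ *
+ *   enum class Small : uint8_t { A, B };
+ *
+ * EnumTypeFitsWithin<Small, int16_t>::value should be true, while
+ * EnumTypeFitsWithin<Small, int8_t>::value should be false, since the
+ * unsigned 8-bit range does not fit in a signed 8-bit type.
+ */
+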
+/*
+ * Provides information about the highest enum member value.
+ * Each specialization of struct MaxEnumValue should define
+ * "static constexpr unsigned int value".
+ *
+ * example:
+ *
+ * enum ExampleEnum
+ * {
+ * CAT = 0,
+ * DOG,
+ * HAMSTER
+ * };
+ *
+ * template <>
+ * struct MaxEnumValue<ExampleEnum>
+ * {
+ * static constexpr unsigned int value = static_cast<unsigned int>(HAMSTER);
+ * };
+ */
+template <typename T>
+struct MaxEnumValue; // no need to define the primary template
+
+/**
+ * Get the underlying value of an enum, but typesafe.
+ *
+ * example:
+ *
+ * enum class Pet : int16_t {
+ * Cat,
+ * Dog,
+ * Fish
+ * };
+ * enum class Plant {
+ * Flower,
+ * Tree,
+ * Vine
+ * };
+ * UnderlyingValue(Pet::Fish) -> int16_t(2)
+ * UnderlyingValue(Plant::Tree) -> int(1)
+ */
+template <typename T>
+inline constexpr auto UnderlyingValue(const T v) {
+ static_assert(std::is_enum_v<T>);
+ return static_cast<typename std::underlying_type<T>::type>(v);
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_EnumTypeTraits_h */
diff --git a/mfbt/EnumeratedArray.h b/mfbt/EnumeratedArray.h
new file mode 100644
index 0000000000..f6edff4875
--- /dev/null
+++ b/mfbt/EnumeratedArray.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* EnumeratedArray is like Array, but indexed by a typed enum. */
+
+#ifndef mozilla_EnumeratedArray_h
+#define mozilla_EnumeratedArray_h
+
+#include <utility>
+
+#include "mozilla/Array.h"
+
+namespace mozilla {
+
+/**
+ * EnumeratedArray is a fixed-size array container for use when an
+ * array is indexed by a specific enum class.
+ *
+ * This provides type safety by guarding at compile time against accidentally
+ * indexing such arrays with unrelated values. This also removes the need
+ * for manual casting when using a typed enum value to index arrays.
+ *
+ * Aside from the typing of indices, EnumeratedArray is similar to Array.
+ *
+ * Example:
+ *
+ * enum class AnimalSpecies {
+ * Cow,
+ * Sheep,
+ * Count
+ * };
+ *
+ * EnumeratedArray<AnimalSpecies, AnimalSpecies::Count, int> headCount;
+ *
+ * headCount[AnimalSpecies::Cow] = 17;
+ * headCount[AnimalSpecies::Sheep] = 30;
+ *
+ */
+template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+class EnumeratedArray {
+ public:
+ static const size_t kSize = size_t(SizeAsEnumValue);
+
+ private:
+ typedef Array<ValueType, kSize> ArrayType;
+
+ ArrayType mArray;
+
+ public:
+ EnumeratedArray() = default;
+
+ template <typename... Args>
+ MOZ_IMPLICIT constexpr EnumeratedArray(Args&&... aArgs)
+ : mArray{std::forward<Args>(aArgs)...} {}
+
+ ValueType& operator[](IndexType aIndex) { return mArray[size_t(aIndex)]; }
+
+ const ValueType& operator[](IndexType aIndex) const {
+ return mArray[size_t(aIndex)];
+ }
+
+ typedef typename ArrayType::iterator iterator;
+ typedef typename ArrayType::const_iterator const_iterator;
+ typedef typename ArrayType::reverse_iterator reverse_iterator;
+ typedef typename ArrayType::const_reverse_iterator const_reverse_iterator;
+
+ // Methods for range-based for loops.
+ iterator begin() { return mArray.begin(); }
+ const_iterator begin() const { return mArray.begin(); }
+ const_iterator cbegin() const { return mArray.cbegin(); }
+ iterator end() { return mArray.end(); }
+ const_iterator end() const { return mArray.end(); }
+ const_iterator cend() const { return mArray.cend(); }
+
+ // Methods for reverse iterating.
+ reverse_iterator rbegin() { return mArray.rbegin(); }
+ const_reverse_iterator rbegin() const { return mArray.rbegin(); }
+ const_reverse_iterator crbegin() const { return mArray.crbegin(); }
+ reverse_iterator rend() { return mArray.rend(); }
+ const_reverse_iterator rend() const { return mArray.rend(); }
+ const_reverse_iterator crend() const { return mArray.crend(); }
+};
+
+} // namespace mozilla
+
+#endif // mozilla_EnumeratedArray_h
diff --git a/mfbt/EnumeratedRange.h b/mfbt/EnumeratedRange.h
new file mode 100644
index 0000000000..ef0e6910ab
--- /dev/null
+++ b/mfbt/EnumeratedRange.h
@@ -0,0 +1,211 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Iterator over contiguous enum values */
+
+/*
+ * Implements generator functions that create a range to iterate over the values
+ * of a scoped or unscoped enum. Unlike IntegerRange, which can only function on
+ * the underlying integral type, the elements of the generated sequence will
+ * have the type of the enum in question.
+ *
+ * Note that the enum values should be contiguous in the iterated range;
+ * unfortunately there exists no way for EnumeratedRange to enforce this
+ * either dynamically or at compile time.
+ */
+
+#ifndef mozilla_EnumeratedRange_h
+#define mozilla_EnumeratedRange_h
+
+#include <limits>
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ReverseIterator.h"
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename EnumTypeT>
+class EnumeratedIterator {
+ public:
+ typedef typename std::underlying_type<EnumTypeT>::type IntTypeT;
+
+ template <typename EnumType>
+ constexpr explicit EnumeratedIterator(EnumType aCurrent)
+ : mCurrent(aCurrent) {}
+
+ template <typename EnumType>
+ explicit EnumeratedIterator(const EnumeratedIterator<EnumType>& aOther)
+ : mCurrent(aOther.mCurrent) {}
+
+ EnumTypeT operator*() const { return mCurrent; }
+
+ /* Increment and decrement operators */
+
+ EnumeratedIterator& operator++() {
+ mCurrent = EnumTypeT(IntTypeT(mCurrent) + IntTypeT(1));
+ return *this;
+ }
+ EnumeratedIterator& operator--() {
+ mCurrent = EnumTypeT(IntTypeT(mCurrent) - IntTypeT(1));
+ return *this;
+ }
+ EnumeratedIterator operator++(int) {
+ auto ret = *this;
+ mCurrent = EnumTypeT(IntTypeT(mCurrent) + IntTypeT(1));
+ return ret;
+ }
+ EnumeratedIterator operator--(int) {
+ auto ret = *this;
+ mCurrent = EnumTypeT(IntTypeT(mCurrent) - IntTypeT(1));
+ return ret;
+ }
+
+ /* Comparison operators */
+
+ template <typename EnumType>
+ friend bool operator==(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+ template <typename EnumType>
+ friend bool operator!=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+ template <typename EnumType>
+ friend bool operator<(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+ template <typename EnumType>
+ friend bool operator<=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+ template <typename EnumType>
+ friend bool operator>(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+ template <typename EnumType>
+ friend bool operator>=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2);
+
+ private:
+ EnumTypeT mCurrent;
+};
+
+template <typename EnumType>
+bool operator==(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent == aIter2.mCurrent;
+}
+
+template <typename EnumType>
+bool operator!=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent != aIter2.mCurrent;
+}
+
+template <typename EnumType>
+bool operator<(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent < aIter2.mCurrent;
+}
+
+template <typename EnumType>
+bool operator<=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent <= aIter2.mCurrent;
+}
+
+template <typename EnumType>
+bool operator>(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent > aIter2.mCurrent;
+}
+
+template <typename EnumType>
+bool operator>=(const EnumeratedIterator<EnumType>& aIter1,
+ const EnumeratedIterator<EnumType>& aIter2) {
+ return aIter1.mCurrent >= aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+class EnumeratedRange {
+ public:
+ typedef EnumeratedIterator<EnumTypeT> iterator;
+ typedef EnumeratedIterator<EnumTypeT> const_iterator;
+ typedef ReverseIterator<iterator> reverse_iterator;
+ typedef ReverseIterator<const_iterator> const_reverse_iterator;
+
+ template <typename EnumType>
+ constexpr EnumeratedRange(EnumType aBegin, EnumType aEnd)
+ : mBegin(aBegin), mEnd(aEnd) {}
+
+ iterator begin() const { return iterator(mBegin); }
+ const_iterator cbegin() const { return begin(); }
+ iterator end() const { return iterator(mEnd); }
+ const_iterator cend() const { return end(); }
+ reverse_iterator rbegin() const { return reverse_iterator(mEnd); }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+ reverse_iterator rend() const { return reverse_iterator(mBegin); }
+ const_reverse_iterator crend() const { return rend(); }
+
+ private:
+ EnumTypeT mBegin;
+ EnumTypeT mEnd;
+};
+
+} // namespace detail
+
+#ifdef __GNUC__
+// Enums can have an unsigned underlying type, which makes some of the
+// comparisons below always true or always false. Temporarily disable
+// -Wtype-limits to avoid breaking -Werror builds.
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+
+// Create a range to iterate from aBegin to aEnd, exclusive.
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeEnumeratedRange(EnumType aBegin,
+ EnumType aEnd) {
+ MOZ_ASSERT(aBegin <= aEnd, "Cannot generate invalid, unbounded range!");
+ return detail::EnumeratedRange<EnumType>(aBegin, aEnd);
+}
+
+// Create a range to iterate from EnumType(0) to aEnd, exclusive. EnumType(0)
+// should exist, but note that there is no way for us to ensure that it does!
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeEnumeratedRange(EnumType aEnd) {
+ return MakeEnumeratedRange(EnumType(0), aEnd);
+}
+
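+// A brief usage sketch (the enum is hypothetical):
+//
+//   enum class Pet { Cat, Dog, Fish, Count };
+//   for (auto pet : MakeEnumeratedRange(Pet::Count)) {
+//     // Visits Pet::Cat, Pet::Dog, then Pet::Fish; Pet::Count is excluded.
+//   }
+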
+// Create a range to iterate from aBegin to aEnd, inclusive.
+//
+// NOTE: This internally constructs a value that is one past `aEnd`, so the
+// enumeration needs to either have a fixed underlying type, or `aEnd + 1` must
+// be inside the range of the enumeration, in order to not be undefined
+// behavior.
+//
+// See bug 1614512.
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeInclusiveEnumeratedRange(
+ EnumType aBegin, EnumType aEnd) {
+ using EnumUnderlyingType = typename std::underlying_type_t<EnumType>;
+ const auto end = static_cast<EnumUnderlyingType>(aEnd);
+
+ MOZ_ASSERT(end != std::numeric_limits<EnumUnderlyingType>::max(),
+ "aEnd shouldn't overflow!");
+ return MakeEnumeratedRange(aBegin, static_cast<EnumType>(end + 1));
+}
+
+template <typename EnumType>
+constexpr auto MakeInclusiveEnumeratedRange(EnumType aEnd) {
+ return MakeInclusiveEnumeratedRange(EnumType{0}, aEnd);
+}
+
+#ifdef __GNUC__
+# pragma GCC diagnostic pop
+#endif
+
+} // namespace mozilla
+
+#endif // mozilla_EnumeratedRange_h
diff --git a/mfbt/FStream.h b/mfbt/FStream.h
new file mode 100644
index 0000000000..74f2d16595
--- /dev/null
+++ b/mfbt/FStream.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Similar to std::ifstream/ofstream, but takes char16ptr_t on Windows.
+// Until C++17, the standard stream functions could only take char* filenames,
+// so Unicode filenames were lost on Windows. To address this limitation, this
+// wrapper uses the proprietary wchar_t* overloads on MSVC and the
+// __gnu_cxx::stdio_filebuf extension on MinGW. Once we can use the C++17
+// filesystem API everywhere, we will be able to avoid this wrapper.
+
+#ifndef mozilla_FStream_h
+#define mozilla_FStream_h
+
+#include "mozilla/Char16.h"
+#include <istream>
+#include <ostream>
+#include <fstream>
+#if defined(__MINGW32__) && defined(__GLIBCXX__)
+# include "mozilla/UniquePtr.h"
+# include <fcntl.h>
+# include <ext/stdio_filebuf.h>
+#endif
+
+namespace mozilla {
+
+#if defined(__MINGW32__) && defined(__GLIBCXX__)
+// Before C++17, MinGW does not support the wchar_t* overloads, which are an
+// MSVC extension, so we have to implement wide-character wrappers using a GNU
+// extension.
+class IFStream : public std::istream {
+ public:
+ explicit IFStream(char16ptr_t filename, openmode mode = in);
+
+ std::filebuf* rdbuf() const { return mFileBuf.get(); }
+
+ bool is_open() const { return mFileBuf && mFileBuf->is_open(); }
+ void open(char16ptr_t filename, openmode mode = in);
+  void close() { if (mFileBuf) { mFileBuf->close(); } }
+
+ private:
+ UniquePtr<std::filebuf> mFileBuf;
+};
+
+inline IFStream::IFStream(char16ptr_t filename, openmode mode)
+ : std::istream(nullptr) {
+ open(filename, mode);
+}
+
+inline void IFStream::open(char16ptr_t filename, openmode mode) {
+ int fmode = _O_RDONLY;
+ if (mode & binary) {
+ fmode |= _O_BINARY;
+ } else {
+ fmode |= _O_TEXT;
+ }
+ int fd = _wopen(filename, fmode);
+ mFileBuf = MakeUnique<__gnu_cxx::stdio_filebuf<char>>(fd, mode);
+ std::istream::rdbuf(mFileBuf.get());
+}
+
+class OFStream : public std::ostream {
+ public:
+ explicit OFStream(char16ptr_t filename, openmode mode = out);
+
+ std::filebuf* rdbuf() const { return mFileBuf.get(); }
+
+ bool is_open() const { return mFileBuf && mFileBuf->is_open(); }
+ void open(char16ptr_t filename, openmode mode = out);
+  void close() { if (mFileBuf) { mFileBuf->close(); } }
+
+ private:
+ UniquePtr<std::filebuf> mFileBuf;
+};
+
+inline OFStream::OFStream(char16ptr_t filename, openmode mode)
+ : std::ostream(nullptr) {
+ open(filename, mode);
+}
+
+inline void OFStream::open(char16ptr_t filename, openmode mode) {
+ int fmode = _O_WRONLY;
+ if (mode & binary) {
+ fmode |= _O_BINARY;
+ } else {
+ fmode |= _O_TEXT;
+ }
+ if (mode & trunc) {
+ fmode |= _O_CREAT | _O_TRUNC;
+ }
+ int fd = _wopen(filename, fmode);
+ mFileBuf = MakeUnique<__gnu_cxx::stdio_filebuf<char>>(fd, mode);
+ std::ostream::rdbuf(mFileBuf.get());
+}
+
+#elif defined(XP_WIN)
+class IFStream : public std::ifstream {
+ public:
+ explicit IFStream(char16ptr_t filename, openmode mode = in)
+ : std::ifstream(filename, mode) {}
+
+ void open(char16ptr_t filename, openmode mode = in) {
+ std::ifstream::open(filename, mode);
+ }
+};
+
+class OFStream : public std::ofstream {
+ public:
+ explicit OFStream(char16ptr_t filename, openmode mode = out)
+ : std::ofstream(filename, mode) {}
+
+ void open(char16ptr_t filename, openmode mode = out) {
+ std::ofstream::open(filename, mode);
+ }
+};
+#else
+using IFStream = std::ifstream;
+using OFStream = std::ofstream;
+#endif
+
+} // namespace mozilla
+
+#endif /* mozilla_FStream_h */
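
A minimal usage sketch (the path is hypothetical; u"..." literals convert to
char16ptr_t on Windows, while on other platforms these aliases are the plain
narrow-path std streams):

  mozilla::OFStream out(u"C:\\temp\\étude.txt");  // Unicode path preserved
  out << "hello\n";
  out.close();

  mozilla::IFStream in(u"C:\\temp\\étude.txt");
  std::string line;         // assumes <string> is included
  std::getline(in, line);   // line == "hello"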
diff --git a/mfbt/FastBernoulliTrial.h b/mfbt/FastBernoulliTrial.h
new file mode 100644
index 0000000000..d1c4f3b9fb
--- /dev/null
+++ b/mfbt/FastBernoulliTrial.h
@@ -0,0 +1,381 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_FastBernoulliTrial_h
+#define mozilla_FastBernoulliTrial_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <cmath>
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * class FastBernoulliTrial: Efficient sampling with uniform probability
+ *
+ * When gathering statistics about a program's behavior, we may be observing
+ * events that occur very frequently (e.g., function calls or memory
+ * allocations) and we may be gathering information that is somewhat expensive
+ * to produce (e.g., call stacks). Sampling all the events could have a
+ * significant impact on the program's performance.
+ *
+ * Why not just sample every N'th event? This technique is called "systematic
+ * sampling"; it's simple and efficient, and it's fine if we imagine a
+ * patternless stream of events. But what if we're sampling allocations, and the
+ * program happens to have a loop where each iteration does exactly N
+ * allocations? You would end up sampling the same allocation every time through
+ * the loop; the entire rest of the loop becomes invisible to your measurements!
+ * More generally, if each iteration does M allocations, and M and N have any
+ * common divisor at all, most allocation sites will never be sampled. If
+ * they're both even, say, the odd-numbered allocations disappear from your
+ * results.
+ *
+ * Ideally, we'd like each event to have some probability P of being sampled,
+ * independent of its neighbors and of its position in the sequence. This is
+ * called "Bernoulli sampling", and it doesn't suffer from any of the problems
+ * mentioned above.
+ *
+ * One disadvantage of Bernoulli sampling is that you can't be sure exactly how
+ * many samples you'll get: technically, it's possible that you might sample
+ * none of them, or all of them. But if the number of events N is large, these
+ * aren't likely outcomes; you can generally expect somewhere around P * N
+ * events to be sampled.
+ *
+ * The other disadvantage of Bernoulli sampling is that you have to generate a
+ * random number for every event, which can be slow.
+ *
+ * [significant pause]
+ *
+ * BUT NOT WITH THIS CLASS! FastBernoulliTrial lets you do true Bernoulli
+ * sampling, while generating a fresh random number only when we do decide to
+ * sample an event, not on every trial. When it decides not to sample, a call to
+ * |FastBernoulliTrial::trial| is nothing but decrementing a counter and
+ * comparing it to zero. So the lower your sampling probability is, the less
+ * overhead FastBernoulliTrial imposes.
+ *
+ * Probabilities of 0 and 1 are handled efficiently. (In neither case need we
+ * ever generate a random number at all.)
+ *
+ * The essential API:
+ *
+ * - FastBernoulliTrial(double P)
+ * Construct an instance that selects events with probability P.
+ *
+ * - FastBernoulliTrial::trial()
+ * Return true with probability P. Call this each time an event occurs, to
+ * decide whether to sample it or not.
+ *
+ * - FastBernoulliTrial::trial(size_t n)
+ * Equivalent to calling trial() |n| times, and returning true if any of those
+ * calls do. However, like trial, this runs in fast constant time.
+ *
+ * What is this good for? In some applications, some events are "bigger" than
+ * others. For example, large allocations are more significant than small
+ * allocations. Perhaps we'd like to imagine that we're drawing allocations
+ * from a stream of bytes, and performing a separate Bernoulli trial on every
+ * byte from the stream. We can accomplish this by calling |t.trial(S)| for
+ * the number of bytes S, and sampling the event if that returns true.
+ *
+ * Of course, this style of sampling needs to be paired with analysis and
+ * presentation that makes the size of the event apparent, lest trials with
+ * large values for |n| appear to be indistinguishable from those with small
+ * values for |n|.
+ */
+class FastBernoulliTrial {
+ /*
+ * This comment should just read, "Generate skip counts with a geometric
+ * distribution", and leave everyone to go look that up and see why it's the
+ * right thing to do, if they don't know already.
+ *
+ * BUT IF YOU'RE CURIOUS, COMMENTS ARE FREE...
+ *
+ * Instead of generating a fresh random number for every trial, we can
+ * randomly generate a count of how many times we should return false before
+ * the next time we return true. We call this a "skip count". Once we've
+ * returned true, we generate a fresh skip count, and begin counting down
+ * again.
+ *
+ * Here's an awesome fact: by exercising a little care in the way we generate
+ * skip counts, we can produce results indistinguishable from those we would
+ * get "rolling the dice" afresh for every trial.
+ *
+ * In short, skip counts in Bernoulli trials of probability P obey a geometric
+ * distribution. If a random variable X is uniformly distributed from [0..1),
+ * then std::floor(std::log(X) / std::log(1-P)) has the appropriate geometric
+ * distribution for the skip counts.
+ *
+ * Why that formula?
+ *
+ * Suppose we're to return |true| with some probability P, say, 0.3. Spread
+ * all possible futures along a line segment of length 1. In portion P of
+ * those cases, we'll return true on the next call to |trial|; the skip count
+ * is 0. For the remaining portion 1-P of cases, the skip count is 1 or more.
+ *
+ * skip: 0 1 or more
+ * |------------------^-----------------------------------------|
+ * portion: 0.3 0.7
+ * P 1-P
+ *
+ * But the "1 or more" section of the line is subdivided the same way: *within
+ * that section*, in portion P the second call to |trial()| returns true, and
+ * in portion 1-P it returns false a second time; the skip count is two or
+ * more. So we return true on the second call in proportion 0.7 * 0.3, and
+ * skip at least the first two in proportion 0.7 * 0.7.
+ *
+ * skip: 0 1 2 or more
+ * |------------------^------------^----------------------------|
+ * portion: 0.3 0.7 * 0.3 0.7 * 0.7
+ * P (1-P)*P (1-P)^2
+ *
+ * We can continue to subdivide:
+ *
+ * skip >= 0: |------------------------------------------------- (1-P)^0 --|
+ * skip >= 1: | ------------------------------- (1-P)^1 --|
+ * skip >= 2: | ------------------ (1-P)^2 --|
+ * skip >= 3: | ^ ---------- (1-P)^3 --|
+ * skip >= 4: | . --- (1-P)^4 --|
+ * .
+ * ^X, see below
+ *
+ * In other words, the likelihood of the next n calls to |trial| returning
+ * false is (1-P)^n. The longer a run we require, the more the likelihood
+ * drops. Further calls may return false too, but this is the probability
+ * we'll skip at least n.
+ *
+ * This is interesting, because we can pick a point along this line segment
+ * and see which skip count's range it falls within; the point X above, for
+ * example, is within the ">= 2" range, but not within the ">= 3" range, so it
+ * designates a skip count of 2. So if we pick points on the line at random
+ * and use the skip counts they fall under, that will be indistinguishable
+ * from generating a fresh random number between 0 and 1 for each trial and
+ * comparing it to P.
+ *
+ * So to find the skip count for a point X, we must ask: To what whole power
+ * must we raise 1-P such that we include X, but the next power would exclude
+ * it? This is exactly std::floor(std::log(X) / std::log(1-P)).
+ *
+ * Our algorithm is then, simply: When constructed, compute an initial skip
+ * count. Return false from |trial| that many times, and then compute a new
+ * skip count.
+ *
+ * For a call to |trial(n)|, if the skip count is greater than n, return false
+ * and subtract n from the skip count. If the skip count is less than n,
+ * return true and compute a new skip count. Since each trial is independent,
+ * it doesn't matter by how much n overshoots the skip count; we can actually
+ * compute a new skip count at *any* time without affecting the distribution.
+ * This is really beautiful.
+ */
+ public:
+ /**
+ * Construct a fast Bernoulli trial generator. Calls to |trial()| return true
+ * with probability |aProbability|. Use |aState0| and |aState1| to seed the
+   * random number generator; the two states must not both be zero.
+ */
+ FastBernoulliTrial(double aProbability, uint64_t aState0, uint64_t aState1)
+ : mProbability(0),
+ mInvLogNotProbability(0),
+ mGenerator(aState0, aState1),
+ mSkipCount(0) {
+ setProbability(aProbability);
+ }
+
+ /**
+ * Return true with probability |mProbability|. Call this each time an event
+ * occurs, to decide whether to sample it or not. The lower |mProbability| is,
+ * the faster this function runs.
+ */
+ bool trial() {
+ if (mSkipCount) {
+ mSkipCount--;
+ return false;
+ }
+
+ return chooseSkipCount();
+ }
+
+ /**
+ * Equivalent to calling trial() |n| times, and returning true if any of those
+ * calls do. However, like trial, this runs in fast constant time.
+ *
+ * What is this good for? In some applications, some events are "bigger" than
+ * others. For example, large allocations are more significant than small
+ * allocations. Perhaps we'd like to imagine that we're drawing allocations
+ * from a stream of bytes, and performing a separate Bernoulli trial on every
+ * byte from the stream. We can accomplish this by calling |t.trial(S)| for
+ * the number of bytes S, and sampling the event if that returns true.
+ *
+ * Of course, this style of sampling needs to be paired with analysis and
+ * presentation that makes the "size" of the event apparent, lest trials with
+ * large values for |n| appear to be indistinguishable from those with small
+ * values for |n|, despite being potentially much more likely to be sampled.
+ */
+ bool trial(size_t aCount) {
+ if (mSkipCount > aCount) {
+ mSkipCount -= aCount;
+ return false;
+ }
+
+ return chooseSkipCount();
+ }
+
+ void setRandomState(uint64_t aState0, uint64_t aState1) {
+ mGenerator.setState(aState0, aState1);
+ }
+
+ void setProbability(double aProbability) {
+ MOZ_ASSERT(0 <= aProbability && aProbability <= 1);
+ mProbability = aProbability;
+ if (0 < mProbability && mProbability < 1) {
+ /*
+ * Let's look carefully at how this calculation plays out in floating-
+ * point arithmetic. We'll assume IEEE, but the final C++ code we arrive
+ * at would still be fine if our numbers were mathematically perfect. So,
+ * while we've considered IEEE's edge cases, we haven't done anything that
+ * should be actively bad when using other representations.
+ *
+ * (In the below, read comparisons as exact mathematical comparisons: when
+ * we say something "equals 1", that means it's exactly equal to 1. We
+ * treat approximation using intervals with open boundaries: saying a
+ * value is in (0,1) doesn't specify how close to 0 or 1 the value gets.
+ * When we use closed boundaries like [2**-53, 1], we're careful to ensure
+ * the boundary values are actually representable.)
+ *
+ * - After the comparison above, we know mProbability is in (0,1).
+ *
+ * - The gaps below 1 are 2**-53, so that interval is (0, 1-2**-53].
+ *
+ * - Because the floating-point gaps near 1 are wider than those near
+ * zero, there are many small positive doubles ε such that 1-ε rounds to
+ * exactly 1. However, 2**-53 can be represented exactly. So
+ * 1-mProbability is in [2**-53, 1].
+ *
+ * - log(1 - mProbability) is thus in (-37, 0].
+ *
+ * That range includes zero, but when we use mInvLogNotProbability, it
+ * would be helpful if we could trust that it's negative. So when log(1
+ * - mProbability) is 0, we'll just set mProbability to 0, so that
+ * mInvLogNotProbability is not used in chooseSkipCount.
+ *
+ * - How much of the range of mProbability does this cause us to ignore?
+ * The only value for which log returns 0 is exactly 1; the slope of log
+ * at 1 is 1, so for small ε such that 1 - ε != 1, log(1 - ε) is -ε,
+ * never 0. The gaps near one are larger than the gaps near zero, so if
+ * 1 - ε wasn't 1, then -ε is representable. So if log(1 - mProbability)
+ * isn't 0, then 1 - mProbability isn't 1, which means that mProbability
+ * is at least 2**-53, as discussed earlier. This is a sampling
+ * likelihood of roughly one in ten trillion, which is unlikely to be
+ * distinguishable from zero in practice.
+ *
+ * So by forbidding zero, we've tightened our range to (-37, -2**-53].
+ *
+ * - Finally, 1 / log(1 - mProbability) is in [-2**53, -1/37). This all
+ * falls readily within the range of an IEEE double.
+ *
+ * ALL THAT HAVING BEEN SAID: here are the five lines of actual code:
+ */
+ double logNotProbability = std::log(1 - mProbability);
+ if (logNotProbability == 0.0)
+ mProbability = 0.0;
+ else
+ mInvLogNotProbability = 1 / logNotProbability;
+ }
+
+ chooseSkipCount();
+ }
+
+ private:
+ /* The likelihood that any given call to |trial| should return true. */
+ double mProbability;
+
+ /*
+ * The value of 1/std::log(1 - mProbability), cached for repeated use.
+ *
+ * If mProbability is exactly 0 or exactly 1, we don't use this value.
+ * Otherwise, we guarantee this value is in the range [-2**53, -1/37), i.e.
+ * definitely negative, as required by chooseSkipCount. See setProbability for
+ * the details.
+ */
+ double mInvLogNotProbability;
+
+ /* Our random number generator. */
+ non_crypto::XorShift128PlusRNG mGenerator;
+
+ /* The number of times |trial| should return false before next returning true.
+ */
+ size_t mSkipCount;
+
+ /*
+ * Choose the next skip count. This also returns the value that |trial| should
+ * return, since we have to check for the extreme values for mProbability
+ * anyway, and |trial| should never return true at all when mProbability is 0.
+ */
+ bool chooseSkipCount() {
+ /*
+ * If the probability is 1.0, every call to |trial| returns true. Make sure
+ * mSkipCount is 0.
+ */
+ if (mProbability == 1.0) {
+ mSkipCount = 0;
+ return true;
+ }
+
+ /*
+     * If the probability is zero, |trial| never returns true. Don't bother us
+ * for a while.
+ */
+ if (mProbability == 0.0) {
+ mSkipCount = SIZE_MAX;
+ return false;
+ }
+
+ /*
+ * What sorts of values can this call to std::floor produce?
+ *
+ * Since mGenerator.nextDouble returns a value in [0, 1-2**-53], std::log
+ * returns a value in the range [-infinity, -2**-53], all negative. Since
+ * mInvLogNotProbability is negative (see its comments), the product is
+ * positive and possibly infinite. std::floor returns +infinity unchanged.
+ * So the result will always be positive.
+ *
+ * Converting a double to an integer that is out of range for that integer
+ * is undefined behavior, so we must clamp our result to SIZE_MAX, to ensure
+ * we get an acceptable value for mSkipCount.
+ *
+ * The clamp is written carefully. Note that if we had said:
+ *
+ * if (skipCount > double(SIZE_MAX))
+ * mSkipCount = SIZE_MAX;
+ * else
+ * mSkipCount = skipCount;
+ *
+     * that leads to undefined behavior on 64-bit machines: SIZE_MAX coerced
+     * to
+ * double can equal 2^64, so if skipCount equaled 2^64 converting it to
+ * size_t would induce undefined behavior.
+ *
+ * Jakob Olesen cleverly suggested flipping the sense of the comparison to
+ * skipCount < double(SIZE_MAX). The conversion will evaluate to 2^64 or
+ * the double just below it: either way, skipCount is guaranteed to have a
+ * value that's safely convertible to size_t.
+ *
+ * (On 32-bit machines, all size_t values can be represented exactly in
+ * double, so all is well.)
+ */
+ double skipCount =
+ std::floor(std::log(mGenerator.nextDouble()) * mInvLogNotProbability);
+ if (skipCount < double(SIZE_MAX))
+ mSkipCount = skipCount;
+ else
+ mSkipCount = SIZE_MAX;
+
+ return true;
+ }
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_FastBernoulliTrial_h */
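
A minimal sketch of the byte-weighted sampling described in the class comment
(the probability, seeds, and RecordSample() hook are arbitrary/hypothetical):

  // Sample roughly one byte in ten thousand; the seeds must not both be zero.
  static mozilla::FastBernoulliTrial sampler(0.0001, 0x17b4e8a53fdd2c01ULL,
                                             0x9c30b1a74d0f6ea3ULL);

  void* SampledAlloc(size_t aBytes) {
    void* p = malloc(aBytes);  // assumes <cstdlib>
    // One constant-time call stands in for aBytes individual trials, so
    // larger allocations are proportionally more likely to be sampled.
    if (p && sampler.trial(aBytes)) {
      RecordSample(p, aBytes);  // hypothetical bookkeeping hook
    }
    return p;
  }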
diff --git a/mfbt/FloatingPoint.cpp b/mfbt/FloatingPoint.cpp
new file mode 100644
index 0000000000..4d52ffaaf8
--- /dev/null
+++ b/mfbt/FloatingPoint.cpp
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of FloatingPoint functions */
+
+#include "mozilla/FloatingPoint.h"
+
+#include <cfloat> // for FLT_MAX
+
+namespace mozilla {
+
+bool IsFloat32Representable(double aValue) {
+ // NaNs and infinities are representable.
+ if (!std::isfinite(aValue)) {
+ return true;
+ }
+
+  // If it exceeds finite |float| range, casting to |float| is always undefined
+  // behavior per C++11 [conv.double]p1 last sentence.
+ if (Abs(aValue) > FLT_MAX) {
+ return false;
+ }
+
+ // But if it's within finite range, then either it's 1) an exact value and so
+ // representable, or 2) it's "between two adjacent destination values" and
+ // safe to cast to "an implementation-defined choice of either of those
+ // values".
+ auto valueAsFloat = static_cast<float>(aValue);
+
+ // Per [conv.fpprom] this never changes value.
+ auto valueAsFloatAsDouble = static_cast<double>(valueAsFloat);
+
+ // Finally, in 1) exact representable value equals exact representable value,
+ // or 2) *changed* value does not equal original value, ergo unrepresentable.
+ return valueAsFloatAsDouble == aValue;
+}
+
+} /* namespace mozilla */
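
A few illustrative cases, worked out by hand (assumes <limits> for the NaN):

  MOZ_ASSERT(mozilla::IsFloat32Representable(1.5));     // exact in binary
  MOZ_ASSERT(!mozilla::IsFloat32Representable(0.1));    // float(0.1) widens
                                                        // to a different double
  MOZ_ASSERT(!mozilla::IsFloat32Representable(1e300));  // exceeds FLT_MAX
  MOZ_ASSERT(mozilla::IsFloat32Representable(
      std::numeric_limits<double>::quiet_NaN()));       // NaNs count as
                                                        // representable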
diff --git a/mfbt/FloatingPoint.h b/mfbt/FloatingPoint.h
new file mode 100644
index 0000000000..f4ae36257b
--- /dev/null
+++ b/mfbt/FloatingPoint.h
@@ -0,0 +1,606 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various predicates and operations on IEEE-754 floating point types. */
+
+#ifndef mozilla_FloatingPoint_h
+#define mozilla_FloatingPoint_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/Types.h"
+
+#include <algorithm>
+#include <climits>
+#include <limits>
+#include <stdint.h>
+
+namespace mozilla {
+
+/*
+ * It's reasonable to ask why we have this header at all. Don't isnan,
+ * copysign, the built-in comparison operators, and the like solve these
+ * problems? Unfortunately, they don't. We've found that various compilers
+ * (MSVC, MSVC when compiling with PGO, and GCC on OS X, at least) miscompile
+ * the standard methods in various situations, so we can't use them. Some of
+ * these compilers even have problems compiling seemingly reasonable bitwise
+ * algorithms! But with some care we've found algorithms that seem to not
+ * trigger those compiler bugs.
+ *
+ * For the aforementioned reasons, be very wary of making changes to any of
+ * these algorithms. If you must make changes, keep a careful eye out for
+ * compiler bustage, particularly PGO-specific bustage.
+ */
+
+namespace detail {
+
+/*
+ * These implementations assume float/double are 32/64-bit single/double
+ * format number types compatible with the IEEE-754 standard. C++ doesn't
+ * require this, but we required it in implementations of these algorithms that
+ * preceded this header, so we shouldn't break anything to continue doing so.
+ */
+template <typename T>
+struct FloatingPointTrait;
+
+template <>
+struct FloatingPointTrait<float> {
+ protected:
+ using Bits = uint32_t;
+
+ static constexpr unsigned kExponentWidth = 8;
+ static constexpr unsigned kSignificandWidth = 23;
+};
+
+template <>
+struct FloatingPointTrait<double> {
+ protected:
+ using Bits = uint64_t;
+
+ static constexpr unsigned kExponentWidth = 11;
+ static constexpr unsigned kSignificandWidth = 52;
+};
+
+} // namespace detail
+
+/*
+ * This struct contains details regarding the encoding of floating-point
+ * numbers that can be useful for direct bit manipulation. As of now, the
+ * template parameter has to be float or double.
+ *
+ * The nested typedef |Bits| is the unsigned integral type with the same size
+ * as T: uint32_t for float and uint64_t for double (static assertions
+ * double-check these assumptions).
+ *
+ * kExponentBias is the offset that is subtracted from the exponent when
+ * computing the value, i.e. one plus the opposite of the minimum possible
+ * exponent.
+ * kExponentShift is the shift that one needs to apply to retrieve the
+ * exponent component of the value.
+ *
+ * kSignBit contains a bit mask; bit-and-ing a value with this mask isolates
+ * the sign bit.
+ * kExponentBits contains the mask needed for obtaining the exponent bits and
+ * kSignificandBits contains the mask needed for obtaining the significand
+ * bits.
+ *
+ * Full details of how floating point number formats are encoded are beyond
+ * the scope of this comment. For more information, see
+ * http://en.wikipedia.org/wiki/IEEE_floating_point
+ * http://en.wikipedia.org/wiki/Floating_point#IEEE_754:_floating_point_in_modern_computers
+ */
+template <typename T>
+struct FloatingPoint final : private detail::FloatingPointTrait<T> {
+ private:
+ using Base = detail::FloatingPointTrait<T>;
+
+ public:
+ /**
+ * An unsigned integral type suitable for accessing the bitwise representation
+ * of T.
+ */
+ using Bits = typename Base::Bits;
+
+ static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
+
+ /** The bit-width of the exponent component of T. */
+ using Base::kExponentWidth;
+
+ /** The bit-width of the significand component of T. */
+ using Base::kSignificandWidth;
+
+ static_assert(1 + kExponentWidth + kSignificandWidth == CHAR_BIT * sizeof(T),
+ "sign bit plus bit widths should sum to overall bit width");
+
+ /**
+ * The exponent field in an IEEE-754 floating point number consists of bits
+ * encoding an unsigned number. The *actual* represented exponent (for all
+ * values finite and not denormal) is that value, minus a bias |kExponentBias|
+ * so that a useful range of numbers is represented.
+ */
+ static constexpr unsigned kExponentBias = (1U << (kExponentWidth - 1)) - 1;
+
+ /**
+ * The amount by which the bits of the exponent-field in an IEEE-754 floating
+ * point number are shifted from the LSB of the floating point type.
+ */
+ static constexpr unsigned kExponentShift = kSignificandWidth;
+
+ /** The sign bit in the floating point representation. */
+ static constexpr Bits kSignBit = static_cast<Bits>(1)
+ << (CHAR_BIT * sizeof(Bits) - 1);
+
+ /** The exponent bits in the floating point representation. */
+ static constexpr Bits kExponentBits =
+ ((static_cast<Bits>(1) << kExponentWidth) - 1) << kSignificandWidth;
+
+ /** The significand bits in the floating point representation. */
+ static constexpr Bits kSignificandBits =
+ (static_cast<Bits>(1) << kSignificandWidth) - 1;
+
+ static_assert((kSignBit & kExponentBits) == 0,
+ "sign bit shouldn't overlap exponent bits");
+ static_assert((kSignBit & kSignificandBits) == 0,
+ "sign bit shouldn't overlap significand bits");
+ static_assert((kExponentBits & kSignificandBits) == 0,
+ "exponent bits shouldn't overlap significand bits");
+
+ static_assert((kSignBit | kExponentBits | kSignificandBits) == ~Bits(0),
+ "all bits accounted for");
+};
+
+/**
+ * Determines whether a float/double is negative or -0. It is an error
+ * to call this method on a float/double which is NaN.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsNegative(T aValue) {
+ MOZ_ASSERT(!std::isnan(aValue), "NaN does not have a sign");
+ return std::signbit(aValue);
+}
+
+/** Determines whether a float/double represents -0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsNegativeZero(T aValue) {
+ /* Only the sign bit is set if the value is -0. */
+ typedef FloatingPoint<T> Traits;
+ typedef typename Traits::Bits Bits;
+ Bits bits = BitwiseCast<Bits>(aValue);
+ return bits == Traits::kSignBit;
+}
+
+/** Determines whether a float/double represents +0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsPositiveZero(T aValue) {
+ /* All bits are zero if the value is +0. */
+ typedef FloatingPoint<T> Traits;
+ typedef typename Traits::Bits Bits;
+ Bits bits = BitwiseCast<Bits>(aValue);
+ return bits == 0;
+}
+
+/**
+ * Returns 0 if a float/double is NaN or infinite;
+ * otherwise, the float/double is returned.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T ToZeroIfNonfinite(T aValue) {
+ return std::isfinite(aValue) ? aValue : 0;
+}
+
+/**
+ * Returns the exponent portion of the float/double.
+ *
+ * Zero is not special-cased, so ExponentComponent(0.0) is
+ * -int_fast16_t(Traits::kExponentBias).
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE int_fast16_t ExponentComponent(T aValue) {
+ /*
+ * The exponent component of a float/double is an unsigned number, biased
+ * from its actual value. Subtract the bias to retrieve the actual exponent.
+ */
+ typedef FloatingPoint<T> Traits;
+ typedef typename Traits::Bits Bits;
+ Bits bits = BitwiseCast<Bits>(aValue);
+ return int_fast16_t((bits & Traits::kExponentBits) >>
+ Traits::kExponentShift) -
+ int_fast16_t(Traits::kExponentBias);
+}
+
+/** Returns +Infinity. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T PositiveInfinity() {
+ /*
+ * Positive infinity has all exponent bits set, sign bit set to 0, and no
+ * significand.
+ */
+ typedef FloatingPoint<T> Traits;
+ return BitwiseCast<T>(Traits::kExponentBits);
+}
+
+/** Returns -Infinity. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T NegativeInfinity() {
+ /*
+ * Negative infinity has all exponent bits set, sign bit set to 1, and no
+ * significand.
+ */
+ typedef FloatingPoint<T> Traits;
+ return BitwiseCast<T>(Traits::kSignBit | Traits::kExponentBits);
+}
+
+/**
+ * Computes the bit pattern for an infinity with the specified sign bit.
+ */
+template <typename T, int SignBit>
+struct InfinityBits {
+ using Traits = FloatingPoint<T>;
+
+ static_assert(SignBit == 0 || SignBit == 1, "bad sign bit");
+ static constexpr typename Traits::Bits value =
+ (SignBit * Traits::kSignBit) | Traits::kExponentBits;
+};
+
+/**
+ * Computes the bit pattern for a NaN with the specified sign bit and
+ * significand bits.
+ */
+template <typename T, int SignBit, typename FloatingPoint<T>::Bits Significand>
+struct SpecificNaNBits {
+ using Traits = FloatingPoint<T>;
+
+ static_assert(SignBit == 0 || SignBit == 1, "bad sign bit");
+ static_assert((Significand & ~Traits::kSignificandBits) == 0,
+ "significand must only have significand bits set");
+ static_assert(Significand & Traits::kSignificandBits,
+ "significand must be nonzero");
+
+ static constexpr typename Traits::Bits value =
+ (SignBit * Traits::kSignBit) | Traits::kExponentBits | Significand;
+};
+
+/**
+ * Constructs a NaN value with the specified sign bit and significand bits.
+ *
+ * There is also a variant that returns the value directly. In most cases, the
+ * two variants should be identical. However, in the specific case of x86
+ * chips, the behavior differs: returning floating-point values directly is done
+ * through the x87 stack, and x87 loads and stores turn signaling NaNs into
+ * quiet NaNs... silently. Returning floating-point values via outparam,
+ * however, is done entirely within the SSE registers when SSE2 floating-point
+ * is enabled in the compiler, which has semantics-preserving behavior you would
+ * expect.
+ *
+ * If preserving the distinction between signaling NaNs and quiet NaNs is
+ * important to you, you should use the outparam version. In all other cases,
+ * you should use the direct return version.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE void SpecificNaN(
+ int signbit, typename FloatingPoint<T>::Bits significand, T* result) {
+ typedef FloatingPoint<T> Traits;
+ MOZ_ASSERT(signbit == 0 || signbit == 1);
+ MOZ_ASSERT((significand & ~Traits::kSignificandBits) == 0);
+ MOZ_ASSERT(significand & Traits::kSignificandBits);
+
+ BitwiseCast<T>(
+ (signbit ? Traits::kSignBit : 0) | Traits::kExponentBits | significand,
+ result);
+ MOZ_ASSERT(std::isnan(*result));
+}
+
+template <typename T>
+static MOZ_ALWAYS_INLINE T
+SpecificNaN(int signbit, typename FloatingPoint<T>::Bits significand) {
+ T t;
+ SpecificNaN(signbit, significand, &t);
+ return t;
+}
+
+/** Computes the smallest non-zero positive float/double value. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T MinNumberValue() {
+ typedef FloatingPoint<T> Traits;
+ typedef typename Traits::Bits Bits;
+ return BitwiseCast<T>(Bits(1));
+}
+
+namespace detail {
+
+template <typename Float, typename SignedInteger>
+inline bool NumberEqualsSignedInteger(Float aValue, SignedInteger* aInteger) {
+ static_assert(std::is_same_v<Float, float> || std::is_same_v<Float, double>,
+ "Float must be an IEEE-754 floating point type");
+ static_assert(std::is_signed_v<SignedInteger>,
+ "this algorithm only works for signed types: a different one "
+ "will be required for unsigned types");
+ static_assert(sizeof(SignedInteger) >= sizeof(int),
+ "this function *might* require some finessing for signed types "
+ "subject to integral promotion before it can be used on them");
+
+ MOZ_MAKE_MEM_UNDEFINED(aInteger, sizeof(*aInteger));
+
+ // NaNs and infinities are not integers.
+ if (!std::isfinite(aValue)) {
+ return false;
+ }
+
+ // Otherwise do direct comparisons against the minimum/maximum |SignedInteger|
+ // values that can be encoded in |Float|.
+
+ constexpr SignedInteger MaxIntValue =
+ std::numeric_limits<SignedInteger>::max(); // e.g. INT32_MAX
+ constexpr SignedInteger MinValue =
+ std::numeric_limits<SignedInteger>::min(); // e.g. INT32_MIN
+
+ static_assert(IsPowerOfTwo(Abs(MinValue)),
+ "MinValue should be is a small power of two, thus exactly "
+ "representable in float/double both");
+
+ constexpr unsigned SignedIntegerWidth = CHAR_BIT * sizeof(SignedInteger);
+ constexpr unsigned ExponentShift = FloatingPoint<Float>::kExponentShift;
+
+ // Careful! |MaxIntValue| may not be the maximum |SignedInteger| value that
+ // can be encoded in |Float|. Its |SignedIntegerWidth - 1| bits of precision
+ // may exceed |Float|'s |ExponentShift + 1| bits of precision. If necessary,
+ // compute the maximum |SignedInteger| that fits in |Float| from IEEE-754
+ // first principles. (|MinValue| doesn't have this problem because as a
+ // [relatively] small power of two it's always representable in |Float|.)
+
+ // Per C++11 [expr.const]p2, unevaluated subexpressions of logical AND/OR and
+ // conditional expressions *may* contain non-constant expressions, without
+ // making the enclosing expression not constexpr. MSVC implements this -- but
+ // it sometimes warns about undefined behavior in unevaluated subexpressions.
+ // This bites us if we initialize |MaxValue| the obvious way including an
+ // |uint64_t(1) << (SignedIntegerWidth - 2 - ExponentShift)| subexpression.
+ // Pull that shift-amount out and give it a not-too-huge value when it's in an
+ // unevaluated subexpression. 🙄
+ constexpr unsigned PrecisionExceededShiftAmount =
+ ExponentShift > SignedIntegerWidth - 1
+ ? 0
+ : SignedIntegerWidth - 2 - ExponentShift;
+
+ constexpr SignedInteger MaxValue =
+ ExponentShift > SignedIntegerWidth - 1
+ ? MaxIntValue
+ : SignedInteger((uint64_t(1) << (SignedIntegerWidth - 1)) -
+ (uint64_t(1) << PrecisionExceededShiftAmount));
+
+ if (static_cast<Float>(MinValue) <= aValue &&
+ aValue <= static_cast<Float>(MaxValue)) {
+ auto possible = static_cast<SignedInteger>(aValue);
+ if (static_cast<Float>(possible) == aValue) {
+ *aInteger = possible;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template <typename Float, typename SignedInteger>
+inline bool NumberIsSignedInteger(Float aValue, SignedInteger* aInteger) {
+ static_assert(std::is_same_v<Float, float> || std::is_same_v<Float, double>,
+ "Float must be an IEEE-754 floating point type");
+ static_assert(std::is_signed_v<SignedInteger>,
+ "this algorithm only works for signed types: a different one "
+ "will be required for unsigned types");
+ static_assert(sizeof(SignedInteger) >= sizeof(int),
+ "this function *might* require some finessing for signed types "
+ "subject to integral promotion before it can be used on them");
+
+ MOZ_MAKE_MEM_UNDEFINED(aInteger, sizeof(*aInteger));
+
+ if (IsNegativeZero(aValue)) {
+ return false;
+ }
+
+ return NumberEqualsSignedInteger(aValue, aInteger);
+}
+
+} // namespace detail
+
+/**
+ * If |aValue| is identical to some |int32_t| value, set |*aInt32| to that value
+ * and return true. Otherwise return false, leaving |*aInt32| in an
+ * indeterminate state.
+ *
+ * This method returns false for negative zero. If you want to consider -0 to
+ * be 0, use NumberEqualsInt32 below.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberIsInt32(T aValue, int32_t* aInt32) {
+ return detail::NumberIsSignedInteger(aValue, aInt32);
+}
+
+/**
+ * If |aValue| is identical to some |int64_t| value, set |*aInt64| to that value
+ * and return true. Otherwise return false, leaving |*aInt64| in an
+ * indeterminate state.
+ *
+ * This method returns false for negative zero. If you want to consider -0 to
+ * be 0, use NumberEqualsInt64 below.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberIsInt64(T aValue, int64_t* aInt64) {
+ return detail::NumberIsSignedInteger(aValue, aInt64);
+}
+
+/**
+ * If |aValue| is equal to some int32_t value (where -0 and +0 are considered
+ * equal), set |*aInt32| to that value and return true. Otherwise return false,
+ * leaving |*aInt32| in an indeterminate state.
+ *
+ * |NumberEqualsInt32(-0.0, ...)| will return true. To test whether a value can
+ * be losslessly converted to |int32_t| and back, use NumberIsInt32 above.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberEqualsInt32(T aValue, int32_t* aInt32) {
+ return detail::NumberEqualsSignedInteger(aValue, aInt32);
+}
+
+/**
+ * If |aValue| is equal to some int64_t value (where -0 and +0 are considered
+ * equal), set |*aInt64| to that value and return true. Otherwise return false,
+ * leaving |*aInt64| in an indeterminate state.
+ *
+ * |NumberEqualsInt64(-0.0, ...)| will return true. To test whether a value can
+ * be losslessly converted to |int64_t| and back, use NumberIsInt64 above.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberEqualsInt64(T aValue, int64_t* aInt64) {
+ return detail::NumberEqualsSignedInteger(aValue, aInt64);
+}
+
+/**
+ * Computes a NaN value. Do not use this method if you depend upon a particular
+ * NaN value being returned.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T UnspecifiedNaN() {
+ /*
+ * If we can use any quiet NaN, we might as well use the all-ones NaN,
+ * since it's cheap to materialize on common platforms (such as x64, where
+ * this value can be represented in a 32-bit signed immediate field, allowing
+ * it to be stored to memory in a single instruction).
+ */
+ typedef FloatingPoint<T> Traits;
+ return SpecificNaN<T>(1, Traits::kSignificandBits);
+}
+
+/**
+ * Compare two doubles for equality, *without* equating -0 to +0, and equating
+ * any NaN value to any other NaN value. (The normal equality operators equate
+ * -0 with +0, and they equate NaN to no other value.)
+ */
+template <typename T>
+static inline bool NumbersAreIdentical(T aValue1, T aValue2) {
+ using Bits = typename FloatingPoint<T>::Bits;
+ if (std::isnan(aValue1)) {
+ return std::isnan(aValue2);
+ }
+ return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
+}
+
+/**
+ * Compare two floating point values for bit-wise equality.
+ */
+template <typename T>
+static inline bool NumbersAreBitwiseIdentical(T aValue1, T aValue2) {
+ using Bits = typename FloatingPoint<T>::Bits;
+ return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
+}
+
+/**
+ * Return true iff |aValue| and |aValue2| are equal (ignoring sign if both are
+ * zero) or both NaN.
+ */
+template <typename T>
+static inline bool EqualOrBothNaN(T aValue1, T aValue2) {
+ if (std::isnan(aValue1)) {
+ return std::isnan(aValue2);
+ }
+ return aValue1 == aValue2;
+}
+
+/**
+ * Return NaN if either |aValue1| or |aValue2| is NaN, or the minimum of
+ * |aValue1| and |aValue2| otherwise.
+ */
+template <typename T>
+static inline T NaNSafeMin(T aValue1, T aValue2) {
+ if (std::isnan(aValue1) || std::isnan(aValue2)) {
+ return UnspecifiedNaN<T>();
+ }
+ return std::min(aValue1, aValue2);
+}
+
+/**
+ * Return NaN if either |aValue1| or |aValue2| is NaN, or the maximum of
+ * |aValue1| and |aValue2| otherwise.
+ */
+template <typename T>
+static inline T NaNSafeMax(T aValue1, T aValue2) {
+ if (std::isnan(aValue1) || std::isnan(aValue2)) {
+ return UnspecifiedNaN<T>();
+ }
+ return std::max(aValue1, aValue2);
+}
+
+namespace detail {
+
+template <typename T>
+struct FuzzyEqualsEpsilon;
+
+template <>
+struct FuzzyEqualsEpsilon<float> {
+ // A number near 1e-5 that is exactly representable in a float.
+ static float value() { return 1.0f / (1 << 17); }
+};
+
+template <>
+struct FuzzyEqualsEpsilon<double> {
+ // A number near 1e-12 that is exactly representable in a double.
+ static double value() { return 1.0 / (1LL << 40); }
+};
+
+} // namespace detail
+
+/**
+ * Compare two floating point values for equality, modulo rounding error. That
+ * is, the two values are considered equal if they are both not NaN and if they
+ * are less than or equal to aEpsilon apart. The default value of aEpsilon is
+ * near 1e-5.
+ *
+ * For most scenarios you will want to use FuzzyEqualsMultiplicative instead,
+ * as it is more reasonable over the entire range of floating point numbers.
+ * This additive version should only be used if you know the range of the
+ * numbers you are dealing with is bounded and stays around the same order of
+ * magnitude.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool FuzzyEqualsAdditive(
+ T aValue1, T aValue2, T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value()) {
+ static_assert(std::is_floating_point_v<T>, "floating point type required");
+ return Abs(aValue1 - aValue2) <= aEpsilon;
+}
+
+/**
+ * Compare two floating point values for equality, allowing for rounding error
+ * relative to the magnitude of the values. That is, the two values are
+ * considered equal if they are both not NaN and they are less than or equal to
+ * some aEpsilon apart, where the aEpsilon is scaled by the smaller of the two
+ * argument values.
+ *
+ * In most cases you will want to use this rather than FuzzyEqualsAdditive, as
+ * this function effectively masks out differences in the bottom few bits of
+ * the floating point numbers being compared, regardless of what order of
+ * magnitude those numbers are at.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool FuzzyEqualsMultiplicative(
+ T aValue1, T aValue2, T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value()) {
+ static_assert(std::is_floating_point_v<T>, "floating point type required");
+ // can't use std::min because of bug 965340
+ T smaller = Abs(aValue1) < Abs(aValue2) ? Abs(aValue1) : Abs(aValue2);
+ return Abs(aValue1 - aValue2) <= aEpsilon * smaller;
+}
+
+/**
+ * Returns true if |aValue| can be losslessly represented as an IEEE-754 single
+ * precision number, false otherwise. All NaN values are considered
+ * representable (even though the bit patterns of double precision NaNs can't
+ * all be exactly represented in single precision).
+ */
+[[nodiscard]] extern MFBT_API bool IsFloat32Representable(double aValue);
+
+} /* namespace mozilla */
+
+#endif /* mozilla_FloatingPoint_h */
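
A minimal sketch of the integer-testing and NaN helpers above (assertion
values worked out by hand):

  int32_t i;
  MOZ_ASSERT(mozilla::NumberIsInt32(42.0, &i) && i == 42);
  MOZ_ASSERT(!mozilla::NumberIsInt32(-0.0, &i));               // -0 rejected
  MOZ_ASSERT(mozilla::NumberEqualsInt32(-0.0, &i) && i == 0);  // -0 accepted

  double qnan = mozilla::UnspecifiedNaN<double>();
  MOZ_ASSERT(mozilla::NumbersAreIdentical(qnan, qnan));  // any NaN matches
  MOZ_ASSERT(!mozilla::NumbersAreIdentical(0.0, -0.0));  // signs distinguished
  MOZ_ASSERT(0.0 == -0.0);                               // unlike operator==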
diff --git a/mfbt/FunctionRef.h b/mfbt/FunctionRef.h
new file mode 100644
index 0000000000..374173d884
--- /dev/null
+++ b/mfbt/FunctionRef.h
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A generic callable type that can be initialized from any compatible callable,
+ * suitable for use as a function argument for the duration of the function
+ * call (and no longer).
+ */
+
+#ifndef mozilla_FunctionRef_h
+#define mozilla_FunctionRef_h
+
+#include "mozilla/OperatorNewExtensions.h" // mozilla::NotNull, ::operator new
+
+#include <cstddef> // std::nullptr_t
+#include <type_traits> // std::{declval,integral_constant}, std::is_{convertible,same,void}_v, std::{enable_if,remove_reference,remove_cv}_t
+#include <utility> // std::forward
+
+// This concept and its implementation are substantially inspired by foonathan's
+// prior art:
+//
+// https://foonathan.net/2017/01/function-ref-implementation/
+// https://github.com/foonathan/type_safe/blob/2017851053f8dd268372f1612865792c5c621570/include/type_safe/reference.hpp
+
+namespace mozilla {
+
+namespace detail {
+
+// Template helper to determine if |Returned| is a return type compatible with
+// |Required|: if the former converts to the latter, or if |Required| is |void|
+// and nothing is returned.
+template <typename Returned, typename Required>
+using CompatibleReturnType =
+ std::integral_constant<bool, std::is_void_v<Required> ||
+ std::is_convertible_v<Returned, Required>>;
+
+// Template helper to check if |Func| called with |Params| arguments returns
+// a type compatible with |Ret|.
+template <typename Func, typename Ret, typename... Params>
+using EnableMatchingFunction = std::enable_if_t<
+ CompatibleReturnType<
+ decltype(std::declval<Func&>()(std::declval<Params>()...)), Ret>::value,
+ int>;
+
+struct MatchingFunctionPointerTag {};
+struct MatchingFunctorTag {};
+struct InvalidFunctorTag {};
+
+// Template helper to determine the proper way to store |Callable|: as function
+// pointer, as pointer to object, or unstorable.
+template <typename Callable, typename Ret, typename... Params>
+struct GetCallableTag {
+ // Match the case where |Callable| is a compatible function pointer or
+ // converts to one. (|+obj| invokes such a conversion.)
+ template <typename T>
+ static MatchingFunctionPointerTag test(
+ int, T& obj, EnableMatchingFunction<decltype(+obj), Ret, Params...> = 0);
+
+ // Match the case where |Callable| is callable but can't be converted to a
+ // function pointer. (|short| is a worse match for 0 than |int|, causing the
+ // function pointer match to be preferred if both apply.)
+ template <typename T>
+ static MatchingFunctorTag test(short, T& obj,
+ EnableMatchingFunction<T, Ret, Params...> = 0);
+
+ // Match all remaining cases. (Any other match is preferred to an ellipsis
+ // match.)
+ static InvalidFunctorTag test(...);
+
+ using Type = decltype(test(0, std::declval<Callable&>()));
+};
+
+// If the callable is |nullptr|, |std::declval<std::nullptr_t&>()| will be an
+// error. Provide a specialization for |nullptr| that will fail substitution.
+template <typename Ret, typename... Params>
+struct GetCallableTag<std::nullptr_t, Ret, Params...> {};
+
+template <typename Result, typename Callable, typename Ret, typename... Params>
+using EnableFunctionTag = std::enable_if_t<
+ std::is_same_v<typename GetCallableTag<Callable, Ret, Params...>::Type,
+ Result>,
+ int>;
+
+} // namespace detail
+
+/**
+ * An efficient, type-erasing, non-owning reference to a callable. It is
+ * intended for use as the type of a function parameter that is not used after
+ * the function in question returns.
+ *
+ * This class does not own the callable, so in general it is unsafe to store a
+ * FunctionRef.
+ */
+template <typename Fn>
+class MOZ_TEMPORARY_CLASS FunctionRef;
+
+template <typename Ret, typename... Params>
+class MOZ_TEMPORARY_CLASS FunctionRef<Ret(Params...)> {
+ union Payload;
+
+ // |FunctionRef| stores an adaptor function pointer, determined by the
+ // arguments passed to the constructor. That adaptor will perform the steps
+ // needed to invoke the callable passed at construction time.
+ using Adaptor = Ret (*)(const Payload& aPayload, Params... aParams);
+
+ // If |FunctionRef|'s callable can be stored as a function pointer, that
+ // function pointer is stored after being cast to this *different* function
+ // pointer type. |mAdaptor| then casts back to the original type to call it.
+ // ([expr.reinterpret.cast]p6 guarantees that A->B->A function pointer casts
+ // produce the original function pointer value.) An outlandish signature is
+ // used to emphasize that the exact function pointer type doesn't matter.
+ using FuncPtr = Payload***** (*)(Payload*****);
+
+ /**
+ * An adaptor function (used by this class's function call operator) that
+ * invokes the callable in |mPayload|, forwarding arguments and converting
+ * return type as needed.
+ */
+ const Adaptor mAdaptor;
+
+ /** Storage for the wrapped callable value. */
+ union Payload {
+ // This arm is used if |FunctionRef| is passed a compatible function pointer
+ // or a lambda/callable that converts to a compatible function pointer.
+ FuncPtr mFuncPtr;
+
+ // This arm is used if |FunctionRef| is passed some other callable or
+ // |nullptr|.
+ void* mObject;
+ } mPayload;
+
+ template <typename RealFuncPtr>
+ static Ret CallFunctionPointer(const Payload& aPayload,
+ Params... aParams) noexcept {
+ auto func = reinterpret_cast<RealFuncPtr>(aPayload.mFuncPtr);
+ return static_cast<Ret>(func(std::forward<Params>(aParams)...));
+ }
+
+ template <typename Ret2, typename... Params2>
+ FunctionRef(detail::MatchingFunctionPointerTag, Ret2 (*aFuncPtr)(Params2...))
+ : mAdaptor(&CallFunctionPointer<Ret2 (*)(Params2...)>) {
+ ::new (KnownNotNull, &mPayload.mFuncPtr)
+ FuncPtr(reinterpret_cast<FuncPtr>(aFuncPtr));
+ }
+
+ public:
+ /**
+ * Construct a |FunctionRef| that's like a null function pointer that can't be
+ * called.
+ */
+ MOZ_IMPLICIT FunctionRef(std::nullptr_t) noexcept : mAdaptor(nullptr) {
+ // This is technically unnecessary, but it seems best to always initialize
+ // a union arm.
+ ::new (KnownNotNull, &mPayload.mObject) void*(nullptr);
+ }
+
+ FunctionRef() : FunctionRef(nullptr) {}
+
+ /**
+ * Constructs a |FunctionRef| from an object callable with |Params| arguments,
+ * that returns a type convertible to |Ret|, where the callable isn't
+ * convertible to function pointer (often because it contains some internal
+ * state). For example:
+ *
+ * int x = 5;
+ * DoSomething([&x] { x++; });
+ */
+ template <typename Callable,
+ typename = detail::EnableFunctionTag<detail::MatchingFunctorTag,
+ Callable, Ret, Params...>,
+ typename std::enable_if_t<!std::is_same_v<
+ std::remove_cv_t<std::remove_reference_t<Callable>>,
+ FunctionRef>>* = nullptr>
+ MOZ_IMPLICIT FunctionRef(Callable&& aCallable MOZ_LIFETIME_BOUND) noexcept
+ : mAdaptor([](const Payload& aPayload, Params... aParams) {
+          auto& func = *static_cast<std::remove_reference_t<Callable>*>(
+              aPayload.mObject);
+ return static_cast<Ret>(func(std::forward<Params>(aParams)...));
+ }) {
+ ::new (KnownNotNull, &mPayload.mObject) void*(&aCallable);
+ }
+
+ /**
+   * Constructs a |FunctionRef| from a value callable with |Params| arguments,
+ * that returns a type convertible to |Ret|, where the callable is stateless
+ * and is (or is convertible to) a function pointer. For example:
+ *
+ * // Exact match
+ * double twice(double d) { return d * 2; }
+ * FunctionRef<double(double)> func1(&twice);
+ *
+ * // Compatible match
+ * float thrice(long double d) { return static_cast<float>(d) * 3; }
+ * FunctionRef<double(double)> func2(&thrice);
+ *
+ * // Non-generic lambdas that don't capture anything have a conversion
+ * // function to the appropriate function pointer type.
+ * FunctionRef<int(double)> f([](long double){ return 'c'; });
+ */
+ template <typename Callable,
+ typename = detail::EnableFunctionTag<
+ detail::MatchingFunctionPointerTag, Callable, Ret, Params...>>
+ MOZ_IMPLICIT FunctionRef(const Callable& aCallable) noexcept
+ : FunctionRef(detail::MatchingFunctionPointerTag{}, +aCallable) {}
+
+ /** Call the callable stored in this with the given arguments. */
+ Ret operator()(Params... params) const {
+ return mAdaptor(mPayload, std::forward<Params>(params)...);
+ }
+
+ /** Return true iff this wasn't created from |nullptr|. */
+ explicit operator bool() const noexcept { return mAdaptor != nullptr; }
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_FunctionRef_h */
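
A minimal usage sketch (CountMatching is hypothetical):

  // The callee may use the FunctionRef only for the duration of the call.
  int CountMatching(const int* aBegin, const int* aEnd,
                    mozilla::FunctionRef<bool(int)> aPred) {
    int n = 0;
    for (const int* p = aBegin; p != aEnd; ++p) {
      n += aPred(*p) ? 1 : 0;
    }
    return n;
  }

  int limit = 10;
  int values[] = {3, 12, 7, 40};
  // A capturing lambda takes the object arm of the payload; a capture-free
  // lambda or plain function would take the function-pointer arm instead.
  int small = CountMatching(values, values + 4,
                            [&](int v) { return v < limit; });  // small == 2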
diff --git a/mfbt/FunctionTypeTraits.h b/mfbt/FunctionTypeTraits.h
new file mode 100644
index 0000000000..83b3bc971a
--- /dev/null
+++ b/mfbt/FunctionTypeTraits.h
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_FunctionTypeTraits_h
+#define mozilla_FunctionTypeTraits_h
+
+#include <cstddef> /* for size_t */
+#include <tuple>
+
+namespace mozilla {
+
+// Main FunctionTypeTraits declaration, taking one template argument.
+//
+// Given a function type, FunctionTypeTraits will expose the following members:
+// - ReturnType: Return type.
+// - arity: Number of parameters (size_t).
+// - ParameterType<N>: Type of the Nth** parameter, 0-indexed.
+//
+// ** `ParameterType<N>` with `N` >= `arity` is allowed and gives `void`.
+// This prevents compilation errors when trying to access a type outside of the
+// function's parameters, which is useful for parameters checks, e.g.:
+// template<typename F>
+// auto foo(F&&)
+// -> enable_if(FunctionTypeTraits<F>::arity == 1 &&
+// is_same<FunctionTypeTraits<F>::template ParameterType<0>,
+// int>::value,
+// void)
+// {
+// // This function will only be enabled if `F` takes one `int`.
+// // Without the permissive ParameterType<any N>, it wouldn't even compile.
+// }
+//
+// Note: FunctionTypeTraits does not work with generic lambdas `[](auto&) {}`,
+// because parameter types cannot be known until an actual invocation when types
+// are inferred from the given arguments.
+template <typename T>
+struct FunctionTypeTraits;
+
+// Remove reference and pointer wrappers, if any.
+template <typename T>
+struct FunctionTypeTraits<T&> : public FunctionTypeTraits<T> {};
+template <typename T>
+struct FunctionTypeTraits<T&&> : public FunctionTypeTraits<T> {};
+template <typename T>
+struct FunctionTypeTraits<T*> : public FunctionTypeTraits<T> {};
+
+// Extract `operator()` function from callables (e.g. lambdas, std::function).
+template <typename T>
+struct FunctionTypeTraits
+ : public FunctionTypeTraits<decltype(&T::operator())> {};
+
+namespace detail {
+
+// If `safe`, retrieve the `N`th type from `As`, otherwise `void`.
+// See top description for reason.
+template <bool safe, size_t N, typename... As>
+struct TupleElementSafe;
+template <size_t N, typename... As>
+struct TupleElementSafe<true, N, As...> {
+ using Type = typename std::tuple_element<N, std::tuple<As...>>::type;
+};
+template <size_t N, typename... As>
+struct TupleElementSafe<false, N, As...> {
+ using Type = void;
+};
+
+template <typename R, typename... As>
+struct FunctionTypeTraitsHelper {
+ using ReturnType = R;
+ static constexpr size_t arity = sizeof...(As);
+ template <size_t N>
+ using ParameterType =
+ typename TupleElementSafe<(N < sizeof...(As)), N, As...>::Type;
+};
+
+} // namespace detail
+
+// Specialization for free functions.
+template <typename R, typename... As>
+struct FunctionTypeTraits<R(As...)>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for non-const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (C::*)(As...)>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (C::*)(As...) const>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+#ifdef NS_HAVE_STDCALL
+// Specialization for __stdcall free functions.
+template <typename R, typename... As>
+struct FunctionTypeTraits<R NS_STDCALL(As...)>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for __stdcall non-const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (NS_STDCALL C::*)(As...)>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for __stdcall const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (NS_STDCALL C::*)(As...) const>
+ : detail::FunctionTypeTraitsHelper<R, As...> {};
+#endif // NS_HAVE_STDCALL
+
+} // namespace mozilla
+
+#endif // mozilla_FunctionTypeTraits_h
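
A minimal compile-time sketch of what the traits expose (Mix is hypothetical;
assumes <type_traits> for std::is_same_v):

  double Mix(int, float);

  using Traits = mozilla::FunctionTypeTraits<decltype(&Mix)>;
  static_assert(std::is_same_v<Traits::ReturnType, double>);
  static_assert(Traits::arity == 2);
  static_assert(std::is_same_v<Traits::ParameterType<1>, float>);
  // Out-of-range indices yield void instead of a compile error.
  static_assert(std::is_same_v<Traits::ParameterType<5>, void>);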
diff --git a/mfbt/Fuzzing.h b/mfbt/Fuzzing.h
new file mode 100644
index 0000000000..7435436615
--- /dev/null
+++ b/mfbt/Fuzzing.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Additional definitions and implementation for fuzzing code */
+
+#ifndef mozilla_Fuzzing_h
+#define mozilla_Fuzzing_h
+
+#ifdef FUZZING_SNAPSHOT
+# include "mozilla/fuzzing/NyxWrapper.h"
+
+# ifdef __cplusplus
+# include "mozilla/fuzzing/Nyx.h"
+# include "mozilla/ScopeExit.h"
+
+# define MOZ_FUZZING_NYX_RELEASE(id) \
+ if (mozilla::fuzzing::Nyx::instance().is_enabled(id)) { \
+ mozilla::fuzzing::Nyx::instance().release(); \
+ }
+
+# define MOZ_FUZZING_NYX_GUARD(id) \
+ auto nyxGuard = mozilla::MakeScopeExit([&] { \
+ if (mozilla::fuzzing::Nyx::instance().is_enabled(id)) { \
+ mozilla::fuzzing::Nyx::instance().release(); \
+ } \
+ });
+# endif
+
+# define MOZ_FUZZING_HANDLE_CRASH_EVENT2(aType, aReason) \
+ do { \
+ if (nyx_handle_event) { \
+ nyx_handle_event(aType, __FILE__, __LINE__, aReason); \
+ } \
+ } while (false)
+
+# define MOZ_FUZZING_HANDLE_CRASH_EVENT4(aType, aFilename, aLine, aReason) \
+ do { \
+ if (nyx_handle_event) { \
+ nyx_handle_event(aType, aFilename, aLine, aReason); \
+ } \
+ } while (false)
+
+# define MOZ_FUZZING_NYX_PRINT(aMsg) \
+ do { \
+ if (nyx_puts) { \
+ nyx_puts(aMsg); \
+ } else { \
+ fprintf(stderr, aMsg); \
+ } \
+ } while (false)
+
+# define MOZ_FUZZING_NYX_PRINTF(aFormat, ...) \
+ do { \
+ if (nyx_puts) { \
+ char msgbuf[2048]; \
+ snprintf(msgbuf, sizeof(msgbuf) - 1, "" aFormat, __VA_ARGS__); \
+ nyx_puts(msgbuf); \
+ } else { \
+ fprintf(stderr, aFormat, __VA_ARGS__); \
+ } \
+ } while (false)
+
+#  ifdef FUZZ_DEBUG
+#    define MOZ_FUZZING_NYX_DEBUG(aMsg) MOZ_FUZZING_NYX_PRINT(aMsg)
+#  else
+#    define MOZ_FUZZING_NYX_DEBUG(aMsg)
+#  endif
+# define MOZ_FUZZING_NYX_ABORT(aMsg) \
+ do { \
+ MOZ_FUZZING_NYX_PRINT(aMsg); \
+ MOZ_REALLY_CRASH(__LINE__); \
+    } while (false)
+#else
+# define MOZ_FUZZING_NYX_RELEASE(id)
+# define MOZ_FUZZING_NYX_GUARD(id)
+# define MOZ_FUZZING_NYX_PRINT(aMsg)
+# define MOZ_FUZZING_NYX_PRINTF(aFormat, ...)
+# define MOZ_FUZZING_NYX_DEBUG(aMsg)
+# define MOZ_FUZZING_NYX_ABORT(aMsg)
+# define MOZ_FUZZING_HANDLE_CRASH_EVENT2(aType, aReason) \
+ do { \
+ } while (false)
+# define MOZ_FUZZING_HANDLE_CRASH_EVENT4(aType, aFilename, aLine, aReason) \
+ do { \
+ } while (false)
+#endif
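+
+// Usage sketch (the id string and the function are illustrative placeholders;
+// they are not defined by this header):
+//
+//   void ProcessUntrustedData(const uint8_t* aData, size_t aLen) {
+//     MOZ_FUZZING_NYX_GUARD("example");  // release the snapshot on return
+//     MOZ_FUZZING_NYX_PRINTF("processing %zu bytes\n", aLen);
+//     // ... parse aData ...
+//   }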
+
+#endif /* mozilla_Fuzzing_h */
diff --git a/mfbt/HashFunctions.cpp b/mfbt/HashFunctions.cpp
new file mode 100644
index 0000000000..4cb04e58a3
--- /dev/null
+++ b/mfbt/HashFunctions.cpp
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of hash functions. */
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Types.h"
+
+#include <string.h>
+
+namespace mozilla {
+
+uint32_t HashBytes(const void* aBytes, size_t aLength) {
+ uint32_t hash = 0;
+ const char* b = reinterpret_cast<const char*>(aBytes);
+
+ /* Walk word by word. */
+ size_t i = 0;
+ for (; i < aLength - (aLength % sizeof(size_t)); i += sizeof(size_t)) {
+ /* Do an explicitly unaligned load of the data. */
+ size_t data;
+ memcpy(&data, b + i, sizeof(size_t));
+
+ hash = AddToHash(hash, data);
+ }
+
+ /* Get the remaining bytes. */
+ for (; i < aLength; i++) {
+ hash = AddToHash(hash, b[i]);
+ }
+ return hash;
+}
+
+} /* namespace mozilla */
diff --git a/mfbt/HashFunctions.h b/mfbt/HashFunctions.h
new file mode 100644
index 0000000000..b9c2d5e98d
--- /dev/null
+++ b/mfbt/HashFunctions.h
@@ -0,0 +1,420 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Utilities for hashing. */
+
+/*
+ * This file exports functions for hashing data down to a uint32_t (a.k.a.
+ * mozilla::HashNumber), including:
+ *
+ * - HashString Hash a char* or char16_t/wchar_t* of known or unknown
+ * length.
+ *
+ * - HashBytes Hash a byte array of known length.
+ *
+ *  - HashGeneric    Hash one or more values.  Currently, we support integral
+ *                   types (including 64-bit ones), enums, types which can be
+ *                   implicitly cast to uint32_t, data pointers, and function
+ *                   pointers.
+ *
+ * - AddToHash Add one or more values to the given hash. This supports the
+ * same list of types as HashGeneric.
+ *
+ *
+ * You can chain these functions together to hash complex objects. For example:
+ *
+ * class ComplexObject
+ * {
+ * char* mStr;
+ * uint32_t mUint1, mUint2;
+ * void (*mCallbackFn)();
+ *
+ * public:
+ * HashNumber hash()
+ * {
+ * HashNumber hash = HashString(mStr);
+ * hash = AddToHash(hash, mUint1, mUint2);
+ * return AddToHash(hash, mCallbackFn);
+ * }
+ * };
+ *
+ * If you want to hash an nsAString or nsACString, use the HashString functions
+ * in nsHashKeys.h.
+ */
+
+#ifndef mozilla_HashFunctions_h
+#define mozilla_HashFunctions_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Char16.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Types.h"
+#include "mozilla/WrappingOperations.h"
+
+#include <stdint.h>
+#include <type_traits>
+
+namespace mozilla {
+
+using HashNumber = uint32_t;
+static const uint32_t kHashNumberBits = 32;
+
+/**
+ * The golden ratio as a 32-bit fixed-point value.
+ */
+static const HashNumber kGoldenRatioU32 = 0x9E3779B9U;
+
+/*
+ * Given a raw hash code, h, return a number that can be used to select a hash
+ * bucket.
+ *
+ * This function aims to produce as uniform an output distribution as possible,
+ * especially in the most significant (leftmost) bits, even though the input
+ * distribution may be highly nonrandom, given the constraints that this must
+ * be deterministic and quick to compute.
+ *
+ * Since the leftmost bits of the result are best, the hash bucket index is
+ * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
+ * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
+ */
+constexpr HashNumber ScrambleHashCode(HashNumber h) {
+ /*
+ * Simply returning h would not cause any hash tables to produce wrong
+ * answers. But it can produce pathologically bad performance: The caller
+ * right-shifts the result, keeping only the highest bits. The high bits of
+ * hash codes are very often completely entropy-free. (So are the lowest
+ * bits.)
+ *
+ * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
+ * Programming, 6.4. This mixes all the bits of the input hash code h.
+ *
+ * The value of goldenRatio is taken from the hex expansion of the golden
+ * ratio, which starts 1.9E3779B9.... This value is especially good if
+ * values with consecutive hash codes are stored in a hash table; see Knuth
+ * for details.
+ */
+ return mozilla::WrappingMultiply(h, kGoldenRatioU32);
+}
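+
+// For example (a sketch; |kLog2Capacity| stands for the table's log2 bucket
+// count):
+//
+//   HashNumber index =
+//       ScrambleHashCode(h) >> (kHashNumberBits - kLog2Capacity);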
+
+namespace detail {
+
+MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+constexpr HashNumber RotateLeft5(HashNumber aValue) {
+ return (aValue << 5) | (aValue >> 27);
+}
+
+constexpr HashNumber AddU32ToHash(HashNumber aHash, uint32_t aValue) {
+ /*
+ * This is the meat of all our hash routines. This hash function is not
+ * particularly sophisticated, but it seems to work well for our mostly
+ * plain-text inputs. Implementation notes follow.
+ *
+ * Our use of the golden ratio here is arbitrary; we could pick almost any
+ * number which:
+ *
+ * * is odd (because otherwise, all our hash values will be even)
+ *
+ * * has a reasonably-even mix of 1's and 0's (consider the extreme case
+ * where we multiply by 0x3 or 0xeffffff -- this will not produce good
+ * mixing across all bits of the hash).
+ *
+ * The rotation length of 5 is also arbitrary, although an odd number is again
+ * preferable so our hash explores the whole universe of possible rotations.
+ *
+ * Finally, we multiply by the golden ratio *after* xor'ing, not before.
+ * Otherwise, if |aHash| is 0 (as it often is for the beginning of a
+ * message), the expression
+ *
+ * mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash))
+ * |xor|
+ * aValue
+ *
+ * evaluates to |aValue|.
+ *
+ * (Number-theoretic aside: Because any odd number |m| is relatively prime to
+ * our modulus (2**32), the list
+ *
+ * [x * m (mod 2**32) for 0 <= x < 2**32]
+ *
+ * has no duplicate elements. This means that multiplying by |m| does not
+ * cause us to skip any possible hash values.
+ *
+ * It's also nice if |m| has large-ish order mod 2**32 -- that is, if the
+ * smallest k such that m**k == 1 (mod 2**32) is large -- so we can safely
+ * multiply our hash value by |m| a few times without negating the
+ * multiplicative effect. Our golden ratio constant has order 2**29, which is
+ * more than enough for our purposes.)
+ */
+ return mozilla::WrappingMultiply(kGoldenRatioU32,
+ RotateLeft5(aHash) ^ aValue);
+}
+
+/**
+ * AddUintNToHash takes sizeof(int_type) as a template parameter.
+ * Changes to these functions need to be propagated to
+ * MacroAssembler::prepareHashNonGCThing, which inlines them manually for
+ * the JIT.
+ */
+template <size_t Size>
+constexpr HashNumber AddUintNToHash(HashNumber aHash, uint64_t aValue) {
+ return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
+}
+
+template <>
+inline HashNumber AddUintNToHash<8>(HashNumber aHash, uint64_t aValue) {
+ uint32_t v1 = static_cast<uint32_t>(aValue);
+ uint32_t v2 = static_cast<uint32_t>(aValue >> 32);
+ return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
+}
+
+} /* namespace detail */
+
+/**
+ * AddToHash takes a hash and some values and returns a new hash based on the
+ * inputs.
+ *
+ * Currently, we support hashing all integral types (including 64-bit ones),
+ * enums, values which we can implicitly convert to uint32_t, data pointers,
+ * and function pointers.
+ */
+template <typename T, bool TypeIsNotIntegral = !std::is_integral_v<T>,
+ bool TypeIsNotEnum = !std::is_enum_v<T>,
+ std::enable_if_t<TypeIsNotIntegral && TypeIsNotEnum, int> = 0>
+[[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, T aA) {
+ /*
+   * Try to convert |aA| to uint32_t implicitly. If this works, great. If not,
+ * we'll error out.
+ */
+ return detail::AddU32ToHash(aHash, aA);
+}
+
+template <typename A>
+[[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, A* aA) {
+ /*
+ * You might think this function should just take a void*. But then we'd only
+ * catch data pointers and couldn't handle function pointers.
+ */
+
+ static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
+
+ return detail::AddUintNToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
+}
+
+// We use AddUintNToHash() for hashing all integral types. 8-byte integral
+// types are treated the same as 64-bit pointers, and smaller integral types are
+// first implicitly converted to 32 bits and then passed to AddUintNToHash()
+// to be hashed.
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
+[[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
+ return detail::AddUintNToHash<sizeof(T)>(aHash, aA);
+}
+
+template <typename T, std::enable_if_t<std::is_enum_v<T>, int> = 0>
+[[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
+ // Hash using AddUintNToHash with the underlying type of the enum type
+ using UnderlyingType = typename std::underlying_type<T>::type;
+ return detail::AddUintNToHash<sizeof(UnderlyingType)>(
+ aHash, static_cast<UnderlyingType>(aA));
+}
+
+template <typename A, typename... Args>
+[[nodiscard]] HashNumber AddToHash(HashNumber aHash, A aArg, Args... aArgs) {
+ return AddToHash(AddToHash(aHash, aArg), aArgs...);
+}
+
+/**
+ * The HashGeneric class of functions let you hash one or more values.
+ *
+ * If you want to hash together two values x and y, calling HashGeneric(x, y) is
+ * much better than calling AddToHash(x, y), because AddToHash(x, y) assumes
+ * that x has already been hashed.
+ */
+template <typename... Args>
+[[nodiscard]] inline HashNumber HashGeneric(Args... aArgs) {
+ return AddToHash(0, aArgs...);
+}
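+
+// For example (a sketch; |mPtr| and |mFlags| are illustrative members):
+//
+//   HashNumber hn = HashGeneric(mPtr, mFlags);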
+
+/**
+ * Hash successive |*aIter| until |!*aIter|, i.e. until null-termination.
+ *
+ * This function is *not* named HashString like the non-template overloads
+ * below. Some users define HashString overloads and pass inexactly-matching
+ * values to them -- but an inexactly-matching value would match this overload
+ * instead! We follow the general rule and don't mix and match template and
+ * regular overloads to avoid this.
+ *
+ * If you have the string's length, call HashStringKnownLength: it may be
+ * marginally faster.
+ */
+template <typename Iterator>
+[[nodiscard]] constexpr HashNumber HashStringUntilZero(Iterator aIter) {
+ HashNumber hash = 0;
+ for (; auto c = *aIter; ++aIter) {
+ hash = AddToHash(hash, c);
+ }
+ return hash;
+}
+
+/**
+ * Hash successive |aIter[i]| up to |i == aLength|.
+ */
+template <typename Iterator>
+[[nodiscard]] constexpr HashNumber HashStringKnownLength(Iterator aIter,
+ size_t aLength) {
+ HashNumber hash = 0;
+ for (size_t i = 0; i < aLength; i++) {
+ hash = AddToHash(hash, aIter[i]);
+ }
+ return hash;
+}
+
+/**
+ * The HashString overloads below do just what you'd expect.
+ *
+ * These functions are non-template functions so that users can (1) overload
+ * them with their own types and (2) rely on implicit conversions when calling
+ * them.
+ */
+[[nodiscard]] inline HashNumber HashString(const char* aStr) {
+ // Use the |const unsigned char*| version of the above so that all ordinary
+ // character data hashes identically.
+ return HashStringUntilZero(reinterpret_cast<const unsigned char*>(aStr));
+}
+
+[[nodiscard]] inline HashNumber HashString(const char* aStr, size_t aLength) {
+ // Delegate to the |const unsigned char*| version of the above to share
+ // template instantiations.
+ return HashStringKnownLength(reinterpret_cast<const unsigned char*>(aStr),
+ aLength);
+}
+
+[[nodiscard]] inline HashNumber HashString(const unsigned char* aStr,
+ size_t aLength) {
+ return HashStringKnownLength(aStr, aLength);
+}
+
+[[nodiscard]] constexpr HashNumber HashString(const char16_t* aStr) {
+ return HashStringUntilZero(aStr);
+}
+
+[[nodiscard]] inline HashNumber HashString(const char16_t* aStr,
+ size_t aLength) {
+ return HashStringKnownLength(aStr, aLength);
+}
+
+/**
+ * HashString overloads for |wchar_t| on platforms where it isn't |char16_t|.
+ */
+template <typename WCharT, typename = typename std::enable_if<
+ std::is_same<WCharT, wchar_t>::value &&
+ !std::is_same<wchar_t, char16_t>::value>::type>
+[[nodiscard]] inline HashNumber HashString(const WCharT* aStr) {
+ return HashStringUntilZero(aStr);
+}
+
+template <typename WCharT, typename = typename std::enable_if<
+ std::is_same<WCharT, wchar_t>::value &&
+ !std::is_same<wchar_t, char16_t>::value>::type>
+[[nodiscard]] inline HashNumber HashString(const WCharT* aStr, size_t aLength) {
+ return HashStringKnownLength(aStr, aLength);
+}
+
+/**
+ * Hash some number of bytes.
+ *
+ * This hash walks word-by-word, rather than byte-by-byte, so you won't get the
+ * same result out of HashBytes as you would out of HashString.
+ */
+[[nodiscard]] extern MFBT_API HashNumber HashBytes(const void* aBytes,
+                                                   size_t aLength);
+
+/**
+ * A pseudorandom function mapping 32-bit integers to 32-bit integers.
+ *
+ * This is for when you're feeding private data (like pointer values or credit
+ * card numbers) to a non-crypto hash function (like HashBytes) and then using
+ * the hash code for something that untrusted parties could observe (like a JS
+ * Map). Plug in a HashCodeScrambler before that last step to avoid leaking the
+ * private data.
+ *
+ * By itself, this does not prevent hash-flooding DoS attacks, because an
+ * attacker can still generate many values with exactly equal hash codes by
+ * attacking the non-crypto hash function alone. Equal hash codes will, of
+ * course, still be equal however much you scramble them.
+ *
+ * The algorithm is SipHash-1-3. See <https://131002.net/siphash/>.
+ */
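+//
+// A usage sketch (|aSeed0|, |aSeed1|, and |aUntrustedKey| are placeholders;
+// callers typically derive the seeds from a random source such as
+// mozilla::RandomUint64()):
+//
+//   HashCodeScrambler hcs(aSeed0, aSeed1);
+//   HashNumber unsafe = HashString(aUntrustedKey);
+//   HashNumber safe = hcs.scramble(unsafe);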
+class HashCodeScrambler {
+ struct SipHasher;
+
+ uint64_t mK0, mK1;
+
+ public:
+ /** Creates a new scrambler with the given 128-bit key. */
+ constexpr HashCodeScrambler(uint64_t aK0, uint64_t aK1)
+ : mK0(aK0), mK1(aK1) {}
+
+ /**
+ * Scramble a hash code. Always produces the same result for the same
+ * combination of key and hash code.
+ */
+ HashNumber scramble(HashNumber aHashCode) const {
+ SipHasher hasher(mK0, mK1);
+ return HashNumber(hasher.sipHash(aHashCode));
+ }
+
+ static constexpr size_t offsetOfMK0() {
+ return offsetof(HashCodeScrambler, mK0);
+ }
+
+ static constexpr size_t offsetOfMK1() {
+ return offsetof(HashCodeScrambler, mK1);
+ }
+
+ private:
+ struct SipHasher {
+ SipHasher(uint64_t aK0, uint64_t aK1) {
+ // 1. Initialization.
+ mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
+ mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
+ mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
+ mV3 = aK1 ^ UINT64_C(0x7465646279746573);
+ }
+
+ uint64_t sipHash(uint64_t aM) {
+ // 2. Compression.
+ mV3 ^= aM;
+ sipRound();
+ mV0 ^= aM;
+
+ // 3. Finalization.
+ mV2 ^= 0xff;
+ for (int i = 0; i < 3; i++) sipRound();
+ return mV0 ^ mV1 ^ mV2 ^ mV3;
+ }
+
+ void sipRound() {
+ mV0 = WrappingAdd(mV0, mV1);
+ mV1 = RotateLeft(mV1, 13);
+ mV1 ^= mV0;
+ mV0 = RotateLeft(mV0, 32);
+ mV2 = WrappingAdd(mV2, mV3);
+ mV3 = RotateLeft(mV3, 16);
+ mV3 ^= mV2;
+ mV0 = WrappingAdd(mV0, mV3);
+ mV3 = RotateLeft(mV3, 21);
+ mV3 ^= mV0;
+ mV2 = WrappingAdd(mV2, mV1);
+ mV1 = RotateLeft(mV1, 17);
+ mV1 ^= mV2;
+ mV2 = RotateLeft(mV2, 32);
+ }
+
+ uint64_t mV0, mV1, mV2, mV3;
+ };
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_HashFunctions_h */
diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h
new file mode 100644
index 0000000000..9f3f42b40e
--- /dev/null
+++ b/mfbt/HashTable.h
@@ -0,0 +1,2278 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//---------------------------------------------------------------------------
+// Overview
+//---------------------------------------------------------------------------
+//
+// This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
+// fast and have a nice API.
+//
+// Both hash tables have two optional template parameters.
+//
+// - HashPolicy. This defines the operations for hashing and matching keys. The
+// default HashPolicy is appropriate when both of the following two
+// conditions are true.
+//
+// - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
+// for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double.
+//
+// - The type used for lookups (|Lookup|) is the same as the key type. This
+// is usually the case, but not always.
+//
+// There is also a |CStringHasher| policy for |char*| keys. If your keys
+// don't match any of the above cases, you must provide your own hash policy;
+// see the "Hash Policy" section below.
+//
+// - AllocPolicy. This defines how allocations are done by the table.
+//
+// - |MallocAllocPolicy| is the default and is usually appropriate; note that
+// operations (such as insertions) that might cause allocations are
+// fallible and must be checked for OOM. These checks are enforced by the
+// use of [[nodiscard]].
+//
+// - |InfallibleAllocPolicy| is another possibility; it allows the
+// abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
+//
+// Note that entry storage allocation is lazy, and not done until the first
+// lookupForAdd(), put(), or putNew() is performed.
+//
+// See AllocPolicy.h for more details.
+//
+// Documentation on how to use HashMap and HashSet, including examples, is
+// present within those classes. Search for "class HashMap" and "class
+// HashSet".
+//
+// Both HashMap and HashSet are implemented on top of a third class, HashTable.
+// You only need to look at HashTable if you want to understand the
+// implementation.
+//
+// How does mozilla::HashTable (this file) compare with PLDHashTable (and its
+// subclasses, such as nsTHashtable)?
+//
+// - mozilla::HashTable is a lot faster, largely because it uses templates
+// throughout *and* inlines everything. PLDHashTable inlines operations much
+// less aggressively, and also uses "virtual ops" for operations like hashing
+// and matching entries that require function calls.
+//
+// - Correspondingly, mozilla::HashTable use is likely to increase executable
+// size much more than PLDHashTable.
+//
+// - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
+// distinction.
+//
+// - mozilla::HashTable requires more explicit OOM checking. As mentioned
+// above, the use of |InfallibleAllocPolicy| can simplify things.
+//
+// - mozilla::HashTable has a default capacity on creation of 32 and a minimum
+// capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
+// minimum capacity of 8.
+
+#ifndef mozilla_HashTable_h
+#define mozilla_HashTable_h
+
+#include <utility>
+#include <type_traits>
+
+#include "mozilla/AllocPolicy.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Opaque.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/WrappingOperations.h"
+
+namespace mozilla {
+
+template <class, class = void>
+struct DefaultHasher;
+
+template <class, class>
+class HashMapEntry;
+
+namespace detail {
+
+template <typename T>
+class HashTableEntry;
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+} // namespace detail
+
+// The "generation" of a hash table is an opaque value indicating the state of
+// modification of the hash table through its lifetime. If the generation of
+// a hash table compares equal at times T1 and T2, then lookups in the hash
+// table, pointers to (or into) hash table entries, etc. at time T1 are valid
+// at time T2. If the generation compares unequal, these computations are all
+// invalid and must be performed again to be used.
+//
+// Generations are meaningfully comparable only with respect to a single hash
+// table. It's always nonsensical to compare the generation of distinct hash
+// tables H1 and H2.
+using Generation = Opaque<uint64_t>;
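+
+// For example (a sketch; |map| is some HashMap<int, char>):
+//
+//   Generation g = map.generation();
+//   auto p = map.lookup(3);
+//   // ... possibly-mutating operations ...
+//   if (map.generation() != g) {
+//     p = map.lookup(3);  // the old Ptr may be invalid; look it up again
+//   }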
+
+//---------------------------------------------------------------------------
+// HashMap
+//---------------------------------------------------------------------------
+
+// HashMap is a fast hash-based map from keys to values.
+//
+// Template parameter requirements:
+// - Key/Value: movable, destructible, assignable.
+// - HashPolicy: see the "Hash Policy" section below.
+// - AllocPolicy: see AllocPolicy.h.
+//
+// Note:
+// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+// called by HashMap must not call back into the same HashMap object.
+//
+template <class Key, class Value, class HashPolicy = DefaultHasher<Key>,
+ class AllocPolicy = MallocAllocPolicy>
+class HashMap {
+ // -- Implementation details -----------------------------------------------
+
+ // HashMap is not copyable or assignable.
+ HashMap(const HashMap& hm) = delete;
+ HashMap& operator=(const HashMap& hm) = delete;
+
+ using TableEntry = HashMapEntry<Key, Value>;
+
+ struct MapHashPolicy : HashPolicy {
+ using Base = HashPolicy;
+ using KeyType = Key;
+
+ static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); }
+
+ static void setKey(TableEntry& aEntry, Key& aKey) {
+ HashPolicy::rekey(aEntry.mutableKey(), aKey);
+ }
+ };
+
+ using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
+ Impl mImpl;
+
+ friend class Impl::Enum;
+
+ public:
+ using Lookup = typename HashPolicy::Lookup;
+ using Entry = TableEntry;
+
+ // -- Initialization -------------------------------------------------------
+
+ explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(),
+ uint32_t aLen = Impl::sDefaultLen)
+ : mImpl(std::move(aAllocPolicy), aLen) {}
+
+ explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
+
+ // HashMap is movable.
+ HashMap(HashMap&& aRhs) = default;
+ HashMap& operator=(HashMap&& aRhs) = default;
+
+ // -- Status and sizing ----------------------------------------------------
+
+ // The map's current generation.
+ Generation generation() const { return mImpl.generation(); }
+
+ // Is the map empty?
+ bool empty() const { return mImpl.empty(); }
+
+ // Number of keys/values in the map.
+ uint32_t count() const { return mImpl.count(); }
+
+ // Number of key/value slots in the map. Note: resize will happen well before
+ // count() == capacity().
+ uint32_t capacity() const { return mImpl.capacity(); }
+
+ // The size of the map's entry storage, in bytes. If the keys/values contain
+ // pointers to other heap blocks, you must iterate over the map and measure
+ // them separately; hence the "shallow" prefix.
+ size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+ size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) +
+ mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ // Attempt to minimize the capacity(). If the table is empty, this will free
+ // the empty storage and upon regrowth it will be given the minimum capacity.
+ void compact() { mImpl.compact(); }
+
+ // Attempt to reserve enough space to fit at least |aLen| elements. This is
+ // total capacity, including elements already present. Does nothing if the
+ // map already has sufficient capacity.
+ [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
+
+ // -- Lookups --------------------------------------------------------------
+
+ // Does the map contain a key/value matching |aLookup|?
+ bool has(const Lookup& aLookup) const {
+ return mImpl.lookup(aLookup).found();
+ }
+
+ // Return a Ptr indicating whether a key/value matching |aLookup| is
+ // present in the map. E.g.:
+ //
+ // using HM = HashMap<int,char>;
+ // HM h;
+ // if (HM::Ptr p = h.lookup(3)) {
+ // assert(p->key() == 3);
+ // char val = p->value();
+ // }
+ //
+ using Ptr = typename Impl::Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
+ return mImpl.lookup(aLookup);
+ }
+
+ // Like lookup(), but does not assert if two threads call it at the same
+ // time. Only use this method when none of the threads will modify the map.
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
+ return mImpl.readonlyThreadsafeLookup(aLookup);
+ }
+
+ // -- Insertions -----------------------------------------------------------
+
+ // Overwrite existing value with |aValue|, or add it if not present. Returns
+ // false on OOM.
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) {
+ return put(aKey, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey,
+ ValueInput&& aValue) {
+ AddPtr p = lookupForAdd(aLookup);
+ if (p) {
+ p->value() = std::forward<ValueInput>(aValue);
+ return true;
+ }
+ return add(p, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ // Like put(), but slightly faster. Must only be used when the given key is
+ // not already present. (In debug builds, assertions check this.)
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) {
+ return mImpl.putNew(aKey, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey,
+ ValueInput&& aValue) {
+ return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ // Like putNew(), but should be only used when the table is known to be big
+ // enough for the insertion, and hashing cannot fail. Typically this is used
+ // to populate an empty map with known-unique keys after reserving space with
+ // reserve(), e.g.
+ //
+ // using HM = HashMap<int,char>;
+ // HM h;
+ // if (!h.reserve(3)) {
+ // MOZ_CRASH("OOM");
+ // }
+ // h.putNewInfallible(1, 'a'); // unique key
+ // h.putNewInfallible(2, 'b'); // unique key
+ // h.putNewInfallible(3, 'c'); // unique key
+ //
+ template <typename KeyInput, typename ValueInput>
+ void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) {
+ mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+ // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
+ //
+ // using HM = HashMap<int,char>;
+ // HM h;
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3, 'a')) {
+ // return false;
+ // }
+ // }
+ // assert(p->key() == 3);
+ // char val = p->value();
+ //
+ // N.B. The caller must ensure that no mutating hash table operations occur
+ // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+ // key a second time, the caller may use the more efficient relookupOrAdd()
+ // method. This method reuses part of the hashing computation to more
+ // efficiently insert the key if it has not been added. For example, a
+ // mutation-handling version of the previous example:
+ //
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 'a')) {
+ // return false;
+ // }
+ // }
+ // assert(p->key() == 3);
+ // char val = p->value();
+ //
+ using AddPtr = typename Impl::AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+ return mImpl.lookupForAdd(aLookup);
+ }
+
+ // Add a key/value. Returns false on OOM.
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) {
+ return mImpl.add(aPtr, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ // See the comment above lookupForAdd() for details.
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey,
+ ValueInput&& aValue) {
+ return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey),
+ std::forward<ValueInput>(aValue));
+ }
+
+ // -- Removal --------------------------------------------------------------
+
+ // Lookup and remove the key/value matching |aLookup|, if present.
+ void remove(const Lookup& aLookup) {
+ if (Ptr p = lookup(aLookup)) {
+ remove(p);
+ }
+ }
+
+ // Remove a previously found key/value (assuming aPtr.found()). The map must
+ // not have been mutated in the interim.
+ void remove(Ptr aPtr) { mImpl.remove(aPtr); }
+
+ // Remove all keys/values without changing the capacity.
+ void clear() { mImpl.clear(); }
+
+ // Like clear() followed by compact().
+ void clearAndCompact() { mImpl.clearAndCompact(); }
+
+ // -- Rekeying -------------------------------------------------------------
+
+ // Infallibly rekey one entry, if necessary. Requires that template
+ // parameters Key and HashPolicy::Lookup are the same type.
+ void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) {
+ if (aOldKey != aNewKey) {
+ rekeyAs(aOldKey, aNewKey, aNewKey);
+ }
+ }
+
+ // Infallibly rekey one entry if present, and return whether that happened.
+ bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
+ const Key& aNewKey) {
+ if (Ptr p = lookup(aOldLookup)) {
+ mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
+ return true;
+ }
+ return false;
+ }
+
+ // -- Iteration ------------------------------------------------------------
+
+ // |iter()| returns an Iterator:
+ //
+ // HashMap<int, char> h;
+ // for (auto iter = h.iter(); !iter.done(); iter.next()) {
+ // char c = iter.get().value();
+ // }
+ //
+ using Iterator = typename Impl::Iterator;
+ Iterator iter() const { return mImpl.iter(); }
+
+ // |modIter()| returns a ModIterator:
+ //
+ // HashMap<int, char> h;
+ // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
+ // if (iter.get().value() == 'l') {
+ // iter.remove();
+ // }
+ // }
+ //
+ // Table resize may occur in ModIterator's destructor.
+ using ModIterator = typename Impl::ModIterator;
+ ModIterator modIter() { return mImpl.modIter(); }
+
+ // These are similar to Iterator/ModIterator/iter(), but use different
+ // terminology.
+ using Range = typename Impl::Range;
+ using Enum = typename Impl::Enum;
+ Range all() const { return mImpl.all(); }
+};
+
+//---------------------------------------------------------------------------
+// HashSet
+//---------------------------------------------------------------------------
+
+// HashSet is a fast hash-based set of values.
+//
+// Template parameter requirements:
+// - T: movable, destructible, assignable.
+// - HashPolicy: see the "Hash Policy" section below.
+// - AllocPolicy: see AllocPolicy.h
+//
+// Note:
+// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+// HashSet must not call back into the same HashSet object.
+//
+template <class T, class HashPolicy = DefaultHasher<T>,
+ class AllocPolicy = MallocAllocPolicy>
+class HashSet {
+ // -- Implementation details -----------------------------------------------
+
+ // HashSet is not copyable or assignable.
+ HashSet(const HashSet& hs) = delete;
+ HashSet& operator=(const HashSet& hs) = delete;
+
+ struct SetHashPolicy : HashPolicy {
+ using Base = HashPolicy;
+ using KeyType = T;
+
+ static const KeyType& getKey(const T& aT) { return aT; }
+
+ static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
+ };
+
+ using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
+ Impl mImpl;
+
+ friend class Impl::Enum;
+
+ public:
+ using Lookup = typename HashPolicy::Lookup;
+ using Entry = T;
+
+ // -- Initialization -------------------------------------------------------
+
+ explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(),
+ uint32_t aLen = Impl::sDefaultLen)
+ : mImpl(std::move(aAllocPolicy), aLen) {}
+
+ explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
+
+ // HashSet is movable.
+ HashSet(HashSet&& aRhs) = default;
+ HashSet& operator=(HashSet&& aRhs) = default;
+
+ // -- Status and sizing ----------------------------------------------------
+
+ // The set's current generation.
+ Generation generation() const { return mImpl.generation(); }
+
+ // Is the set empty?
+ bool empty() const { return mImpl.empty(); }
+
+ // Number of elements in the set.
+ uint32_t count() const { return mImpl.count(); }
+
+ // Number of element slots in the set. Note: resize will happen well before
+ // count() == capacity().
+ uint32_t capacity() const { return mImpl.capacity(); }
+
+ // The size of the set's entry storage, in bytes. If the elements contain
+ // pointers to other heap blocks, you must iterate over the set and measure
+ // them separately; hence the "shallow" prefix.
+ size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+ size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) +
+ mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ // Attempt to minimize the capacity(). If the table is empty, this will free
+ // the empty storage and upon regrowth it will be given the minimum capacity.
+ void compact() { mImpl.compact(); }
+
+ // Attempt to reserve enough space to fit at least |aLen| elements. This is
+ // total capacity, including elements already present. Does nothing if the
+  // set already has sufficient capacity.
+ [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
+
+ // -- Lookups --------------------------------------------------------------
+
+ // Does the set contain an element matching |aLookup|?
+ bool has(const Lookup& aLookup) const {
+ return mImpl.lookup(aLookup).found();
+ }
+
+ // Return a Ptr indicating whether an element matching |aLookup| is present
+ // in the set. E.g.:
+ //
+ // using HS = HashSet<int>;
+ // HS h;
+ // if (HS::Ptr p = h.lookup(3)) {
+ // assert(*p == 3); // p acts like a pointer to int
+ // }
+ //
+ using Ptr = typename Impl::Ptr;
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
+ return mImpl.lookup(aLookup);
+ }
+
+ // Like lookup(), but does not assert if two threads call it at the same
+ // time. Only use this method when none of the threads will modify the set.
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
+ return mImpl.readonlyThreadsafeLookup(aLookup);
+ }
+
+ // -- Insertions -----------------------------------------------------------
+
+ // Add |aU| if it is not present already. Returns false on OOM.
+ template <typename U>
+ [[nodiscard]] bool put(U&& aU) {
+ AddPtr p = lookupForAdd(aU);
+ return p ? true : add(p, std::forward<U>(aU));
+ }
+
+ // Like put(), but slightly faster. Must only be used when the given element
+ // is not already present. (In debug builds, assertions check this.)
+ template <typename U>
+ [[nodiscard]] bool putNew(U&& aU) {
+ return mImpl.putNew(aU, std::forward<U>(aU));
+ }
+
+ // Like the other putNew(), but for when |Lookup| is different to |T|.
+ template <typename U>
+ [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) {
+ return mImpl.putNew(aLookup, std::forward<U>(aU));
+ }
+
+ // Like putNew(), but should be only used when the table is known to be big
+ // enough for the insertion, and hashing cannot fail. Typically this is used
+ // to populate an empty set with known-unique elements after reserving space
+ // with reserve(), e.g.
+ //
+  //   using HS = HashSet<int>;
+  //   HS h;
+  //   if (!h.reserve(3)) {
+  //     MOZ_CRASH("OOM");
+  //   }
+  //   h.putNewInfallible(1, 1);  // unique element
+  //   h.putNewInfallible(2, 2);  // unique element
+  //   h.putNewInfallible(3, 3);  // unique element
+ //
+ template <typename U>
+ void putNewInfallible(const Lookup& aLookup, U&& aU) {
+ mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
+ }
+
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+ // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+ //
+ // using HS = HashSet<int>;
+ // HS h;
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3)) {
+ // return false;
+ // }
+ // }
+ // assert(*p == 3); // p acts like a pointer to int
+ //
+ // N.B. The caller must ensure that no mutating hash table operations occur
+ // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+ // key a second time, the caller may use the more efficient relookupOrAdd()
+ // method. This method reuses part of the hashing computation to more
+ // efficiently insert the key if it has not been added. For example, a
+ // mutation-handling version of the previous example:
+ //
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 3)) {
+ // return false;
+ // }
+ // }
+ // assert(*p == 3);
+ //
+  // Note that relookupOrAdd(p,l,t) performs a lookup using |l| and adds the
+  // entry |t|, where the caller must ensure match(l,t).
+ using AddPtr = typename Impl::AddPtr;
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+ return mImpl.lookupForAdd(aLookup);
+ }
+
+ // Add an element. Returns false on OOM.
+ template <typename U>
+ [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) {
+ return mImpl.add(aPtr, std::forward<U>(aU));
+ }
+
+ // See the comment above lookupForAdd() for details.
+ template <typename U>
+ [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
+ U&& aU) {
+ return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
+ }
+
+ // -- Removal --------------------------------------------------------------
+
+ // Lookup and remove the element matching |aLookup|, if present.
+ void remove(const Lookup& aLookup) {
+ if (Ptr p = lookup(aLookup)) {
+ remove(p);
+ }
+ }
+
+ // Remove a previously found element (assuming aPtr.found()). The set must
+ // not have been mutated in the interim.
+ void remove(Ptr aPtr) { mImpl.remove(aPtr); }
+
+  // Remove all elements without changing the capacity.
+ void clear() { mImpl.clear(); }
+
+ // Like clear() followed by compact().
+ void clearAndCompact() { mImpl.clearAndCompact(); }
+
+ // -- Rekeying -------------------------------------------------------------
+
+ // Infallibly rekey one entry, if present. Requires that template parameters
+ // T and HashPolicy::Lookup are the same type.
+ void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) {
+ if (aOldValue != aNewValue) {
+ rekeyAs(aOldValue, aNewValue, aNewValue);
+ }
+ }
+
+ // Infallibly rekey one entry if present, and return whether that happened.
+ bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
+ const T& aNewValue) {
+ if (Ptr p = lookup(aOldLookup)) {
+ mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
+ return true;
+ }
+ return false;
+ }
+
+ // Infallibly replace the current key at |aPtr| with an equivalent key.
+ // Specifically, both HashPolicy::hash and HashPolicy::match must return
+ // identical results for the new and old key when applied against all
+ // possible matching values.
+ void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) {
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(*aPtr != aNewValue);
+ MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup));
+ MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup));
+ const_cast<T&>(*aPtr) = aNewValue;
+ MOZ_ASSERT(*lookup(aLookup) == aNewValue);
+ }
+ void replaceKey(Ptr aPtr, const T& aNewValue) {
+ replaceKey(aPtr, aNewValue, aNewValue);
+ }
+
+ // -- Iteration ------------------------------------------------------------
+
+ // |iter()| returns an Iterator:
+ //
+ // HashSet<int> h;
+ // for (auto iter = h.iter(); !iter.done(); iter.next()) {
+ // int i = iter.get();
+ // }
+ //
+ using Iterator = typename Impl::Iterator;
+ Iterator iter() const { return mImpl.iter(); }
+
+ // |modIter()| returns a ModIterator:
+ //
+ // HashSet<int> h;
+ // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
+ // if (iter.get() == 42) {
+ // iter.remove();
+ // }
+ // }
+ //
+ // Table resize may occur in ModIterator's destructor.
+ using ModIterator = typename Impl::ModIterator;
+ ModIterator modIter() { return mImpl.modIter(); }
+
+ // These are similar to Iterator/ModIterator/iter(), but use different
+ // terminology.
+ using Range = typename Impl::Range;
+ using Enum = typename Impl::Enum;
+ Range all() const { return mImpl.all(); }
+};
+
+//---------------------------------------------------------------------------
+// Hash Policy
+//---------------------------------------------------------------------------
+
+// A hash policy |HP| for a hash table with key-type |Key| must provide:
+//
+// - a type |HP::Lookup| to use to lookup table entries;
+//
+// - a static member function |HP::hash| that hashes lookup values:
+//
+// static mozilla::HashNumber hash(const Lookup&);
+//
+// - a static member function |HP::match| that tests equality of key and
+// lookup values:
+//
+// static bool match(const Key& aKey, const Lookup& aLookup);
+//
+//   |aKey| and |aLookup| can have different hash numbers, but only when a
+//   collision happens in the |prepareHash| operation, which is infrequent.
+//   Thus |HP::match| must not assume the two hash numbers are equal, even
+//   though they almost always are.
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to look up and store. If a Lookup value |l| is not equal
+// to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
+// true. E.g.:
+//
+//   mozilla::HashSet<Key, HP>::AddPtr p = h.lookupForAdd(l);
+// if (!p) {
+// assert(HP::match(k, l)); // must hold
+// h.add(p, k);
+// }
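+//
+// For instance, a policy for a hypothetical two-field key type (|TwoInts| is
+// illustrative only, not part of this header) might look like this sketch:
+//
+//   struct TwoIntsHasher {
+//     using Lookup = TwoInts;
+//     static HashNumber hash(const Lookup& aLookup) {
+//       return HashGeneric(aLookup.mA, aLookup.mB);
+//     }
+//     static bool match(const TwoInts& aKey, const Lookup& aLookup) {
+//       return aKey.mA == aLookup.mA && aKey.mB == aLookup.mB;
+//     }
+//     static void rekey(TwoInts& aKey, const TwoInts& aNewKey) {
+//       aKey = aNewKey;
+//     }
+//   };
+//
+//   mozilla::HashSet<TwoInts, TwoIntsHasher> set;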
+
+// A pointer hashing policy that uses HashGeneric() to create good hashes for
+// pointers. Note that we don't shift out the lowest k bits because we don't
+// want to assume anything about the alignment of the pointers.
+template <typename Key>
+struct PointerHasher {
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return aKey == aLookup;
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// The default hash policy, which only works with integers.
+template <class Key, typename>
+struct DefaultHasher {
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ // Just convert the integer to a HashNumber and use that as is. (This
+    // discards the high 32 bits of 64-bit integers!) ScrambleHashCode() is
+ // subsequently called on the value to improve the distribution.
+ return aLookup;
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ // Use builtin or overloaded operator==.
+ return aKey == aLookup;
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// A DefaultHasher specialization for enums.
+template <class T>
+struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> {
+ using Key = T;
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ // Use builtin or overloaded operator==.
+ return aKey == static_cast<Key>(aLookup);
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// A DefaultHasher specialization for pointers.
+template <class T>
+struct DefaultHasher<T*> : PointerHasher<T*> {};
+
+// A DefaultHasher specialization for mozilla::UniquePtr.
+template <class T, class D>
+struct DefaultHasher<UniquePtr<T, D>> {
+ using Key = UniquePtr<T, D>;
+ using Lookup = Key;
+ using PtrHasher = PointerHasher<T*>;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ return PtrHasher::hash(aLookup.get());
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return PtrHasher::match(aKey.get(), aLookup.get());
+ }
+
+ static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) {
+ aKey = std::move(aNewKey);
+ }
+};
+
+// A DefaultHasher specialization for doubles.
+template <>
+struct DefaultHasher<double> {
+ using Key = double;
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ // Just xor the high bits with the low bits, and then treat the bits of the
+ // result as a uint32_t.
+ static_assert(sizeof(HashNumber) == 4,
+ "subsequent code assumes a four-byte hash");
+ uint64_t u = BitwiseCast<uint64_t>(aLookup);
+ return HashNumber(u ^ (u >> 32));
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
+ }
+};
+
+// A DefaultHasher specialization for floats.
+template <>
+struct DefaultHasher<float> {
+ using Key = float;
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ // Just use the value as if its bits form an integer. ScrambleHashCode() is
+ // subsequently called on the value to improve the distribution.
+ static_assert(sizeof(HashNumber) == 4,
+ "subsequent code assumes a four-byte hash");
+ return HashNumber(BitwiseCast<uint32_t>(aLookup));
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
+ }
+};
+
+// A hash policy for C strings.
+struct CStringHasher {
+ using Key = const char*;
+ using Lookup = const char*;
+
+ static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return strcmp(aKey, aLookup) == 0;
+ }
+};
+
+//---------------------------------------------------------------------------
+// Fallible Hashing Interface
+//---------------------------------------------------------------------------
+
+// Most of the time generating a hash code is infallible, but sometimes it is
+// necessary to generate hash codes on demand in a way that can fail. Specialize
+// this class for your own hash policy to provide fallible hashing.
+//
+// This is used by MovableCellHasher to handle the fact that generating a
+// unique ID for a cell pointer may fail due to OOM.
+//
+// The default implementations of these methods delegate to the usual HashPolicy
+// implementation and always succeed.
+template <typename HashPolicy>
+struct FallibleHashMethods {
+  // Return true if a hash code is already available for its argument, and
+  // set |aHashOut| to it. Once this succeeds for a specific argument, it
+  // must continue to do so.
+ //
+ // Return false if a hashcode is not already available. This implies that any
+ // lookup must fail, as the hash code would have to have been successfully
+ // created on insertion.
+ template <typename Lookup>
+ static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
+ *aHashOut = HashPolicy::hash(aLookup);
+ return true;
+ }
+
+  // Fallible method to ensure a hash code exists for its argument, creating
+  // one if necessary. Sets |aHashOut| to the hash code and returns true on
+  // success. Returns false on error, e.g. out of memory.
+ template <typename Lookup>
+ static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
+ *aHashOut = HashPolicy::hash(aLookup);
+ return true;
+ }
+};
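+
+// An illustrative specialization sketch (|MyHasher|, |HasUniqueId|, and
+// |GetOrCreateUniqueId| are hypothetical names, not part of this header):
+//
+//   template <>
+//   struct FallibleHashMethods<MyHasher> {
+//     template <typename Lookup>
+//     static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
+//       return HasUniqueId(aLookup, aHashOut);  // false if no id exists yet
+//     }
+//     template <typename Lookup>
+//     static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
+//       return GetOrCreateUniqueId(aLookup, aHashOut);  // may fail on OOM
+//     }
+//   };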
+
+template <typename HashPolicy, typename Lookup>
+static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
+ return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash(
+ std::forward<Lookup>(aLookup), aHashOut);
+}
+
+template <typename HashPolicy, typename Lookup>
+static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) {
+ return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
+ std::forward<Lookup>(aLookup), aHashOut);
+}
+
+//---------------------------------------------------------------------------
+// Implementation Details (HashMapEntry, HashTableEntry, HashTable)
+//---------------------------------------------------------------------------
+
+// Both HashMap and HashSet are implemented by a single HashTable that is even
+// more heavily parameterized than the other two. This leaves HashTable gnarly
+// and extremely coupled to HashMap and HashSet; thus code should not use
+// HashTable directly.
+
+template <class Key, class Value>
+class HashMapEntry {
+ Key key_;
+ Value value_;
+
+ template <class, class, class>
+ friend class detail::HashTable;
+ template <class>
+ friend class detail::HashTableEntry;
+ template <class, class, class, class>
+ friend class HashMap;
+
+ public:
+ template <typename KeyInput, typename ValueInput>
+ HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
+ : key_(std::forward<KeyInput>(aKey)),
+ value_(std::forward<ValueInput>(aValue)) {}
+
+ HashMapEntry(HashMapEntry&& aRhs) = default;
+ HashMapEntry& operator=(HashMapEntry&& aRhs) = default;
+
+ using KeyType = Key;
+ using ValueType = Value;
+
+ const Key& key() const { return key_; }
+
+ // Use this method with caution! If the key is changed such that its hash
+ // value also changes, the map will be left in an invalid state.
+ Key& mutableKey() { return key_; }
+
+ const Value& value() const { return value_; }
+ Value& value() { return value_; }
+
+ private:
+ HashMapEntry(const HashMapEntry&) = delete;
+ void operator=(const HashMapEntry&) = delete;
+};
+
+namespace detail {
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+template <typename T>
+class EntrySlot;
+
+template <typename T>
+class HashTableEntry {
+ private:
+ using NonConstT = std::remove_const_t<T>;
+
+ // Instead of having a hash table entry store that looks like this:
+ //
+ // +--------+--------+--------+--------+
+ // | entry0 | entry1 | .... | entryN |
+ // +--------+--------+--------+--------+
+ //
+ // where the entries contained their cached hash code, we're going to lay out
+ // the entry store thusly:
+ //
+ // +-------+-------+-------+-------+--------+--------+--------+--------+
+ // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
+ // +-------+-------+-------+-------+--------+--------+--------+--------+
+ //
+ // with all the cached hashes prior to the actual entries themselves.
+ //
+ // We do this because implementing the first strategy requires us to make
+ // HashTableEntry look roughly like:
+ //
+ // template <typename T>
+ // class HashTableEntry {
+ // HashNumber mKeyHash;
+ // T mValue;
+ // };
+ //
+ // The problem with this setup is that, depending on the layout of `T`, there
+ // may be platform ABI-mandated padding between `mKeyHash` and the first
+ // member of `T`. This ABI-mandated padding is wasted space, and can be
+ // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms.
+ // In such cases, we're throwing away a quarter of our entry store on padding,
+ // which is undesirable.
+ //
+ // The second layout above, namely:
+ //
+ // +-------+-------+-------+-------+--------+--------+--------+--------+
+ // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
+ // +-------+-------+-------+-------+--------+--------+--------+--------+
+ //
+ // means there is no wasted space between the hashes themselves, and no wasted
+ // space between the entries themselves. However, we would also like there to
+ // be no gap between the last hash and the first entry. The memory allocator
+ // guarantees the alignment of the start of the hashes. The use of a
+ // power-of-two capacity of at least 4 guarantees that the alignment of the
+ // *end* of the hash array is no less than the alignment of the start.
+ // Finally, the static_asserts here guarantee that the entries themselves
+ // don't need to be any more aligned than the alignment of the entry store
+ // itself.
+ //
+ // This assertion is safe for 32-bit builds because on both Windows and Linux
+ // (including Android), the minimum alignment for allocations larger than 8
+ // bytes is 8 bytes, and the actual data for entries in our entry store is
+ // guaranteed to have that alignment as well, thanks to the power-of-two
+ // number of cached hash values stored prior to the entry data.
+
+ // The allocation policy must allocate a table with at least this much
+ // alignment.
+ static constexpr size_t kMinimumAlignment = 8;
+
+ static_assert(alignof(HashNumber) <= kMinimumAlignment,
+ "[N*2 hashes, N*2 T values] allocation's alignment must be "
+ "enough to align each hash");
+ static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber),
+ "subsequent N*2 T values must not require more than an even "
+ "number of HashNumbers provides");
+
+ static const HashNumber sFreeKey = 0;
+ static const HashNumber sRemovedKey = 1;
+ static const HashNumber sCollisionBit = 1;
+
+ alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];
+
+ private:
+ template <class, class, class>
+ friend class HashTable;
+ template <typename>
+ friend class EntrySlot;
+
+ // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+ // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
+ // through |void*|. Placing the latter cast in these separate functions
+ // breaks the chain such that affected GCC versions no longer warn/error.
+ void* rawValuePtr() { return mValueData; }
+
+ static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
+
+ HashTableEntry(const HashTableEntry&) = delete;
+ void operator=(const HashTableEntry&) = delete;
+
+ NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
+
+ void destroyStoredT() {
+ NonConstT* ptr = valuePtr();
+ ptr->~T();
+ MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
+ }
+
+ public:
+ HashTableEntry() = default;
+
+ ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); }
+
+ void destroy() { destroyStoredT(); }
+
+ void swap(HashTableEntry* aOther, bool aIsLive) {
+ // This allows types to use Argument-Dependent-Lookup, and thus use a custom
+ // std::swap, which is needed by types like JS::Heap and such.
+ using std::swap;
+
+ if (this == aOther) {
+ return;
+ }
+ if (aIsLive) {
+ swap(*valuePtr(), *aOther->valuePtr());
+ } else {
+ *aOther->valuePtr() = std::move(*valuePtr());
+ destroy();
+ }
+ }
+
+ T& get() { return *valuePtr(); }
+
+ NonConstT& getMutable() { return *valuePtr(); }
+};
+
+// A slot represents a cached hash value and its associated entry stored
+// in the hash table. These two things are not stored in contiguous memory.
+template <class T>
+class EntrySlot {
+ using NonConstT = std::remove_const_t<T>;
+
+ using Entry = HashTableEntry<T>;
+
+ Entry* mEntry;
+ HashNumber* mKeyHash;
+
+ template <class, class, class>
+ friend class HashTable;
+
+ EntrySlot(Entry* aEntry, HashNumber* aKeyHash)
+ : mEntry(aEntry), mKeyHash(aKeyHash) {}
+
+ public:
+ static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; }
+
+ EntrySlot(const EntrySlot&) = default;
+ EntrySlot(EntrySlot&& aOther) = default;
+
+ EntrySlot& operator=(const EntrySlot&) = default;
+ EntrySlot& operator=(EntrySlot&&) = default;
+
+ bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }
+
+ bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }
+
+ EntrySlot& operator++() {
+ ++mEntry;
+ ++mKeyHash;
+ return *this;
+ }
+
+ void destroy() { mEntry->destroy(); }
+
+ void swap(EntrySlot& aOther) {
+ mEntry->swap(aOther.mEntry, aOther.isLive());
+ std::swap(*mKeyHash, *aOther.mKeyHash);
+ }
+
+ T& get() const { return mEntry->get(); }
+
+ NonConstT& getMutable() { return mEntry->getMutable(); }
+
+ bool isFree() const { return *mKeyHash == Entry::sFreeKey; }
+
+ void clearLive() {
+ MOZ_ASSERT(isLive());
+ *mKeyHash = Entry::sFreeKey;
+ mEntry->destroyStoredT();
+ }
+
+ void clear() {
+ if (isLive()) {
+ mEntry->destroyStoredT();
+ }
+ MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry));
+ *mKeyHash = Entry::sFreeKey;
+ }
+
+ bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; }
+
+ void removeLive() {
+ MOZ_ASSERT(isLive());
+ *mKeyHash = Entry::sRemovedKey;
+ mEntry->destroyStoredT();
+ }
+
+ bool isLive() const { return isLiveHash(*mKeyHash); }
+
+ void setCollision() {
+ MOZ_ASSERT(isLive());
+ *mKeyHash |= Entry::sCollisionBit;
+ }
+ void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; }
+ bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; }
+ bool matchHash(HashNumber hn) {
+ return (*mKeyHash & ~Entry::sCollisionBit) == hn;
+ }
+ HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; }
+
+ template <typename... Args>
+ void setLive(HashNumber aHashNumber, Args&&... aArgs) {
+ MOZ_ASSERT(!isLive());
+ *mKeyHash = aHashNumber;
+ new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...);
+ MOZ_ASSERT(isLive());
+ }
+
+ Entry* toEntry() const { return mEntry; }
+};
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy {
+ friend class mozilla::ReentrancyGuard;
+
+ using NonConstT = std::remove_const_t<T>;
+ using Key = typename HashPolicy::KeyType;
+ using Lookup = typename HashPolicy::Lookup;
+
+ public:
+ using Entry = HashTableEntry<T>;
+ using Slot = EntrySlot<T>;
+
+ template <typename F>
+ static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) {
+ auto hashes = reinterpret_cast<HashNumber*>(aTable);
+ auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]);
+ Slot slot(entries, hashes);
+ for (size_t i = 0; i < size_t(aCapacity); ++i) {
+ f(slot);
+ ++slot;
+ }
+ }
+
+ // A nullable pointer to a hash table element. A Ptr |p| can be tested
+ // either explicitly |if (p.found()) p->...| or using boolean conversion
+ // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+ // table operations unless |generation()| is tested.
+ class Ptr {
+ friend class HashTable;
+
+ Slot mSlot;
+#ifdef DEBUG
+ const HashTable* mTable;
+ Generation mGeneration;
+#endif
+
+ protected:
+ Ptr(Slot aSlot, const HashTable& aTable)
+ : mSlot(aSlot)
+#ifdef DEBUG
+ ,
+ mTable(&aTable),
+ mGeneration(aTable.generation())
+#endif
+ {
+ }
+
+ // This constructor is used only by AddPtr() within lookupForAdd().
+ explicit Ptr(const HashTable& aTable)
+ : mSlot(nullptr, nullptr)
+#ifdef DEBUG
+ ,
+ mTable(&aTable),
+ mGeneration(aTable.generation())
+#endif
+ {
+ }
+
+ bool isValid() const { return !!mSlot.toEntry(); }
+
+ public:
+ Ptr()
+ : mSlot(nullptr, nullptr)
+#ifdef DEBUG
+ ,
+ mTable(nullptr),
+ mGeneration(0)
+#endif
+ {
+ }
+
+ bool found() const {
+ if (!isValid()) {
+ return false;
+ }
+#ifdef DEBUG
+ MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+ return mSlot.isLive();
+ }
+
+ explicit operator bool() const { return found(); }
+
+ bool operator==(const Ptr& aRhs) const {
+ MOZ_ASSERT(found() && aRhs.found());
+ return mSlot == aRhs.mSlot;
+ }
+
+ bool operator!=(const Ptr& aRhs) const {
+#ifdef DEBUG
+ MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+ return !(*this == aRhs);
+ }
+
+ T& operator*() const {
+#ifdef DEBUG
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+ return mSlot.get();
+ }
+
+ T* operator->() const {
+#ifdef DEBUG
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+ return &mSlot.get();
+ }
+ };
+
+ // A Ptr that can be used to add a key after a failed lookup.
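+  //
+  // A typical use, sketched (|makeT| stands in for the caller's way of
+  // constructing a T):
+  //
+  //   auto ptr = table.lookupForAdd(lookup);
+  //   if (!ptr && !table.add(ptr, makeT())) {
+  //     return false;  // allocation failure
+  //   }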
+ class AddPtr : public Ptr {
+ friend class HashTable;
+
+ HashNumber mKeyHash;
+#ifdef DEBUG
+ uint64_t mMutationCount;
+#endif
+
+ AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
+ : Ptr(aSlot, aTable),
+ mKeyHash(aHashNumber)
+#ifdef DEBUG
+ ,
+ mMutationCount(aTable.mMutationCount)
+#endif
+ {
+ }
+
+ // This constructor is used when lookupForAdd() is performed on a table
+ // lacking entry storage; it leaves mSlot null but initializes everything
+ // else.
+ AddPtr(const HashTable& aTable, HashNumber aHashNumber)
+ : Ptr(aTable),
+ mKeyHash(aHashNumber)
+#ifdef DEBUG
+ ,
+ mMutationCount(aTable.mMutationCount)
+#endif
+ {
+ MOZ_ASSERT(isLive());
+ }
+
+ bool isLive() const { return isLiveHash(mKeyHash); }
+
+ public:
+ AddPtr() : mKeyHash(0) {}
+ };
+
+ // A hash table iterator that (mostly) doesn't allow table modifications.
+ // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
+ // hash table operation unless the |generation()| is tested.
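+  //
+  // A usage sketch:
+  //
+  //   for (auto iter = table.iter(); !iter.done(); iter.next()) {
+  //     consume(iter.get());  // |consume| stands in for caller code
+  //   }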
+ class Iterator {
+ void moveToNextLiveEntry() {
+ while (++mCur < mEnd && !mCur.isLive()) {
+ continue;
+ }
+ }
+
+ protected:
+ friend class HashTable;
+
+ explicit Iterator(const HashTable& aTable)
+ : mCur(aTable.slotForIndex(0)),
+ mEnd(aTable.slotForIndex(aTable.capacity()))
+#ifdef DEBUG
+ ,
+ mTable(aTable),
+ mMutationCount(aTable.mMutationCount),
+ mGeneration(aTable.generation()),
+ mValidEntry(true)
+#endif
+ {
+ if (!done() && !mCur.isLive()) {
+ moveToNextLiveEntry();
+ }
+ }
+
+ Slot mCur;
+ Slot mEnd;
+#ifdef DEBUG
+ const HashTable& mTable;
+ uint64_t mMutationCount;
+ Generation mGeneration;
+ bool mValidEntry;
+#endif
+
+ public:
+ bool done() const {
+ MOZ_ASSERT(mGeneration == mTable.generation());
+ MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+ return mCur == mEnd;
+ }
+
+ T& get() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(mValidEntry);
+ MOZ_ASSERT(mGeneration == mTable.generation());
+ MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+ return mCur.get();
+ }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(mGeneration == mTable.generation());
+ MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+ moveToNextLiveEntry();
+#ifdef DEBUG
+ mValidEntry = true;
+#endif
+ }
+ };
+
+ // A hash table iterator that permits modification, removal and rekeying.
+ // Since rehashing when elements were removed during enumeration would be
+  // bad, it is postponed until the ModIterator is destroyed. Since the
+ // ModIterator's destructor touches the hash table, the user must ensure
+ // that the hash table is still alive when the destructor runs.
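+  //
+  // A usage sketch, removing entries that match a predicate (|shouldRemove|
+  // stands in for caller code):
+  //
+  //   for (auto iter = table.modIter(); !iter.done(); iter.next()) {
+  //     if (shouldRemove(iter.get())) {
+  //       iter.remove();
+  //     }
+  //   }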
+ class ModIterator : public Iterator {
+ friend class HashTable;
+
+ HashTable& mTable;
+ bool mRekeyed;
+ bool mRemoved;
+
+ // ModIterator is movable but not copyable.
+ ModIterator(const ModIterator&) = delete;
+ void operator=(const ModIterator&) = delete;
+
+ protected:
+ explicit ModIterator(HashTable& aTable)
+ : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {}
+
+ public:
+ MOZ_IMPLICIT ModIterator(ModIterator&& aOther)
+ : Iterator(aOther),
+ mTable(aOther.mTable),
+ mRekeyed(aOther.mRekeyed),
+ mRemoved(aOther.mRemoved) {
+ aOther.mRekeyed = false;
+ aOther.mRemoved = false;
+ }
+
+ // Removes the current element from the table, leaving |get()|
+ // invalid until the next call to |next()|.
+ void remove() {
+ mTable.remove(this->mCur);
+ mRemoved = true;
+#ifdef DEBUG
+ this->mValidEntry = false;
+ this->mMutationCount = mTable.mMutationCount;
+#endif
+ }
+
+ NonConstT& getMutable() {
+ MOZ_ASSERT(!this->done());
+ MOZ_ASSERT(this->mValidEntry);
+ MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
+ MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
+ return this->mCur.getMutable();
+ }
+
+ // Removes the current element and re-inserts it into the table with
+ // a new key at the new Lookup position. |get()| is invalid after
+ // this operation until the next call to |next()|.
+ void rekey(const Lookup& l, const Key& k) {
+ MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
+ Ptr p(this->mCur, mTable);
+ mTable.rekeyWithoutRehash(p, l, k);
+ mRekeyed = true;
+#ifdef DEBUG
+ this->mValidEntry = false;
+ this->mMutationCount = mTable.mMutationCount;
+#endif
+ }
+
+ void rekey(const Key& k) { rekey(k, k); }
+
+ // Potentially rehashes the table.
+ ~ModIterator() {
+ if (mRekeyed) {
+ mTable.mGen++;
+ mTable.infallibleRehashIfOverloaded();
+ }
+
+ if (mRemoved) {
+ mTable.compact();
+ }
+ }
+ };
+
+ // Range is similar to Iterator, but uses different terminology.
+ class Range {
+ friend class HashTable;
+
+ Iterator mIter;
+
+ protected:
+ explicit Range(const HashTable& table) : mIter(table) {}
+
+ public:
+ bool empty() const { return mIter.done(); }
+
+ T& front() const { return mIter.get(); }
+
+ void popFront() { return mIter.next(); }
+ };
+
+ // Enum is similar to ModIterator, but uses different terminology.
+ class Enum {
+ ModIterator mIter;
+
+ // Enum is movable but not copyable.
+ Enum(const Enum&) = delete;
+ void operator=(const Enum&) = delete;
+
+ public:
+ template <class Map>
+ explicit Enum(Map& map) : mIter(map.mImpl) {}
+
+ MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {}
+
+ bool empty() const { return mIter.done(); }
+
+ T& front() const { return mIter.get(); }
+
+ void popFront() { return mIter.next(); }
+
+ void removeFront() { mIter.remove(); }
+
+ NonConstT& mutableFront() { return mIter.getMutable(); }
+
+ void rekeyFront(const Lookup& aLookup, const Key& aKey) {
+ mIter.rekey(aLookup, aKey);
+ }
+
+ void rekeyFront(const Key& aKey) { mIter.rekey(aKey); }
+ };
+
+ // HashTable is movable
+ HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); }
+ HashTable& operator=(HashTable&& aRhs) {
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+ if (mTable) {
+ destroyTable(*this, mTable, capacity());
+ }
+ AllocPolicy::operator=(std::move(aRhs));
+ moveFrom(aRhs);
+ return *this;
+ }
+
+ private:
+ void moveFrom(HashTable& aRhs) {
+ mGen = aRhs.mGen;
+ mHashShift = aRhs.mHashShift;
+ mTable = aRhs.mTable;
+ mEntryCount = aRhs.mEntryCount;
+ mRemovedCount = aRhs.mRemovedCount;
+#ifdef DEBUG
+ mMutationCount = aRhs.mMutationCount;
+ mEntered = aRhs.mEntered;
+#endif
+ aRhs.mTable = nullptr;
+ aRhs.clearAndCompact();
+ }
+
+ // HashTable is not copyable or assignable
+ HashTable(const HashTable&) = delete;
+ void operator=(const HashTable&) = delete;
+
+ static const uint32_t CAP_BITS = 30;
+
+ public:
+ uint64_t mGen : 56; // entry storage generation number
+ uint64_t mHashShift : 8; // multiplicative hash shift
+ char* mTable; // entry storage
+ uint32_t mEntryCount; // number of entries in mTable
+ uint32_t mRemovedCount; // removed entry sentinels in mTable
+
+#ifdef DEBUG
+ uint64_t mMutationCount;
+ mutable bool mEntered;
+#endif
+
+ // The default initial capacity is 32 (enough to hold 16 elements), but it
+ // can be as low as 4.
+ static const uint32_t sDefaultLen = 16;
+ static const uint32_t sMinCapacity = 4;
+ // See the comments in HashTableEntry about this value.
+ static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions");
+ static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
+ static const uint32_t sMaxCapacity = 1u << CAP_BITS;
+
+ // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+ // math we implement it as a ratio of integers.
+ static const uint8_t sAlphaDenominator = 4;
+ static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+ static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
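+  // For example, at capacity 32 the table counts as overloaded once live
+  // plus removed entries reach 32 * 3/4 = 24 (see rehashIfOverloaded()),
+  // and as underloaded once live entries drop to 32 * 1/4 = 8 or fewer
+  // (see shrinkIfUnderloaded()).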
+
+ static const HashNumber sFreeKey = Entry::sFreeKey;
+ static const HashNumber sRemovedKey = Entry::sRemovedKey;
+ static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+ static uint32_t bestCapacity(uint32_t aLen) {
+ static_assert(
+ (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
+ "multiplication in numerator below could overflow");
+ static_assert(
+ sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
+ "numerator calculation below could potentially overflow");
+
+ // Callers should ensure this is true.
+ MOZ_ASSERT(aLen <= sMaxInit);
+
+ // Compute the smallest capacity allowing |aLen| elements to be
+ // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling
+ // integral division: <http://stackoverflow.com/a/2745086>.)
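+    // For example, with max alpha 3/4, aLen = 16 gives
+    // ceil(16 * 4 / 3) = 22, which RoundUpPow2 raises to 32.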
+ uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) /
+ sMaxAlphaNumerator;
+ capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity);
+
+ MOZ_ASSERT(capacity >= aLen);
+ MOZ_ASSERT(capacity <= sMaxCapacity);
+
+ return capacity;
+ }
+
+ static uint32_t hashShift(uint32_t aLen) {
+ // Reject all lengths whose initial computed capacity would exceed
+ // sMaxCapacity. Round that maximum aLen down to the nearest power of two
+ // for speedier code.
+ if (MOZ_UNLIKELY(aLen > sMaxInit)) {
+ MOZ_CRASH("initial length is too large");
+ }
+
+ return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen));
+ }
+
+ static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
+
+ static HashNumber prepareHash(HashNumber aInputHash) {
+ HashNumber keyHash = ScrambleHashCode(aInputHash);
+
+ // Avoid reserved hash codes.
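+    // 0 (sFreeKey) and 1 (sRemovedKey) are reserved for free and removed
+    // slots, so a computed hash of 0 or 1 is wrapped around to a large
+    // live value by the subtraction below.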
+ if (!isLiveHash(keyHash)) {
+ keyHash -= (sRemovedKey + 1);
+ }
+ return keyHash & ~sCollisionBit;
+ }
+
+ enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+
+ // Fake a struct that we're going to alloc. See the comments in
+ // HashTableEntry about how the table is laid out, and why it's safe.
+ struct FakeSlot {
+ unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)];
+ };
+
+ static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
+ FailureBehavior aReportFailure = ReportFailure) {
+ FakeSlot* fake =
+ aReportFailure
+ ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity)
+ : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity);
+
+ MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==
+ 0);
+
+ char* table = reinterpret_cast<char*>(fake);
+ if (table) {
+ forEachSlot(table, aCapacity, [&](Slot& slot) {
+ *slot.mKeyHash = sFreeKey;
+ new (KnownNotNull, slot.toEntry()) Entry();
+ });
+ }
+ return table;
+ }
+
+ static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable,
+ uint32_t aCapacity) {
+ forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) {
+ if (slot.isLive()) {
+ slot.toEntry()->destroyStoredT();
+ }
+ });
+ freeTable(aAllocPolicy, aOldTable, aCapacity);
+ }
+
+ static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable,
+ uint32_t aCapacity) {
+ FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable);
+ aAllocPolicy.free_(fake, aCapacity);
+ }
+
+ public:
+ HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
+ : AllocPolicy(std::move(aAllocPolicy)),
+ mGen(0),
+ mHashShift(hashShift(aLen)),
+ mTable(nullptr),
+ mEntryCount(0),
+ mRemovedCount(0)
+#ifdef DEBUG
+ ,
+ mMutationCount(0),
+ mEntered(false)
+#endif
+ {
+ }
+
+ explicit HashTable(AllocPolicy aAllocPolicy)
+ : HashTable(aAllocPolicy, sDefaultLen) {}
+
+ ~HashTable() {
+ if (mTable) {
+ destroyTable(*this, mTable, capacity());
+ }
+ }
+
+ private:
+ HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; }
+
+ struct DoubleHash {
+ HashNumber mHash2;
+ HashNumber mSizeMask;
+ };
+
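+  // The double-hash step below is built from the key bits that hash1()
+  // discards and is forced odd, so it is coprime to the power-of-two
+  // capacity and the probe sequence visits every slot before repeating.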
+ DoubleHash hash2(HashNumber aCurKeyHash) const {
+ uint32_t sizeLog2 = kHashNumberBits - mHashShift;
+ DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1,
+ (HashNumber(1) << sizeLog2) - 1};
+ return dh;
+ }
+
+ static HashNumber applyDoubleHash(HashNumber aHash1,
+ const DoubleHash& aDoubleHash) {
+ return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
+ }
+
+ static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
+ return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
+ }
+
+ enum LookupReason { ForNonAdd, ForAdd };
+
+ Slot slotForIndex(HashNumber aIndex) const {
+ auto hashes = reinterpret_cast<HashNumber*>(mTable);
+ auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]);
+ return Slot(&entries[aIndex], &hashes[aIndex]);
+ }
+
+ // Warning: in order for readonlyThreadsafeLookup() to be safe this
+ // function must not modify the table in any way when Reason==ForNonAdd.
+ template <LookupReason Reason>
+ MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
+ HashNumber aKeyHash) const {
+ MOZ_ASSERT(isLiveHash(aKeyHash));
+ MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+ MOZ_ASSERT(mTable);
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(aKeyHash);
+ Slot slot = slotForIndex(h1);
+
+ // Miss: return space for a new entry.
+ if (slot.isFree()) {
+ return slot;
+ }
+
+ // Hit: return entry.
+ if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+ return slot;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(aKeyHash);
+
+ // Save the first removed entry pointer so we can recycle later.
+ Maybe<Slot> firstRemoved;
+
+ while (true) {
+ if (Reason == ForAdd && !firstRemoved) {
+ if (MOZ_UNLIKELY(slot.isRemoved())) {
+ firstRemoved.emplace(slot);
+ } else {
+ slot.setCollision();
+ }
+ }
+
+ h1 = applyDoubleHash(h1, dh);
+
+ slot = slotForIndex(h1);
+ if (slot.isFree()) {
+ return firstRemoved.refOr(slot);
+ }
+
+ if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+ return slot;
+ }
+ }
+ }
+
+ // This is a copy of lookup() hardcoded to the assumptions:
+ // 1. the lookup is for an add;
+ // 2. the key, whose |keyHash| has been passed, is not in the table.
+ Slot findNonLiveSlot(HashNumber aKeyHash) {
+ MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+ MOZ_ASSERT(mTable);
+
+ // We assume 'aKeyHash' has already been distributed.
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(aKeyHash);
+ Slot slot = slotForIndex(h1);
+
+ // Miss: return space for a new entry.
+ if (!slot.isLive()) {
+ return slot;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(aKeyHash);
+
+ while (true) {
+ slot.setCollision();
+
+ h1 = applyDoubleHash(h1, dh);
+
+ slot = slotForIndex(h1);
+ if (!slot.isLive()) {
+ return slot;
+ }
+ }
+ }
+
+ enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+
+ RebuildStatus changeTableSize(
+ uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
+ MOZ_ASSERT(IsPowerOfTwo(newCapacity));
+ MOZ_ASSERT(!!mTable == !!capacity());
+
+ // Look, but don't touch, until we succeed in getting new entry store.
+ char* oldTable = mTable;
+ uint32_t oldCapacity = capacity();
+ uint32_t newLog2 = mozilla::CeilingLog2(newCapacity);
+
+ if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+ if (aReportFailure) {
+ this->reportAllocOverflow();
+ }
+ return RehashFailed;
+ }
+
+ char* newTable = createTable(*this, newCapacity, aReportFailure);
+ if (!newTable) {
+ return RehashFailed;
+ }
+
+ // We can't fail from here on, so update table parameters.
+ mHashShift = kHashNumberBits - newLog2;
+ mRemovedCount = 0;
+ mGen++;
+ mTable = newTable;
+
+ // Copy only live entries, leaving removed ones behind.
+ forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
+ if (slot.isLive()) {
+ HashNumber hn = slot.getKeyHash();
+ findNonLiveSlot(hn).setLive(
+ hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get())));
+ }
+
+ slot.clear();
+ });
+
+ // All entries have been destroyed, no need to destroyTable.
+ freeTable(*this, oldTable, oldCapacity);
+ return Rehashed;
+ }
+
+ RebuildStatus rehashIfOverloaded(
+ FailureBehavior aReportFailure = ReportFailure) {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+ "multiplication below could overflow");
+
+    // Note: if capacity() is zero, this check always deems the table
+    // overloaded, which is what we want.
+ bool overloaded = mEntryCount + mRemovedCount >=
+ capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+
+ if (!overloaded) {
+ return NotOverloaded;
+ }
+
+ // Succeed if a quarter or more of all entries are removed. Note that this
+ // always succeeds if capacity() == 0 (i.e. entry storage has not been
+ // allocated), which is what we want, because it means changeTableSize()
+ // will allocate the requested capacity rather than doubling it.
+ bool manyRemoved = mRemovedCount >= (capacity() >> 2);
+ uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2;
+ return changeTableSize(newCapacity, aReportFailure);
+ }
+
+ void infallibleRehashIfOverloaded() {
+ if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
+ rehashTableInPlace();
+ }
+ }
+
+ void remove(Slot& aSlot) {
+ MOZ_ASSERT(mTable);
+
+ if (aSlot.hasCollision()) {
+ aSlot.removeLive();
+ mRemovedCount++;
+ } else {
+ aSlot.clearLive();
+ }
+ mEntryCount--;
+#ifdef DEBUG
+ mMutationCount++;
+#endif
+ }
+
+ void shrinkIfUnderloaded() {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+ "multiplication below could overflow");
+ bool underloaded =
+ capacity() > sMinCapacity &&
+ mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;
+
+ if (underloaded) {
+ (void)changeTableSize(capacity() / 2, DontReportFailure);
+ }
+ }
+
+ // This is identical to changeTableSize(currentSize), but without requiring
+ // a second table. We do this by recycling the collision bits to tell us if
+ // the element is already inserted or still waiting to be inserted. Since
+ // already-inserted elements win any conflicts, we get the same table as we
+ // would have gotten through random insertion order.
+ void rehashTableInPlace() {
+ mRemovedCount = 0;
+ mGen++;
+ forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
+ for (uint32_t i = 0; i < capacity();) {
+ Slot src = slotForIndex(i);
+
+ if (!src.isLive() || src.hasCollision()) {
+ ++i;
+ continue;
+ }
+
+ HashNumber keyHash = src.getKeyHash();
+ HashNumber h1 = hash1(keyHash);
+ DoubleHash dh = hash2(keyHash);
+ Slot tgt = slotForIndex(h1);
+ while (true) {
+ if (!tgt.hasCollision()) {
+ src.swap(tgt);
+ tgt.setCollision();
+ break;
+ }
+
+ h1 = applyDoubleHash(h1, dh);
+ tgt = slotForIndex(h1);
+ }
+ }
+
+ // TODO: this algorithm leaves collision bits on *all* elements, even if
+ // they are on no collision path. We have the option of setting the
+ // collision bits correctly on a subsequent pass or skipping the rehash
+ // unless we are totally filled with tombstones: benchmark to find out
+ // which approach is best.
+ }
+
+ // Prefer to use putNewInfallible; this function does not check
+ // invariants.
+ template <typename... Args>
+ void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) {
+ MOZ_ASSERT(mTable);
+
+ Slot slot = findNonLiveSlot(aKeyHash);
+
+ if (slot.isRemoved()) {
+ mRemovedCount--;
+ aKeyHash |= sCollisionBit;
+ }
+
+ slot.setLive(aKeyHash, std::forward<Args>(aArgs)...);
+ mEntryCount++;
+#ifdef DEBUG
+ mMutationCount++;
+#endif
+ }
+
+ public:
+ void clear() {
+ forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
+ mRemovedCount = 0;
+ mEntryCount = 0;
+#ifdef DEBUG
+ mMutationCount++;
+#endif
+ }
+
+ // Resize the table down to the smallest capacity that doesn't overload the
+ // table. Since we call shrinkIfUnderloaded() on every remove, you only need
+ // to call this after a bulk removal of items done without calling remove().
+ void compact() {
+ if (empty()) {
+ // Free the entry storage.
+ freeTable(*this, mTable, capacity());
+ mGen++;
+ mHashShift = hashShift(0); // gives minimum capacity on regrowth
+ mTable = nullptr;
+ mRemovedCount = 0;
+ return;
+ }
+
+ uint32_t bestCapacity = this->bestCapacity(mEntryCount);
+ MOZ_ASSERT(bestCapacity <= capacity());
+
+ if (bestCapacity < capacity()) {
+ (void)changeTableSize(bestCapacity, DontReportFailure);
+ }
+ }
+
+ void clearAndCompact() {
+ clear();
+ compact();
+ }
+
+ [[nodiscard]] bool reserve(uint32_t aLen) {
+ if (aLen == 0) {
+ return true;
+ }
+
+ if (MOZ_UNLIKELY(aLen > sMaxInit)) {
+ this->reportAllocOverflow();
+ return false;
+ }
+
+ uint32_t bestCapacity = this->bestCapacity(aLen);
+ if (bestCapacity <= capacity()) {
+ return true; // Capacity is already sufficient.
+ }
+
+ RebuildStatus status = changeTableSize(bestCapacity, ReportFailure);
+ MOZ_ASSERT(status != NotOverloaded);
+ return status != RehashFailed;
+ }
+
+ Iterator iter() const { return Iterator(*this); }
+
+ ModIterator modIter() { return ModIterator(*this); }
+
+ Range all() const { return Range(*this); }
+
+ bool empty() const { return mEntryCount == 0; }
+
+ uint32_t count() const { return mEntryCount; }
+
+ uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); }
+
+ uint32_t capacity() const { return mTable ? rawCapacity() : 0; }
+
+ Generation generation() const { return Generation(mGen); }
+
+ size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(mTable);
+ }
+
+ size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
+ if (empty()) {
+ return Ptr();
+ }
+
+ HashNumber inputHash;
+ if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) {
+ return Ptr();
+ }
+
+ HashNumber keyHash = prepareHash(inputHash);
+ return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this);
+ }
+
+ MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
+ ReentrancyGuard g(*this);
+ return readonlyThreadsafeLookup(aLookup);
+ }
+
+ MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+ ReentrancyGuard g(*this);
+
+ HashNumber inputHash;
+ if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
+ return AddPtr();
+ }
+
+ HashNumber keyHash = prepareHash(inputHash);
+
+ if (!mTable) {
+ return AddPtr(*this, keyHash);
+ }
+
+ // Directly call the constructor in the return statement to avoid
+ // excess copying when building with Visual Studio 2017.
+ // See bug 1385181.
+ return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash);
+ }
+
+ template <typename... Args>
+ [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) {
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT_IF(aPtr.isValid(), mTable);
+ MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
+ MOZ_ASSERT(!aPtr.found());
+ MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
+
+    // Check for error from EnsureHash() here.
+ if (!aPtr.isLive()) {
+ return false;
+ }
+
+ MOZ_ASSERT(aPtr.mGeneration == generation());
+#ifdef DEBUG
+ MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
+#endif
+
+ if (!aPtr.isValid()) {
+ MOZ_ASSERT(!mTable && mEntryCount == 0);
+ uint32_t newCapacity = rawCapacity();
+ RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
+ MOZ_ASSERT(status != NotOverloaded);
+ if (status == RehashFailed) {
+ return false;
+ }
+ aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
+
+ } else if (aPtr.mSlot.isRemoved()) {
+ // Changing an entry from removed to live does not affect whether we are
+ // overloaded and can be handled separately.
+ if (!this->checkSimulatedOOM()) {
+ return false;
+ }
+ mRemovedCount--;
+ aPtr.mKeyHash |= sCollisionBit;
+
+ } else {
+ // Preserve the validity of |aPtr.mSlot|.
+ RebuildStatus status = rehashIfOverloaded();
+ if (status == RehashFailed) {
+ return false;
+ }
+ if (status == NotOverloaded && !this->checkSimulatedOOM()) {
+ return false;
+ }
+ if (status == Rehashed) {
+ aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
+ }
+ }
+
+ aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
+ mEntryCount++;
+#ifdef DEBUG
+ mMutationCount++;
+ aPtr.mGeneration = generation();
+ aPtr.mMutationCount = mMutationCount;
+#endif
+ return true;
+ }
+
+ // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
+ // function must take care not to use |aLookup| after moving |aArgs|.
+ template <typename... Args>
+ void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) {
+ MOZ_ASSERT(!lookup(aLookup).found());
+ ReentrancyGuard g(*this);
+ HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
+ putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
+ }
+
+ // Note: |aLookup| may alias arguments in |aArgs|, so this function must take
+ // care not to use |aLookup| after moving |aArgs|.
+ template <typename... Args>
+ [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) {
+ MOZ_ASSERT(!lookup(aLookup).found());
+ ReentrancyGuard g(*this);
+ if (!this->checkSimulatedOOM()) {
+ return false;
+ }
+ HashNumber inputHash;
+ if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
+ return false;
+ }
+ HashNumber keyHash = prepareHash(inputHash);
+ if (rehashIfOverloaded() == RehashFailed) {
+ return false;
+ }
+ putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
+ return true;
+ }
+
+  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
+ // function must take care not to use |aLookup| after moving |aArgs|.
+ template <typename... Args>
+ [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
+ Args&&... aArgs) {
+    // Check for error from EnsureHash() here.
+ if (!aPtr.isLive()) {
+ return false;
+ }
+#ifdef DEBUG
+ aPtr.mGeneration = generation();
+ aPtr.mMutationCount = mMutationCount;
+#endif
+ if (mTable) {
+ ReentrancyGuard g(*this);
+ // Check that aLookup has not been destroyed.
+ MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash);
+ aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
+ if (aPtr.found()) {
+ return true;
+ }
+ } else {
+ // Clear aPtr so it's invalid; add() will allocate storage and redo the
+ // lookup.
+ aPtr.mSlot = Slot(nullptr, nullptr);
+ }
+ return add(aPtr, std::forward<Args>(aArgs)...);
+ }
+
+ void remove(Ptr aPtr) {
+ MOZ_ASSERT(mTable);
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(aPtr.mGeneration == generation());
+ remove(aPtr.mSlot);
+ shrinkIfUnderloaded();
+ }
+
+ void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
+ MOZ_ASSERT(mTable);
+ ReentrancyGuard g(*this);
+ MOZ_ASSERT(aPtr.found());
+ MOZ_ASSERT(aPtr.mGeneration == generation());
+ typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
+ HashPolicy::setKey(t, const_cast<Key&>(aKey));
+ remove(aPtr.mSlot);
+ HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
+ putNewInfallibleInternal(keyHash, std::move(t));
+ }
+
+ void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
+ rekeyWithoutRehash(aPtr, aLookup, aKey);
+ infallibleRehashIfOverloaded();
+ }
+};
+
+} // namespace detail
+} // namespace mozilla
+
+#endif /* mozilla_HashTable_h */
diff --git a/mfbt/HelperMacros.h b/mfbt/HelperMacros.h
new file mode 100644
index 0000000000..883a16ec59
--- /dev/null
+++ b/mfbt/HelperMacros.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* MOZ_STRINGIFY Macros */
+
+#ifndef mozilla_HelperMacros_h
+#define mozilla_HelperMacros_h
+
+// Wraps x in quotes without expanding a macro name
+#define MOZ_STRINGIFY_NO_EXPANSION(x) #x
+
+// Wraps x in quotes, expanding x if it is a macro name
+#define MOZ_STRINGIFY(x) MOZ_STRINGIFY_NO_EXPANSION(x)
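+
+// For example, given |#define FOO 42|, MOZ_STRINGIFY(FOO) yields "42",
+// while MOZ_STRINGIFY_NO_EXPANSION(FOO) yields "FOO".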
+
+#endif // mozilla_HelperMacros_h
diff --git a/mfbt/InitializedOnce.h b/mfbt/InitializedOnce.h
new file mode 100644
index 0000000000..aac152df35
--- /dev/null
+++ b/mfbt/InitializedOnce.h
@@ -0,0 +1,247 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Class template for objects that can only be initialized once.
+
+#ifndef mozilla_mfbt_initializedonce_h__
+#define mozilla_mfbt_initializedonce_h__
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+enum struct InitWhen { InConstructorOnly, LazyAllowed };
+enum struct DestroyWhen { EarlyAllowed, InDestructorOnly };
+
+namespace ValueCheckPolicies {
+template <typename T>
+struct AllowAnyValue {
+ constexpr static bool Check(const T& /*aValue*/) { return true; }
+};
+
+template <typename T>
+struct ConvertsToTrue {
+ constexpr static bool Check(const T& aValue) {
+ return static_cast<bool>(aValue);
+ }
+};
+} // namespace ValueCheckPolicies
+
+// A kind of mozilla::Maybe that can only be initialized and cleared once. It
+// cannot be re-initialized. It is more stateful than a const Maybe<T> in
+// that it can be cleared, but much less stateful than a non-const Maybe<T>,
+// which could be reinitialized multiple times. It can only be used with
+// const T to ensure that the contents cannot be modified either.
+// TODO: Make constructors constexpr when Maybe's constructors are constexpr
+// (Bug 1601336).
+template <typename T, InitWhen InitWhenVal, DestroyWhen DestroyWhenVal,
+ template <typename> class ValueCheckPolicy =
+ ValueCheckPolicies::AllowAnyValue>
+class InitializedOnce final {
+ static_assert(std::is_const_v<T>);
+ using MaybeType = Maybe<std::remove_const_t<T>>;
+
+ public:
+ using ValueType = T;
+
+ template <typename Dummy = void>
+ explicit constexpr InitializedOnce(
+ std::enable_if_t<InitWhenVal == InitWhen::LazyAllowed, Dummy>* =
+ nullptr) {}
+
+ // note: aArg0 is named separately here to disallow calling this with no
+ // arguments. The default constructor should only be available conditionally
+ // and is declared above.
+ template <typename Arg0, typename... Args>
+ explicit constexpr InitializedOnce(Arg0&& aArg0, Args&&... aArgs)
+ : mMaybe{Some(std::remove_const_t<T>{std::forward<Arg0>(aArg0),
+ std::forward<Args>(aArgs)...})} {
+ MOZ_ASSERT(ValueCheckPolicy<T>::Check(*mMaybe));
+ }
+
+ InitializedOnce(const InitializedOnce&) = delete;
+ InitializedOnce(InitializedOnce&& aOther) : mMaybe{std::move(aOther.mMaybe)} {
+ static_assert(DestroyWhenVal == DestroyWhen::EarlyAllowed);
+#ifdef DEBUG
+ aOther.mWasReset = true;
+#endif
+ }
+ InitializedOnce& operator=(const InitializedOnce&) = delete;
+ InitializedOnce& operator=(InitializedOnce&& aOther) {
+ static_assert(InitWhenVal == InitWhen::LazyAllowed &&
+ DestroyWhenVal == DestroyWhen::EarlyAllowed);
+ MOZ_ASSERT(!mWasReset);
+ MOZ_ASSERT(!mMaybe);
+ mMaybe.~MaybeType();
+ new (&mMaybe) MaybeType{std::move(aOther.mMaybe)};
+#ifdef DEBUG
+ aOther.mWasReset = true;
+#endif
+ return *this;
+ }
+
+ template <typename... Args, typename Dummy = void>
+ constexpr std::enable_if_t<InitWhenVal == InitWhen::LazyAllowed, Dummy> init(
+ Args&&... aArgs) {
+ MOZ_ASSERT(mMaybe.isNothing());
+ MOZ_ASSERT(!mWasReset);
+ mMaybe.emplace(std::remove_const_t<T>{std::forward<Args>(aArgs)...});
+ MOZ_ASSERT(ValueCheckPolicy<T>::Check(*mMaybe));
+ }
+
+ constexpr explicit operator bool() const { return isSome(); }
+ constexpr bool isSome() const { return mMaybe.isSome(); }
+ constexpr bool isNothing() const { return mMaybe.isNothing(); }
+
+ constexpr T& operator*() const { return *mMaybe; }
+ constexpr T* operator->() const { return mMaybe.operator->(); }
+
+ constexpr T& ref() const { return mMaybe.ref(); }
+
+ template <typename Dummy = void>
+ std::enable_if_t<DestroyWhenVal == DestroyWhen::EarlyAllowed, Dummy>
+ destroy() {
+ MOZ_ASSERT(mMaybe.isSome());
+ maybeDestroy();
+ }
+
+ template <typename Dummy = void>
+ std::enable_if_t<DestroyWhenVal == DestroyWhen::EarlyAllowed, Dummy>
+ maybeDestroy() {
+ mMaybe.reset();
+#ifdef DEBUG
+ mWasReset = true;
+#endif
+ }
+
+ template <typename Dummy = T>
+ std::enable_if_t<DestroyWhenVal == DestroyWhen::EarlyAllowed, Dummy>
+ release() {
+ MOZ_ASSERT(mMaybe.isSome());
+ auto res = std::move(mMaybe.ref());
+ destroy();
+ return res;
+ }
+
+ private:
+ MaybeType mMaybe;
+#ifdef DEBUG
+ bool mWasReset = false;
+#endif
+};
+
+template <typename T, InitWhen InitWhenVal, DestroyWhen DestroyWhenVal,
+ template <typename> class ValueCheckPolicy>
+class LazyInitializer {
+ public:
+ explicit LazyInitializer(InitializedOnce<T, InitWhenVal, DestroyWhenVal,
+ ValueCheckPolicy>& aLazyInitialized)
+ : mLazyInitialized{aLazyInitialized} {}
+
+ template <typename U>
+ LazyInitializer& operator=(U&& aValue) {
+ mLazyInitialized.init(std::forward<U>(aValue));
+ return *this;
+ }
+
+ LazyInitializer(const LazyInitializer&) = delete;
+ LazyInitializer& operator=(const LazyInitializer&) = delete;
+
+ private:
+ InitializedOnce<T, InitWhenVal, DestroyWhenVal, ValueCheckPolicy>&
+ mLazyInitialized;
+};
+
+} // namespace detail
+
+// The following *InitializedOnce* template aliases allow declaring class
+// member variables that can only be initialized once, but may be destroyed
+// explicitly earlier than in the containing class's destructor.
+// The intention is to restrict the possible state transitions for member
+// variables that can almost be const, but not quite. This may be particularly
+// useful for classes with a lot of members. Uses in other contexts, e.g. as
+// local variables, are possible, but probably seldom useful. They can only be
+// instantiated with a const element type. Any misuses that cannot be detected
+// at compile time trigger a MOZ_ASSERT at runtime. Individually spelled out
+// assertions for these aspects are not necessary, which may improve the
+// readability of the code without impairing safety.
+//
+// The base variant InitializedOnce requires initialization in the constructor,
+// but allows early destruction using destroy(), and allows move construction.
+// It is similar to Maybe<const T> in some sense, but a Maybe<const T> could be
+// reinitialized arbitrarily. InitializedOnce expresses the intent not to do
+// this, and prohibits reinitialization.
+//
+// The Lazy* variants allow default construction, and can be initialized lazily
+// using init() in that case, but they cannot be reinitialized either. They do
+// not allow early destruction.
+//
+// The Lazy*EarlyDestructible variants allow lazy initialization, early
+// destruction, move construction and move assignment. This should be used only
+// when really required.
+//
+// The *NotNull variants only allow initialization with values that convert to
+// bool as true. They are named NotNull because the typical use case is with
+// (smart) pointer types, but any other type convertible to bool will also work
+// analogously.
+//
+// There is no variant combining detail::InitWhen::InConstructorOnly with
+// detail::DestroyWhen::InDestructorOnly because this would be equivalent to a
+// const member.
+//
+// For special cases, e.g. requiring custom value check policies,
+// detail::InitializedOnce might be instantiated directly, but be mindful when
+// doing this.
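+//
+// A minimal usage sketch (the class and member names are illustrative):
+//
+//   class FileHandle {
+//    public:
+//     explicit FileHandle(int aFd) : mFd{aFd} {}
+//     void Close() { mFd.destroy(); }  // early destruction is allowed
+//     int Get() const { return *mFd; }
+//
+//    private:
+//     InitializedOnce<const int> mFd;
+//   };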
+
+template <typename T>
+using InitializedOnce =
+ detail::InitializedOnce<T, detail::InitWhen::InConstructorOnly,
+ detail::DestroyWhen::EarlyAllowed>;
+
+template <typename T>
+using InitializedOnceNotNull =
+ detail::InitializedOnce<T, detail::InitWhen::InConstructorOnly,
+ detail::DestroyWhen::EarlyAllowed,
+ detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T>
+using LazyInitializedOnce =
+ detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+ detail::DestroyWhen::InDestructorOnly>;
+
+template <typename T>
+using LazyInitializedOnceNotNull =
+ detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+ detail::DestroyWhen::InDestructorOnly,
+ detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T>
+using LazyInitializedOnceEarlyDestructible =
+ detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+ detail::DestroyWhen::EarlyAllowed>;
+
+template <typename T>
+using LazyInitializedOnceNotNullEarlyDestructible =
+ detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+ detail::DestroyWhen::EarlyAllowed,
+ detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T, detail::InitWhen InitWhenVal,
+ detail::DestroyWhen DestroyWhenVal,
+ template <typename> class ValueCheckPolicy>
+auto do_Init(detail::InitializedOnce<T, InitWhenVal, DestroyWhenVal,
+ ValueCheckPolicy>& aLazyInitialized) {
+ return detail::LazyInitializer(aLazyInitialized);
+}
+
+} // namespace mozilla
+
+#endif
diff --git a/mfbt/IntegerRange.h b/mfbt/IntegerRange.h
new file mode 100644
index 0000000000..4415031454
--- /dev/null
+++ b/mfbt/IntegerRange.h
@@ -0,0 +1,192 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Iterator over ranges of integers */
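+//
+// For example (a sketch):
+//
+//   for (auto i : IntegerRange(10)) { ... }     // visits 0 through 9
+//   for (auto i : IntegerRange(5, 10)) { ... }  // visits 5 through 9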
+
+#ifndef mozilla_IntegerRange_h
+#define mozilla_IntegerRange_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ReverseIterator.h"
+
+#include <iterator>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename IntTypeT>
+class IntegerIterator {
+ public:
+ // It is disputable whether these type definitions are correct, since
+ // operator* doesn't return a reference at all. Also, the iterator_category
+ // can be at most std::input_iterator_tag (rather than
+  // std::bidirectional_iterator_tag, as it might seem), because it is a
+  // stashing iterator. See also, e.g.,
+ // https://stackoverflow.com/questions/50909701/what-should-be-iterator-category-for-a-stashing-iterator
+ using value_type = const IntTypeT;
+ using pointer = const value_type*;
+ using reference = const value_type&;
+ using difference_type = std::make_signed_t<IntTypeT>;
+ using iterator_category = std::input_iterator_tag;
+
+ template <typename IntType>
+ explicit IntegerIterator(IntType aCurrent) : mCurrent(aCurrent) {}
+
+ template <typename IntType>
+ explicit IntegerIterator(const IntegerIterator<IntType>& aOther)
+ : mCurrent(aOther.mCurrent) {}
+
+ // This intentionally returns a value rather than a reference, to make
+ // mozilla::ReverseIterator work with it. Still, std::reverse_iterator cannot
+ // be used with IntegerIterator because it still is a "stashing iterator". See
+ // Bug 1175485.
+ IntTypeT operator*() const { return mCurrent; }
+
+ /* Increment and decrement operators */
+
+ IntegerIterator& operator++() {
+ ++mCurrent;
+ return *this;
+ }
+ IntegerIterator& operator--() {
+ --mCurrent;
+ return *this;
+ }
+ IntegerIterator operator++(int) {
+ auto ret = *this;
+ ++mCurrent;
+ return ret;
+ }
+ IntegerIterator operator--(int) {
+ auto ret = *this;
+ --mCurrent;
+ return ret;
+ }
+
+ /* Comparison operators */
+
+ template <typename IntType1, typename IntType2>
+ friend bool operator==(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+ template <typename IntType1, typename IntType2>
+ friend bool operator!=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+ template <typename IntType1, typename IntType2>
+ friend bool operator<(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+ template <typename IntType1, typename IntType2>
+ friend bool operator<=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+ template <typename IntType1, typename IntType2>
+ friend bool operator>(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+ template <typename IntType1, typename IntType2>
+ friend bool operator>=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2);
+
+ private:
+ IntTypeT mCurrent;
+};
+
+template <typename IntType1, typename IntType2>
+bool operator==(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent == aIter2.mCurrent;
+}
+
+template <typename IntType1, typename IntType2>
+bool operator!=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent != aIter2.mCurrent;
+}
+
+template <typename IntType1, typename IntType2>
+bool operator<(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent < aIter2.mCurrent;
+}
+
+template <typename IntType1, typename IntType2>
+bool operator<=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent <= aIter2.mCurrent;
+}
+
+template <typename IntType1, typename IntType2>
+bool operator>(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent > aIter2.mCurrent;
+}
+
+template <typename IntType1, typename IntType2>
+bool operator>=(const IntegerIterator<IntType1>& aIter1,
+ const IntegerIterator<IntType2>& aIter2) {
+ return aIter1.mCurrent >= aIter2.mCurrent;
+}
+
+template <typename IntTypeT>
+class IntegerRange {
+ public:
+ typedef IntegerIterator<IntTypeT> iterator;
+ typedef IntegerIterator<IntTypeT> const_iterator;
+ typedef ReverseIterator<IntegerIterator<IntTypeT>> reverse_iterator;
+ typedef ReverseIterator<IntegerIterator<IntTypeT>> const_reverse_iterator;
+
+ template <typename IntType>
+ explicit IntegerRange(IntType aEnd) : mBegin(0), mEnd(aEnd) {}
+
+ template <typename IntType1, typename IntType2>
+ IntegerRange(IntType1 aBegin, IntType2 aEnd) : mBegin(aBegin), mEnd(aEnd) {}
+
+ iterator begin() const { return iterator(mBegin); }
+ const_iterator cbegin() const { return begin(); }
+ iterator end() const { return iterator(mEnd); }
+ const_iterator cend() const { return end(); }
+ reverse_iterator rbegin() const { return reverse_iterator(iterator(mEnd)); }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+ reverse_iterator rend() const { return reverse_iterator(iterator(mBegin)); }
+ const_reverse_iterator crend() const { return rend(); }
+
+ private:
+ IntTypeT mBegin;
+ IntTypeT mEnd;
+};
+
+template <typename T, bool = std::is_unsigned_v<T>>
+struct GeqZero {
+ static bool isNonNegative(T t) { return t >= 0; }
+};
+
+template <typename T>
+struct GeqZero<T, true> {
+ static bool isNonNegative(T t) { return true; }
+};
+
+} // namespace detail
+
+template <typename IntType>
+detail::IntegerRange<IntType> IntegerRange(IntType aEnd) {
+ static_assert(std::is_integral_v<IntType>, "value must be integral");
+ MOZ_ASSERT(detail::GeqZero<IntType>::isNonNegative(aEnd),
+ "Should never have negative value here");
+ return detail::IntegerRange<IntType>(aEnd);
+}
+
+template <typename IntType1, typename IntType2>
+detail::IntegerRange<IntType2> IntegerRange(IntType1 aBegin, IntType2 aEnd) {
+ static_assert(std::is_integral_v<IntType1> && std::is_integral_v<IntType2>,
+ "values must both be integral");
+ static_assert(std::is_signed_v<IntType1> == std::is_signed_v<IntType2>,
+ "signed/unsigned mismatch");
+  MOZ_ASSERT(aEnd >= aBegin, "End value must not be smaller than begin value");
+ return detail::IntegerRange<IntType2>(aBegin, aEnd);
+}
+
+} // namespace mozilla
+
+#endif // mozilla_IntegerRange_h
diff --git a/mfbt/IntegerTypeTraits.h b/mfbt/IntegerTypeTraits.h
new file mode 100644
index 0000000000..33b51b9901
--- /dev/null
+++ b/mfbt/IntegerTypeTraits.h
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_IntegerTypeTraits_h
+#define mozilla_IntegerTypeTraits_h
+
+#include <stddef.h>
+#include <stdint.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+/**
+ * StdintTypeForSizeAndSignedness returns the stdint integer type
+ * of given size (can be 1, 2, 4 or 8) and given signedness
+ * (false means unsigned, true means signed).
+ */
+template <size_t Size, bool Signedness>
+struct StdintTypeForSizeAndSignedness;
+
+template <>
+struct StdintTypeForSizeAndSignedness<1, true> {
+ typedef int8_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<1, false> {
+ typedef uint8_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<2, true> {
+ typedef int16_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<2, false> {
+ typedef uint16_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<4, true> {
+ typedef int32_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<4, false> {
+ typedef uint32_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<8, true> {
+ typedef int64_t Type;
+};
+
+template <>
+struct StdintTypeForSizeAndSignedness<8, false> {
+ typedef uint64_t Type;
+};
+
+} // namespace detail
+
+template <size_t Size>
+struct UnsignedStdintTypeForSize
+ : detail::StdintTypeForSizeAndSignedness<Size, false> {};
+
+template <size_t Size>
+struct SignedStdintTypeForSize
+ : detail::StdintTypeForSizeAndSignedness<Size, true> {};
+
+template <typename IntegerType>
+struct PositionOfSignBit {
+ static_assert(std::is_integral_v<IntegerType>,
+ "PositionOfSignBit is only for integral types");
+ // 8 here should be CHAR_BIT from limits.h, but the world has moved on.
+ static const size_t value = 8 * sizeof(IntegerType) - 1;
+};
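+
+// For example, PositionOfSignBit<int32_t>::value is 31.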
+
+} // namespace mozilla
+
+#endif // mozilla_IntegerTypeTraits_h
diff --git a/mfbt/JSONWriter.cpp b/mfbt/JSONWriter.cpp
new file mode 100644
index 0000000000..144291ae6a
--- /dev/null
+++ b/mfbt/JSONWriter.cpp
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/JSONWriter.h"
+
+namespace mozilla {
+namespace detail {
+
+// The chars with non-'___' entries in this table are those that can be
+// represented with a two-char escape sequence. The value is the second char in
+// the sequence, the one that follows the initial backslash.
+#define ___ 0
+const char gTwoCharEscapes[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ /* 0+ */ ___, ___, ___, ___, ___, ___, ___, ___, 'b', 't',
+ /* 10+ */ 'n', ___, 'f', 'r', ___, ___, ___, ___, ___, ___,
+ /* 20+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 30+ */ ___, ___, ___, ___, '"', ___, ___, ___, ___, ___,
+ /* 40+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 50+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 60+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 70+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 80+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 90+ */ ___, ___, '\\', ___, ___, ___, ___, ___, ___, ___,
+ /* 100+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 110+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 120+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 130+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 140+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 150+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 160+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 170+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 180+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 190+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 200+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 210+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 220+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 230+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 240+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___,
+ /* 250+ */ ___, ___, ___, ___, ___, ___};
+#undef ___
+
+} // namespace detail
+} // namespace mozilla
diff --git a/mfbt/JSONWriter.h b/mfbt/JSONWriter.h
new file mode 100644
index 0000000000..f779ee9837
--- /dev/null
+++ b/mfbt/JSONWriter.h
@@ -0,0 +1,545 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A JSON pretty-printer class. */
+
+// A typical JSON-writing library requires you to first build up a data
+// structure that represents a JSON object and then serialize it (to file, or
+// somewhere else). This approach makes for a clean API, but building the data
+// structure takes up memory. Sometimes that isn't desirable, such as when the
+// JSON data is produced for memory reporting.
+//
+// The JSONWriter class instead allows JSON data to be written out
+// incrementally without building up large data structures.
+//
+// The API is slightly uglier than you would see in a typical JSON-writing
+// library, but still fairly easy to use. It's possible to generate invalid
+// JSON with JSONWriter, but typically the most basic testing will identify any
+// such problems.
+//
+// Similarly, there are no RAII facilities for automatically closing objects
+// and arrays. These would be nice if you are generating all your code within
+// nested functions, but in other cases you'd have to maintain an explicit
+// stack of RAII objects and manually unwind it, which is no better than just
+// calling "end" functions. Furthermore, the consequences of forgetting to
+// close an object or array are obvious and, again, will be identified via
+// basic testing, unlike other cases where RAII is typically used (e.g. smart
+// pointers) and the consequences of defects are more subtle.
+//
+// Importantly, the class does solve the two hard problems of JSON
+// pretty-printing, which are (a) correctly escaping strings, and (b) adding
+// appropriate indentation and commas between items.
+//
+// By default, every property is placed on its own line. However, it is
+// possible to request that objects and arrays be placed entirely on a single
+// line, which can reduce output size significantly in some cases.
+//
+// Strings used (for property names and string property values) are |const
+// char*| throughout, and can be ASCII or UTF-8.
+//
+// EXAMPLE
+// -------
+// Assume that |MyWriteFunc| is a class that implements |JSONWriteFunc|. The
+// following code:
+//
+// JSONWriter w(MakeUnique<MyWriteFunc>());
+// w.Start();
+// {
+// w.NullProperty("null");
+// w.BoolProperty("bool", true);
+// w.IntProperty("int", 1);
+// w.StartArrayProperty("array");
+// {
+// w.StringElement("string");
+// w.StartObjectElement();
+// {
+// w.DoubleProperty("double", 3.4);
+// w.StartArrayProperty("single-line array", w.SingleLineStyle);
+// {
+// w.IntElement(1);
+// w.StartObjectElement(); // SingleLineStyle is inherited from
+// w.EndObjectElement(); // above for this collection
+// }
+// w.EndArray();
+// }
+// w.EndObjectElement();
+// }
+// w.EndArrayProperty();
+// }
+// w.End();
+//
+// will produce pretty-printed output for the following JSON object:
+//
+// {
+// "null": null,
+// "bool": true,
+// "int": 1,
+// "array": [
+// "string",
+// {
+// "double": 3.4,
+// "single-line array": [1, {}]
+// }
+// ]
+// }
+//
+// The nesting in the example code is obviously optional, but can aid
+// readability.
+
+#ifndef mozilla_JSONWriter_h
+#define mozilla_JSONWriter_h
+
+#include "double-conversion/double-conversion.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Span.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Vector.h"
+
+#include <utility>
+
+namespace mozilla {
+
+// A quasi-functor for JSONWriter. We don't use a true functor because that
+// requires templatizing JSONWriter, and the templatization would seep into
+// lots of places we don't want it to.
+class JSONWriteFunc {
+ public:
+ virtual void Write(const Span<const char>& aStr) = 0;
+ virtual ~JSONWriteFunc() = default;
+};
+
+// Ideally this would be within |EscapedString| but when compiling with GCC
+// on Linux that caused link errors, whereas this formulation didn't.
+namespace detail {
+extern MFBT_DATA const char gTwoCharEscapes[256];
+} // namespace detail
+
+class JSONWriter {
+ // From http://www.ietf.org/rfc/rfc4627.txt:
+ //
+ // "All Unicode characters may be placed within the quotation marks except
+ // for the characters that must be escaped: quotation mark, reverse
+ // solidus, and the control characters (U+0000 through U+001F)."
+ //
+ // This implementation uses two-char escape sequences where possible, namely:
+ //
+ // \", \\, \b, \f, \n, \r, \t
+ //
+ // All control characters not in the above list are represented with a
+ // six-char escape sequence, e.g. '\u000b' (a.k.a. '\v').
+ //
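+  // For example (illustrative): the two input characters  "x  are written
+  // out as the three characters  \"x , and a raw U+000B input byte becomes
+  // the six output characters  \u000b .
+  //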
+ class EscapedString {
+ // `mStringSpan` initially points at the user-provided string. If that
+ // string needs escaping, `mStringSpan` will point at `mOwnedStr` below.
+ Span<const char> mStringSpan;
+ // String storage in case escaping is actually needed, null otherwise.
+ UniquePtr<char[]> mOwnedStr;
+
+ void CheckInvariants() const {
+ // Either there was no escaping so `mOwnedStr` is null, or escaping was
+ // needed, in which case `mStringSpan` should point at `mOwnedStr`.
+ MOZ_ASSERT(!mOwnedStr || mStringSpan.data() == mOwnedStr.get());
+ }
+
+ static char hexDigitToAsciiChar(uint8_t u) {
+ u = u & 0xf;
+ return u < 10 ? '0' + u : 'a' + (u - 10);
+ }
+
+ public:
+ explicit EscapedString(const Span<const char>& aStr) : mStringSpan(aStr) {
+ // First, see if we need to modify the string.
+ size_t nExtra = 0;
+ for (const char& c : aStr) {
+ // ensure it can't be interpreted as negative
+ uint8_t u = static_cast<uint8_t>(c);
+ if (u == 0) {
+ // Null terminator within the span, assume we may have been given a
+ // span to a buffer that contains a null-terminated string in it.
+ // We need to truncate the Span so that it doesn't include this null
+        // terminator and anything past it; either we will return it as-is, or
+ // processing should stop there.
+ mStringSpan = mStringSpan.First(&c - mStringSpan.data());
+ break;
+ }
+ if (detail::gTwoCharEscapes[u]) {
+ nExtra += 1;
+ } else if (u <= 0x1f) {
+ nExtra += 5;
+ }
+ }
+
+ // Note: Don't use `aStr` anymore, as it could contain a null terminator;
+ // use the correctly-sized `mStringSpan` instead.
+
+ if (nExtra == 0) {
+ // No escapes needed. mStringSpan already points at the original string.
+ CheckInvariants();
+ return;
+ }
+
+ // Escapes are needed. We'll create a new string.
+ mOwnedStr = MakeUnique<char[]>(mStringSpan.Length() + nExtra);
+
+ size_t i = 0;
+ for (const char c : mStringSpan) {
+ // ensure it can't be interpreted as negative
+ uint8_t u = static_cast<uint8_t>(c);
+ MOZ_ASSERT(u != 0, "Null terminator should have been handled above");
+ if (detail::gTwoCharEscapes[u]) {
+ mOwnedStr[i++] = '\\';
+ mOwnedStr[i++] = detail::gTwoCharEscapes[u];
+ } else if (u <= 0x1f) {
+ mOwnedStr[i++] = '\\';
+ mOwnedStr[i++] = 'u';
+ mOwnedStr[i++] = '0';
+ mOwnedStr[i++] = '0';
+ mOwnedStr[i++] = hexDigitToAsciiChar((u & 0x00f0) >> 4);
+ mOwnedStr[i++] = hexDigitToAsciiChar(u & 0x000f);
+ } else {
+ mOwnedStr[i++] = u;
+ }
+ }
+ MOZ_ASSERT(i == mStringSpan.Length() + nExtra);
+ mStringSpan = Span<const char>(mOwnedStr.get(), i);
+ CheckInvariants();
+ }
+
+ explicit EscapedString(const char* aStr) = delete;
+
+ const Span<const char>& SpanRef() const { return mStringSpan; }
+ };
+
+ public:
+ // Collections (objects and arrays) are printed in a multi-line style by
+ // default. This can be changed to a single-line style if SingleLineStyle is
+ // specified. If a collection is printed in single-line style, every nested
+ // collection within it is also printed in single-line style, even if
+ // multi-line style is requested.
+  // If SingleLineStyle is set in the constructor, all JSON whitespace is
+ // eliminated, including spaces after colons and commas, for the most compact
+ // encoding possible.
+ enum CollectionStyle {
+ MultiLineStyle, // the default
+ SingleLineStyle
+ };
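+
+  // For example (illustrative): a writer constructed as
+  //
+  //   JSONWriter w(MakeUnique<MyWriteFunc>(), JSONWriter::SingleLineStyle);
+  //
+  // would print the object from the example at the top of this file as
+  // {"null":null,"bool":true,...} with no whitespace at all.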
+
+ protected:
+ static constexpr Span<const char> scArrayBeginString = MakeStringSpan("[");
+ static constexpr Span<const char> scArrayEndString = MakeStringSpan("]");
+ static constexpr Span<const char> scCommaString = MakeStringSpan(",");
+ static constexpr Span<const char> scEmptyString = MakeStringSpan("");
+ static constexpr Span<const char> scFalseString = MakeStringSpan("false");
+ static constexpr Span<const char> scNewLineString = MakeStringSpan("\n");
+ static constexpr Span<const char> scNullString = MakeStringSpan("null");
+ static constexpr Span<const char> scObjectBeginString = MakeStringSpan("{");
+ static constexpr Span<const char> scObjectEndString = MakeStringSpan("}");
+ static constexpr Span<const char> scPropertyBeginString =
+ MakeStringSpan("\"");
+ static constexpr Span<const char> scPropertyEndString = MakeStringSpan("\":");
+ static constexpr Span<const char> scQuoteString = MakeStringSpan("\"");
+ static constexpr Span<const char> scSpaceString = MakeStringSpan(" ");
+ static constexpr Span<const char> scTopObjectBeginString =
+ MakeStringSpan("{");
+ static constexpr Span<const char> scTopObjectEndString = MakeStringSpan("}");
+ static constexpr Span<const char> scTrueString = MakeStringSpan("true");
+
+ JSONWriteFunc& mWriter;
+ const UniquePtr<JSONWriteFunc> mMaybeOwnedWriter;
+ Vector<bool, 8> mNeedComma; // do we need a comma at depth N?
+ Vector<bool, 8> mNeedNewlines; // do we need newlines at depth N?
+ size_t mDepth; // the current nesting depth
+
+ void Indent() {
+ for (size_t i = 0; i < mDepth; i++) {
+ mWriter.Write(scSpaceString);
+ }
+ }
+
+ // Adds whatever is necessary (maybe a comma, and then a newline and
+ // whitespace) to separate an item (property or element) from what's come
+ // before.
+ void Separator() {
+ if (mNeedComma[mDepth]) {
+ mWriter.Write(scCommaString);
+ }
+ if (mDepth > 0 && mNeedNewlines[mDepth]) {
+ mWriter.Write(scNewLineString);
+ Indent();
+ } else if (mNeedComma[mDepth] && mNeedNewlines[0]) {
+ mWriter.Write(scSpaceString);
+ }
+ }
+
+ void PropertyNameAndColon(const Span<const char>& aName) {
+ mWriter.Write(scPropertyBeginString);
+ mWriter.Write(EscapedString(aName).SpanRef());
+ mWriter.Write(scPropertyEndString);
+ if (mNeedNewlines[0]) {
+ mWriter.Write(scSpaceString);
+ }
+ }
+
+ void Scalar(const Span<const char>& aMaybePropertyName,
+ const Span<const char>& aStringValue) {
+ Separator();
+ if (!aMaybePropertyName.empty()) {
+ PropertyNameAndColon(aMaybePropertyName);
+ }
+ mWriter.Write(aStringValue);
+ mNeedComma[mDepth] = true;
+ }
+
+ void QuotedScalar(const Span<const char>& aMaybePropertyName,
+ const Span<const char>& aStringValue) {
+ Separator();
+ if (!aMaybePropertyName.empty()) {
+ PropertyNameAndColon(aMaybePropertyName);
+ }
+ mWriter.Write(scQuoteString);
+ mWriter.Write(aStringValue);
+ mWriter.Write(scQuoteString);
+ mNeedComma[mDepth] = true;
+ }
+
+ void NewVectorEntries(bool aNeedNewLines) {
+ // If these tiny allocations OOM we might as well just crash because we
+ // must be in serious memory trouble.
+ MOZ_RELEASE_ASSERT(mNeedComma.resizeUninitialized(mDepth + 1));
+ MOZ_RELEASE_ASSERT(mNeedNewlines.resizeUninitialized(mDepth + 1));
+ mNeedComma[mDepth] = false;
+ mNeedNewlines[mDepth] = aNeedNewLines;
+ }
+
+ void StartCollection(const Span<const char>& aMaybePropertyName,
+ const Span<const char>& aStartChar,
+ CollectionStyle aStyle = MultiLineStyle) {
+ Separator();
+ if (!aMaybePropertyName.empty()) {
+ PropertyNameAndColon(aMaybePropertyName);
+ }
+ mWriter.Write(aStartChar);
+ mNeedComma[mDepth] = true;
+ mDepth++;
+ NewVectorEntries(mNeedNewlines[mDepth - 1] && aStyle == MultiLineStyle);
+ }
+
+ // Adds the whitespace and closing char necessary to end a collection.
+ void EndCollection(const Span<const char>& aEndChar) {
+ MOZ_ASSERT(mDepth > 0);
+ if (mNeedNewlines[mDepth]) {
+ mWriter.Write(scNewLineString);
+ mDepth--;
+ Indent();
+ } else {
+ mDepth--;
+ }
+ mWriter.Write(aEndChar);
+ }
+
+ public:
+ explicit JSONWriter(JSONWriteFunc& aWriter,
+ CollectionStyle aStyle = MultiLineStyle)
+ : mWriter(aWriter), mNeedComma(), mNeedNewlines(), mDepth(0) {
+ NewVectorEntries(aStyle == MultiLineStyle);
+ }
+
+ explicit JSONWriter(UniquePtr<JSONWriteFunc> aWriter,
+ CollectionStyle aStyle = MultiLineStyle)
+ : mWriter(*aWriter),
+ mMaybeOwnedWriter(std::move(aWriter)),
+ mNeedComma(),
+ mNeedNewlines(),
+ mDepth(0) {
+ MOZ_RELEASE_ASSERT(
+ mMaybeOwnedWriter,
+ "JSONWriter must be given a non-null UniquePtr<JSONWriteFunc>");
+ NewVectorEntries(aStyle == MultiLineStyle);
+ }
+
+ // Returns the JSONWriteFunc passed in at creation, for temporary use. The
+ // JSONWriter object still owns the JSONWriteFunc.
+ JSONWriteFunc& WriteFunc() const { return mWriter; }
+
+ // For all the following functions, the "Prints:" comment indicates what the
+ // basic output looks like. However, it doesn't indicate the whitespace and
+ // trailing commas, which are automatically added as required.
+ //
+ // All property names and string properties are escaped as necessary.
+
+ // Prints: {
+ void Start(CollectionStyle aStyle = MultiLineStyle) {
+ StartCollection(scEmptyString, scTopObjectBeginString, aStyle);
+ }
+
+ // Prints: } and final newline.
+ void End() {
+ EndCollection(scTopObjectEndString);
+ if (mNeedNewlines[mDepth]) {
+ mWriter.Write(scNewLineString);
+ }
+ }
+
+ // Prints: "<aName>": null
+ void NullProperty(const Span<const char>& aName) {
+ Scalar(aName, scNullString);
+ }
+
+ template <size_t N>
+ void NullProperty(const char (&aName)[N]) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ NullProperty(Span<const char>(aName, N));
+ }
+
+ // Prints: null
+ void NullElement() { NullProperty(scEmptyString); }
+
+ // Prints: "<aName>": <aBool>
+ void BoolProperty(const Span<const char>& aName, bool aBool) {
+ Scalar(aName, aBool ? scTrueString : scFalseString);
+ }
+
+ template <size_t N>
+ void BoolProperty(const char (&aName)[N], bool aBool) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ BoolProperty(Span<const char>(aName, N), aBool);
+ }
+
+ // Prints: <aBool>
+ void BoolElement(bool aBool) { BoolProperty(scEmptyString, aBool); }
+
+ // Prints: "<aName>": <aInt>
+ void IntProperty(const Span<const char>& aName, int64_t aInt) {
+ char buf[64];
+ int len = SprintfLiteral(buf, "%" PRId64, aInt);
+ MOZ_RELEASE_ASSERT(len > 0);
+ Scalar(aName, Span<const char>(buf, size_t(len)));
+ }
+
+ template <size_t N>
+ void IntProperty(const char (&aName)[N], int64_t aInt) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ IntProperty(Span<const char>(aName, N), aInt);
+ }
+
+ // Prints: <aInt>
+ void IntElement(int64_t aInt) { IntProperty(scEmptyString, aInt); }
+
+ // Prints: "<aName>": <aDouble>
+ void DoubleProperty(const Span<const char>& aName, double aDouble) {
+ static const size_t buflen = 64;
+ char buf[buflen];
+ const double_conversion::DoubleToStringConverter& converter =
+ double_conversion::DoubleToStringConverter::EcmaScriptConverter();
+ double_conversion::StringBuilder builder(buf, buflen);
+ converter.ToShortest(aDouble, &builder);
+ // TODO: The builder should know the length?!
+ Scalar(aName, MakeStringSpan(builder.Finalize()));
+ }
+
+ template <size_t N>
+ void DoubleProperty(const char (&aName)[N], double aDouble) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ DoubleProperty(Span<const char>(aName, N), aDouble);
+ }
+
+ // Prints: <aDouble>
+ void DoubleElement(double aDouble) { DoubleProperty(scEmptyString, aDouble); }
+
+ // Prints: "<aName>": "<aStr>"
+ void StringProperty(const Span<const char>& aName,
+ const Span<const char>& aStr) {
+ QuotedScalar(aName, EscapedString(aStr).SpanRef());
+ }
+
+ template <size_t NN>
+ void StringProperty(const char (&aName)[NN], const Span<const char>& aStr) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StringProperty(Span<const char>(aName, NN), aStr);
+ }
+
+ template <size_t SN>
+ void StringProperty(const Span<const char>& aName, const char (&aStr)[SN]) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StringProperty(aName, Span<const char>(aStr, SN));
+ }
+
+ template <size_t NN, size_t SN>
+ void StringProperty(const char (&aName)[NN], const char (&aStr)[SN]) {
+ // Keep null terminators from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StringProperty(Span<const char>(aName, NN), Span<const char>(aStr, SN));
+ }
+
+ // Prints: "<aStr>"
+ void StringElement(const Span<const char>& aStr) {
+ StringProperty(scEmptyString, aStr);
+ }
+
+ template <size_t N>
+ void StringElement(const char (&aName)[N]) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StringElement(Span<const char>(aName, N));
+ }
+
+ // Prints: "<aName>": [
+ void StartArrayProperty(const Span<const char>& aName,
+ CollectionStyle aStyle = MultiLineStyle) {
+ StartCollection(aName, scArrayBeginString, aStyle);
+ }
+
+ template <size_t N>
+ void StartArrayProperty(const char (&aName)[N],
+ CollectionStyle aStyle = MultiLineStyle) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StartArrayProperty(Span<const char>(aName, N), aStyle);
+ }
+
+ // Prints: [
+ void StartArrayElement(CollectionStyle aStyle = MultiLineStyle) {
+ StartArrayProperty(scEmptyString, aStyle);
+ }
+
+ // Prints: ]
+ void EndArray() { EndCollection(scArrayEndString); }
+
+ // Prints: "<aName>": {
+ void StartObjectProperty(const Span<const char>& aName,
+ CollectionStyle aStyle = MultiLineStyle) {
+ StartCollection(aName, scObjectBeginString, aStyle);
+ }
+
+ template <size_t N>
+ void StartObjectProperty(const char (&aName)[N],
+ CollectionStyle aStyle = MultiLineStyle) {
+ // Keep null terminator from literal strings, will be removed by
+ // EscapedString. This way C buffer arrays can be used as well.
+ StartObjectProperty(Span<const char>(aName, N), aStyle);
+ }
+
+ // Prints: {
+ void StartObjectElement(CollectionStyle aStyle = MultiLineStyle) {
+ StartObjectProperty(scEmptyString, aStyle);
+ }
+
+ // Prints: }
+ void EndObject() { EndCollection(scObjectEndString); }
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_JSONWriter_h */
diff --git a/mfbt/JsRust.h b/mfbt/JsRust.h
new file mode 100644
index 0000000000..ff622e33d4
--- /dev/null
+++ b/mfbt/JsRust.h
@@ -0,0 +1,21 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Checking for jsrust crate availability for linking.
+ * For testing, define MOZ_PRETEND_NO_JSRUST to pretend
+ * that we don't have jsrust.
+ */
+
+#ifndef mozilla_JsRust_h
+#define mozilla_JsRust_h
+
+#if (defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)) && \
+ !defined(MOZ_PRETEND_NO_JSRUST)
+# define MOZ_HAS_JSRUST() 1
+#else
+# define MOZ_HAS_JSRUST() 0
+#endif
+
+#endif // mozilla_JsRust_h
diff --git a/mfbt/Latin1.h b/mfbt/Latin1.h
new file mode 100644
index 0000000000..a57d771b64
--- /dev/null
+++ b/mfbt/Latin1.h
@@ -0,0 +1,262 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Latin-1 operations (i.e. a byte is the corresponding code point).
+ * (Note: this is *not* the same as the encoding of windows-1252 or
+ * latin1 content on the web. In Web terms, this encoding
+ * corresponds to "isomorphic decode" / "isomorphic encoding" from
+ * the Infra Standard.)
+ */
+
+#ifndef mozilla_Latin1_h
+#define mozilla_Latin1_h
+
+#include <type_traits>
+
+#include "mozilla/JsRust.h"
+#include "mozilla/Span.h"
+
+#if MOZ_HAS_JSRUST()
+# include "encoding_rs_mem.h"
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+// It's important for optimizations that Latin1ness checks
+// and inflation/deflation functions use the same short
+// string limit. The limit is 16, because that's the shortest
+// that inflates/deflates using SIMD.
+constexpr size_t kShortStringLimitForInlinePaths = 16;
+
+template <typename Char>
+class MakeUnsignedChar {
+ public:
+ using Type = std::make_unsigned_t<Char>;
+};
+
+template <>
+class MakeUnsignedChar<char16_t> {
+ public:
+ using Type = char16_t;
+};
+
+template <>
+class MakeUnsignedChar<char32_t> {
+ public:
+ using Type = char32_t;
+};
+
+} // namespace detail
+
+/**
+ * Returns true iff |aChar| is Latin-1 but not ASCII, i.e. in the range
+ * [0x80, 0xFF].
+ */
+template <typename Char>
+constexpr bool IsNonAsciiLatin1(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return uc >= 0x80 && uc <= 0xFF;
+}
+
+#if MOZ_HAS_JSRUST()
+
+/**
+ * Returns |true| iff |aString| contains only Latin1 characters, that is,
+ * characters in the range [U+0000, U+00FF].
+ *
+ * @param aString a potentially-invalid UTF-16 string to scan
+ */
+inline bool IsUtf16Latin1(mozilla::Span<const char16_t> aString) {
+ size_t length = aString.Length();
+ const char16_t* ptr = aString.Elements();
+ // For short strings, calling into Rust is a pessimization, and the SIMD
+ // code won't have a chance to kick in anyway.
+ // 16 is a bit larger than logically necessary for this function alone,
+ // but it's important that the limit here matches the limit used in
+ // LossyConvertUtf16toLatin1!
+ if (length < mozilla::detail::kShortStringLimitForInlinePaths) {
+ char16_t accu = 0;
+ for (size_t i = 0; i < length; i++) {
+ accu |= ptr[i];
+ }
+ return accu < 0x100;
+ }
+ return encoding_mem_is_utf16_latin1(ptr, length);
+}
+
+/**
+ * Returns |true| iff |aString| is valid UTF-8 containing only Latin-1
+ * characters.
+ *
+ * If you know that the argument is always absolutely guaranteed to be valid
+ * UTF-8, use the faster UnsafeIsValidUtf8Latin1() instead.
+ *
+ * @param aString potentially-invalid UTF-8 string to scan
+ */
+inline bool IsUtf8Latin1(mozilla::Span<const char> aString) {
+ return encoding_mem_is_utf8_latin1(aString.Elements(), aString.Length());
+}
+
+/**
+ * Returns |true| iff |aString|, which MUST be valid UTF-8, contains only
+ * Latin1 characters, that is, characters in the range [U+0000, U+00FF].
+ * (If |aString| might not be valid UTF-8, use |IsUtf8Latin1| instead.)
+ *
+ * @param aString known-valid UTF-8 string to scan
+ */
+inline bool UnsafeIsValidUtf8Latin1(mozilla::Span<const char> aString) {
+ return encoding_mem_is_str_latin1(aString.Elements(), aString.Length());
+}
+
+/**
+ * Returns the index of the first byte that starts an invalid byte
+ * sequence or a non-Latin1 byte sequence in a potentially-invalid UTF-8
+ * string, or the length of the string if there are neither.
+ *
+ * If you know that the argument is always absolutely guaranteed to be valid
+ * UTF-8, use the faster UnsafeValidUtf8Lati1UpTo() instead.
+ *
+ * @param aString potentially-invalid UTF-8 string to scan
+ */
+inline size_t Utf8Latin1UpTo(mozilla::Span<const char> aString) {
+ return encoding_mem_utf8_latin1_up_to(aString.Elements(), aString.Length());
+}
+
+/**
+ * Returns the index of the first byte that starts a non-Latin1 byte
+ * sequence in a known-valid UTF-8 string, or the length of the
+ * string if there are none. (If the string might not be valid
+ * UTF-8, use Utf8Latin1UpTo() instead.)
+ *
+ * @param aString known-valid UTF-8 string to scan
+ */
+inline size_t UnsafeValidUtf8Lati1UpTo(mozilla::Span<const char> aString) {
+ return encoding_mem_str_latin1_up_to(aString.Elements(), aString.Length());
+}
+
+/**
+ * If all the code points in the input are below U+0100, converts to Latin1,
+ * i.e. unsigned byte value is Unicode scalar value. If there are code points
+ * above U+00FF, produces unspecified garbage in a memory-safe way. The
+ * nature of the garbage must not be relied upon.
+ *
+ * The length of aDest must not be less than the length of aSource.
+ */
+inline void LossyConvertUtf16toLatin1(mozilla::Span<const char16_t> aSource,
+ mozilla::Span<char> aDest) {
+ const char16_t* srcPtr = aSource.Elements();
+ size_t srcLen = aSource.Length();
+ char* dstPtr = aDest.Elements();
+ size_t dstLen = aDest.Length();
+ // Avoid function call overhead when SIMD isn't used anyway
+ // If you change the length limit here, be sure to change
+ // IsUtf16Latin1 and IsAscii to match so that optimizations don't
+ // fail!
+ if (srcLen < mozilla::detail::kShortStringLimitForInlinePaths) {
+ MOZ_ASSERT(dstLen >= srcLen);
+ uint8_t* unsignedPtr = reinterpret_cast<uint8_t*>(dstPtr);
+ const char16_t* end = srcPtr + srcLen;
+ while (srcPtr < end) {
+ *unsignedPtr = static_cast<uint8_t>(*srcPtr);
+ ++srcPtr;
+ ++unsignedPtr;
+ }
+ return;
+ }
+ encoding_mem_convert_utf16_to_latin1_lossy(srcPtr, srcLen, dstPtr, dstLen);
+}
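+
+// For example (an illustrative sketch): deflate UTF-16 to Latin-1 only after
+// confirming that no code unit is above U+00FF:
+//
+//   if (IsUtf16Latin1(utf16)) {
+//     // latin1 is a caller-provided buffer at least utf16.Length() long.
+//     LossyConvertUtf16toLatin1(utf16, latin1);
+//   }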
+
+/**
+ * If all the code points in the input are below U+0100, converts to Latin1,
+ * i.e. unsigned byte value is Unicode scalar value. If there are code points
+ * above U+00FF, produces unspecified garbage in a memory-safe way. The
+ * nature of the garbage must not be relied upon.
+ *
+ * Returns the number of code units written.
+ *
+ * The length of aDest must not be less than the length of aSource.
+ */
+inline size_t LossyConvertUtf8toLatin1(mozilla::Span<const char> aSource,
+ mozilla::Span<char> aDest) {
+ return encoding_mem_convert_utf8_to_latin1_lossy(
+ aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length());
+}
+
+/**
+ * Converts each byte of |aSource|, interpreted as a Unicode scalar value
+ * having that unsigned value, to its UTF-8 representation in |aDest|.
+ *
+ * Returns the number of code units written.
+ *
+ * The length of aDest must be at least twice the length of aSource.
+ */
+inline size_t ConvertLatin1toUtf8(mozilla::Span<const char> aSource,
+ mozilla::Span<char> aDest) {
+ return encoding_mem_convert_latin1_to_utf8(
+ aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length());
+}
+
+/**
+ * Converts bytes whose unsigned value is interpreted as Unicode code point
+ * (i.e. U+0000 to U+00FF, inclusive) to UTF-8 with potentially insufficient
+ * output space.
+ *
+ * Returns the number of bytes read and the number of bytes written.
+ *
+ * If the output isn't large enough, not all input is consumed.
+ *
+ * The conversion is guaranteed to be complete if the length of aDest is
+ * at least the length of aSource times two.
+ *
+ * The output is always valid UTF-8 ending on scalar value boundary
+ * even in the case of partial conversion.
+ *
+ * The semantics of this function match the semantics of
+ * TextEncoder.encodeInto.
+ * https://encoding.spec.whatwg.org/#dom-textencoder-encodeinto
+ */
+inline std::tuple<size_t, size_t> ConvertLatin1toUtf8Partial(
+ mozilla::Span<const char> aSource, mozilla::Span<char> aDest) {
+ size_t srcLen = aSource.Length();
+ size_t dstLen = aDest.Length();
+ encoding_mem_convert_latin1_to_utf8_partial(aSource.Elements(), &srcLen,
+ aDest.Elements(), &dstLen);
+ return std::make_tuple(srcLen, dstLen);
+}
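+
+// For example (an illustrative sketch; Process is a hypothetical consumer):
+//
+//   mozilla::Span<const char> src = ...;
+//   char buf[64];
+//   while (!src.IsEmpty()) {
+//     auto [read, written] =
+//         ConvertLatin1toUtf8Partial(src, mozilla::Span(buf));
+//     Process(buf, written);
+//     src = src.From(read);  // advance past the consumed input
+//   }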
+
+/**
+ * Converts Latin-1 code points (i.e. each byte is the identical code
+ * point) from |aSource| to UTF-16 code points in |aDest|.
+ *
+ * The length of aDest must not be less than the length of aSource.
+ */
+inline void ConvertLatin1toUtf16(mozilla::Span<const char> aSource,
+ mozilla::Span<char16_t> aDest) {
+ const char* srcPtr = aSource.Elements();
+ size_t srcLen = aSource.Length();
+ char16_t* dstPtr = aDest.Elements();
+ size_t dstLen = aDest.Length();
+ // Avoid function call overhead when SIMD isn't used anyway
+ if (srcLen < mozilla::detail::kShortStringLimitForInlinePaths) {
+ MOZ_ASSERT(dstLen >= srcLen);
+ const uint8_t* unsignedPtr = reinterpret_cast<const uint8_t*>(srcPtr);
+ const uint8_t* end = unsignedPtr + srcLen;
+ while (unsignedPtr < end) {
+ *dstPtr = *unsignedPtr;
+ ++unsignedPtr;
+ ++dstPtr;
+ }
+ return;
+ }
+ encoding_mem_convert_latin1_to_utf16(srcPtr, srcLen, dstPtr, dstLen);
+}
+
+#endif
+
+}  // namespace mozilla
+
+#endif // mozilla_Latin1_h
diff --git a/mfbt/Likely.h b/mfbt/Likely.h
new file mode 100644
index 0000000000..5b65e97241
--- /dev/null
+++ b/mfbt/Likely.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * MOZ_LIKELY and MOZ_UNLIKELY macros to hint to the compiler how a
+ * boolean predicate should be branch-predicted.
+ */
+
+#ifndef mozilla_Likely_h
+#define mozilla_Likely_h
+
+#if defined(__clang__) || defined(__GNUC__)
+# define MOZ_LIKELY(x) (__builtin_expect(!!(x), 1))
+# define MOZ_UNLIKELY(x) (__builtin_expect(!!(x), 0))
+#else
+# define MOZ_LIKELY(x) (!!(x))
+# define MOZ_UNLIKELY(x) (!!(x))
+#endif
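+
+// For example (illustrative):
+//
+//   if (MOZ_UNLIKELY(!aPtr)) {
+//     return false;  // hint that this error path is rarely taken
+//   }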
+
+#endif /* mozilla_Likely_h */
diff --git a/mfbt/LinkedList.h b/mfbt/LinkedList.h
new file mode 100644
index 0000000000..850b8594c7
--- /dev/null
+++ b/mfbt/LinkedList.h
@@ -0,0 +1,748 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A type-safe doubly-linked list class. */
+
+/*
+ * The classes LinkedList<T> and LinkedListElement<T> together form a
+ * convenient, type-safe doubly-linked list implementation.
+ *
+ * The class T which will be inserted into the linked list must inherit from
+ * LinkedListElement<T>. A given object may be in only one linked list at a
+ * time.
+ *
+ * A LinkedListElement automatically removes itself from the list upon
+ * destruction, and a LinkedList will fatally assert in debug builds if it's
+ * non-empty when it's destructed.
+ *
+ * For example, you might use LinkedList in a simple observer list class as
+ * follows.
+ *
+ * class Observer : public LinkedListElement<Observer>
+ * {
+ * public:
+ * void observe(char* aTopic) { ... }
+ * };
+ *
+ * class ObserverContainer
+ * {
+ * private:
+ * LinkedList<Observer> list;
+ *
+ * public:
+ * void addObserver(Observer* aObserver)
+ * {
+ * // Will assert if |aObserver| is part of another list.
+ * list.insertBack(aObserver);
+ * }
+ *
+ * void removeObserver(Observer* aObserver)
+ * {
+ * // Will assert if |aObserver| is not part of some list.
+ *     aObserver->remove();
+ *     // Or, will assert if |aObserver| is not part of |list| specifically.
+ *     // aObserver->removeFrom(list);
+ * }
+ *
+ * void notifyObservers(char* aTopic)
+ * {
+ * for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext()) {
+ * o->observe(aTopic);
+ * }
+ * }
+ * };
+ *
+ * Additionally, the class AutoCleanLinkedList<T> is a LinkedList<T> that will
+ * remove and delete each element still within itself upon destruction. Note
+ * that because each element is deleted, elements must have been allocated
+ * using |new|.
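+ *
+ * For example (an illustrative sketch, reusing the Observer class above):
+ *
+ *   AutoCleanLinkedList<Observer> owned;
+ *   owned.insertBack(new Observer());
+ *   // Any elements still in |owned| are deleted when it goes out of scope.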
+ */
+
+#ifndef mozilla_LinkedList_h
+#define mozilla_LinkedList_h
+
+#include <algorithm>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/RefPtr.h"
+
+#ifdef __cplusplus
+
+namespace mozilla {
+
+template <typename T>
+class LinkedListElement;
+
+namespace detail {
+
+/**
+ * LinkedList supports refcounted elements using this adapter class. Clients
+ * using LinkedList<RefPtr<T>> will get a data structure that holds a strong
+ * reference to T as long as T is in the list.
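+ *
+ * For example (an illustrative sketch; Node is a hypothetical refcounted
+ * class):
+ *
+ *   class Node : public LinkedListElement<RefPtr<Node>> { ... };
+ *
+ *   LinkedList<RefPtr<Node>> list;
+ *   RefPtr<Node> node = new Node();
+ *   list.insertBack(node);  // the list now also holds a strong reference
+ *   list.popFirst();        // the list's reference is released here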
+ */
+template <typename T>
+struct LinkedListElementTraits {
+ typedef T* RawType;
+ typedef const T* ConstRawType;
+ typedef T* ClientType;
+ typedef const T* ConstClientType;
+
+  // These static methods are called when an element is added to or removed
+  // from a linked list. They can be used to keep track of ownership in lists
+  // that are supposed to own their elements. If elements are transferred from
+  // one list to another, no enter or exit calls happen, since the elements
+  // still belong to a list.
+ static void enterList(LinkedListElement<T>* elt) {}
+ static void exitList(LinkedListElement<T>* elt) {}
+
+ // This method is called when AutoCleanLinkedList cleans itself
+ // during destruction. It can be used to call delete on elements if
+ // the list is the sole owner.
+ static void cleanElement(LinkedListElement<T>* elt) { delete elt->asT(); }
+};
+
+template <typename T>
+struct LinkedListElementTraits<RefPtr<T>> {
+ typedef T* RawType;
+ typedef const T* ConstRawType;
+ typedef RefPtr<T> ClientType;
+ typedef RefPtr<const T> ConstClientType;
+
+ static void enterList(LinkedListElement<RefPtr<T>>* elt) {
+ elt->asT()->AddRef();
+ }
+ static void exitList(LinkedListElement<RefPtr<T>>* elt) {
+ elt->asT()->Release();
+ }
+ static void cleanElement(LinkedListElement<RefPtr<T>>* elt) {}
+};
+
+} /* namespace detail */
+
+template <typename T>
+class LinkedList;
+
+template <typename T>
+class LinkedListElement {
+ typedef typename detail::LinkedListElementTraits<T> Traits;
+ typedef typename Traits::RawType RawType;
+ typedef typename Traits::ConstRawType ConstRawType;
+ typedef typename Traits::ClientType ClientType;
+ typedef typename Traits::ConstClientType ConstClientType;
+
+ /*
+ * It's convenient that we return nullptr when getNext() or getPrevious()
+ * hits the end of the list, but doing so costs an extra word of storage in
+ * each linked list node (to keep track of whether |this| is the sentinel
+ * node) and a branch on this value in getNext/getPrevious.
+ *
+ * We could get rid of the extra word of storage by shoving the "is
+ * sentinel" bit into one of the pointers, although this would, of course,
+ * have performance implications of its own.
+ *
+ * But the goal here isn't to win an award for the fastest or slimmest
+ * linked list; rather, we want a *convenient* linked list. So we won't
+ * waste time guessing which micro-optimization strategy is best.
+ *
+ *
+ * Speaking of unnecessary work, it's worth addressing here why we wrote
+   * mozilla::LinkedList in the first place, instead of using std::list.
+ *
+   * The key difference between mozilla::LinkedList and std::list is that
+ * mozilla::LinkedList stores the mPrev/mNext pointers in the object itself,
+   * while std::list stores the mPrev/mNext pointers in a list element which
+ * itself points to the object being stored.
+ *
+ * mozilla::LinkedList's approach makes it harder to store an object in more
+ * than one list. But the upside is that you can call next() / prev() /
+   * remove() directly on the object. With std::list, you'd need to store a
+ * pointer to its iterator in the object in order to accomplish this. Not
+ * only would this waste space, but you'd have to remember to update that
+ * pointer every time you added or removed the object from a list.
+ *
+ * In-place, constant-time removal is a killer feature of doubly-linked
+ * lists, and supporting this painlessly was a key design criterion.
+ */
+
+ private:
+ LinkedListElement* mNext;
+ LinkedListElement* mPrev;
+ const bool mIsSentinel;
+
+ public:
+ LinkedListElement() : mNext(this), mPrev(this), mIsSentinel(false) {}
+
+ /*
+ * Moves |aOther| into |*this|. If |aOther| is already in a list, then
+ * |aOther| is removed from the list and replaced by |*this|.
+ */
+ LinkedListElement(LinkedListElement<T>&& aOther)
+ : mIsSentinel(aOther.mIsSentinel) {
+ adjustLinkForMove(std::move(aOther));
+ }
+
+ LinkedListElement& operator=(LinkedListElement<T>&& aOther) {
+    MOZ_ASSERT(mIsSentinel == aOther.mIsSentinel, "Mismatched NodeKind!");
+ MOZ_ASSERT(!isInList(),
+ "Assigning to an element in a list messes up that list!");
+ adjustLinkForMove(std::move(aOther));
+ return *this;
+ }
+
+ ~LinkedListElement() {
+ if (!mIsSentinel && isInList()) {
+ remove();
+ }
+ }
+
+ /*
+ * Get the next element in the list, or nullptr if this is the last element
+ * in the list.
+ */
+ RawType getNext() { return mNext->asT(); }
+ ConstRawType getNext() const { return mNext->asT(); }
+
+ /*
+ * Get the previous element in the list, or nullptr if this is the first
+ * element in the list.
+ */
+ RawType getPrevious() { return mPrev->asT(); }
+ ConstRawType getPrevious() const { return mPrev->asT(); }
+
+ /*
+ * Insert aElem after this element in the list. |this| must be part of a
+ * linked list when you call setNext(); otherwise, this method will assert.
+ */
+ void setNext(RawType aElem) {
+ MOZ_ASSERT(isInList());
+ setNextUnsafe(aElem);
+ }
+
+ /*
+ * Insert aElem before this element in the list. |this| must be part of a
+ * linked list when you call setPrevious(); otherwise, this method will
+ * assert.
+ */
+ void setPrevious(RawType aElem) {
+ MOZ_ASSERT(isInList());
+ setPreviousUnsafe(aElem);
+ }
+
+ /*
+ * Remove this element from the list which contains it. If this element is
+ * not currently part of a linked list, this method asserts.
+ */
+ void remove() {
+ MOZ_ASSERT(isInList());
+
+ mPrev->mNext = mNext;
+ mNext->mPrev = mPrev;
+ mNext = this;
+ mPrev = this;
+
+ Traits::exitList(this);
+ }
+
+ /*
+ * Remove this element from the list containing it. Returns a pointer to the
+ * element that follows this element (before it was removed). This method
+ * asserts if the element does not belong to a list. Note: In a refcounted
+ * list, |this| may be destroyed.
+ */
+ RawType removeAndGetNext() {
+ RawType r = getNext();
+ remove();
+ return r;
+ }
+
+ /*
+ * Remove this element from the list containing it. Returns a pointer to the
+ * previous element in the containing list (before the removal). This method
+ * asserts if the element does not belong to a list. Note: In a refcounted
+ * list, |this| may be destroyed.
+ */
+ RawType removeAndGetPrevious() {
+ RawType r = getPrevious();
+ remove();
+ return r;
+ }
+
+ /*
+ * Identical to remove(), but also asserts in debug builds that this element
+ * is in aList.
+ */
+ void removeFrom(const LinkedList<T>& aList) {
+ aList.assertContains(asT());
+ remove();
+ }
+
+ /*
+   * Return true if |this| is part of a linked list, and false otherwise.
+ */
+ bool isInList() const {
+ MOZ_ASSERT((mNext == this) == (mPrev == this));
+ return mNext != this;
+ }
+
+ private:
+ friend class LinkedList<T>;
+ friend struct detail::LinkedListElementTraits<T>;
+
+ enum class NodeKind { Normal, Sentinel };
+
+ explicit LinkedListElement(NodeKind nodeKind)
+ : mNext(this), mPrev(this), mIsSentinel(nodeKind == NodeKind::Sentinel) {}
+
+ /*
+ * Return |this| cast to T* if we're a normal node, or return nullptr if
+ * we're a sentinel node.
+ */
+ RawType asT() { return mIsSentinel ? nullptr : static_cast<RawType>(this); }
+ ConstRawType asT() const {
+ return mIsSentinel ? nullptr : static_cast<ConstRawType>(this);
+ }
+
+ /*
+ * Insert aElem after this element, but don't check that this element is in
+ * the list. This is called by LinkedList::insertFront().
+ */
+ void setNextUnsafe(RawType aElem) {
+ LinkedListElement* listElem = static_cast<LinkedListElement*>(aElem);
+ MOZ_RELEASE_ASSERT(!listElem->isInList());
+
+ listElem->mNext = this->mNext;
+ listElem->mPrev = this;
+ this->mNext->mPrev = listElem;
+ this->mNext = listElem;
+
+ Traits::enterList(aElem);
+ }
+
+ /*
+ * Insert aElem before this element, but don't check that this element is in
+ * the list. This is called by LinkedList::insertBack().
+ */
+ void setPreviousUnsafe(RawType aElem) {
+ LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(aElem);
+ MOZ_RELEASE_ASSERT(!listElem->isInList());
+
+ listElem->mNext = this;
+ listElem->mPrev = this->mPrev;
+ this->mPrev->mNext = listElem;
+ this->mPrev = listElem;
+
+ Traits::enterList(aElem);
+ }
+
+ /*
+ * Transfers the elements [aBegin, aEnd) before the "this" list element.
+ */
+ void transferBeforeUnsafe(LinkedListElement<T>& aBegin,
+ LinkedListElement<T>& aEnd) {
+ MOZ_RELEASE_ASSERT(!aBegin.mIsSentinel);
+ if (!aBegin.isInList() || !aEnd.isInList()) {
+ return;
+ }
+
+ auto otherPrev = aBegin.mPrev;
+
+ aBegin.mPrev = this->mPrev;
+ this->mPrev->mNext = &aBegin;
+ this->mPrev = aEnd.mPrev;
+ aEnd.mPrev->mNext = this;
+
+ // Patch the gap in the source list
+ otherPrev->mNext = &aEnd;
+ aEnd.mPrev = otherPrev;
+ }
+
+ /*
+ * Adjust mNext and mPrev for implementing move constructor and move
+ * assignment.
+ */
+ void adjustLinkForMove(LinkedListElement<T>&& aOther) {
+ if (!aOther.isInList()) {
+ mNext = this;
+ mPrev = this;
+ return;
+ }
+
+ if (!mIsSentinel) {
+ Traits::enterList(this);
+ }
+
+ MOZ_ASSERT(aOther.mNext->mPrev == &aOther);
+ MOZ_ASSERT(aOther.mPrev->mNext == &aOther);
+
+ /*
+ * Initialize |this| with |aOther|'s mPrev/mNext pointers, and adjust those
+     * elements to point to this one.
+ */
+ mNext = aOther.mNext;
+ mPrev = aOther.mPrev;
+
+ mNext->mPrev = this;
+ mPrev->mNext = this;
+
+ /*
+ * Adjust |aOther| so it doesn't think it's in a list. This makes it
+     * safely destructible.
+ */
+ aOther.mNext = &aOther;
+ aOther.mPrev = &aOther;
+
+ if (!mIsSentinel) {
+ Traits::exitList(&aOther);
+ }
+ }
+
+ LinkedListElement& operator=(const LinkedListElement<T>& aOther) = delete;
+ LinkedListElement(const LinkedListElement<T>& aOther) = delete;
+};
+
+template <typename T>
+class LinkedList {
+ private:
+ typedef typename detail::LinkedListElementTraits<T> Traits;
+ typedef typename Traits::RawType RawType;
+ typedef typename Traits::ConstRawType ConstRawType;
+ typedef typename Traits::ClientType ClientType;
+ typedef typename Traits::ConstClientType ConstClientType;
+ typedef LinkedListElement<T>* ElementType;
+ typedef const LinkedListElement<T>* ConstElementType;
+
+ LinkedListElement<T> sentinel;
+
+ public:
+ template <typename Type, typename Element>
+ class Iterator {
+ Type mCurrent;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = T;
+ using difference_type = std::ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+
+ explicit Iterator(Type aCurrent) : mCurrent(aCurrent) {}
+
+ Type operator*() const { return mCurrent; }
+
+ const Iterator& operator++() {
+ mCurrent = static_cast<Element>(mCurrent)->getNext();
+ return *this;
+ }
+
+ bool operator!=(const Iterator& aOther) const {
+ return mCurrent != aOther.mCurrent;
+ }
+ };
+
+ LinkedList() : sentinel(LinkedListElement<T>::NodeKind::Sentinel) {}
+
+ LinkedList(LinkedList<T>&& aOther) : sentinel(std::move(aOther.sentinel)) {}
+
+ LinkedList& operator=(LinkedList<T>&& aOther) {
+ MOZ_ASSERT(isEmpty(),
+ "Assigning to a non-empty list leaks elements in that list!");
+ sentinel = std::move(aOther.sentinel);
+ return *this;
+ }
+
+ ~LinkedList() {
+# ifdef DEBUG
+ if (!isEmpty()) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "%s has a buggy user: "
+ "it should have removed all this list's elements before "
+ "the list's destruction",
+ __PRETTY_FUNCTION__);
+ }
+# endif
+ }
+
+ /*
+ * Add aElem to the front of the list.
+ */
+ void insertFront(RawType aElem) {
+ /* Bypass setNext()'s this->isInList() assertion. */
+ sentinel.setNextUnsafe(aElem);
+ }
+
+ /*
+ * Add aElem to the back of the list.
+ */
+ void insertBack(RawType aElem) { sentinel.setPreviousUnsafe(aElem); }
+
+ /*
+ * Move all elements from another list to the back
+ */
+ void extendBack(LinkedList<T>&& aOther) {
+ MOZ_RELEASE_ASSERT(this != &aOther);
+ if (aOther.isEmpty()) {
+ return;
+ }
+ sentinel.transferBeforeUnsafe(**aOther.begin(), aOther.sentinel);
+ }
+
+  /*
+   * Move |aSourceLen| elements from |aListFrom|, starting at index
+   * |aSourceStart|, and insert them at position |aDestinationPos| of this
+   * list. If |aSourceStart| is past the end of |aListFrom|, nothing is
+   * moved; source ranges and destination positions that run past the end
+   * of their list are clamped to that end.
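+   *
+   * For example (illustrative): if this list is [a, b] and |aListFrom| is
+   * [x, y, z], then splice(1, aListFrom, 0, 2) leaves this list as
+   * [a, x, y, b] and |aListFrom| as [z].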
+ */
+ void splice(size_t aDestinationPos, LinkedList<T>& aListFrom,
+ size_t aSourceStart, size_t aSourceLen) {
+ MOZ_RELEASE_ASSERT(this != &aListFrom);
+ if (aListFrom.isEmpty() || !aSourceLen) {
+ return;
+ }
+
+ const auto safeForward = [](LinkedList<T>& aList,
+ LinkedListElement<T>& aBegin,
+ size_t aPos) -> LinkedListElement<T>& {
+ auto* iter = &aBegin;
+ for (size_t i = 0; i < aPos; ++i, (iter = iter->mNext)) {
+ if (iter->mIsSentinel) {
+ break;
+ }
+ }
+ return *iter;
+ };
+
+ auto& sourceBegin =
+ safeForward(aListFrom, *aListFrom.sentinel.mNext, aSourceStart);
+ if (sourceBegin.mIsSentinel) {
+ return;
+ }
+ auto& sourceEnd = safeForward(aListFrom, sourceBegin, aSourceLen);
+ auto& destination = safeForward(*this, *sentinel.mNext, aDestinationPos);
+
+ destination.transferBeforeUnsafe(sourceBegin, sourceEnd);
+ }
+
+ /*
+ * Get the first element of the list, or nullptr if the list is empty.
+ */
+ RawType getFirst() { return sentinel.getNext(); }
+ ConstRawType getFirst() const { return sentinel.getNext(); }
+
+ /*
+ * Get the last element of the list, or nullptr if the list is empty.
+ */
+ RawType getLast() { return sentinel.getPrevious(); }
+ ConstRawType getLast() const { return sentinel.getPrevious(); }
+
+ /*
+ * Get and remove the first element of the list. If the list is empty,
+ * return nullptr.
+ */
+ ClientType popFirst() {
+ ClientType ret = sentinel.getNext();
+ if (ret) {
+ static_cast<LinkedListElement<T>*>(RawType(ret))->remove();
+ }
+ return ret;
+ }
+
+ /*
+ * Get and remove the last element of the list. If the list is empty,
+ * return nullptr.
+ */
+ ClientType popLast() {
+ ClientType ret = sentinel.getPrevious();
+ if (ret) {
+ static_cast<LinkedListElement<T>*>(RawType(ret))->remove();
+ }
+ return ret;
+ }
+
+ /*
+ * Return true if the list is empty, or false otherwise.
+ */
+ bool isEmpty() const { return !sentinel.isInList(); }
+
+ /**
+ * Returns whether the given element is in the list.
+ */
+ bool contains(ConstRawType aElm) const {
+ return std::find(begin(), end(), aElm) != end();
+ }
+
+ /*
+ * Remove all the elements from the list.
+ *
+ * This runs in time linear to the list's length, because we have to mark
+ * each element as not in the list.
+ */
+ void clear() {
+ while (popFirst()) {
+ }
+ }
+
+ /**
+   * Return the number of elements in the list.
+ */
+ size_t length() const { return std::distance(begin(), end()); }
+
+ /*
+ * Allow range-based iteration:
+ *
+ * for (MyElementType* elt : myList) { ... }
+ */
+ Iterator<RawType, ElementType> begin() {
+ return Iterator<RawType, ElementType>(getFirst());
+ }
+ Iterator<ConstRawType, ConstElementType> begin() const {
+ return Iterator<ConstRawType, ConstElementType>(getFirst());
+ }
+ Iterator<RawType, ElementType> end() {
+ return Iterator<RawType, ElementType>(nullptr);
+ }
+ Iterator<ConstRawType, ConstElementType> end() const {
+ return Iterator<ConstRawType, ConstElementType>(nullptr);
+ }
+
+ /*
+ * Measures the memory consumption of the list excluding |this|. Note that
+ * it only measures the list elements themselves. If the list elements
+ * contain pointers to other memory blocks, those blocks must be measured
+ * separately during a subsequent iteration over the list.
+ */
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ ConstRawType t = getFirst();
+ while (t) {
+ n += aMallocSizeOf(t);
+ t = static_cast<const LinkedListElement<T>*>(t)->getNext();
+ }
+ return n;
+ }
+
+ /*
+ * Like sizeOfExcludingThis(), but measures |this| as well.
+ */
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ /*
+ * In a debug build, make sure that the list is sane (no cycles, consistent
+ * mNext/mPrev pointers, only one sentinel). Has no effect in release builds.
+ */
+ void debugAssertIsSane() const {
+# ifdef DEBUG
+ const LinkedListElement<T>* slow;
+ const LinkedListElement<T>* fast1;
+ const LinkedListElement<T>* fast2;
+
+ /*
+ * Check for cycles in the forward singly-linked list using the
+ * tortoise/hare algorithm.
+ */
+ for (slow = sentinel.mNext, fast1 = sentinel.mNext->mNext,
+ fast2 = sentinel.mNext->mNext->mNext;
+ slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
+ slow = slow->mNext, fast1 = fast2->mNext, fast2 = fast1->mNext) {
+ MOZ_ASSERT(slow != fast1);
+ MOZ_ASSERT(slow != fast2);
+ }
+
+ /* Check for cycles in the backward singly-linked list. */
+ for (slow = sentinel.mPrev, fast1 = sentinel.mPrev->mPrev,
+ fast2 = sentinel.mPrev->mPrev->mPrev;
+ slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
+ slow = slow->mPrev, fast1 = fast2->mPrev, fast2 = fast1->mPrev) {
+ MOZ_ASSERT(slow != fast1);
+ MOZ_ASSERT(slow != fast2);
+ }
+
+ /*
+ * Check that |sentinel| is the only node in the list with
+ * mIsSentinel == true.
+ */
+ for (const LinkedListElement<T>* elem = sentinel.mNext; elem != &sentinel;
+ elem = elem->mNext) {
+ MOZ_ASSERT(!elem->mIsSentinel);
+ }
+
+ /* Check that the mNext/mPrev pointers match up. */
+ const LinkedListElement<T>* prev = &sentinel;
+ const LinkedListElement<T>* cur = sentinel.mNext;
+ do {
+ MOZ_ASSERT(cur->mPrev == prev);
+ MOZ_ASSERT(prev->mNext == cur);
+
+ prev = cur;
+ cur = cur->mNext;
+ } while (cur != &sentinel);
+# endif /* ifdef DEBUG */
+ }
+
+ private:
+ friend class LinkedListElement<T>;
+
+ void assertContains(const RawType aValue) const {
+# ifdef DEBUG
+ for (ConstRawType elem = getFirst(); elem; elem = elem->getNext()) {
+ if (elem == aValue) {
+ return;
+ }
+ }
+ MOZ_CRASH("element wasn't found in this list!");
+# endif
+ }
+
+ LinkedList& operator=(const LinkedList<T>& aOther) = delete;
+ LinkedList(const LinkedList<T>& aOther) = delete;
+};
+
+template <typename T>
+inline void ImplCycleCollectionUnlink(LinkedList<RefPtr<T>>& aField) {
+ aField.clear();
+}
+
+template <typename T>
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback,
+ LinkedList<RefPtr<T>>& aField, const char* aName, uint32_t aFlags = 0) {
+ typedef typename detail::LinkedListElementTraits<T> Traits;
+ typedef typename Traits::RawType RawType;
+ for (RawType element : aField) {
+ // RefPtr is stored as a raw pointer in LinkedList.
+ // So instead of creating a new RefPtr from the raw
+ // pointer (which is not allowed), we simply call
+ // CycleCollectionNoteChild against the raw pointer
+ CycleCollectionNoteChild(aCallback, element, aName, aFlags);
+ }
+}
+
+template <typename T>
+class AutoCleanLinkedList : public LinkedList<T> {
+ private:
+ using Traits = detail::LinkedListElementTraits<T>;
+ using ClientType = typename detail::LinkedListElementTraits<T>::ClientType;
+
+ public:
+ AutoCleanLinkedList() = default;
+ AutoCleanLinkedList(AutoCleanLinkedList&&) = default;
+ ~AutoCleanLinkedList() { clear(); }
+
+ AutoCleanLinkedList& operator=(AutoCleanLinkedList&& aOther) = default;
+
+ void clear() {
+ while (ClientType element = this->popFirst()) {
+ Traits::cleanElement(element);
+ }
+ }
+};
+
+} /* namespace mozilla */
+
+#endif /* __cplusplus */
+
+#endif /* mozilla_LinkedList_h */
diff --git a/mfbt/Literals.h b/mfbt/Literals.h
new file mode 100644
index 0000000000..d1d403afae
--- /dev/null
+++ b/mfbt/Literals.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Helpers for units on integer literals. */
+
+#ifndef mozilla_Literals_h
+#define mozilla_Literals_h
+
+#include <cstddef>
+
+// User-defined literals to make constants more legible. Use them by
+// appending them to literals such as:
+//
+// size_t page_size = 4_KiB;
+//
+constexpr size_t operator"" _KiB(unsigned long long int aNum) {
+ return size_t(aNum) * 1024;
+}
+
+constexpr size_t operator"" _KiB(long double aNum) {
+ return size_t(aNum * 1024);
+}
+
+constexpr size_t operator"" _MiB(unsigned long long int aNum) {
+ return size_t(aNum) * 1024_KiB;
+}
+
+constexpr size_t operator"" _MiB(long double aNum) {
+ return size_t(aNum * 1024_KiB);
+}
+
+constexpr double operator""_percent(long double aPercent) {
+ return double(aPercent) / 100;
+}
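+
+// A few more examples of what these literals evaluate to (illustrative):
+//
+//   size_t bufSize = 2_MiB;          // 2 * 1024 * 1024 == 2097152
+//   size_t chunk = 1.5_KiB;          // size_t(1.5 * 1024) == 1536
+//   double fraction = 12.5_percent;  // 12.5 / 100 == 0.125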
+
+#endif /* ! mozilla_Literals_h */
diff --git a/mfbt/MacroArgs.h b/mfbt/MacroArgs.h
new file mode 100644
index 0000000000..9afaaef945
--- /dev/null
+++ b/mfbt/MacroArgs.h
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements various macros meant to ease the use of variadic macros.
+ */
+
+#ifndef mozilla_MacroArgs_h
+#define mozilla_MacroArgs_h
+
+// Concatenates pre-processor tokens in a way that can be used with __LINE__.
+#define MOZ_CONCAT2(x, y) x##y
+#define MOZ_CONCAT(x, y) MOZ_CONCAT2(x, y)
+
+/*
+ * MOZ_ARG_COUNT(...) counts the number of variadic arguments.
+ * You must pass in between 0 and 50 (inclusive) variadic arguments.
+ * For example:
+ *
+ * MOZ_ARG_COUNT() expands to 0
+ * MOZ_ARG_COUNT(a) expands to 1
+ * MOZ_ARG_COUNT(a, b) expands to 2
+ *
+ * Implementation notes:
+ * The `##__VA_ARGS__` form is a GCC extension that removes the comma if
+ * __VA_ARGS__ is empty. It is supported by Clang too. MSVC ignores ##,
+ * and its default behavior is already to strip the comma when __VA_ARGS__
+ * is empty.
+ *
+ * So MOZ_MACROARGS_ARG_COUNT_HELPER() expands to
+ * (_, 50, 49, ...)
+ * MOZ_MACROARGS_ARG_COUNT_HELPER(a) expands to
+ * (_, a, 50, 49, ...)
+ * etc.
+ */
+#define MOZ_ARG_COUNT(...) \
+ MOZ_MACROARGS_ARG_COUNT_HELPER2(MOZ_MACROARGS_ARG_COUNT_HELPER(__VA_ARGS__))
+
+#define MOZ_MACROARGS_ARG_COUNT_HELPER(...) \
+ (_, ##__VA_ARGS__, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, \
+ 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, \
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define MOZ_MACROARGS_ARG_COUNT_HELPER2(aArgs) \
+ MOZ_MACROARGS_ARG_COUNT_HELPER3 aArgs
+
+#define MOZ_MACROARGS_ARG_COUNT_HELPER3( \
+ a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, \
+ a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, \
+ a32, a33, a34, a35, a36, a37, a38, a39, a40, a41, a42, a43, a44, a45, a46, \
+ a47, a48, a49, a50, a51, ...) \
+ a51
+
+/*
+ * MOZ_PASTE_PREFIX_AND_ARG_COUNT(aPrefix, ...) counts the number of variadic
+ * arguments and prefixes it with |aPrefix|. For example:
+ *
+ * MOZ_PASTE_PREFIX_AND_ARG_COUNT(, foo, 42) expands to 2
+ * MOZ_PASTE_PREFIX_AND_ARG_COUNT(A, foo, 42, bar) expands to A3
+ * MOZ_PASTE_PREFIX_AND_ARG_COUNT(A) expands to A0
+ * MOZ_PASTE_PREFIX_AND_ARG_COUNT() expands to 0, but MSVC warns there
+ * aren't enough arguments given.
+ *
+ * You must pass in between 0 and 50 (inclusive) variadic arguments, past
+ * |aPrefix|.
+ */
+#define MOZ_PASTE_PREFIX_AND_ARG_COUNT_GLUE(a, b) a b
+#define MOZ_PASTE_PREFIX_AND_ARG_COUNT(aPrefix, ...) \
+ MOZ_PASTE_PREFIX_AND_ARG_COUNT_GLUE(MOZ_CONCAT, \
+ (aPrefix, MOZ_ARG_COUNT(__VA_ARGS__)))
+
+/*
+ * MOZ_ARGS_AFTER_N expands to its arguments excluding the first |N|
+ * arguments. For example:
+ *
+ * MOZ_ARGS_AFTER_2(a, b, c, d) expands to: c, d
+ */
+#define MOZ_ARGS_AFTER_1(a1, ...) __VA_ARGS__
+#define MOZ_ARGS_AFTER_2(a1, a2, ...) __VA_ARGS__
+
+/*
+ * MOZ_ARG_N expands to its |N|th argument.
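+ * For example:
+ *
+ *   MOZ_ARG_2(a, b, c) expands to: b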
+ */
+#define MOZ_ARG_1(a1, ...) a1
+#define MOZ_ARG_2(a1, a2, ...) a2
+#define MOZ_ARG_3(a1, a2, a3, ...) a3
+#define MOZ_ARG_4(a1, a2, a3, a4, ...) a4
+#define MOZ_ARG_5(a1, a2, a3, a4, a5, ...) a5
+#define MOZ_ARG_6(a1, a2, a3, a4, a5, a6, ...) a6
+#define MOZ_ARG_7(a1, a2, a3, a4, a5, a6, a7, ...) a7
+#define MOZ_ARG_8(a1, a2, a3, a4, a5, a6, a7, a8, ...) a8
+#define MOZ_ARG_9(a1, a2, a3, a4, a5, a6, a7, a8, a9, ...) a9
+
+#endif /* mozilla_MacroArgs_h */
diff --git a/mfbt/MacroForEach.h b/mfbt/MacroForEach.h
new file mode 100644
index 0000000000..c3067e3620
--- /dev/null
+++ b/mfbt/MacroForEach.h
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements a higher-order macro for iteratively calling another macro with
+ * fixed leading arguments, plus a trailing element picked from a second list
+ * of arguments.
+ */
+
+#ifndef mozilla_MacroForEach_h
+#define mozilla_MacroForEach_h
+
+#include "mozilla/MacroArgs.h"
+
+/*
+ * MOZ_FOR_EACH(aMacro, aFixedArgs, aArgs) expands to N calls to the macro
+ * |aMacro| where N is equal the number of items in the list |aArgs|. The
+ * arguments for each |aMacro| call are composed of *all* arguments in the list
+ * |aFixedArgs| as well as a single argument in the list |aArgs|. For example:
+ *
+ * #define MACRO_A(x) x +
+ * int a = MOZ_FOR_EACH(MACRO_A, (), (1, 2, 3)) 0;
+ * // Expands to: MACRO_A(1) MACRO_A(2) MACRO_A(3) 0;
+ * // And further to: 1 + 2 + 3 + 0;
+ *
+ * #define MACRO_B(k, x) (k + x) +
+ * int b = MOZ_FOR_EACH(MACRO_B, (5,), (1, 2)) 0;
+ * // Expands to: MACRO_B(5, 1) MACRO_B(5, 2) 0;
+ *
+ * #define MACRO_C(k1, k2, x) (k1 + k2 + x) +
+ * int c = MOZ_FOR_EACH(MACRO_C, (5, 8,), (1, 2)) 0;
+ *   // Expands to: MACRO_C(5, 8, 1) MACRO_C(5, 8, 2) 0;
+ *
+ * MOZ_FOR_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) is identical
+ * to MOZ_FOR_EACH except that it inserts |aSeparator| between each call to
+ * the macro. |aSeparator| must be wrapped by parens. For example:
+ *
+ * #define MACRO_A(x) x
+ * int a = MOZ_FOR_EACH_SEPARATED(MACRO_A, (+), (), (1, 2, 3));
+ * // Expands to: MACRO_A(1) + MACRO_A(2) + MACRO_A(3);
+ * // And further to: 1 + 2 + 3
+ *
+ * #define MACRO_B(t, n) t n
+ * void test(MOZ_FOR_EACH_SEPARATED(MACRO_B, (,), (int,), (a, b)));
+ * // Expands to: void test(MACRO_B(int, a) , MACRO_B(int, b));
+ * // And further to: void test(int a , int b);
+ *
+ * If the |aFixedArgs| list is not empty, a trailing comma must be included.
+ *
+ * The |aArgs| list may be up to 50 items long.
+ */
+#define MOZ_FOR_EACH_EXPAND_HELPER(...) __VA_ARGS__
+#define MOZ_FOR_EACH_GLUE(a, b) a b
+#define MOZ_FOR_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) \
+ MOZ_FOR_EACH_GLUE(MOZ_PASTE_PREFIX_AND_ARG_COUNT( \
+ MOZ_FOR_EACH_, MOZ_FOR_EACH_EXPAND_HELPER aArgs), \
+ (aMacro, aSeparator, aFixedArgs, aArgs))
+#define MOZ_FOR_EACH(aMacro, aFixedArgs, aArgs) \
+ MOZ_FOR_EACH_SEPARATED(aMacro, (), aFixedArgs, aArgs)
+
+#define MOZ_FOR_EACH_HELPER_GLUE(a, b) a b
+#define MOZ_FOR_EACH_HELPER(aMacro, aFixedArgs, aArgs) \
+ MOZ_FOR_EACH_HELPER_GLUE( \
+ aMacro, (MOZ_FOR_EACH_EXPAND_HELPER aFixedArgs MOZ_ARG_1 aArgs))
+
+#define MOZ_FOR_EACH_0(m, s, fa, a)
+#define MOZ_FOR_EACH_1(m, s, fa, a) MOZ_FOR_EACH_HELPER(m, fa, a)
+#define MOZ_FOR_EACH_2(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_1(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_3(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_2(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_4(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_3(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_5(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_4(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_6(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_5(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_7(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_6(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_8(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_7(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_9(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_8(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_10(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_9(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_11(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_10(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_12(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_11(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_13(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_12(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_14(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_13(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_15(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_14(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_16(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_15(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_17(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_16(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_18(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_17(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_19(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_18(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_20(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_19(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_21(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_20(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_22(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_21(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_23(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_22(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_24(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_23(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_25(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_24(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_26(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_25(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_27(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_26(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_28(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_27(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_29(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_28(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_30(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_29(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_31(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_30(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_32(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_31(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_33(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_32(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_34(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_33(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_35(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_34(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_36(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_35(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_37(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_36(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_38(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_37(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_39(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_38(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_40(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_39(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_41(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_40(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_42(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_41(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_43(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_42(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_44(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_43(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_45(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_44(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_46(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_45(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_47(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_46(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_48(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_47(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_49(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_48(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_FOR_EACH_50(m, s, fa, a) \
+ MOZ_FOR_EACH_HELPER(m, fa, a) \
+ MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_49(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+
+#endif /* mozilla_MacroForEach_h */
diff --git a/mfbt/MathAlgorithms.h b/mfbt/MathAlgorithms.h
new file mode 100644
index 0000000000..66aa1b9f71
--- /dev/null
+++ b/mfbt/MathAlgorithms.h
@@ -0,0 +1,492 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* mfbt maths algorithms. */
+
+#ifndef mozilla_MathAlgorithms_h
+#define mozilla_MathAlgorithms_h
+
+#include "mozilla/Assertions.h"
+
+#include <cmath>
+#include <algorithm>
+#include <limits.h>
+#include <stdint.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename T>
+struct AllowDeprecatedAbsFixed : std::false_type {};
+
+template <>
+struct AllowDeprecatedAbsFixed<int32_t> : std::true_type {};
+template <>
+struct AllowDeprecatedAbsFixed<int64_t> : std::true_type {};
+
+template <typename T>
+struct AllowDeprecatedAbs : AllowDeprecatedAbsFixed<T> {};
+
+template <>
+struct AllowDeprecatedAbs<int> : std::true_type {};
+template <>
+struct AllowDeprecatedAbs<long> : std::true_type {};
+
+} // namespace detail
+
+// DO NOT USE DeprecatedAbs. It exists only until its callers can be converted
+// to Abs below, and it will be removed when all callers have been changed.
+template <typename T>
+inline std::enable_if_t<detail::AllowDeprecatedAbs<T>::value, T> DeprecatedAbs(
+ const T aValue) {
+ // The absolute value of the smallest possible value of a signed-integer type
+ // won't fit in that type (on twos-complement systems -- and we're blithely
+ // assuming we're on such systems, for the non-<stdint.h> types listed above),
+ // so assert that the input isn't that value.
+ //
+ // This is the case if: the value is non-negative; or if adding one (giving a
+ // value in the range [-maxvalue, 0]), then negating (giving a value in the
+ // range [0, maxvalue]), doesn't produce maxvalue (because in twos-complement,
+ // (minvalue + 1) == -maxvalue).
+ MOZ_ASSERT(aValue >= 0 ||
+ -(aValue + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1),
+ "You can't negate the smallest possible negative integer!");
+ return aValue >= 0 ? aValue : -aValue;
+}
+
+namespace detail {
+
+template <typename T, typename = void>
+struct AbsReturnType;
+
+template <typename T>
+struct AbsReturnType<
+ T, std::enable_if_t<std::is_integral_v<T> && std::is_signed_v<T>>> {
+ using Type = std::make_unsigned_t<T>;
+};
+
+template <typename T>
+struct AbsReturnType<T, std::enable_if_t<std::is_floating_point_v<T>>> {
+ using Type = T;
+};
+
+} // namespace detail
+
+template <typename T>
+inline constexpr typename detail::AbsReturnType<T>::Type Abs(const T aValue) {
+ using ReturnType = typename detail::AbsReturnType<T>::Type;
+ return aValue >= 0 ? ReturnType(aValue) : ~ReturnType(aValue) + 1;
+}
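+
+// For example (illustrative): Abs(int32_t(-5)) yields uint32_t(5), and
+// Abs(int32_t(INT32_MIN)) is well-defined, yielding uint32_t(2147483648),
+// because the result type is unsigned.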
+
+template <>
+inline float Abs<float>(const float aFloat) {
+ return std::fabs(aFloat);
+}
+
+template <>
+inline double Abs<double>(const double aDouble) {
+ return std::fabs(aDouble);
+}
+
+template <>
+inline long double Abs<long double>(const long double aLongDouble) {
+ return std::fabs(aLongDouble);
+}
+
+} // namespace mozilla
+
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || \
+ defined(_M_X64) || defined(_M_ARM64))
+# define MOZ_BITSCAN_WINDOWS
+
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward, _BitScanReverse)
+
+# if defined(_M_AMD64) || defined(_M_X64) || defined(_M_ARM64)
+# define MOZ_BITSCAN_WINDOWS64
+# pragma intrinsic(_BitScanForward64, _BitScanReverse64)
+# endif
+
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+#if defined(MOZ_BITSCAN_WINDOWS)
+
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) {
+ unsigned long index;
+ if (!_BitScanReverse(&index, static_cast<unsigned long>(aValue))) return 32;
+ return uint_fast8_t(31 - index);
+}
+
+inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) {
+ unsigned long index;
+ if (!_BitScanForward(&index, static_cast<unsigned long>(aValue))) return 32;
+ return uint_fast8_t(index);
+}
+
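+// Classic SWAR population count: successive steps sum adjacent 1-, 2-, and
+// 4-bit fields in parallel, and the final multiply-and-shift accumulates the
+// per-byte totals into the top byte.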
+inline uint_fast8_t CountPopulation32(uint32_t aValue) {
+ uint32_t x = aValue - ((aValue >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+}
+inline uint_fast8_t CountPopulation64(uint64_t aValue) {
+ return uint_fast8_t(CountPopulation32(aValue & 0xffffffff) +
+ CountPopulation32(aValue >> 32));
+}
+
+inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) {
+# if defined(MOZ_BITSCAN_WINDOWS64)
+ unsigned long index;
+ if (!_BitScanReverse64(&index, static_cast<unsigned __int64>(aValue)))
+ return 64;
+ return uint_fast8_t(63 - index);
+# else
+ uint32_t hi = uint32_t(aValue >> 32);
+ if (hi != 0) {
+ return CountLeadingZeroes32(hi);
+ }
+ return 32u + CountLeadingZeroes32(uint32_t(aValue));
+# endif
+}
+
+inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) {
+# if defined(MOZ_BITSCAN_WINDOWS64)
+ unsigned long index;
+ if (!_BitScanForward64(&index, static_cast<unsigned __int64>(aValue)))
+ return 64;
+ return uint_fast8_t(index);
+# else
+ uint32_t lo = uint32_t(aValue);
+ if (lo != 0) {
+ return CountTrailingZeroes32(lo);
+ }
+ return 32u + CountTrailingZeroes32(uint32_t(aValue >> 32));
+# endif
+}
+
+#elif defined(__clang__) || defined(__GNUC__)
+
+# if defined(__clang__)
+# if !__has_builtin(__builtin_ctz) || !__has_builtin(__builtin_clz)
+# error "A clang providing __builtin_c[lt]z is required to build"
+# endif
+# else
+// gcc has had __builtin_clz and friends since 3.4: no need to check.
+# endif
+
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_clz(aValue));
+}
+
+inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_ctz(aValue));
+}
+
+inline uint_fast8_t CountPopulation32(uint32_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_popcount(aValue));
+}
+
+inline uint_fast8_t CountPopulation64(uint64_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_popcountll(aValue));
+}
+
+inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_clzll(aValue));
+}
+
+inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) {
+ return static_cast<uint_fast8_t>(__builtin_ctzll(aValue));
+}
+
+#else
+# error "Implement these!"
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) = delete;
+inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) = delete;
+inline uint_fast8_t CountPopulation32(uint32_t aValue) = delete;
+inline uint_fast8_t CountPopulation64(uint64_t aValue) = delete;
+inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) = delete;
+inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) = delete;
+#endif
+
+} // namespace detail
+
+/**
+ * Compute the number of high-order zero bits in the NON-ZERO number |aValue|.
+ * That is, looking at the bitwise representation of the number, with the
+ * highest-valued bits at the start, return the number of zeroes before the
+ * first one is observed.
+ *
+ * CountLeadingZeroes32(0xF0FF1000) is 0;
+ * CountLeadingZeroes32(0x7F8F0001) is 1;
+ * CountLeadingZeroes32(0x3FFF0100) is 2;
+ * CountLeadingZeroes32(0x1FF50010) is 3; and so on.
+ */
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) {
+ MOZ_ASSERT(aValue != 0);
+ return detail::CountLeadingZeroes32(aValue);
+}
+
+/**
+ * Compute the number of low-order zero bits in the NON-ZERO number |aValue|.
+ * That is, looking at the bitwise representation of the number, with the
+ * lowest-valued bits at the start, return the number of zeroes before the
+ * first one is observed.
+ *
+ * CountTrailingZeroes32(0x0100FFFF) is 0;
+ * CountTrailingZeroes32(0x7000FFFE) is 1;
+ * CountTrailingZeroes32(0x0080FFFC) is 2;
+ * CountTrailingZeroes32(0x0080FFF8) is 3; and so on.
+ */
+inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) {
+ MOZ_ASSERT(aValue != 0);
+ return detail::CountTrailingZeroes32(aValue);
+}
+
+/**
+ * Compute the number of one bits in the number |aValue|.
+ */
+inline uint_fast8_t CountPopulation32(uint32_t aValue) {
+ return detail::CountPopulation32(aValue);
+}
+
+/** Analogous to CountPopulation32, but for 64-bit numbers */
+inline uint_fast8_t CountPopulation64(uint64_t aValue) {
+ return detail::CountPopulation64(aValue);
+}
+
+/** Analogous to CountLeadingZeroes32, but for 64-bit numbers. */
+inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) {
+ MOZ_ASSERT(aValue != 0);
+ return detail::CountLeadingZeroes64(aValue);
+}
+
+/** Analogous to CountTrailingZeroes32, but for 64-bit numbers. */
+inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) {
+ MOZ_ASSERT(aValue != 0);
+ return detail::CountTrailingZeroes64(aValue);
+}
+
+namespace detail {
+
+template <typename T, size_t Size = sizeof(T)>
+class CeilingLog2;
+
+template <typename T>
+class CeilingLog2<T, 4> {
+ public:
+ static uint_fast8_t compute(const T aValue) {
+ // Check for <= 1 to avoid the == 0 undefined case.
+ return aValue <= 1 ? 0u : 32u - CountLeadingZeroes32(aValue - 1);
+ }
+};
+
+template <typename T>
+class CeilingLog2<T, 8> {
+ public:
+ static uint_fast8_t compute(const T aValue) {
+ // Check for <= 1 to avoid the == 0 undefined case.
+ return aValue <= 1 ? 0u : 64u - CountLeadingZeroes64(aValue - 1);
+ }
+};
+
+} // namespace detail
+
+/**
+ * Compute the log of the least power of 2 greater than or equal to |aValue|.
+ *
+ * CeilingLog2(0..1) is 0;
+ * CeilingLog2(2) is 1;
+ * CeilingLog2(3..4) is 2;
+ * CeilingLog2(5..8) is 3;
+ * CeilingLog2(9..16) is 4; and so on.
+ */
+template <typename T>
+inline uint_fast8_t CeilingLog2(const T aValue) {
+ return detail::CeilingLog2<T>::compute(aValue);
+}
+
+/** A CeilingLog2 variant that accepts only size_t. */
+inline uint_fast8_t CeilingLog2Size(size_t aValue) {
+ return CeilingLog2(aValue);
+}
+
+namespace detail {
+
+template <typename T, size_t Size = sizeof(T)>
+class FloorLog2;
+
+template <typename T>
+class FloorLog2<T, 4> {
+ public:
+ static uint_fast8_t compute(const T aValue) {
+ return 31u - CountLeadingZeroes32(aValue | 1);
+ }
+};
+
+template <typename T>
+class FloorLog2<T, 8> {
+ public:
+ static uint_fast8_t compute(const T aValue) {
+ return 63u - CountLeadingZeroes64(aValue | 1);
+ }
+};
+
+} // namespace detail
+
+/**
+ * Compute the log of the greatest power of 2 less than or equal to |aValue|.
+ *
+ * FloorLog2(0..1) is 0;
+ * FloorLog2(2..3) is 1;
+ * FloorLog2(4..7) is 2;
+ * FloorLog2(8..15) is 3; and so on.
+ */
+template <typename T>
+inline constexpr uint_fast8_t FloorLog2(const T aValue) {
+ return detail::FloorLog2<T>::compute(aValue);
+}
+
+/** A FloorLog2 variant that accepts only size_t. */
+inline uint_fast8_t FloorLog2Size(size_t aValue) { return FloorLog2(aValue); }
+
+/*
+ * Compute the smallest power of 2 greater than or equal to |x|. |x| must not
+ * be so great that the computed value would overflow |size_t|.
+ */
+inline size_t RoundUpPow2(size_t aValue) {
+ MOZ_ASSERT(aValue <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)),
+ "can't round up -- will overflow!");
+ return size_t(1) << CeilingLog2(aValue);
+}
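+
+// For example (illustrative): RoundUpPow2(1) == 1, RoundUpPow2(5) == 8, and
+// RoundUpPow2(8) == 8.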
+
+/**
+ * Rotates the bits of the given value left by the amount of the shift width.
+ */
+template <typename T>
+MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline T RotateLeft(const T aValue,
+ uint_fast8_t aShift) {
+ static_assert(std::is_unsigned_v<T>, "Rotates require unsigned values");
+
+ MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
+ MOZ_ASSERT(aShift > 0,
+ "Rotation by value length is undefined behavior, but compilers "
+ "do not currently fold a test into the rotate instruction. "
+ "Please remove this restriction when compilers optimize the "
+ "zero case (http://blog.regehr.org/archives/1063).");
+
+ return (aValue << aShift) | (aValue >> (sizeof(T) * CHAR_BIT - aShift));
+}
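+
+// For example (illustrative): RotateLeft(uint8_t(0b10010000), 1) yields
+// uint8_t(0b00100001).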
+
+/**
+ * Rotates the bits of the given value right by the amount of the shift width.
+ */
+template <typename T>
+MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline T RotateRight(const T aValue,
+ uint_fast8_t aShift) {
+ static_assert(std::is_unsigned_v<T>, "Rotates require unsigned values");
+
+ MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
+ MOZ_ASSERT(aShift > 0,
+ "Rotation by value length is undefined behavior, but compilers "
+ "do not currently fold a test into the rotate instruction. "
+ "Please remove this restriction when compilers optimize the "
+ "zero case (http://blog.regehr.org/archives/1063).");
+
+ return (aValue >> aShift) | (aValue << (sizeof(T) * CHAR_BIT - aShift));
+}
+
+/**
+ * Returns true if |x| is a power of two.
+ * Zero is not an integer power of two. (-Inf is not an integer)
+ */
+template <typename T>
+constexpr bool IsPowerOfTwo(T x) {
+ static_assert(std::is_unsigned_v<T>, "IsPowerOfTwo requires unsigned values");
+ return x && (x & (x - 1)) == 0;
+}
+
+template <typename T>
+inline T Clamp(const T aValue, const T aMin, const T aMax) {
+ static_assert(std::is_integral_v<T>,
+ "Clamp accepts only integral types, so that it doesn't have"
+ " to distinguish differently-signed zeroes (which users may"
+ " or may not care to distinguish, likely at a perf cost) or"
+ " to decide how to clamp NaN or a range with a NaN"
+ " endpoint.");
+ MOZ_ASSERT(aMin <= aMax);
+
+ if (aValue <= aMin) return aMin;
+ if (aValue >= aMax) return aMax;
+ return aValue;
+}
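+
+// For example (illustrative): Clamp(5, 0, 3) yields 3, and Clamp(-1, 0, 3)
+// yields 0.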
+
+template <typename T>
+inline uint_fast8_t CountTrailingZeroes(T aValue) {
+ static_assert(sizeof(T) <= 8);
+ static_assert(std::is_integral_v<T>);
+ // Types no wider than 32 bits are implicitly converted to uint32_t here.
+ if constexpr (sizeof(T) <= 4) {
+ return CountTrailingZeroes32(aValue);
+ }
+ // 64-bit types need no conversion.
+ if constexpr (sizeof(T) == 8) {
+ return CountTrailingZeroes64(aValue);
+ }
+}
+
+// Greatest Common Divisor, from
+// https://en.wikipedia.org/wiki/Binary_GCD_algorithm#Implementation
+template <typename T>
+MOZ_ALWAYS_INLINE T GCD(T aA, T aB) {
+ static_assert(std::is_integral_v<T>);
+
+ MOZ_ASSERT(aA >= 0);
+ MOZ_ASSERT(aB >= 0);
+
+ if (aA == 0) {
+ return aB;
+ }
+ if (aB == 0) {
+ return aA;
+ }
+
+ T az = CountTrailingZeroes(aA);
+ T bz = CountTrailingZeroes(aB);
+ T shift = std::min<T>(az, bz);
+ aA >>= az;
+ aB >>= bz;
+
+ while (aA != 0) {
+ if constexpr (!std::is_signed_v<T>) {
+ if (aA < aB) {
+ std::swap(aA, aB);
+ }
+ }
+ T diff = aA - aB;
+ if constexpr (std::is_signed_v<T>) {
+ aB = std::min<T>(aA, aB);
+ }
+ if constexpr (std::is_signed_v<T>) {
+ aA = std::abs(diff);
+ } else {
+ aA = diff;
+ }
+ if (aA) {
+ aA >>= CountTrailingZeroes(aA);
+ }
+ }
+
+ return aB << shift;
+}
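+
+// For example (illustrative): GCD(48u, 18u) yields 6u, and GCD(0u, 9u)
+// yields 9u.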
+
+} /* namespace mozilla */
+
+#endif /* mozilla_MathAlgorithms_h */
diff --git a/mfbt/Maybe.h b/mfbt/Maybe.h
new file mode 100644
index 0000000000..100b139c79
--- /dev/null
+++ b/mfbt/Maybe.h
@@ -0,0 +1,977 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A class for optional values and in-place lazy construction. */
+
+#ifndef mozilla_Maybe_h
+#define mozilla_Maybe_h
+
+#include <new> // for placement new
+#include <ostream>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MaybeStorageBase.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/Poison.h"
+#include "mozilla/ThreadSafety.h"
+
+class nsCycleCollectionTraversalCallback;
+
+template <typename T>
+inline void CycleCollectionNoteChild(
+ nsCycleCollectionTraversalCallback& aCallback, T* aChild, const char* aName,
+ uint32_t aFlags);
+
+namespace mozilla {
+
+struct Nothing {};
+
+inline constexpr bool operator==(const Nothing&, const Nothing&) {
+ return true;
+}
+
+template <class T>
+class Maybe;
+
+namespace detail {
+
+// You would think that poisoning Maybe instances could just be a call
+// to mozWritePoison. Unfortunately, using a simple call to
+// mozWritePoison generates poor code on MSVC for small structures. The
+// generated code contains (always not-taken) branches and does a bunch
+// of setup for `rep stos{l,q}`, even though we know at compile time
+// exactly how many words we're poisoning. Instead, we're going to
+// force MSVC to generate the code we want via recursive templates.
+
+// Write the given poisonValue into p at offset*sizeof(uintptr_t).
+template <size_t offset>
+inline void WritePoisonAtOffset(void* p, const uintptr_t poisonValue) {
+ memcpy(static_cast<char*>(p) + offset * sizeof(poisonValue), &poisonValue,
+ sizeof(poisonValue));
+}
+
+template <size_t Offset, size_t NOffsets>
+struct InlinePoisoner {
+ static void poison(void* p, const uintptr_t poisonValue) {
+ WritePoisonAtOffset<Offset>(p, poisonValue);
+ InlinePoisoner<Offset + 1, NOffsets>::poison(p, poisonValue);
+ }
+};
+
+template <size_t N>
+struct InlinePoisoner<N, N> {
+ static void poison(void*, const uintptr_t) {
+ // All done!
+ }
+};
+
+// We can't generate inline code for large structures, though, because we'll
+// blow out recursive template instantiation limits, and the code would be
+// bloated to boot. So provide a fallback to the out-of-line poisoner.
+template <size_t ObjectSize>
+struct OutOfLinePoisoner {
+ static MOZ_NEVER_INLINE void poison(void* p, const uintptr_t) {
+ mozWritePoison(p, ObjectSize);
+ }
+};
+
+template <typename T>
+inline void PoisonObject(T* p) {
+ const uintptr_t POISON = mozPoisonValue();
+ std::conditional_t<(sizeof(T) <= 8 * sizeof(POISON)),
+ InlinePoisoner<0, sizeof(T) / sizeof(POISON)>,
+ OutOfLinePoisoner<sizeof(T)>>::poison(p, POISON);
+}
+
+template <typename T>
+struct MaybePoisoner {
+ static const size_t N = sizeof(T);
+
+ static void poison(void* aPtr) {
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ if (N >= sizeof(uintptr_t)) {
+ PoisonObject(static_cast<std::remove_cv_t<T>*>(aPtr));
+ }
+#endif
+ MOZ_MAKE_MEM_UNDEFINED(aPtr, N);
+ }
+};
+
+template <typename T,
+ bool TriviallyDestructibleAndCopyable =
+ IsTriviallyDestructibleAndCopyable<T>,
+ bool Copyable = std::is_copy_constructible_v<T>,
+ bool Movable = std::is_move_constructible_v<T>>
+class Maybe_CopyMove_Enabler;
+
+#define MOZ_MAYBE_COPY_OPS() \
+ Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler& aOther) { \
+ if (downcast(aOther).isSome()) { \
+ downcast(*this).emplace(*downcast(aOther)); \
+ } \
+ } \
+ \
+ Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler& aOther) { \
+ return downcast(*this).template operator= <T>(downcast(aOther)); \
+ }
+
+#define MOZ_MAYBE_MOVE_OPS() \
+ constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) { \
+ if (downcast(aOther).isSome()) { \
+ downcast(*this).emplace(std::move(*downcast(aOther))); \
+ downcast(aOther).reset(); \
+ } \
+ } \
+ \
+ constexpr Maybe_CopyMove_Enabler& operator=( \
+ Maybe_CopyMove_Enabler&& aOther) { \
+ downcast(*this).template operator= <T>(std::move(downcast(aOther))); \
+ \
+ return *this; \
+ }
+
+#define MOZ_MAYBE_DOWNCAST() \
+ static constexpr Maybe<T>& downcast(Maybe_CopyMove_Enabler& aObj) { \
+ return static_cast<Maybe<T>&>(aObj); \
+ } \
+ static constexpr const Maybe<T>& downcast( \
+ const Maybe_CopyMove_Enabler& aObj) { \
+ return static_cast<const Maybe<T>&>(aObj); \
+ }
+
+template <typename T>
+class Maybe_CopyMove_Enabler<T, true, true, true> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler&) = default;
+ Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = default;
+ constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) {
+ downcast(aOther).reset();
+ }
+ constexpr Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&& aOther) {
+ downcast(aOther).reset();
+ return *this;
+ }
+
+ private:
+ MOZ_MAYBE_DOWNCAST()
+};
+
+template <typename T>
+class Maybe_CopyMove_Enabler<T, true, false, true> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler&) = delete;
+ Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = delete;
+ constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) {
+ downcast(aOther).reset();
+ }
+ constexpr Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&& aOther) {
+ downcast(aOther).reset();
+ return *this;
+ }
+
+ private:
+ MOZ_MAYBE_DOWNCAST()
+};
+
+template <typename T>
+class Maybe_CopyMove_Enabler<T, false, true, true> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ MOZ_MAYBE_COPY_OPS()
+ MOZ_MAYBE_MOVE_OPS()
+
+ private:
+ MOZ_MAYBE_DOWNCAST()
+};
+
+template <typename T>
+class Maybe_CopyMove_Enabler<T, false, false, true> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ MOZ_MAYBE_MOVE_OPS()
+
+ private:
+ MOZ_MAYBE_DOWNCAST()
+};
+
+template <typename T>
+class Maybe_CopyMove_Enabler<T, false, true, false> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ MOZ_MAYBE_COPY_OPS()
+
+ private:
+ MOZ_MAYBE_DOWNCAST()
+};
+
+template <typename T, bool TriviallyDestructibleAndCopyable>
+class Maybe_CopyMove_Enabler<T, TriviallyDestructibleAndCopyable, false,
+ false> {
+ public:
+ Maybe_CopyMove_Enabler() = default;
+
+ Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler&) = delete;
+ Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = delete;
+ Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&&) = delete;
+ Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&&) = delete;
+};
+
+#undef MOZ_MAYBE_COPY_OPS
+#undef MOZ_MAYBE_MOVE_OPS
+#undef MOZ_MAYBE_DOWNCAST
+
+template <typename T, bool TriviallyDestructibleAndCopyable =
+ IsTriviallyDestructibleAndCopyable<T>>
+struct MaybeStorage;
+
+template <typename T>
+struct MaybeStorage<T, false> : MaybeStorageBase<T> {
+ protected:
+ char mIsSome = false; // not bool -- guarantees minimal space consumption
+
+ MaybeStorage() = default;
+ explicit MaybeStorage(const T& aVal)
+ : MaybeStorageBase<T>{aVal}, mIsSome{true} {}
+ explicit MaybeStorage(T&& aVal)
+ : MaybeStorageBase<T>{std::move(aVal)}, mIsSome{true} {}
+
+ template <typename... Args>
+ explicit MaybeStorage(std::in_place_t, Args&&... aArgs)
+ : MaybeStorageBase<T>{std::in_place, std::forward<Args>(aArgs)...},
+ mIsSome{true} {}
+
+ public:
+ // Copy and move operations are no-ops, since copying and moving are
+ // implemented by Maybe_CopyMove_Enabler.
+
+ MaybeStorage(const MaybeStorage&) : MaybeStorageBase<T>{} {}
+ MaybeStorage& operator=(const MaybeStorage&) { return *this; }
+ MaybeStorage(MaybeStorage&&) : MaybeStorageBase<T>{} {}
+ MaybeStorage& operator=(MaybeStorage&&) { return *this; }
+
+ ~MaybeStorage() {
+ if (mIsSome) {
+ this->addr()->T::~T();
+ }
+ }
+};
+
+template <typename T>
+struct MaybeStorage<T, true> : MaybeStorageBase<T> {
+ protected:
+ char mIsSome = false; // not bool -- guarantees minimal space consumption
+
+ constexpr MaybeStorage() = default;
+ constexpr explicit MaybeStorage(const T& aVal)
+ : MaybeStorageBase<T>{aVal}, mIsSome{true} {}
+ constexpr explicit MaybeStorage(T&& aVal)
+ : MaybeStorageBase<T>{std::move(aVal)}, mIsSome{true} {}
+
+ template <typename... Args>
+ constexpr explicit MaybeStorage(std::in_place_t, Args&&... aArgs)
+ : MaybeStorageBase<T>{std::in_place, std::forward<Args>(aArgs)...},
+ mIsSome{true} {}
+};
+
+} // namespace detail
+
+template <typename T, typename U = typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type>
+constexpr Maybe<U> Some(T&& aValue);
+
+/*
+ * Maybe is a container class which contains either zero or one elements. It
+ * serves two roles. It can represent values which are *semantically* optional,
+ * augmenting a type with an explicit 'Nothing' value. In this role, it provides
+ * methods that make it easy to work with values that may be missing, along with
+ * equality and comparison operators so that Maybe values can be stored in
+ * containers. Maybe values can be constructed conveniently in expressions using
+ * type inference, as follows:
+ *
+ * void doSomething(Maybe<Foo> aFoo) {
+ * if (aFoo) // Make sure that aFoo contains a value...
+ * aFoo->takeAction(); // and then use |aFoo->| to access it.
+ * } // |*aFoo| also works!
+ *
+ * doSomething(Nothing()); // Passes a Maybe<Foo> containing no value.
+ * doSomething(Some(Foo(100))); // Passes a Maybe<Foo> containing |Foo(100)|.
+ *
+ * You'll note that it's important to check whether a Maybe contains a value
+ * before using it, using conversion to bool, |isSome()|, or |isNothing()|. You
+ * can avoid these checks, and sometimes write more readable code, using
+ * |valueOr()|, |ptrOr()|, and |refOr()|, which allow you to retrieve the value
+ * in the Maybe and provide a default for the 'Nothing' case. You can also use
+ * |apply()| to call a function only if the Maybe holds a value, and |map()| to
+ * transform the value in the Maybe, returning another Maybe with a possibly
+ * different type.
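+ *
+ * For example (illustrative):
+ *
+ *   Maybe<int> maybeN = Some(3);
+ *   int n = maybeN.valueOr(0);                                // 3
+ *   Maybe<int> sq = maybeN.map([](int v) { return v * v; });  // Some(9)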
+ *
+ * Maybe's other role is to support lazily constructing objects without using
+ * dynamic storage. A Maybe directly contains storage for a value, but it's
+ * empty by default. |emplace()|, described below, can be used to construct a
+ * value in Maybe's storage. The value a Maybe contains can be destroyed by
+ * calling |reset()|; this will happen automatically if a Maybe is destroyed
+ * while holding a value.
+ *
+ * It's a common idiom in C++ to use a pointer as a 'Maybe' type, with a null
+ * value meaning 'Nothing' and any other value meaning 'Some'. You can convert
+ * from such a pointer to a Maybe value using 'ToMaybe()'.
+ *
+ * Maybe is inspired by similar types in the standard library of many other
+ * languages (e.g. Haskell's Maybe and Rust's Option). In the C++ world it's
+ * very similar to std::optional, which originated in Boost and was adopted
+ * into the standard in C++17. The most important differences between Maybe and
+ * std::optional are:
+ *
+ * - std::optional<T> may be compared with T. We deliberately forbid that.
+ * - std::optional has |valueOr()|, equivalent to Maybe's |valueOr()|, but
+ * lacks corresponding methods for |refOr()| and |ptrOr()|.
+ * - std::optional lacks |map()| and |apply()|, making it less suitable for
+ * functional-style code.
+ * - std::optional lacks many convenience functions that Maybe has. Most
+ * unfortunately, it lacks equivalents of the type-inferred constructor
+ * functions |Some()| and |Nothing()|.
+ */
+template <class T>
+class MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS Maybe
+ : private detail::MaybeStorage<T>,
+ public detail::Maybe_CopyMove_Enabler<T> {
+ template <typename, bool, bool, bool>
+ friend class detail::Maybe_CopyMove_Enabler;
+
+ template <typename U, typename V>
+ friend constexpr Maybe<V> Some(U&& aValue);
+
+ struct SomeGuard {};
+
+ template <typename U>
+ constexpr Maybe(U&& aValue, SomeGuard)
+ : detail::MaybeStorage<T>{std::forward<U>(aValue)} {}
+
+ using detail::MaybeStorage<T>::mIsSome;
+ using detail::MaybeStorage<T>::mStorage;
+
+ void poisonData() { detail::MaybePoisoner<T>::poison(&mStorage.val); }
+
+ public:
+ using ValueType = T;
+
+ MOZ_ALLOW_TEMPORARY constexpr Maybe() = default;
+
+ MOZ_ALLOW_TEMPORARY MOZ_IMPLICIT constexpr Maybe(Nothing) : Maybe{} {}
+
+ template <typename... Args>
+ constexpr explicit Maybe(std::in_place_t, Args&&... aArgs)
+ : detail::MaybeStorage<T>{std::in_place, std::forward<Args>(aArgs)...} {}
+
+ /**
+ * Maybe<T> can be copy-constructed from a Maybe<U> if T is constructible from
+ * a const U&.
+ */
+ template <typename U,
+ typename = std::enable_if_t<std::is_constructible_v<T, const U&>>>
+ MOZ_IMPLICIT Maybe(const Maybe<U>& aOther) {
+ if (aOther.isSome()) {
+ emplace(*aOther);
+ }
+ }
+
+ /**
+ * Maybe<T> can be move-constructed from a Maybe<U> if T is constructible from
+ * a U&&.
+ */
+ template <typename U,
+ typename = std::enable_if_t<std::is_constructible_v<T, U&&>>>
+ MOZ_IMPLICIT Maybe(Maybe<U>&& aOther) {
+ if (aOther.isSome()) {
+ emplace(std::move(*aOther));
+ aOther.reset();
+ }
+ }
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_constructible_v<T, const U&>>>
+ Maybe& operator=(const Maybe<U>& aOther) {
+ if (aOther.isSome()) {
+ if (mIsSome) {
+ ref() = aOther.ref();
+ } else {
+ emplace(*aOther);
+ }
+ } else {
+ reset();
+ }
+ return *this;
+ }
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_constructible_v<T, U&&>>>
+ Maybe& operator=(Maybe<U>&& aOther) {
+ if (aOther.isSome()) {
+ if (mIsSome) {
+ ref() = std::move(aOther.ref());
+ } else {
+ emplace(std::move(*aOther));
+ }
+ aOther.reset();
+ } else {
+ reset();
+ }
+
+ return *this;
+ }
+
+ constexpr Maybe& operator=(Nothing) {
+ reset();
+ return *this;
+ }
+
+ /* Methods that check whether this Maybe contains a value */
+ constexpr explicit operator bool() const { return isSome(); }
+ constexpr bool isSome() const { return mIsSome; }
+ constexpr bool isNothing() const { return !mIsSome; }
+
+ /* Returns the contents of this Maybe<T> by value. Unsafe unless |isSome()|.
+ */
+ constexpr T value() const&;
+ constexpr T value() &&;
+ constexpr T value() const&&;
+
+ /**
+ * Move the contents of this Maybe<T> out of internal storage and return it
+ * without calling the destructor. The internal storage is also reset to
+ * avoid multiple calls. Unsafe unless |isSome()|.
+ */
+ T extract() {
+ MOZ_RELEASE_ASSERT(isSome());
+ T v = std::move(mStorage.val);
+ reset();
+ return v;
+ }
+
+ /**
+ * Returns the value (possibly |Nothing()|) by moving it out of this Maybe<T>
+ * and leaving |Nothing()| in its place.
+ */
+ Maybe<T> take() { return std::exchange(*this, Nothing()); }
+
+ /*
+ * Returns the contents of this Maybe<T> by value. If |isNothing()|, returns
+ * the default value provided.
+ *
+ * Note: If the value passed to aDefault is not the result of a trivial
+ * expression, but expensive to evaluate, e.g. |valueOr(ExpensiveFunction())|,
+ * use |valueOrFrom| instead, e.g.
+ * |valueOrFrom([arg] { return ExpensiveFunction(arg); })|. This ensures
+ * that the expensive expression is only evaluated when its result will
+ * actually be used.
+ */
+ template <typename V>
+ constexpr T valueOr(V&& aDefault) const {
+ if (isSome()) {
+ return ref();
+ }
+ return std::forward<V>(aDefault);
+ }
+
+ /*
+ * Returns the contents of this Maybe<T> by value. If |isNothing()|, returns
+ * the value returned from the function or functor provided.
+ */
+ template <typename F>
+ constexpr T valueOrFrom(F&& aFunc) const {
+ if (isSome()) {
+ return ref();
+ }
+ return aFunc();
+ }
+
+ /* Returns the contents of this Maybe<T> by pointer. Unsafe unless |isSome()|.
+ */
+ T* ptr();
+ constexpr const T* ptr() const;
+
+ /*
+ * Returns the contents of this Maybe<T> by pointer. If |isNothing()|,
+ * returns the default value provided.
+ */
+ T* ptrOr(T* aDefault) {
+ if (isSome()) {
+ return ptr();
+ }
+ return aDefault;
+ }
+
+ constexpr const T* ptrOr(const T* aDefault) const {
+ if (isSome()) {
+ return ptr();
+ }
+ return aDefault;
+ }
+
+ /*
+ * Returns the contents of this Maybe<T> by pointer. If |isNothing()|,
+ * returns the value returned from the function or functor provided.
+ */
+ template <typename F>
+ T* ptrOrFrom(F&& aFunc) {
+ if (isSome()) {
+ return ptr();
+ }
+ return aFunc();
+ }
+
+ template <typename F>
+ const T* ptrOrFrom(F&& aFunc) const {
+ if (isSome()) {
+ return ptr();
+ }
+ return aFunc();
+ }
+
+ constexpr T* operator->();
+ constexpr const T* operator->() const;
+
+ /* Returns the contents of this Maybe<T> by ref. Unsafe unless |isSome()|. */
+ constexpr T& ref() &;
+ constexpr const T& ref() const&;
+ constexpr T&& ref() &&;
+ constexpr const T&& ref() const&&;
+
+ /*
+ * Returns the contents of this Maybe<T> by ref. If |isNothing()|, returns
+ * the default value provided.
+ */
+ constexpr T& refOr(T& aDefault) {
+ if (isSome()) {
+ return ref();
+ }
+ return aDefault;
+ }
+
+ constexpr const T& refOr(const T& aDefault) const {
+ if (isSome()) {
+ return ref();
+ }
+ return aDefault;
+ }
+
+ /*
+ * Returns the contents of this Maybe<T> by ref. If |isNothing()|, returns the
+ * value returned from the function or functor provided.
+ */
+ template <typename F>
+ constexpr T& refOrFrom(F&& aFunc) {
+ if (isSome()) {
+ return ref();
+ }
+ return aFunc();
+ }
+
+ template <typename F>
+ constexpr const T& refOrFrom(F&& aFunc) const {
+ if (isSome()) {
+ return ref();
+ }
+ return aFunc();
+ }
+
+ constexpr T& operator*() &;
+ constexpr const T& operator*() const&;
+ constexpr T&& operator*() &&;
+ constexpr const T&& operator*() const&&;
+
+ /* If |isSome()|, runs the provided function or functor on the contents of
+ * this Maybe. */
+ template <typename Func>
+ constexpr Maybe& apply(Func&& aFunc) {
+ if (isSome()) {
+ std::forward<Func>(aFunc)(ref());
+ }
+ return *this;
+ }
+
+ template <typename Func>
+ constexpr const Maybe& apply(Func&& aFunc) const {
+ if (isSome()) {
+ std::forward<Func>(aFunc)(ref());
+ }
+ return *this;
+ }
+
+ /*
+ * If |isSome()|, runs the provided function and returns the result wrapped
+ * in a Maybe. If |isNothing()|, returns an empty Maybe value with the same
+ * value type as what the provided function would have returned.
+ */
+ template <typename Func>
+ constexpr auto map(Func&& aFunc) {
+ if (isSome()) {
+ return Some(std::forward<Func>(aFunc)(ref()));
+ }
+ return Maybe<decltype(std::forward<Func>(aFunc)(ref()))>{};
+ }
+
+ template <typename Func>
+ constexpr auto map(Func&& aFunc) const {
+ if (isSome()) {
+ return Some(std::forward<Func>(aFunc)(ref()));
+ }
+ return Maybe<decltype(std::forward<Func>(aFunc)(ref()))>{};
+ }
+
+ /* If |isSome()|, empties this Maybe and destroys its contents. */
+ constexpr void reset() {
+ if (isSome()) {
+ if constexpr (!std::is_trivially_destructible_v<T>) {
+ /*
+ * Static analyzer gets confused if we have Maybe<MutexAutoLock>,
+ * so we suppress thread-safety warnings here
+ */
+ MOZ_PUSH_IGNORE_THREAD_SAFETY
+ ref().T::~T();
+ MOZ_POP_THREAD_SAFETY
+ poisonData();
+ }
+ mIsSome = false;
+ }
+ }
+
+ /*
+ * Constructs a T value in-place in this empty Maybe<T>'s storage. The
+ * arguments to |emplace()| are the parameters to T's constructor.
+ */
+ template <typename... Args>
+ constexpr void emplace(Args&&... aArgs);
+
+ template <typename U>
+ constexpr std::enable_if_t<std::is_same_v<T, U> &&
+ std::is_copy_constructible_v<U> &&
+ !std::is_move_constructible_v<U>>
+ emplace(U&& aArgs) {
+ emplace(aArgs);
+ }
+
+ friend std::ostream& operator<<(std::ostream& aStream,
+ const Maybe<T>& aMaybe) {
+ if (aMaybe) {
+ aStream << aMaybe.ref();
+ } else {
+ aStream << "<Nothing>";
+ }
+ return aStream;
+ }
+};
+
+template <typename T>
+class Maybe<T&> {
+ public:
+ constexpr Maybe() = default;
+ constexpr MOZ_IMPLICIT Maybe(Nothing) {}
+
+ void emplace(T& aRef) { mValue = &aRef; }
+
+ /* Methods that check whether this Maybe contains a value */
+ constexpr explicit operator bool() const { return isSome(); }
+ constexpr bool isSome() const { return mValue; }
+ constexpr bool isNothing() const { return !mValue; }
+
+ T& ref() const {
+ MOZ_RELEASE_ASSERT(isSome());
+ return *mValue;
+ }
+
+ T* operator->() const { return &ref(); }
+ T& operator*() const { return ref(); }
+
+ // Deliberately not defining value and ptr accessors, as these may be
+ // confusing on a reference-typed Maybe.
+
+ // XXX Should we define refOr?
+
+ void reset() { mValue = nullptr; }
+
+ template <typename Func>
+ Maybe& apply(Func&& aFunc) {
+ if (isSome()) {
+ std::forward<Func>(aFunc)(ref());
+ }
+ return *this;
+ }
+
+ template <typename Func>
+ const Maybe& apply(Func&& aFunc) const {
+ if (isSome()) {
+ std::forward<Func>(aFunc)(ref());
+ }
+ return *this;
+ }
+
+ template <typename Func>
+ auto map(Func&& aFunc) {
+ Maybe<decltype(std::forward<Func>(aFunc)(ref()))> val;
+ if (isSome()) {
+ val.emplace(std::forward<Func>(aFunc)(ref()));
+ }
+ return val;
+ }
+
+ template <typename Func>
+ auto map(Func&& aFunc) const {
+ Maybe<decltype(std::forward<Func>(aFunc)(ref()))> val;
+ if (isSome()) {
+ val.emplace(std::forward<Func>(aFunc)(ref()));
+ }
+ return val;
+ }
+
+ bool refEquals(const Maybe<T&>& aOther) const {
+ return mValue == aOther.mValue;
+ }
+
+ bool refEquals(const T& aOther) const { return mValue == &aOther; }
+
+ private:
+ T* mValue = nullptr;
+};
+
+template <typename T>
+constexpr T Maybe<T>::value() const& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return ref();
+}
+
+template <typename T>
+constexpr T Maybe<T>::value() && {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(ref());
+}
+
+template <typename T>
+constexpr T Maybe<T>::value() const&& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(ref());
+}
+
+template <typename T>
+T* Maybe<T>::ptr() {
+ MOZ_RELEASE_ASSERT(isSome());
+ return &ref();
+}
+
+template <typename T>
+constexpr const T* Maybe<T>::ptr() const {
+ MOZ_RELEASE_ASSERT(isSome());
+ return &ref();
+}
+
+template <typename T>
+constexpr T* Maybe<T>::operator->() {
+ MOZ_RELEASE_ASSERT(isSome());
+ return ptr();
+}
+
+template <typename T>
+constexpr const T* Maybe<T>::operator->() const {
+ MOZ_RELEASE_ASSERT(isSome());
+ return ptr();
+}
+
+template <typename T>
+constexpr T& Maybe<T>::ref() & {
+ MOZ_RELEASE_ASSERT(isSome());
+ return mStorage.val;
+}
+
+template <typename T>
+constexpr const T& Maybe<T>::ref() const& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return mStorage.val;
+}
+
+template <typename T>
+constexpr T&& Maybe<T>::ref() && {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(mStorage.val);
+}
+
+template <typename T>
+constexpr const T&& Maybe<T>::ref() const&& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(mStorage.val);
+}
+
+template <typename T>
+constexpr T& Maybe<T>::operator*() & {
+ MOZ_RELEASE_ASSERT(isSome());
+ return ref();
+}
+
+template <typename T>
+constexpr const T& Maybe<T>::operator*() const& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return ref();
+}
+
+template <typename T>
+constexpr T&& Maybe<T>::operator*() && {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(ref());
+}
+
+template <typename T>
+constexpr const T&& Maybe<T>::operator*() const&& {
+ MOZ_RELEASE_ASSERT(isSome());
+ return std::move(ref());
+}
+
+template <typename T>
+template <typename... Args>
+constexpr void Maybe<T>::emplace(Args&&... aArgs) {
+ MOZ_RELEASE_ASSERT(!isSome());
+ ::new (KnownNotNull, &mStorage.val) T(std::forward<Args>(aArgs)...);
+ mIsSome = true;
+}
+
+/*
+ * Some() creates a Maybe<T> value containing the provided T value. If T has a
+ * move constructor, it's used to make this as efficient as possible.
+ *
+ * Some() selects the type of Maybe it returns by removing any const, volatile,
+ * or reference qualifiers from the type of the value you pass to it. This gives
+ * it more intuitive behavior when used in expressions, but it also means that
+ * if you need to construct a Maybe value that holds a const, volatile, or
+ * reference value, you need to use emplace() instead.
+ */
+template <typename T, typename U>
+constexpr Maybe<U> Some(T&& aValue) {
+ return {std::forward<T>(aValue), typename Maybe<U>::SomeGuard{}};
+}
+
+template <typename T>
+constexpr Maybe<T&> SomeRef(T& aValue) {
+ Maybe<T&> value;
+ value.emplace(aValue);
+ return value;
+}
+
+template <typename T>
+constexpr Maybe<T&> ToMaybeRef(T* const aPtr) {
+ return aPtr ? SomeRef(*aPtr) : Nothing{};
+}
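+
+// For example (illustrative; |x| is hypothetical): given |int x = 5;|,
+// |SomeRef(x)| yields a Maybe<int&> whose ref() aliases |x|, while
+// |ToMaybeRef(static_cast<int*>(nullptr))| yields an empty Maybe<int&>.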
+
+template <typename T>
+Maybe<std::remove_cv_t<std::remove_reference_t<T>>> ToMaybe(T* aPtr) {
+ if (aPtr) {
+ return Some(*aPtr);
+ }
+ return Nothing();
+}
+
+/*
+ * Two Maybe<T> values are equal if
+ * - both are Nothing, or
+ * - both are Some, and the values they contain are equal.
+ */
+template <typename T>
+constexpr bool operator==(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ static_assert(!std::is_reference_v<T>,
+ "operator== is not defined for Maybe<T&>, compare values or "
+ "addresses explicitly instead");
+ if (aLHS.isNothing() != aRHS.isNothing()) {
+ return false;
+ }
+ return aLHS.isNothing() || *aLHS == *aRHS;
+}
+
+template <typename T>
+constexpr bool operator!=(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ return !(aLHS == aRHS);
+}
+
+/*
+ * We support comparison to Nothing to allow reasonable expressions like:
+ * if (maybeValue == Nothing()) { ... }
+ */
+template <typename T>
+constexpr bool operator==(const Maybe<T>& aLHS, const Nothing& aRHS) {
+ return aLHS.isNothing();
+}
+
+template <typename T>
+constexpr bool operator!=(const Maybe<T>& aLHS, const Nothing& aRHS) {
+ return !(aLHS == aRHS);
+}
+
+template <typename T>
+constexpr bool operator==(const Nothing& aLHS, const Maybe<T>& aRHS) {
+ return aRHS.isNothing();
+}
+
+template <typename T>
+constexpr bool operator!=(const Nothing& aLHS, const Maybe<T>& aRHS) {
+ return !(aLHS == aRHS);
+}
+
+/*
+ * Maybe<T> values are ordered in the same way T values are ordered, except that
+ * Nothing comes before anything else.
+ */
+template <typename T>
+constexpr bool operator<(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ if (aLHS.isNothing()) {
+ return aRHS.isSome();
+ }
+ if (aRHS.isNothing()) {
+ return false;
+ }
+ return *aLHS < *aRHS;
+}
+
+template <typename T>
+constexpr bool operator>(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ return !(aLHS < aRHS || aLHS == aRHS);
+}
+
+template <typename T>
+constexpr bool operator<=(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ return aLHS < aRHS || aLHS == aRHS;
+}
+
+template <typename T>
+constexpr bool operator>=(const Maybe<T>& aLHS, const Maybe<T>& aRHS) {
+ return !(aLHS < aRHS);
+}
+
+template <typename T>
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback, mozilla::Maybe<T>& aField,
+ const char* aName, uint32_t aFlags = 0) {
+ if (aField) {
+ ImplCycleCollectionTraverse(aCallback, aField.ref(), aName, aFlags);
+ }
+}
+
+template <typename T>
+inline void ImplCycleCollectionUnlink(mozilla::Maybe<T>& aField) {
+ if (aField) {
+ ImplCycleCollectionUnlink(aField.ref());
+ }
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_Maybe_h */
diff --git a/mfbt/MaybeOneOf.h b/mfbt/MaybeOneOf.h
new file mode 100644
index 0000000000..769f18d5dd
--- /dev/null
+++ b/mfbt/MaybeOneOf.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A class storing one of two optional value types that supports in-place lazy
+ * construction.
+ */
+
+#ifndef mozilla_MaybeOneOf_h
+#define mozilla_MaybeOneOf_h
+
+#include <stddef.h> // for size_t
+
+#include <new> // for placement new
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/TemplateLib.h"
+
+namespace mozilla {
+
+/*
+ * MaybeOneOf<T1, T2> is like Maybe, but it supports constructing either T1
+ * or T2. When a MaybeOneOf<T1, T2> is constructed, it is |empty()|, i.e.,
+ * no value has been constructed and no destructor will be called when the
+ * MaybeOneOf<T1, T2> is destroyed. Upon calling |construct<T1>()| or
+ * |construct<T2>()|, a T1 or T2 object will be constructed with the given
+ * arguments and that object will be destroyed when the owning MaybeOneOf is
+ * destroyed.
+ *
+ * Because MaybeOneOf must be aligned suitable to hold any value stored within
+ * it, and because |alignas| requirements don't affect platform ABI with respect
+ * to how parameters are laid out in memory, MaybeOneOf can't be used as the
+ * type of a function parameter. Pass MaybeOneOf to functions by pointer or
+ * reference instead.
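+ *
+ * For example (illustrative):
+ *
+ *   MaybeOneOf<int, double> v;   // v.empty() is true
+ *   v.construct<double>(1.5);    // v.constructed<double>() is true
+ *   double d = v.ref<double>();  // 1.5
+ *   v.destroy();                 // v.empty() is true again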
+ */
+template <class T1, class T2>
+class MOZ_NON_PARAM MaybeOneOf {
+ static constexpr size_t StorageAlignment =
+ tl::Max<alignof(T1), alignof(T2)>::value;
+ static constexpr size_t StorageSize = tl::Max<sizeof(T1), sizeof(T2)>::value;
+
+ alignas(StorageAlignment) unsigned char storage[StorageSize];
+
+ // GCC fails due to -Werror=strict-aliasing if |storage| is directly cast to
+ // T*. Indirecting through these functions addresses the problem.
+ void* data() { return storage; }
+ const void* data() const { return storage; }
+
+ enum State { None, SomeT1, SomeT2 } state;
+ template <class T, class Ignored = void>
+ struct Type2State {};
+
+ template <class T>
+ T& as() {
+ MOZ_ASSERT(state == Type2State<T>::result);
+ return *static_cast<T*>(data());
+ }
+
+ template <class T>
+ const T& as() const {
+ MOZ_ASSERT(state == Type2State<T>::result);
+ return *static_cast<const T*>(data());
+ }
+
+ public:
+ MaybeOneOf() : state(None) {}
+ ~MaybeOneOf() { destroyIfConstructed(); }
+
+ MaybeOneOf(MaybeOneOf&& rhs) : state(None) {
+ if (!rhs.empty()) {
+ if (rhs.constructed<T1>()) {
+ construct<T1>(std::move(rhs.as<T1>()));
+ rhs.as<T1>().~T1();
+ } else {
+ construct<T2>(std::move(rhs.as<T2>()));
+ rhs.as<T2>().~T2();
+ }
+ rhs.state = None;
+ }
+ }
+
+ MaybeOneOf& operator=(MaybeOneOf&& rhs) {
+ MOZ_ASSERT(this != &rhs, "Self-move is prohibited");
+ this->~MaybeOneOf();
+ new (this) MaybeOneOf(std::move(rhs));
+ return *this;
+ }
+
+ bool empty() const { return state == None; }
+
+ template <class T>
+ bool constructed() const {
+ return state == Type2State<T>::result;
+ }
+
+ template <class T, class... Args>
+ void construct(Args&&... aArgs) {
+ MOZ_ASSERT(state == None);
+ state = Type2State<T>::result;
+ ::new (KnownNotNull, data()) T(std::forward<Args>(aArgs)...);
+ }
+
+ template <class T>
+ T& ref() {
+ return as<T>();
+ }
+
+ template <class T>
+ const T& ref() const {
+ return as<T>();
+ }
+
+ void destroy() {
+ MOZ_ASSERT(state == SomeT1 || state == SomeT2);
+ if (state == SomeT1) {
+ as<T1>().~T1();
+ } else if (state == SomeT2) {
+ as<T2>().~T2();
+ }
+ state = None;
+ }
+
+ void destroyIfConstructed() {
+ if (!empty()) {
+ destroy();
+ }
+ }
+
+ template <typename Func>
+ constexpr auto mapNonEmpty(Func&& aFunc) const {
+ MOZ_ASSERT(!empty());
+ if (state == SomeT1) {
+ return std::forward<Func>(aFunc)(as<T1>());
+ }
+ return std::forward<Func>(aFunc)(as<T2>());
+ }
+ template <typename Func>
+ constexpr auto mapNonEmpty(Func&& aFunc) {
+ MOZ_ASSERT(!empty());
+ if (state == SomeT1) {
+ return std::forward<Func>(aFunc)(as<T1>());
+ }
+ return std::forward<Func>(aFunc)(as<T2>());
+ }
+
+ private:
+ MaybeOneOf(const MaybeOneOf& aOther) = delete;
+ const MaybeOneOf& operator=(const MaybeOneOf& aOther) = delete;
+};
+
+template <class T1, class T2>
+template <class Ignored>
+struct MaybeOneOf<T1, T2>::Type2State<T1, Ignored> {
+ typedef MaybeOneOf<T1, T2> Enclosing;
+ static const typename Enclosing::State result = Enclosing::SomeT1;
+};
+
+template <class T1, class T2>
+template <class Ignored>
+struct MaybeOneOf<T1, T2>::Type2State<T2, Ignored> {
+ typedef MaybeOneOf<T1, T2> Enclosing;
+ static const typename Enclosing::State result = Enclosing::SomeT2;
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_MaybeOneOf_h */
diff --git a/mfbt/MaybeStorageBase.h b/mfbt/MaybeStorageBase.h
new file mode 100644
index 0000000000..2732d78d05
--- /dev/null
+++ b/mfbt/MaybeStorageBase.h
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Internal storage class used e.g. by Maybe and Result. This file doesn't
+ * contain any public declarations. */
+
+#ifndef mfbt_MaybeStorageBase_h
+#define mfbt_MaybeStorageBase_h
+
+#include <type_traits>
+#include <utility>
+
+namespace mozilla::detail {
+
+template <typename T>
+constexpr bool IsTriviallyDestructibleAndCopyable =
+ std::is_trivially_destructible_v<T> &&
+ (std::is_trivially_copy_constructible_v<T> ||
+ !std::is_copy_constructible_v<T>);
+
+template <typename T, bool TriviallyDestructibleAndCopyable =
+ IsTriviallyDestructibleAndCopyable<T>>
+struct MaybeStorageBase;
+
+template <typename T>
+struct MaybeStorageBase<T, false> {
+ protected:
+ using NonConstT = std::remove_const_t<T>;
+
+ union Union {
+ Union() {}
+ explicit Union(const T& aVal) : val{aVal} {}
+ template <typename U,
+ typename = std::enable_if_t<std::is_move_constructible_v<U>>>
+ explicit Union(U&& aVal) : val{std::forward<U>(aVal)} {}
+ template <typename... Args>
+ explicit Union(std::in_place_t, Args&&... aArgs)
+ : val{std::forward<Args>(aArgs)...} {}
+
+ ~Union() {}
+
+ NonConstT val;
+ } mStorage;
+
+ public:
+ MaybeStorageBase() = default;
+ explicit MaybeStorageBase(const T& aVal) : mStorage{aVal} {}
+ explicit MaybeStorageBase(T&& aVal) : mStorage{std::move(aVal)} {}
+ template <typename... Args>
+ explicit MaybeStorageBase(std::in_place_t, Args&&... aArgs)
+ : mStorage{std::in_place, std::forward<Args>(aArgs)...} {}
+
+ const T* addr() const { return &mStorage.val; }
+ T* addr() { return &mStorage.val; }
+};
+
+template <typename T>
+struct MaybeStorageBase<T, true> {
+ protected:
+ using NonConstT = std::remove_const_t<T>;
+
+ union Union {
+ constexpr Union() : dummy() {}
+ constexpr explicit Union(const T& aVal) : val{aVal} {}
+ constexpr explicit Union(T&& aVal) : val{std::move(aVal)} {}
+ template <typename... Args>
+ constexpr explicit Union(std::in_place_t, Args&&... aArgs)
+ : val{std::forward<Args>(aArgs)...} {}
+
+ NonConstT val;
+ char dummy;
+ } mStorage;
+
+ public:
+ constexpr MaybeStorageBase() = default;
+ constexpr explicit MaybeStorageBase(const T& aVal) : mStorage{aVal} {}
+ constexpr explicit MaybeStorageBase(T&& aVal) : mStorage{std::move(aVal)} {}
+
+ template <typename... Args>
+ constexpr explicit MaybeStorageBase(std::in_place_t, Args&&... aArgs)
+ : mStorage{std::in_place, std::forward<Args>(aArgs)...} {}
+
+ constexpr const T* addr() const { return &mStorage.val; }
+ constexpr T* addr() { return &mStorage.val; }
+};
+
+} // namespace mozilla::detail
+
+#endif
diff --git a/mfbt/MemoryChecking.h b/mfbt/MemoryChecking.h
new file mode 100644
index 0000000000..eed75cd058
--- /dev/null
+++ b/mfbt/MemoryChecking.h
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Provides a common interface to the ASan (AddressSanitizer) and Valgrind
+ * functions used to mark memory in certain ways. In detail, the following
+ * three macros are provided:
+ *
+ * MOZ_MAKE_MEM_NOACCESS - Mark memory as unsafe to access (e.g. freed)
+ * MOZ_MAKE_MEM_UNDEFINED - Mark memory as accessible, with content undefined
+ * MOZ_MAKE_MEM_DEFINED - Mark memory as accessible, with content defined
+ *
+ * With Valgrind in use, these directly map to the three respective Valgrind
+ * macros. With ASan in use, the NOACCESS macro maps to poisoning the memory,
+ * while the UNDEFINED/DEFINED macros unpoison memory.
+ *
+ * With no memory checker available, all macros expand to the empty statement.
+ */
+
+#ifndef mozilla_MemoryChecking_h
+#define mozilla_MemoryChecking_h
+
+#if defined(MOZ_VALGRIND)
+# include "valgrind/memcheck.h"
+#endif
+
+#if defined(MOZ_ASAN) || defined(MOZ_VALGRIND)
+# define MOZ_HAVE_MEM_CHECKS 1
+#endif
+
+#if defined(MOZ_ASAN)
+# include <stddef.h>
+
+# include "mozilla/Attributes.h"
+# include "mozilla/Types.h"
+
+# ifdef _MSC_VER
+// In clang-cl based ASAN, we link against the memory poisoning functions
+// statically.
+# define MOZ_ASAN_VISIBILITY
+# else
+# define MOZ_ASAN_VISIBILITY MOZ_EXPORT
+# endif
+
+extern "C" {
+/* These definitions are usually provided through the
+ * sanitizer/asan_interface.h header installed by ASan.
+ */
+void MOZ_ASAN_VISIBILITY __asan_poison_memory_region(void const volatile* addr,
+ size_t size);
+void MOZ_ASAN_VISIBILITY
+__asan_unpoison_memory_region(void const volatile* addr, size_t size);
+
+# define MOZ_MAKE_MEM_NOACCESS(addr, size) \
+ __asan_poison_memory_region((addr), (size))
+
+# define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
+ __asan_unpoison_memory_region((addr), (size))
+
+# define MOZ_MAKE_MEM_DEFINED(addr, size) \
+ __asan_unpoison_memory_region((addr), (size))
+
+/*
+ * These definitions are usually provided through the
+ * sanitizer/lsan_interface.h header installed by LSan.
+ */
+void MOZ_EXPORT __lsan_ignore_object(const void* p);
+}
+#elif defined(MOZ_MSAN)
+# include <stddef.h>
+
+# include "mozilla/Types.h"
+
+extern "C" {
+/* These definitions are usually provided through the
+ * sanitizer/msan_interface.h header installed by MSan.
+ */
+void MOZ_EXPORT __msan_poison(void const volatile* addr, size_t size);
+void MOZ_EXPORT __msan_unpoison(void const volatile* addr, size_t size);
+
+# define MOZ_MAKE_MEM_NOACCESS(addr, size) __msan_poison((addr), (size))
+
+# define MOZ_MAKE_MEM_UNDEFINED(addr, size) __msan_poison((addr), (size))
+
+# define MOZ_MAKE_MEM_DEFINED(addr, size) __msan_unpoison((addr), (size))
+}
+#elif defined(MOZ_VALGRIND)
+# define MOZ_MAKE_MEM_NOACCESS(addr, size) \
+ VALGRIND_MAKE_MEM_NOACCESS((addr), (size))
+
+# define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
+ VALGRIND_MAKE_MEM_UNDEFINED((addr), (size))
+
+# define MOZ_MAKE_MEM_DEFINED(addr, size) \
+ VALGRIND_MAKE_MEM_DEFINED((addr), (size))
+#else
+
+# define MOZ_MAKE_MEM_NOACCESS(addr, size) \
+ do { \
+ } while (0)
+# define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
+ do { \
+ } while (0)
+# define MOZ_MAKE_MEM_DEFINED(addr, size) \
+ do { \
+ } while (0)
+
+#endif
+
+/*
+ * MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) is a macro to tell LeakSanitizer
+ * that X points to a value that will intentionally never be deallocated
+ * during the execution of the process.
+ *
+ * Additional uses of this macro should be reviewed by people conversant in
+ * leak-checking and/or by MFBT peers.
+ */
+#if defined(MOZ_ASAN)
+# define MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) __lsan_ignore_object(X)
+#else
+# define MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) /* nothing */
+#endif // defined(MOZ_ASAN)
+
+#endif /* mozilla_MemoryChecking_h */
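
A sketch of the intended use, built around a hypothetical fixed-size slot recycler: payloads are marked NOACCESS while parked on the free list, so ASan or Valgrind reports any use-after-free, and are marked UNDEFINED again when handed back out.

#include "mozilla/MemoryChecking.h"

struct Slot {
  char mBytes[64];
  Slot* mNext;
};

static Slot* gFreeSlots = nullptr;

// Park a slot on the free list; its payload becomes untouchable.
void RecycleSlot(Slot* aSlot) {
  aSlot->mNext = gFreeSlots;
  gFreeSlots = aSlot;
  MOZ_MAKE_MEM_NOACCESS(aSlot->mBytes, sizeof(aSlot->mBytes));
}

// Hand a slot back out; its payload is accessible again but uninitialized.
Slot* TakeSlot() {
  if (!gFreeSlots) {
    return nullptr;
  }
  Slot* slot = gFreeSlots;
  gFreeSlots = slot->mNext;
  MOZ_MAKE_MEM_UNDEFINED(slot->mBytes, sizeof(slot->mBytes));
  return slot;
}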
diff --git a/mfbt/MemoryReporting.h b/mfbt/MemoryReporting.h
new file mode 100644
index 0000000000..d2340ecf09
--- /dev/null
+++ b/mfbt/MemoryReporting.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Memory reporting infrastructure. */
+
+#ifndef mozilla_MemoryReporting_h
+#define mozilla_MemoryReporting_h
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+
+namespace mozilla {
+
+/*
+ * This is for functions that are like malloc_usable_size. Such functions are
+ * used for measuring the size of data structures.
+ */
+typedef size_t (*MallocSizeOf)(const void* p);
+
+} /* namespace mozilla */
+
+#endif /* __cplusplus */
+
+typedef size_t (*MozMallocSizeOf)(const void* p);
+
+#endif /* mozilla_MemoryReporting_h */
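
A sketch of the usual convention (the class and member names here are illustrative): reporters thread a MallocSizeOf function through SizeOf* methods, which apply it to each heap block the object owns instead of calling malloc_usable_size directly.

#include <cstdlib>
#include "mozilla/MemoryReporting.h"

class Buffer {
 public:
  explicit Buffer(size_t aLen)
      : mData(static_cast<char*>(std::malloc(aLen))) {}
  ~Buffer() { std::free(mData); }

  // The reporter supplies the measuring function.
  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
    return mData ? aMallocSizeOf(mData) : 0;
  }
  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  char* mData;
};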
diff --git a/mfbt/MoveOnlyFunction.h b/mfbt/MoveOnlyFunction.h
new file mode 100644
index 0000000000..d6ade3fd49
--- /dev/null
+++ b/mfbt/MoveOnlyFunction.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MoveOnlyFunction_h
+#define mozilla_MoveOnlyFunction_h
+
+// Use STL-like empty propagation to avoid issues with wrapping closures that
+// implicitly coerce to bool.
+#define FU2_WITH_LIMITED_EMPTY_PROPAGATION
+
+#include "function2/function2.hpp"
+
+namespace mozilla {
+
+/// A type like `std::function`, but with support for move-only callable
+/// objects.
+///
+/// A similar type is proposed to be added to the standard library as
+/// `std::move_only_function` in C++23.
+///
+/// Unlike `std::function`, the function signature may be given const or
+/// reference qualifiers which will be applied to `operator()`. This can be used
+/// to declare const qualified or move-only functions.
+///
+/// The implementation this definition depends on (function2) also has support
+/// for callables with overload sets, however support for this was not exposed
+/// to align better with the proposed `std::move_only_function`, which does not
+/// support overload sets.
+///
+/// A custom typedef over `fu2::function_base` is used to control the size and
+/// alignment of the inline storage to store 2 aligned pointers, and ensure the
+/// type is compatible with `nsTArray`.
+template <typename Signature>
+using MoveOnlyFunction = fu2::function_base<
+ /* IsOwning */ true,
+ /* IsCopyable */ false,
+ /* Capacity */ fu2::capacity_fixed<2 * sizeof(void*), alignof(void*)>,
+ /* IsThrowing */ false,
+ /* HasStrongExceptionGuarantee */ false,
+ /* Signature */ Signature>;
+
+} // namespace mozilla
+
+#endif // mozilla_MoveOnlyFunction_h
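
A minimal sketch of what this type enables: wrapping a closure that owns a move-only capture, which std::function rejects because it requires copyable targets.

#include <cstdio>
#include <memory>
#include "mozilla/MoveOnlyFunction.h"

int main() {
  auto data = std::make_unique<int>(42);

  // The closure owns the unique_ptr outright; std::function could not
  // hold it, but MoveOnlyFunction can.
  mozilla::MoveOnlyFunction<int()> fn = [d = std::move(data)]() { return *d; };

  std::printf("%d\n", fn());  // prints 42
  return 0;
}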
diff --git a/mfbt/MruCache.h b/mfbt/MruCache.h
new file mode 100644
index 0000000000..716224a3e0
--- /dev/null
+++ b/mfbt/MruCache.h
@@ -0,0 +1,165 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MruCache_h
+#define mozilla_MruCache_h
+
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Attributes.h"
+#include "mozilla/HashFunctions.h"
+
+namespace mozilla {
+
+namespace detail {
+
+// Helper struct for checking if a value is empty.
+//
+// `IsNotEmpty` will return true if `Value` is not a pointer type or if the
+// pointer value is not null.
+template <typename Value, bool IsPtr = std::is_pointer<Value>::value>
+struct EmptyChecker {
+ static bool IsNotEmpty(const Value&) { return true; }
+};
+// Template specialization for the `IsPtr == true` case.
+template <typename Value>
+struct EmptyChecker<Value, true> {
+ static bool IsNotEmpty(const Value& aVal) { return aVal != nullptr; }
+};
+
+} // namespace detail
+
+// Provides a most recently used cache that can be used as a layer on top of
+// a larger container where lookups can be expensive. The default size is 31,
+// which, as a prime number, provides a better distribution of cached entries.
+//
+// Users are expected to provide a `Cache` class that defines two required
+// methods:
+// - A method for providing the hash of a key:
+//
+// static HashNumber Hash(const KeyType& aKey)
+//
+// - A method for matching a key to a value, for pointer types the value
+// is guaranteed not to be null.
+//
+// static bool Match(const KeyType& aKey, const ValueType& aVal)
+//
+// For example:
+// class MruExample : public MruCache<void*, PtrInfo*, MruExample>
+// {
+//  public:
+//   static HashNumber Hash(const KeyType& aKey)
+//   {
+//     return HashGeneric(aKey);
+//   }
+//   static bool Match(const KeyType& aKey, const ValueType& aVal)
+//   {
+//     return aVal->mPtr == aKey;
+//   }
+// };
+template <class Key, class Value, class Cache, size_t Size = 31>
+class MruCache {
+ // Best distribution is achieved with a prime number. Ideally the closest
+ // to a power of two will be the most efficient use of memory. This
+ // assertion is pretty weak, but should catch the common inclination to
+ // use a power-of-two.
+ static_assert(Size % 2 != 0, "Use a prime number");
+
+ // This is a stronger assertion but significantly limits the values to just
+ // those close to a power-of-two value.
+ // static_assert(Size == 7 || Size == 13 || Size == 31 || Size == 61 ||
+ // Size == 127 || Size == 251 || Size == 509 || Size == 1021,
+ // "Use a prime number less than 1024");
+
+ public:
+ using KeyType = Key;
+ using ValueType = Value;
+
+ MruCache() = default;
+ MruCache(const MruCache&) = delete;
+ MruCache(MruCache&&) = delete;
+
+ // Inserts the given value into the cache. Potentially overwrites an
+ // existing entry.
+ template <typename U>
+ void Put(const KeyType& aKey, U&& aVal) {
+ *RawEntry(aKey) = std::forward<U>(aVal);
+ }
+
+ // Removes the given entry if it is in the cache.
+ void Remove(const KeyType& aKey) { Lookup(aKey).Remove(); }
+
+ // Clears all cached entries and resets them to a default value.
+ void Clear() {
+ for (ValueType& val : mCache) {
+ val = ValueType{};
+ }
+ }
+
+ // Helper that holds an entry that matched a lookup key. Usage:
+ //
+ // auto p = mCache.Lookup(aKey);
+ // if (p) {
+ // return p.Data();
+ // }
+ //
+ // auto foo = new Foo();
+ // p.Set(foo);
+ // return foo;
+ class Entry {
+ public:
+ Entry(ValueType* aEntry, bool aMatch) : mEntry(aEntry), mMatch(aMatch) {
+ MOZ_ASSERT(mEntry);
+ }
+
+ explicit operator bool() const { return mMatch; }
+
+ ValueType& Data() const {
+ MOZ_ASSERT(mMatch);
+ return *mEntry;
+ }
+
+ template <typename U>
+ void Set(U&& aValue) {
+ mMatch = true;
+ Data() = std::forward<U>(aValue);
+ }
+
+ void Remove() {
+ if (mMatch) {
+ Data() = ValueType{};
+ mMatch = false;
+ }
+ }
+
+ private:
+ ValueType* mEntry; // Location of the entry in the cache.
+ bool mMatch; // Whether the value matched.
+ };
+
+ // Retrieves an entry from the cache. Can be used to test if an entry is
+ // present, update the entry to a new value, or remove the entry if one was
+ // matched.
+ Entry Lookup(const KeyType& aKey) {
+ using EmptyChecker = detail::EmptyChecker<ValueType>;
+
+ auto entry = RawEntry(aKey);
+ bool match = EmptyChecker::IsNotEmpty(*entry) && Cache::Match(aKey, *entry);
+ return Entry(entry, match);
+ }
+
+ private:
+ MOZ_ALWAYS_INLINE ValueType* RawEntry(const KeyType& aKey) {
+ return &mCache[Cache::Hash(aKey) % Size];
+ }
+
+ ValueType mCache[Size] = {};
+};
+
+} // namespace mozilla
+
+#endif // mozilla_MruCache_h
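
Putting the pieces together, a hypothetical cache in front of a slower container; LookupSlow is an assumed stand-in for the expensive fallback path:

#include "mozilla/MruCache.h"

struct PtrInfo {
  void* mPtr;
  int mData;
};

// Assumed expensive fallback (e.g. a lookup in a large hash table).
PtrInfo* LookupSlow(void* aKey);

class PtrInfoCache : public mozilla::MruCache<void*, PtrInfo*, PtrInfoCache> {
 public:
  static mozilla::HashNumber Hash(const KeyType& aKey) {
    return mozilla::HashGeneric(aKey);
  }
  static bool Match(const KeyType& aKey, const ValueType& aVal) {
    return aVal->mPtr == aKey;
  }
};

PtrInfo* Lookup(PtrInfoCache& aCache, void* aKey) {
  auto p = aCache.Lookup(aKey);
  if (p) {
    return p.Data();  // cache hit, slow path skipped
  }
  PtrInfo* info = LookupSlow(aKey);
  if (info) {
    p.Set(info);  // prime the entry for the next lookup
  }
  return info;
}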
diff --git a/mfbt/NeverDestroyed.h b/mfbt/NeverDestroyed.h
new file mode 100644
index 0000000000..fe3b366c69
--- /dev/null
+++ b/mfbt/NeverDestroyed.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_NeverDestroyed_h
+#define mozilla_NeverDestroyed_h
+
+#include <new>
+#include <type_traits>
+#include <utility>
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+// Helper type for creating a local static member of type `T` when `T` has a
+// non-trivial static destructor. When used for the local static value, this
+// type will avoid introducing a static destructor for these types, as they
+// will survive until shutdown.
+//
+// This can be very useful to avoid static destructors, which are heavily
+// discouraged. Using this type is unnecessary if `T` already has a trivial
+// destructor, and may introduce unnecessary extra overhead.
+//
+// This type must only be used with static local members within a function,
+// which will be enforced by the clang static analysis.
+template <typename T>
+class MOZ_STATIC_LOCAL_CLASS MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS
+ NeverDestroyed {
+ public:
+ static_assert(
+ !std::is_trivially_destructible_v<T>,
+ "NeverDestroyed is unnecessary for trivially destructable types");
+
+ // Allow constructing the inner type.
+ // This isn't constexpr, as it requires invoking placement-new. See the
+ // comment on `mStorage`.
+ template <typename... U>
+ explicit NeverDestroyed(U&&... aArgs) {
+ new (mStorage) T(std::forward<U>(aArgs)...);
+ }
+
+ const T& operator*() const { return *get(); }
+ T& operator*() { return *get(); }
+
+ const T* operator->() const { return get(); }
+ T* operator->() { return get(); }
+
+ const T* get() const { return reinterpret_cast<T*>(mStorage); }
+ T* get() { return reinterpret_cast<T*>(mStorage); }
+
+ // Block copy & move constructor, as the type is not safe to copy.
+ NeverDestroyed(const NeverDestroyed&) = delete;
+ NeverDestroyed& operator=(const NeverDestroyed&) = delete;
+
+ private:
+ // Correctly aligned storage for the type. We unfortunately can't use a union
+ // for alignment & constexpr initialization as that would require an explicit
+ // destructor declaration, making `NeverDestroyed` non-trivially destructible.
+ alignas(T) char mStorage[sizeof(T)];
+};
+
+} // namespace mozilla
+
+#endif // mozilla_NeverDestroyed_h
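
A sketch of the intended pattern: a function-local static whose destructor would otherwise have to run at shutdown.

#include <string>
#include <vector>
#include "mozilla/NeverDestroyed.h"

// Returns a process-lifetime list without registering a static destructor;
// the vector is intentionally never destroyed.
const std::vector<std::string>& KnownSchemes() {
  static mozilla::NeverDestroyed<std::vector<std::string>> sSchemes{
      std::vector<std::string>{"http", "https", "file"}};
  return *sSchemes;
}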
diff --git a/mfbt/NonDereferenceable.h b/mfbt/NonDereferenceable.h
new file mode 100644
index 0000000000..30c4cac853
--- /dev/null
+++ b/mfbt/NonDereferenceable.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_NonDereferenceable_h
+#define mozilla_NonDereferenceable_h
+
+/* A pointer wrapper indicating that the pointer should not be dereferenced. */
+
+#include "mozilla/Attributes.h"
+
+#include <cstdint>
+
+// Macro indicating that a function manipulates a pointer that will not be
+// dereferenced, and therefore there is no need to check the object.
+#if defined(__clang__)
+# define NO_POINTEE_CHECKS __attribute__((no_sanitize("vptr")))
+#else
+# define NO_POINTEE_CHECKS /* nothing */
+#endif
+
+namespace mozilla {
+
+// NonDereferenceable<T> wraps a raw pointer value of type T*, but prevents
+// dereferencing.
+//
+// The main use case is for pointers referencing memory that may not
+// contain a valid object, either because the object has already been freed, or
+// is under active construction or destruction (and hence parts of it may be
+// uninitialized or destructed.)
+// Such a pointer may still be useful, e.g., for its numeric value for
+// logging/debugging purposes, which may be accessed with `value()`.
+// Using NonDereferenceable with such pointers will make this intent clearer,
+// and prevent misuses.
+//
+// Note that NonDereferenceable is only a wrapper and is NOT an owning pointer,
+// i.e., it will not release/free the object.
+//
+// NonDereferenceable allows conversions between compatible pointer types, e.g.,
+// to navigate a class hierarchy and identify parent/sub-objects. Note that the
+// converted pointers stay safely NonDereferenceable.
+//
+// Use of NonDereferenceable is required to avoid errors from sanitization tools
+// like `clang++ -fsanitize=vptr`, and should prevent false positives while
+// pointers are manipulated within NonDereferenceable objects.
+//
+template <typename T>
+class NonDereferenceable {
+ public:
+ // Default construction with a null value.
+ NonDereferenceable() : mPtr(nullptr) {}
+
+ // Default copy construction and assignment.
+ NO_POINTEE_CHECKS
+ NonDereferenceable(const NonDereferenceable&) = default;
+ NO_POINTEE_CHECKS
+ NonDereferenceable<T>& operator=(const NonDereferenceable&) = default;
+ // No move operations, as we're only carrying a non-owning pointer, so
+ // copying is most efficient.
+
+ // Construct/assign from a T* raw pointer.
+ // A raw pointer should usually point at a valid object, however we want to
+ // leave the ability to the user to create a NonDereferenceable from any
+ // pointer. Also, strictly speaking, in a constructor or destructor, `this`
+ // points at an object still being constructed or already partially
+ // destructed, which some very sensitive sanitizers could complain about.
+ NO_POINTEE_CHECKS
+ explicit NonDereferenceable(T* aPtr) : mPtr(aPtr) {}
+ NO_POINTEE_CHECKS
+ NonDereferenceable& operator=(T* aPtr) {
+ mPtr = aPtr;
+ return *this;
+ }
+
+ // Construct/assign from a compatible pointer type.
+ template <typename U>
+ NO_POINTEE_CHECKS explicit NonDereferenceable(U* aOther)
+ : mPtr(static_cast<T*>(aOther)) {}
+ template <typename U>
+ NO_POINTEE_CHECKS NonDereferenceable& operator=(U* aOther) {
+ mPtr = static_cast<T*>(aOther);
+ return *this;
+ }
+
+ // Construct/assign from a NonDereferenceable with a compatible pointer type.
+ template <typename U>
+ NO_POINTEE_CHECKS MOZ_IMPLICIT
+ NonDereferenceable(const NonDereferenceable<U>& aOther)
+ : mPtr(static_cast<T*>(aOther.mPtr)) {}
+ template <typename U>
+ NO_POINTEE_CHECKS NonDereferenceable& operator=(
+ const NonDereferenceable<U>& aOther) {
+ mPtr = static_cast<T*>(aOther.mPtr);
+ return *this;
+ }
+
+ // Explicitly disallow dereference operators, so that compiler errors point
+ // at these lines:
+ T& operator*() = delete; // Cannot dereference NonDereferenceable!
+ T* operator->() = delete; // Cannot dereference NonDereferenceable!
+
+ // Null check.
+ NO_POINTEE_CHECKS
+ explicit operator bool() const { return !!mPtr; }
+
+ // Extract the pointer value, untyped.
+ NO_POINTEE_CHECKS
+ uintptr_t value() const { return reinterpret_cast<uintptr_t>(mPtr); }
+
+ private:
+ // Let other NonDereferenceable templates access mPtr, to permit construction/
+ // assignment from compatible pointer types.
+ template <typename>
+ friend class NonDereferenceable;
+
+ T* MOZ_NON_OWNING_REF mPtr;
+};
+
+} // namespace mozilla
+
+#undef NO_POINTEE_CHECKS
+
+#endif /* mozilla_NonDereferenceable_h */
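
A sketch of the main use case, with a hypothetical logging hook: recording the address of an object mid-destruction without ever dereferencing it.

#include <cinttypes>
#include <cstdio>
#include "mozilla/NonDereferenceable.h"

struct Node;

static void LogDestroy(mozilla::NonDereferenceable<Node> aPtr) {
  // Only the numeric value is touched; *aPtr and aPtr-> would not compile.
  std::printf("destroying Node @ 0x%" PRIxPTR "\n", aPtr.value());
}

struct Node {
  // `this` points at a partially-destructed object here, so it is wrapped
  // rather than passed as a raw Node*.
  ~Node() { LogDestroy(mozilla::NonDereferenceable<Node>(this)); }
};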
diff --git a/mfbt/NotNull.h b/mfbt/NotNull.h
new file mode 100644
index 0000000000..1a12400e14
--- /dev/null
+++ b/mfbt/NotNull.h
@@ -0,0 +1,449 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_NotNull_h
+#define mozilla_NotNull_h
+
+// It's often unclear if a particular pointer, be it raw (T*) or smart
+// (RefPtr<T>, nsCOMPtr<T>, etc.) can be null. This leads to missing null
+// checks (which can cause crashes) and unnecessary null checks (which clutter
+// the code).
+//
+// C++ has a built-in alternative that avoids these problems: references. This
+// module defines another alternative, NotNull, which can be used in cases
+// where references are not suitable.
+//
+// In the comments below we use the word "handle" to cover all varieties of
+// pointers and references.
+//
+// References
+// ----------
+// References are always non-null. (You can do |T& r = *p;| where |p| is null,
+// but that's undefined behaviour. C++ doesn't provide any built-in, ironclad
+// guarantee of non-nullness.)
+//
+// A reference works well when you need a temporary handle to an existing
+// single object, e.g. for passing a handle to a function, or as a local handle
+// within another object. (In Rust parlance, this is a "borrow".)
+//
+// A reference is less appropriate in the following cases.
+//
+// - As a primary handle to an object. E.g. code such as this is possible but
+// strange: |T& t = *new T(); ...; delete &t;|
+//
+// - As a handle to an array. It's common for |T*| to refer to either a single
+// |T| or an array of |T|, but |T&| cannot refer to an array of |T| because
+// you can't index off a reference (at least, not without first converting it
+// to a pointer).
+//
+// - When the handle identity is meaningful, e.g. if you have a hashtable of
+// handles, because you have to use |&| on the reference to convert it to a
+// pointer.
+//
+// - Some people don't like using non-const references as function parameters,
+// because it is not clear at the call site that the argument might be
+// modified.
+//
+// - When you need "smart" behaviour. E.g. we lack reference equivalents to
+// RefPtr and nsCOMPtr.
+//
+// - When interfacing with code that uses pointers a lot, sometimes using a
+// reference just feels like an odd fit.
+//
+// Furthermore, a reference is impossible in the following cases.
+//
+// - When the handle is rebound to another object. References don't allow this.
+//
+// - When the handle has type |void|. |void&| is not allowed.
+//
+// NotNull is an alternative that can be used in any of the above cases except
+// for the last one, where the handle type is |void|. See below.
+
+#include <stddef.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+
+namespace mozilla {
+
+namespace detail {
+template <typename T>
+struct CopyablePtr {
+ T mPtr;
+
+ template <typename U>
+ explicit CopyablePtr(U&& aPtr) : mPtr{std::forward<U>(aPtr)} {}
+
+ template <typename U>
+ explicit CopyablePtr(CopyablePtr<U> aPtr) : mPtr{std::move(aPtr.mPtr)} {}
+};
+} // namespace detail
+
+template <typename T>
+class MovingNotNull;
+
+// NotNull can be used to wrap a "base" pointer (raw or smart) to indicate it
+// is not null. Some examples:
+//
+// - NotNull<char*>
+// - NotNull<RefPtr<Event>>
+// - NotNull<nsCOMPtr<Event>>
+// - NotNull<UniquePtr<Pointee>>
+//
+// NotNull has the following notable properties.
+//
+// - It has zero space overhead.
+//
+// - It must be initialized explicitly. There is no default initialization.
+//
+// - It auto-converts to the base pointer type.
+//
+// - It does not auto-convert from a base pointer. Implicit conversion from a
+// less-constrained type (e.g. T*) to a more-constrained type (e.g.
+// NotNull<T*>) is dangerous. Creation and assignment from a base pointer can
+// only be done with WrapNotNull() or MakeNotNull<>(), which makes them
+// impossible to overlook, both when writing and reading code.
+//
+// - When initialized (or assigned) it is checked, and if it is null we abort.
+// This guarantees that it cannot be null.
+//
+// - |operator bool()| is deleted. This means you cannot check a NotNull in a
+// boolean context, which eliminates the possibility of unnecessary null
+// checks.
+//
+// - It is not movable, but copyable if the base pointer type is copyable. It
+// may be used together with MovingNotNull to avoid unnecessary copies or when
+// the base pointer type is not copyable (such as UniquePtr<T>).
+//
+template <typename T>
+class NotNull {
+ template <typename U>
+ friend constexpr NotNull<U> WrapNotNull(U aBasePtr);
+ template <typename U>
+ friend constexpr NotNull<U> WrapNotNullUnchecked(U aBasePtr);
+ template <typename U, typename... Args>
+ friend constexpr NotNull<U> MakeNotNull(Args&&... aArgs);
+ template <typename U>
+ friend class NotNull;
+
+ detail::CopyablePtr<T> mBasePtr;
+
+ // Only used by WrapNotNull(), WrapNotNullUnchecked() and MakeNotNull<U>().
+ template <typename U>
+ constexpr explicit NotNull(U aBasePtr) : mBasePtr(T{std::move(aBasePtr)}) {
+ static_assert(sizeof(T) == sizeof(NotNull<T>),
+ "NotNull must have zero space overhead.");
+ static_assert(offsetof(NotNull<T>, mBasePtr) == 0,
+ "mBasePtr must have zero offset.");
+ }
+
+ public:
+ // Disallow default construction.
+ NotNull() = delete;
+
+ // Construct/assign from another NotNull with a compatible base pointer type.
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<const U&, T>>>
+ constexpr MOZ_IMPLICIT NotNull(const NotNull<U>& aOther)
+ : mBasePtr(aOther.mBasePtr) {}
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U&&, T>>>
+ constexpr MOZ_IMPLICIT NotNull(MovingNotNull<U>&& aOther)
+ : mBasePtr(std::move(aOther).unwrapBasePtr()) {}
+
+ // Disallow null checks, which are unnecessary for this type.
+ explicit operator bool() const = delete;
+
+ // Explicit conversion to a base pointer. Use only to resolve ambiguity or to
+ // get a castable pointer.
+ constexpr const T& get() const { return mBasePtr.mPtr; }
+
+ // Implicit conversion to a base pointer. Preferable to get().
+ constexpr operator const T&() const { return get(); }
+
+ // Implicit conversion to a raw pointer from const lvalue-reference if
+ // supported by the base pointer (for RefPtr<T> -> T* compatibility).
+ template <typename U,
+ std::enable_if_t<!std::is_pointer_v<T> &&
+ std::is_convertible_v<const T&, U*>,
+ int> = 0>
+ constexpr operator U*() const& {
+ return get();
+ }
+
+ // Don't allow implicit conversions to raw pointers from rvalue-references.
+ template <typename U,
+ std::enable_if_t<!std::is_pointer_v<T> &&
+ std::is_convertible_v<const T&, U*> &&
+ !std::is_convertible_v<const T&&, U*>,
+ int> = 0>
+ constexpr operator U*() const&& = delete;
+
+ // Dereference operators.
+ constexpr auto* operator->() const MOZ_NONNULL_RETURN {
+ return mBasePtr.mPtr.operator->();
+ }
+ constexpr decltype(*mBasePtr.mPtr) operator*() const {
+ return *mBasePtr.mPtr;
+ }
+
+ // NotNull can be copied, but not moved. Moving a NotNull with a smart base
+ // pointer would leave a nullptr NotNull behind. The move operations must not
+ // be explicitly deleted though, since that would cause overload resolution to
+ // fail in situations where a copy is possible.
+ NotNull(const NotNull&) = default;
+ NotNull& operator=(const NotNull&) = default;
+};
+
+// Specialization for T* to allow adding MOZ_NONNULL_RETURN attributes.
+template <typename T>
+class NotNull<T*> {
+ template <typename U>
+ friend constexpr NotNull<U> WrapNotNull(U aBasePtr);
+ template <typename U>
+ friend constexpr NotNull<U*> WrapNotNullUnchecked(U* aBasePtr);
+ template <typename U, typename... Args>
+ friend constexpr NotNull<U> MakeNotNull(Args&&... aArgs);
+ template <typename U>
+ friend class NotNull;
+
+ T* mBasePtr;
+
+ // Only used by WrapNotNull(), WrapNotNullUnchecked() and MakeNotNull<U>().
+ template <typename U>
+ constexpr explicit NotNull(U* aBasePtr) : mBasePtr(aBasePtr) {}
+
+ public:
+ // Disallow default construction.
+ NotNull() = delete;
+
+ // Construct/assign from another NotNull with a compatible base pointer type.
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<const U&, T*>>>
+ constexpr MOZ_IMPLICIT NotNull(const NotNull<U>& aOther)
+ : mBasePtr(aOther.get()) {
+ static_assert(sizeof(T*) == sizeof(NotNull<T*>),
+ "NotNull must have zero space overhead.");
+ static_assert(offsetof(NotNull<T*>, mBasePtr) == 0,
+ "mBasePtr must have zero offset.");
+ }
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U&&, T*>>>
+ constexpr MOZ_IMPLICIT NotNull(MovingNotNull<U>&& aOther)
+ : mBasePtr(NotNull{std::move(aOther)}) {}
+
+ // Disallow null checks, which are unnecessary for this type.
+ explicit operator bool() const = delete;
+
+ // Explicit conversion to a base pointer. Use only to resolve ambiguity or to
+ // get a castable pointer.
+ constexpr T* get() const MOZ_NONNULL_RETURN { return mBasePtr; }
+
+ // Implicit conversion to a base pointer. Preferable to get().
+ constexpr operator T*() const MOZ_NONNULL_RETURN { return get(); }
+
+ // Dereference operators.
+ constexpr T* operator->() const MOZ_NONNULL_RETURN { return get(); }
+ constexpr T& operator*() const { return *mBasePtr; }
+};
+
+template <typename T>
+constexpr NotNull<T> WrapNotNull(T aBasePtr) {
+ MOZ_RELEASE_ASSERT(aBasePtr);
+ return NotNull<T>{std::move(aBasePtr)};
+}
+
+// WrapNotNullUnchecked should only be used in situations where it is
+// statically known that aBasePtr is non-null, and redundant release assertions
+// should be avoided. It is only defined for raw base pointers, since it is only
+// needed for those right now. There is no fundamental reason not to allow
+// arbitrary base pointers here.
+template <typename T>
+constexpr NotNull<T> WrapNotNullUnchecked(T aBasePtr) {
+ return NotNull<T>{std::move(aBasePtr)};
+}
+
+template <typename T>
+MOZ_NONNULL(1)
+constexpr NotNull<T*> WrapNotNullUnchecked(T* const aBasePtr) {
+#if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wpointer-bool-conversion"
+#elif defined(__GNUC__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wnonnull-compare"
+#endif
+ MOZ_ASSERT(aBasePtr);
+#if defined(__clang__)
+# pragma clang diagnostic pop
+#elif defined(__GNUC__)
+# pragma GCC diagnostic pop
+#endif
+ return NotNull<T*>{aBasePtr};
+}
+
+// A variant of NotNull that can be used as a return value or parameter type and
+// moved into both NotNull and non-NotNull targets. This is not possible with
+// NotNull, as it is not movable. MovingNotNull therefore cannot guarantee that
+// it is always non-null after a move, but it cannot be dereferenced either,
+// and debug assertions ensure it is only moved out once.
+template <typename T>
+class MOZ_NON_AUTOABLE MovingNotNull {
+ template <typename U>
+ friend constexpr MovingNotNull<U> WrapMovingNotNullUnchecked(U aBasePtr);
+
+ T mBasePtr;
+#ifdef DEBUG
+ bool mConsumed = false;
+#endif
+
+ // This constructor is only used by WrapMovingNotNullUnchecked().
+ template <typename U>
+ constexpr explicit MovingNotNull(U aBasePtr) : mBasePtr{std::move(aBasePtr)} {
+#ifndef DEBUG
+ static_assert(sizeof(T) == sizeof(MovingNotNull<T>),
+ "NotNull must have zero space overhead.");
+#endif
+ static_assert(offsetof(MovingNotNull<T>, mBasePtr) == 0,
+ "mBasePtr must have zero offset.");
+ }
+
+ public:
+ MovingNotNull() = delete;
+
+ MOZ_IMPLICIT MovingNotNull(const NotNull<T>& aSrc) : mBasePtr(aSrc.get()) {}
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
+ MOZ_IMPLICIT MovingNotNull(const NotNull<U>& aSrc) : mBasePtr(aSrc.get()) {}
+
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
+ MOZ_IMPLICIT MovingNotNull(MovingNotNull<U>&& aSrc)
+ : mBasePtr(std::move(aSrc).unwrapBasePtr()) {}
+
+ MOZ_IMPLICIT operator T() && { return std::move(*this).unwrapBasePtr(); }
+
+ MOZ_IMPLICIT operator NotNull<T>() && { return std::move(*this).unwrap(); }
+
+ NotNull<T> unwrap() && {
+ return WrapNotNullUnchecked(std::move(*this).unwrapBasePtr());
+ }
+
+ T unwrapBasePtr() && {
+#ifdef DEBUG
+ MOZ_ASSERT(!mConsumed);
+ mConsumed = true;
+#endif
+ return std::move(mBasePtr);
+ }
+
+ MovingNotNull(MovingNotNull&&) = default;
+ MovingNotNull& operator=(MovingNotNull&&) = default;
+};
+
+template <typename T>
+constexpr MovingNotNull<T> WrapMovingNotNullUnchecked(T aBasePtr) {
+ return MovingNotNull<T>{std::move(aBasePtr)};
+}
+
+template <typename T>
+constexpr MovingNotNull<T> WrapMovingNotNull(T aBasePtr) {
+ MOZ_RELEASE_ASSERT(aBasePtr);
+ return WrapMovingNotNullUnchecked(std::move(aBasePtr));
+}
+
+namespace detail {
+
+// Extract the pointed-to type from a pointer type (be it raw or smart).
+// The default implementation uses the dereferencing operator of the pointer
+// type to find what it's pointing to.
+template <typename Pointer>
+struct PointedTo {
+ // Remove the reference that dereferencing operators may return.
+ using Type = std::remove_reference_t<decltype(*std::declval<Pointer>())>;
+ using NonConstType = std::remove_const_t<Type>;
+};
+
+// Specializations for raw pointers.
+// This is especially required because VS 2017 15.6 (March 2018) started
+// rejecting the above `decltype(*std::declval<Pointer>())` trick for raw
+// pointers.
+// See bug 1443367.
+template <typename T>
+struct PointedTo<T*> {
+ using Type = T;
+ using NonConstType = T;
+};
+
+template <typename T>
+struct PointedTo<const T*> {
+ using Type = const T;
+ using NonConstType = T;
+};
+
+} // namespace detail
+
+// Allocate an object with infallible new, and wrap its pointer in NotNull.
+// |MakeNotNull<Ptr<Ob>>(args...)| will run |new Ob(args...)|
+// and return NotNull<Ptr<Ob>>.
+template <typename T, typename... Args>
+constexpr NotNull<T> MakeNotNull(Args&&... aArgs) {
+ using Pointee = typename detail::PointedTo<T>::NonConstType;
+ static_assert(!std::is_array_v<Pointee>,
+ "MakeNotNull cannot construct an array");
+ return NotNull<T>(new Pointee(std::forward<Args>(aArgs)...));
+}
+
+// Compare two NotNulls.
+template <typename T, typename U>
+constexpr bool operator==(const NotNull<T>& aLhs, const NotNull<U>& aRhs) {
+ return aLhs.get() == aRhs.get();
+}
+template <typename T, typename U>
+constexpr bool operator!=(const NotNull<T>& aLhs, const NotNull<U>& aRhs) {
+ return aLhs.get() != aRhs.get();
+}
+
+// Compare a NotNull to a base pointer.
+template <typename T, typename U>
+constexpr bool operator==(const NotNull<T>& aLhs, const U& aRhs) {
+ return aLhs.get() == aRhs;
+}
+template <typename T, typename U>
+constexpr bool operator!=(const NotNull<T>& aLhs, const U& aRhs) {
+ return aLhs.get() != aRhs;
+}
+
+// Compare a base pointer to a NotNull.
+template <typename T, typename U>
+constexpr bool operator==(const T& aLhs, const NotNull<U>& aRhs) {
+ return aLhs == aRhs.get();
+}
+template <typename T, typename U>
+constexpr bool operator!=(const T& aLhs, const NotNull<U>& aRhs) {
+ return aLhs != aRhs.get();
+}
+
+// Disallow comparing a NotNull to a nullptr.
+template <typename T>
+bool operator==(const NotNull<T>&, decltype(nullptr)) = delete;
+template <typename T>
+bool operator!=(const NotNull<T>&, decltype(nullptr)) = delete;
+
+// Disallow comparing a nullptr to a NotNull.
+template <typename T>
+bool operator==(decltype(nullptr), const NotNull<T>&) = delete;
+template <typename T>
+bool operator!=(decltype(nullptr), const NotNull<T>&) = delete;
+
+} // namespace mozilla
+
+#endif /* mozilla_NotNull_h */
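
A short sketch of the creation paths: WrapNotNull() checks once at the boundary, MakeNotNull<>() allocates infallibly, and callees drop their null checks entirely.

#include <cstdio>
#include <cstring>
#include "mozilla/NotNull.h"

static void PrintLen(mozilla::NotNull<const char*> aStr) {
  // No null check needed here; aStr converts implicitly to const char*.
  std::printf("%zu\n", std::strlen(aStr));
}

int main() {
  PrintLen(mozilla::WrapNotNull("hello"));  // release-asserts non-null once
  auto n = mozilla::MakeNotNull<int*>(7);   // runs |new int(7)|, never null
  std::printf("%d\n", *n);
  delete n.get();
  return 0;
}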
diff --git a/mfbt/Opaque.h b/mfbt/Opaque.h
new file mode 100644
index 0000000000..e5dc84f159
--- /dev/null
+++ b/mfbt/Opaque.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* An opaque integral type supporting only comparison operators. */
+
+#ifndef mozilla_Opaque_h
+#define mozilla_Opaque_h
+
+#include <type_traits>
+
+namespace mozilla {
+
+/**
+ * Opaque<T> is a replacement for integral T in cases where only comparisons
+ * must be supported, and it's desirable to prevent accidental dependency on
+ * exact values.
+ */
+template <typename T>
+class Opaque final {
+ static_assert(std::is_integral_v<T>,
+ "mozilla::Opaque only supports integral types");
+
+ T mValue;
+
+ public:
+ Opaque() = default;
+ explicit Opaque(T aValue) : mValue(aValue) {}
+
+ bool operator==(const Opaque& aOther) const {
+ return mValue == aOther.mValue;
+ }
+
+ bool operator!=(const Opaque& aOther) const { return !(*this == aOther); }
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_Opaque_h */
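
A sketch of one plausible use, a table generation number: callers may compare snapshots for equality but cannot read the raw count or do arithmetic on it.

#include <cstdint>
#include "mozilla/Opaque.h"

using Generation = mozilla::Opaque<uint64_t>;

class Table {
 public:
  Generation generation() const { return Generation(mGen); }
  void Mutate() { ++mGen; }

 private:
  uint64_t mGen = 0;
};

// Callers can only ask "did it change?", never inspect the value itself.
bool StillValid(const Table& aTable, Generation aSnapshot) {
  return aTable.generation() == aSnapshot;
}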
diff --git a/mfbt/OperatorNewExtensions.h b/mfbt/OperatorNewExtensions.h
new file mode 100644
index 0000000000..a44a6bdeae
--- /dev/null
+++ b/mfbt/OperatorNewExtensions.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A version of |operator new| that eschews mandatory null-checks. */
+
+#ifndef mozilla_OperatorNewExtensions_h
+#define mozilla_OperatorNewExtensions_h
+
+#include "mozilla/Assertions.h"
+
+// Credit goes to WebKit for this implementation, cf.
+// https://bugs.webkit.org/show_bug.cgi?id=74676
+namespace mozilla {
+enum NotNullTag {
+ KnownNotNull,
+};
+} // namespace mozilla
+
+/*
+ * The logic here is a little subtle. [expr.new] states that if the allocation
+ * function being called returns null, then object initialization must not be
+ * done, and the entirety of the new expression must return null. Non-throwing
+ * (noexcept) functions are defined to return null to indicate failure. The
+ * standard placement operator new is defined in such a way, and so it requires
+ * a null check, even when that null check would be extraneous. Functions
+ * declared without such a specification are defined to throw std::bad_alloc if
+ * they fail, and return a non-null pointer otherwise. We compile without
+ * exceptions, so any placement new overload we define that doesn't declare
+ * itself as noexcept must therefore avoid generating a null check. Below is
+ * just such an overload.
+ *
+ * You might think that MOZ_NONNULL might perform the same function, but
+ * MOZ_NONNULL isn't supported on all of our compilers, and even when it is
+ * supported, doesn't work on all the versions we support. And even keeping
+ * those limitations in mind, we can't put MOZ_NONNULL on the global,
+ * standardized placement new function in any event.
+ *
+ * We deliberately don't add MOZ_NONNULL(3) to tag |p| as non-null, to benefit
+ * hypothetical static analyzers. Doing so makes |MOZ_ASSERT(p)|'s internal
+ * test vacuous, and some compilers warn about such vacuous tests.
+ */
+inline void* operator new(size_t, mozilla::NotNullTag, void* p) {
+ MOZ_ASSERT(p);
+ return p;
+}
+
+#endif // mozilla_OperatorNewExtensions_h
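
A sketch of the overload in use: constructing into storage that is statically known to be non-null, so the null check that plain placement new must emit is avoided.

#include <cstddef>
#include "mozilla/OperatorNewExtensions.h"

struct Point {
  int mX;
  int mY;
};

alignas(Point) static char gStorage[sizeof(Point)];

// gStorage can never be null, so KnownNotNull skips the mandatory check.
Point* MakePoint(int aX, int aY) {
  return new (mozilla::KnownNotNull, gStorage) Point{aX, aY};
}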
diff --git a/mfbt/PairHash.h b/mfbt/PairHash.h
new file mode 100644
index 0000000000..100832dc12
--- /dev/null
+++ b/mfbt/PairHash.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Utilities for hashing pairs. */
+
+#ifndef mozilla_PairHash_h
+#define mozilla_PairHash_h
+
+#include "mozilla/CompactPair.h"
+#include "mozilla/HashFunctions.h"
+
+#include <utility> // std::pair
+
+namespace mozilla {
+
+/**
+ * The HashPair overloads below do just what you'd expect.
+ *
+ * These functions support hashing of std::pair<T,U> and
+ * mozilla::CompactPair<T,U>, where both T and U support AddToHash.
+ */
+template <typename U, typename V>
+[[nodiscard]] inline HashNumber HashPair(const std::pair<U, V>& pair) {
+ // Pair hash combines the hash of each member
+ return HashGeneric(pair.first, pair.second);
+}
+
+template <typename U, typename V>
+[[nodiscard]] inline HashNumber HashCompactPair(const CompactPair<U, V>& pair) {
+ // Pair hash combines the hash of each member
+ return HashGeneric(pair.first(), pair.second());
+}
+
+/**
+ * Hash policy for std::pair compatible with HashTable
+ */
+template <typename T, typename U>
+struct PairHasher {
+ using Key = std::pair<T, U>;
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) { return HashPair(aLookup); }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return aKey == aLookup;
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+/**
+ * Hash policy for mozilla::CompactPair compatible with HashTable
+ */
+template <typename T, typename U>
+struct CompactPairHasher {
+ using Key = CompactPair<T, U>;
+ using Lookup = Key;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ return HashCompactPair(aLookup);
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return aKey == aLookup;
+ }
+
+ static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_PairHash_h */
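
A small sketch of the helpers; the commented-out container line assumes the HashSet and hash-policy hookup from mfbt/HashTable.h.

#include <cstdint>
#include <utility>
#include "mozilla/PairHash.h"

// Hash a (row, column) grid coordinate as a single value.
mozilla::HashNumber CellHash(uint32_t aRow, uint32_t aCol) {
  return mozilla::HashPair(std::make_pair(aRow, aCol));
}

// PairHasher can then serve as the hash policy of a HashTable-based set:
// mozilla::HashSet<std::pair<uint32_t, uint32_t>,
//                  mozilla::PairHasher<uint32_t, uint32_t>>
//     liveCells;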
diff --git a/mfbt/Path.h b/mfbt/Path.h
new file mode 100644
index 0000000000..eed687dd06
--- /dev/null
+++ b/mfbt/Path.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Represents the native path format on the platform. */
+
+#ifndef mozilla_Path_h
+#define mozilla_Path_h
+
+namespace mozilla {
+namespace filesystem {
+
+/*
+ * Mozilla variant of std::filesystem::path.
+ * Only |value_type| is implemented at the moment.
+ */
+class Path {
+ public:
+#ifdef XP_WIN
+ using value_type = char16_t;
+#else
+ using value_type = char;
+#endif
+};
+
+} /* namespace filesystem */
+} /* namespace mozilla */
+
+#endif /* mozilla_Path_h */
diff --git a/mfbt/PodOperations.h b/mfbt/PodOperations.h
new file mode 100644
index 0000000000..f4e5da4c79
--- /dev/null
+++ b/mfbt/PodOperations.h
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Operations for zeroing POD types, arrays, and so on.
+ *
+ * These operations are preferable to memset, memcmp, and the like because they
+ * don't require remembering to multiply by sizeof(T), array lengths, and so on
+ * everywhere.
+ */
+
+#ifndef mozilla_PodOperations_h
+#define mozilla_PodOperations_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+#include <string.h>
+
+namespace mozilla {
+
+template <typename T, size_t Length>
+class Array;
+
+template <typename T>
+class NotNull;
+
+/** Set the contents of |aT| to 0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodZero(T* aT) {
+ memset(aT, 0, sizeof(T));
+}
+
+/** Set the contents of |aNElem| elements starting at |aT| to 0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodZero(T* aT, size_t aNElem) {
+ /*
+ * This function is often called with 'aNElem' small; we use an inline loop
+ * instead of calling 'memset' with a non-constant length. The compiler
+ * should inline the memset call with constant size, though.
+ */
+ for (T* end = aT + aNElem; aT < end; aT++) {
+ memset(aT, 0, sizeof(T));
+ }
+}
+
+/** Set the contents of |aNElem| elements starting at |aT| to 0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodZero(NotNull<T*> aT, size_t aNElem) {
+ PodZero(aT.get(), aNElem);
+}
+
+/*
+ * Arrays implicitly convert to pointers to their first element, which is
+ * dangerous when combined with the above PodZero definitions. Adding an
+ * overload for arrays is ambiguous, so we need another identifier. The
+ * ambiguous overload is left to catch mistaken uses of PodZero; if you get a
+ * compile error involving PodZero and array types, use PodArrayZero instead.
+ */
+template <typename T, size_t N>
+static void PodZero(T (&aT)[N]) = delete;
+template <typename T, size_t N>
+static void PodZero(T (&aT)[N], size_t aNElem) = delete;
+
+/** Set the contents of the array |aT| to zero. */
+template <class T, size_t N>
+static MOZ_ALWAYS_INLINE void PodArrayZero(T (&aT)[N]) {
+ memset(aT, 0, N * sizeof(T));
+}
+
+template <typename T, size_t N>
+static MOZ_ALWAYS_INLINE void PodArrayZero(Array<T, N>& aArr) {
+ memset(&aArr[0], 0, N * sizeof(T));
+}
+
+/**
+ * Assign |*aSrc| to |*aDst|. The locations must not be the same and must not
+ * overlap.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodAssign(T* aDst, const T* aSrc) {
+ MOZ_ASSERT(aDst + 1 <= aSrc || aSrc + 1 <= aDst,
+ "destination and source must not overlap");
+ memcpy(reinterpret_cast<char*>(aDst), reinterpret_cast<const char*>(aSrc),
+ sizeof(T));
+}
+
+/**
+ * Copy |aNElem| T elements from |aSrc| to |aDst|. The two memory ranges must
+ * not overlap!
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodCopy(T* aDst, const T* aSrc, size_t aNElem) {
+ MOZ_ASSERT(aDst + aNElem <= aSrc || aSrc + aNElem <= aDst,
+ "destination and source must not overlap");
+ if (aNElem < 128) {
+ /*
+ * Avoid using operator= in this loop, as it may have been
+ * intentionally deleted by the POD type.
+ */
+ for (const T* srcend = aSrc + aNElem; aSrc < srcend; aSrc++, aDst++) {
+ PodAssign(aDst, aSrc);
+ }
+ } else {
+ memcpy(aDst, aSrc, aNElem * sizeof(T));
+ }
+}
+
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodCopy(volatile T* aDst, const volatile T* aSrc,
+ size_t aNElem) {
+ MOZ_ASSERT(aDst + aNElem <= aSrc || aSrc + aNElem <= aDst,
+ "destination and source must not overlap");
+
+ /*
+ * Volatile |aDst| requires extra work, because it's undefined behavior to
+ * modify volatile objects using the mem* functions. Just write out the
+ * loops manually, using operator= rather than memcpy for the same reason,
+ * and let the compiler optimize to the extent it can.
+ */
+ for (const volatile T* srcend = aSrc + aNElem; aSrc < srcend;
+ aSrc++, aDst++) {
+ *aDst = *aSrc;
+ }
+}
+
+/*
+ * Copy the contents of the array |aSrc| into the array |aDst|, both of size N.
+ * The arrays must not overlap!
+ */
+template <class T, size_t N>
+static MOZ_ALWAYS_INLINE void PodArrayCopy(T (&aDst)[N], const T (&aSrc)[N]) {
+ PodCopy(aDst, aSrc, N);
+}
+
+/**
+ * Copy the memory for |aNElem| T elements from |aSrc| to |aDst|. If the two
+ * memory ranges overlap, then the effect is as if the |aNElem| elements are
+ * first copied from |aSrc| to a temporary array, and then from the temporary
+ * array to |aDst|.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodMove(T* aDst, const T* aSrc, size_t aNElem) {
+ MOZ_ASSERT(aNElem <= SIZE_MAX / sizeof(T),
+ "trying to move an impossible number of elements");
+ memmove(aDst, aSrc, aNElem * sizeof(T));
+}
+
+/**
+ * Looking for a PodEqual? Use ArrayEqual from ArrayUtils.h.
+ * Note that we *cannot* use memcmp for this, due to padding bytes, etc..
+ */
+
+} // namespace mozilla
+
+#endif /* mozilla_PodOperations_h */
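
A sketch of the size-free calling convention these helpers give you, including the deliberate compile error that steers array zeroing to PodArrayZero:

#include <cstdint>
#include "mozilla/PodOperations.h"

struct Header {
  uint32_t mMagic;
  uint32_t mFlags;
};

void ResetAll() {
  Header one;
  mozilla::PodZero(&one);  // single object, no sizeof bookkeeping

  Header table[8];
  mozilla::PodArrayZero(table);  // PodZero(table) is deleted, by design

  Header copy[8];
  mozilla::PodArrayCopy(copy, table);  // sizes checked by the type system
}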
diff --git a/mfbt/Poison.cpp b/mfbt/Poison.cpp
new file mode 100644
index 0000000000..d4ec08e703
--- /dev/null
+++ b/mfbt/Poison.cpp
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A poison value that can be used to fill a memory space with
+ * an address that leads to a safe crash when dereferenced.
+ */
+
+#include "mozilla/Poison.h"
+
+#include "mozilla/Assertions.h"
+#ifdef _WIN32
+# include <windows.h>
+#elif !defined(__OS2__)
+# include <unistd.h>
+# ifndef __wasi__
+# include <sys/mman.h>
+# ifndef MAP_ANON
+# ifdef MAP_ANONYMOUS
+# define MAP_ANON MAP_ANONYMOUS
+# else
+# error "Don't know how to get anonymous memory"
+# endif
+# endif
+# endif
+#endif
+
+// Freed memory is filled with a poison value, which we arrange to
+// form a pointer either to an always-unmapped region of the address
+// space, or to a page that has been reserved and rendered
+// inaccessible via OS primitives. See tests/TestPoisonArea.cpp for
+// extensive discussion of the requirements for this page. The code
+// from here to 'class FreeList' needs to be kept in sync with that
+// file.
+
+#ifdef _WIN32
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+ return VirtualAlloc((void*)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) {
+ VirtualFree(aRegion, aSize, MEM_RELEASE);
+}
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+ SYSTEM_INFO sinfo;
+ GetSystemInfo(&sinfo);
+ if (aRegion >= (uintptr_t)sinfo.lpMaximumApplicationAddress &&
+ aRegion + aSize >= (uintptr_t)sinfo.lpMaximumApplicationAddress) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static uintptr_t GetDesiredRegionSize() {
+ SYSTEM_INFO sinfo;
+ GetSystemInfo(&sinfo);
+ return sinfo.dwAllocationGranularity;
+}
+
+# define RESERVE_FAILED 0
+
+#elif defined(__OS2__)
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+ // OS/2 doesn't support allocation at an arbitrary address,
+ // so return an address that is known to be invalid.
+ return (void*)0xFFFD0000;
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; }
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+ // There's no reliable way to probe an address in the system
+ // arena other than by touching it and seeing if a trap occurs.
+ return false;
+}
+
+static uintptr_t GetDesiredRegionSize() {
+ // Page size is fixed at 4k.
+ return 0x1000;
+}
+
+# define RESERVE_FAILED 0
+
+#elif defined(__wasi__)
+
+# define RESERVE_FAILED 0
+
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+ return RESERVE_FAILED;
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; }
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+ const auto pageSize = 1 << 16;
+ MOZ_ASSERT(pageSize == sysconf(_SC_PAGESIZE));
+ auto heapSize = __builtin_wasm_memory_size(0) * pageSize;
+ return aRegion + aSize < heapSize;
+}
+
+static uintptr_t GetDesiredRegionSize() { return 0; }
+
+#else // __wasi__
+
+# include "mozilla/TaggedAnonymousMemory.h"
+
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+ return MozTaggedAnonymousMmap(reinterpret_cast<void*>(aRegion), aSize,
+ PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0,
+ "poison");
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) {
+ munmap(aRegion, aSize);
+}
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+# ifdef XP_SOLARIS
+ if (posix_madvise(reinterpret_cast<void*>(aRegion), aSize,
+ POSIX_MADV_NORMAL)) {
+# else
+ if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
+# endif
+ return true;
+ }
+ return false;
+}
+
+static uintptr_t GetDesiredRegionSize() { return sysconf(_SC_PAGESIZE); }
+
+# define RESERVE_FAILED MAP_FAILED
+
+#endif // system dependencies
+
+static_assert((sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8) &&
+ (sizeof(uintptr_t) == sizeof(void*)));
+
+static uintptr_t ReservePoisonArea(uintptr_t rgnsize) {
+ if (sizeof(uintptr_t) == 8) {
+ // Use the hardware-inaccessible region.
+ // We have to avoid 64-bit constants and shifts by 32 bits, since this
+ // code is compiled in 32-bit mode, although it is never executed there.
+ return (((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu)) &
+ ~(rgnsize - 1));
+ }
+
+ // First see if we can allocate the preferred poison address from the OS.
+ uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize - 1));
+ void* result = ReserveRegion(candidate, rgnsize);
+ if (result == (void*)candidate) {
+ // success - inaccessible page allocated
+ return candidate;
+ }
+
+ // That didn't work, so see if the preferred address is within a range
+ // of permanently inaccessible memory.
+ if (ProbeRegion(candidate, rgnsize)) {
+ // success - selected page cannot be usable memory
+ if (result != RESERVE_FAILED) {
+ ReleaseRegion(result, rgnsize);
+ }
+ return candidate;
+ }
+
+ // The preferred address is already in use. Did the OS give us a
+ // consolation prize?
+ if (result != RESERVE_FAILED) {
+ return uintptr_t(result);
+ }
+
+ // It didn't, so try to allocate again, without any constraint on
+ // the address.
+ result = ReserveRegion(0, rgnsize);
+ if (result != RESERVE_FAILED) {
+ return uintptr_t(result);
+ }
+
+ MOZ_CRASH("no usable poison region identified");
+}
+
+static uintptr_t GetPoisonValue(uintptr_t aBase, uintptr_t aSize) {
+ if (aSize == 0) { // can't happen
+ return 0;
+ }
+ return aBase + aSize / 2 - 1;
+}
+
+// Poison is used so pervasively throughout the codebase that we decided it was
+// best to actually use ordered dynamic initialization of globals (AKA static
+// constructors) for this. This way everything will have properly initialized
+// poison -- except other dynamic initialization code in libmozglue, which there
+// shouldn't be much of. (libmozglue is one of the first things loaded, and
+// specifically comes before libxul, so nearly all gecko code runs strictly
+// after this.)
+extern "C" {
+uintptr_t gMozillaPoisonSize = GetDesiredRegionSize();
+uintptr_t gMozillaPoisonBase = ReservePoisonArea(gMozillaPoisonSize);
+uintptr_t gMozillaPoisonValue =
+ GetPoisonValue(gMozillaPoisonBase, gMozillaPoisonSize);
+}
diff --git a/mfbt/Poison.h b/mfbt/Poison.h
new file mode 100644
index 0000000000..5b1fae1fd1
--- /dev/null
+++ b/mfbt/Poison.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A poison value that can be used to fill a memory space with
+ * an address that leads to a safe crash when dereferenced.
+ */
+
+#ifndef mozilla_Poison_h
+#define mozilla_Poison_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdint.h>
+#include <string.h>
+
+MOZ_BEGIN_EXTERN_C
+
+extern MFBT_DATA uintptr_t gMozillaPoisonValue;
+
+/**
+ * @return the poison value.
+ */
+inline uintptr_t mozPoisonValue() { return gMozillaPoisonValue; }
+
+/**
+ * Overwrite the memory block of aSize bytes at aPtr with the poison value.
+ * Only a multiple of sizeof(uintptr_t) bytes is overwritten; the last few
+ * bytes (if any) are left untouched.
+ */
+inline void mozWritePoison(void* aPtr, size_t aSize) {
+ const uintptr_t POISON = mozPoisonValue();
+ char* p = (char*)aPtr;
+ char* limit = p + (aSize & ~(sizeof(uintptr_t) - 1));
+ MOZ_ASSERT(aSize >= sizeof(uintptr_t), "poisoning this object has no effect");
+ for (; p < limit; p += sizeof(uintptr_t)) {
+ memcpy(p, &POISON, sizeof(POISON));
+ }
+}
+
+/* Values annotated by CrashReporter */
+extern MFBT_DATA uintptr_t gMozillaPoisonBase;
+extern MFBT_DATA uintptr_t gMozillaPoisonSize;
+
+MOZ_END_EXTERN_C
+
+#if defined(__cplusplus)
+
+namespace mozilla {
+
+/**
+ * A version of CorruptionCanary that is suitable as a member of objects that
+ * are statically allocated.
+ */
+class CorruptionCanaryForStatics {
+ public:
+ constexpr CorruptionCanaryForStatics() : mValue(kCanarySet) {}
+
+ // This is required to avoid static constructor bloat.
+ ~CorruptionCanaryForStatics() = default;
+
+ void Check() const {
+ if (mValue != kCanarySet) {
+ MOZ_CRASH("Canary check failed, check lifetime");
+ }
+ }
+
+ protected:
+ uintptr_t mValue;
+
+ private:
+ static const uintptr_t kCanarySet = 0x0f0b0f0b;
+};
+
+/**
+ * This class is designed to cause crashes when various kinds of memory
+ * corruption are observed. For instance, let's say we have a class C where we
+ * suspect out-of-bounds writes to some members. We can insert a member of type
+ * CorruptionCanary near the members we suspect are being corrupted by
+ * out-of-bounds writes. Or perhaps we have a class K we suspect is subject
+ * to use-after-free violations, in which case it doesn't particularly matter
+ * where in the class we add the CorruptionCanary member.
+ *
+ * In either case, we then insert calls to Check() throughout the code. Doing
+ * so enables us to narrow down the location where the corruption is occurring.
+ * A pleasant side-effect of these additional Check() calls is that crash
+ * signatures may become more regular, as crashes will ideally occur
+ * consolidated at the point of a Check(), rather than scattered about at
+ * various uses of the corrupted memory.
+ */
+class CorruptionCanary : public CorruptionCanaryForStatics {
+ public:
+ constexpr CorruptionCanary() = default;
+
+ ~CorruptionCanary() {
+ Check();
+ mValue = mozPoisonValue();
+ }
+};
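+
+// Illustrative sketch (|C| is hypothetical): place the canary next to a
+// buffer suspected of being overrun, then check it at interesting points.
+//
+//   class C {
+//     char mBuf[64];
+//     CorruptionCanary mCanary;  // adjacent to the suspect buffer
+//
+//    public:
+//     void DoWork() {
+//       mCanary.Check();  // crashes here if mBuf overflowed into the canary
+//     }
+//   };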
+
+} // namespace mozilla
+
+#endif
+
+#endif /* mozilla_Poison_h */
diff --git a/mfbt/RandomNum.cpp b/mfbt/RandomNum.cpp
new file mode 100644
index 0000000000..96de5d4055
--- /dev/null
+++ b/mfbt/RandomNum.cpp
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RandomNum.h"
+
+#include <fcntl.h>
+#ifdef XP_UNIX
+# include <unistd.h>
+#endif
+
+#if defined(XP_WIN)
+
+// Microsoft doesn't "officially" support using RtlGenRandom() directly
+// anymore, and the Windows headers assume that __stdcall is
+// the default calling convention (which is true when Microsoft uses this
+// function to build their own CRT libraries).
+
+// We will explicitly declare it with the proper calling convention.
+
+# include "minwindef.h"
+# define RtlGenRandom SystemFunction036
+extern "C" BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer,
+ ULONG RandomBufferLength);
+
+#endif
+
+#if defined(ANDROID) || defined(XP_DARWIN) || defined(__DragonFly__) || \
+ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__wasi__)
+# include <stdlib.h>
+# define USE_ARC4RANDOM
+#endif
+
+#if defined(__linux__)
+# include <linux/random.h> // For GRND_NONBLOCK.
+# include <sys/syscall.h> // For SYS_getrandom.
+
+// Older glibc versions don't define SYS_getrandom, so we define it here if
+// it's not available. See bug 995069.
+# if defined(__x86_64__)
+# define GETRANDOM_NR 318
+# elif defined(__i386__)
+# define GETRANDOM_NR 355
+# elif defined(__aarch64__)
+# define GETRANDOM_NR 278
+# elif defined(__arm__)
+# define GETRANDOM_NR 384
+# elif defined(__powerpc__)
+# define GETRANDOM_NR 359
+# elif defined(__s390__)
+# define GETRANDOM_NR 349
+# elif defined(__mips__)
+# include <sgidefs.h>
+# if _MIPS_SIM == _MIPS_SIM_ABI32
+# define GETRANDOM_NR 4353
+# elif _MIPS_SIM == _MIPS_SIM_ABI64
+# define GETRANDOM_NR 5313
+# elif _MIPS_SIM == _MIPS_SIM_NABI32
+# define GETRANDOM_NR 6317
+# endif
+# endif
+
+# if defined(SYS_getrandom)
+// We have SYS_getrandom. Use it to check GETRANDOM_NR. Only do this if we set
+// GETRANDOM_NR so tier 3 platforms with recent glibc are not forced to define
+// it for no good reason.
+# if defined(GETRANDOM_NR)
+static_assert(GETRANDOM_NR == SYS_getrandom,
+ "GETRANDOM_NR should match the actual SYS_getrandom value");
+# endif
+# else
+# define SYS_getrandom GETRANDOM_NR
+# endif
+
+# if defined(GRND_NONBLOCK)
+static_assert(GRND_NONBLOCK == 1,
+ "If GRND_NONBLOCK is not 1 the #define below is wrong");
+# else
+# define GRND_NONBLOCK 1
+# endif
+
+#endif // defined(__linux__)
+
+namespace mozilla {
+
+MFBT_API bool GenerateRandomBytesFromOS(void* aBuffer, size_t aLength) {
+ MOZ_ASSERT(aBuffer);
+ MOZ_ASSERT(aLength > 0);
+
+#if defined(XP_WIN)
+ return !!RtlGenRandom(aBuffer, aLength);
+
+#elif defined(USE_ARC4RANDOM) // defined(XP_WIN)
+
+ arc4random_buf(aBuffer, aLength);
+ return true;
+
+#elif defined(XP_UNIX) // defined(USE_ARC4RANDOM)
+
+# if defined(__linux__)
+
+ long bytesGenerated = syscall(SYS_getrandom, aBuffer, aLength, GRND_NONBLOCK);
+
+ if (static_cast<unsigned long>(bytesGenerated) == aLength) {
+ return true;
+ }
+
+  // Fall through to the generic UNIX path below on failure.
+
+# endif // defined(__linux__)
+
+ int fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0) {
+ return false;
+ }
+
+ ssize_t bytesRead = read(fd, aBuffer, aLength);
+
+ close(fd);
+
+ return (static_cast<size_t>(bytesRead) == aLength);
+
+#else // defined(XP_UNIX)
+# error "Platform needs to implement GenerateRandomBytesFromOS()"
+#endif
+}
+
+MFBT_API Maybe<uint64_t> RandomUint64() {
+ uint64_t randomNum;
+ if (!GenerateRandomBytesFromOS(&randomNum, sizeof(randomNum))) {
+ return Nothing();
+ }
+
+ return Some(randomNum);
+}
+
+MFBT_API uint64_t RandomUint64OrDie() {
+ uint64_t randomNum;
+ MOZ_RELEASE_ASSERT(GenerateRandomBytesFromOS(&randomNum, sizeof(randomNum)));
+ return randomNum;
+}
+
+} // namespace mozilla
diff --git a/mfbt/RandomNum.h b/mfbt/RandomNum.h
new file mode 100644
index 0000000000..23a24837e9
--- /dev/null
+++ b/mfbt/RandomNum.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+/* Routines for generating random numbers */
+
+#ifndef mozilla_RandomNum_h_
+#define mozilla_RandomNum_h_
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Types.h"
+
+namespace mozilla {
+
+/**
+ * Generate cryptographically secure random bytes using the best facilities
+ * available on the current OS.
+ *
+ * Return value: true if random bytes were copied into `aBuffer` or false on
+ * error.
+ *
+ * Useful whenever a secure random number is needed and NSS isn't available
+ * (perhaps because it hasn't been initialized yet).
+ *
+ * Current mechanisms:
+ * Windows: RtlGenRandom()
+ * Android, Darwin, DragonFly, FreeBSD, OpenBSD, NetBSD: arc4random()
+ * Linux: getrandom() if available, "/dev/urandom" otherwise
+ * Other Unix: "/dev/urandom"
+ *
+ */
+[[nodiscard]] MFBT_API bool GenerateRandomBytesFromOS(void* aBuffer,
+ size_t aLength);
+
+/**
+ * Generate a cryptographically secure random 64-bit unsigned number using the
+ * best facilities available on the current OS.
+ */
+MFBT_API Maybe<uint64_t> RandomUint64();
+
+/**
+ * Like RandomUint64, but always returns a uint64_t, crashing with a release
+ * assert if the underlying RandomUint64 call fails.
+ */
+MFBT_API uint64_t RandomUint64OrDie();
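+
+/*
+ * Illustrative sketch of the three entry points above:
+ *
+ *   uint64_t seed[2];
+ *   if (!GenerateRandomBytesFromOS(seed, sizeof(seed))) {
+ *     // Handle the (rare) failure to obtain OS entropy.
+ *   }
+ *   Maybe<uint64_t> token = RandomUint64();  // Nothing() on failure
+ *   uint64_t id = RandomUint64OrDie();       // crashes on failure instead
+ */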
+
+} // namespace mozilla
+
+#endif // mozilla_RandomNum_h_
diff --git a/mfbt/Range.h b/mfbt/Range.h
new file mode 100644
index 0000000000..35cd4a0e07
--- /dev/null
+++ b/mfbt/Range.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Range_h
+#define mozilla_Range_h
+
+#include "mozilla/RangedPtr.h"
+#include "mozilla/Span.h"
+
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+// Range<T> is a tuple containing a pointer and a length.
+template <typename T>
+class Range {
+ template <typename U>
+ friend class Range;
+
+ // Reassignment of RangedPtrs is so (subtly) restrictive that we just make
+ // Range immutable.
+ const RangedPtr<T> mStart;
+ const RangedPtr<T> mEnd;
+
+ public:
+ Range() : mStart(nullptr, 0), mEnd(nullptr, 0) {}
+ Range(T* aPtr, size_t aLength)
+ : mStart(aPtr, aPtr, aPtr + aLength),
+ mEnd(aPtr + aLength, aPtr, aPtr + aLength) {
+ if (!aPtr) {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(
+ !aLength, "Range does not support nullptr with non-zero length.");
+ // ...because merely having a pointer to `nullptr + 1` is undefined
+ // behavior. UBSAN catches this as of clang-10.
+ }
+ }
+ Range(const RangedPtr<T>& aStart, const RangedPtr<T>& aEnd)
+ : mStart(aStart.get(), aStart.get(), aEnd.get()),
+ mEnd(aEnd.get(), aStart.get(), aEnd.get()) {
+ // Only accept two RangedPtrs within the same range.
+ aStart.checkIdenticalRange(aEnd);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aStart <= aEnd);
+ }
+
+ template <typename U, class = std::enable_if_t<
+ std::is_convertible_v<U (*)[], T (*)[]>, int>>
+ MOZ_IMPLICIT Range(const Range<U>& aOther)
+ : mStart(aOther.mStart), mEnd(aOther.mEnd) {}
+
+ MOZ_IMPLICIT Range(Span<T> aSpan) : Range(aSpan.Elements(), aSpan.Length()) {}
+
+ template <typename U, class = std::enable_if_t<
+ std::is_convertible_v<U (*)[], T (*)[]>, int>>
+ MOZ_IMPLICIT Range(const Span<U>& aSpan)
+ : Range(aSpan.Elements(), aSpan.Length()) {}
+
+ RangedPtr<T> begin() const { return mStart; }
+ RangedPtr<T> end() const { return mEnd; }
+ size_t length() const { return mEnd - mStart; }
+
+ T& operator[](size_t aOffset) const { return mStart[aOffset]; }
+
+ explicit operator bool() const { return mStart != nullptr; }
+
+ operator Span<T>() { return Span<T>(mStart.get(), length()); }
+
+ operator Span<const T>() const { return Span<T>(mStart.get(), length()); }
+};
+
+template <typename T>
+Span(Range<T>&) -> Span<T>;
+
+template <typename T>
+Span(const Range<T>&) -> Span<const T>;
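+
+// Illustrative sketch: a Range over a local buffer, walked with
+// bounds-checked pointers.
+//
+//   int buf[4] = {1, 2, 3, 4};
+//   Range<int> r(buf, 4);
+//   for (RangedPtr<int> p = r.begin(); p != r.end(); ++p) {
+//     *p += 1;  // in debug builds, dereferences assert to stay in range
+//   }
+//   Span s(r);  // deduced as Span<int> via the guide above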
+
+} // namespace mozilla
+
+#endif /* mozilla_Range_h */
diff --git a/mfbt/RangedArray.h b/mfbt/RangedArray.h
new file mode 100644
index 0000000000..4417e09e9d
--- /dev/null
+++ b/mfbt/RangedArray.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A compile-time constant-length array, with bounds-checking assertions -- but
+ * unlike mozilla::Array, with indexes biased by a constant.
+ *
+ * Thus where mozilla::Array<int, 3> is a three-element array indexed by [0, 3),
+ * mozilla::RangedArray<int, 8, 3> is a three-element array indexed by [8, 11).
+ */
+
+#ifndef mozilla_RangedArray_h
+#define mozilla_RangedArray_h
+
+#include "mozilla/Array.h"
+
+namespace mozilla {
+
+template <typename T, size_t MinIndex, size_t Length>
+class RangedArray {
+ private:
+ typedef Array<T, Length> ArrayType;
+ ArrayType mArr;
+
+ public:
+ static size_t length() { return Length; }
+ static size_t minIndex() { return MinIndex; }
+
+ T& operator[](size_t aIndex) {
+    // Written as two comparisons rather than |aIndex >= MinIndex| to avoid
+    // tautological-compare warnings when MinIndex is 0.
+    MOZ_ASSERT(aIndex == MinIndex || aIndex > MinIndex);
+ return mArr[aIndex - MinIndex];
+ }
+
+ const T& operator[](size_t aIndex) const {
+ MOZ_ASSERT(aIndex == MinIndex || aIndex > MinIndex);
+ return mArr[aIndex - MinIndex];
+ }
+
+ typedef typename ArrayType::iterator iterator;
+ typedef typename ArrayType::const_iterator const_iterator;
+ typedef typename ArrayType::reverse_iterator reverse_iterator;
+ typedef typename ArrayType::const_reverse_iterator const_reverse_iterator;
+
+ // Methods for range-based for loops.
+ iterator begin() { return mArr.begin(); }
+ const_iterator begin() const { return mArr.begin(); }
+ const_iterator cbegin() const { return mArr.cbegin(); }
+ iterator end() { return mArr.end(); }
+ const_iterator end() const { return mArr.end(); }
+ const_iterator cend() const { return mArr.cend(); }
+
+ // Methods for reverse iterating.
+ reverse_iterator rbegin() { return mArr.rbegin(); }
+ const_reverse_iterator rbegin() const { return mArr.rbegin(); }
+ const_reverse_iterator crbegin() const { return mArr.crbegin(); }
+ reverse_iterator rend() { return mArr.rend(); }
+ const_reverse_iterator rend() const { return mArr.rend(); }
+ const_reverse_iterator crend() const { return mArr.crend(); }
+};
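+
+// Illustrative sketch: a three-element array indexed by [8, 11).
+//
+//   RangedArray<int, 8, 3> arr;
+//   arr[8] = 1;   // first element
+//   arr[10] = 3;  // last element; arr[7] would assert in debug builds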
+
+} // namespace mozilla
+
+#endif // mozilla_RangedArray_h
diff --git a/mfbt/RangedPtr.h b/mfbt/RangedPtr.h
new file mode 100644
index 0000000000..65db3b034a
--- /dev/null
+++ b/mfbt/RangedPtr.h
@@ -0,0 +1,311 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements a smart pointer asserted to remain within a range specified at
+ * construction.
+ */
+
+#ifndef mozilla_RangedPtr_h
+#define mozilla_RangedPtr_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+#include <cstddef>
+
+namespace mozilla {
+
+/*
+ * RangedPtr is a smart pointer restricted to an address range specified at
+ * creation. The pointer (and any smart pointers derived from it) must remain
+ * within the range [start, end] (inclusive of end to facilitate use as
+ * sentinels). Dereferencing or indexing into the pointer (or pointers derived
+ * from it) must remain within the range [start, end). All the standard pointer
+ * operators are defined on it; in debug builds these operations assert that the
+ * range specified at construction is respected.
+ *
+ * In theory passing a smart pointer instance as an argument can be slightly
+ * slower than passing a T* (due to ABI requirements for passing structs versus
+ * passing pointers), if the method being called isn't inlined. If you are in
+ * extremely performance-critical code, you may want to be careful using this
+ * smart pointer as an argument type.
+ *
+ * RangedPtr<T> intentionally does not implicitly convert to T*. Use get() to
+ * explicitly convert to T*. Keep in mind that the raw pointer of course won't
+ * implement bounds checking in debug builds.
+ */
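+
+// Illustrative sketch: the pointer may sit at one-past-the-end as a
+// sentinel, but dereferencing there asserts in debug builds.
+//
+//   char buf[3] = {'a', 'b', 'c'};
+//   RangedPtr<char> p(buf);  // range is [buf, buf + 3]
+//   ++p;                     // fine, still in range
+//   char c = *p;             // 'b'
+//   p += 2;                  // now at buf + 3, a valid sentinel position
+//   // *p here would assert in debug builds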
+template <typename T>
+class RangedPtr {
+ template <typename U>
+ friend class RangedPtr;
+
+ T* mPtr;
+
+#if defined(DEBUG) || defined(FUZZING)
+ T* const mRangeStart;
+ T* const mRangeEnd;
+#endif
+
+ void checkSanity() {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mRangeStart <= mPtr);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr <= mRangeEnd);
+ }
+
+ /* Creates a new pointer for |aPtr|, restricted to this pointer's range. */
+ RangedPtr<T> create(T* aPtr) const {
+#if defined(DEBUG) || defined(FUZZING)
+ return RangedPtr<T>(aPtr, mRangeStart, mRangeEnd);
+#else
+ return RangedPtr<T>(aPtr, nullptr, size_t(0));
+#endif
+ }
+
+ uintptr_t asUintptr() const { return reinterpret_cast<uintptr_t>(mPtr); }
+
+ public:
+ RangedPtr(T* aPtr, T* aStart, T* aEnd)
+ : mPtr(aPtr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aStart),
+ mRangeEnd(aEnd)
+#endif
+ {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mRangeStart <= mRangeEnd);
+ checkSanity();
+ }
+ RangedPtr(T* aPtr, T* aStart, size_t aLength)
+ : mPtr(aPtr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aStart),
+ mRangeEnd(aStart + aLength)
+#endif
+ {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aLength <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT_DEBUG_OR_FUZZING(reinterpret_cast<uintptr_t>(mRangeStart) +
+ aLength * sizeof(T) >=
+ reinterpret_cast<uintptr_t>(mRangeStart));
+ checkSanity();
+ }
+
+ /* Equivalent to RangedPtr(aPtr, aPtr, aLength). */
+ RangedPtr(T* aPtr, size_t aLength)
+ : mPtr(aPtr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aPtr),
+ mRangeEnd(aPtr + aLength)
+#endif
+ {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aLength <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT_DEBUG_OR_FUZZING(reinterpret_cast<uintptr_t>(mRangeStart) +
+ aLength * sizeof(T) >=
+ reinterpret_cast<uintptr_t>(mRangeStart));
+ checkSanity();
+ }
+
+ /* Equivalent to RangedPtr(aArr, aArr, N). */
+ template <size_t N>
+ explicit RangedPtr(T (&aArr)[N])
+ : mPtr(aArr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aArr),
+ mRangeEnd(aArr + N)
+#endif
+ {
+ checkSanity();
+ }
+
+ RangedPtr(const RangedPtr& aOther)
+ : mPtr(aOther.mPtr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aOther.mRangeStart),
+ mRangeEnd(aOther.mRangeEnd)
+#endif
+ {
+ checkSanity();
+ }
+
+ template <typename U>
+ MOZ_IMPLICIT RangedPtr(const RangedPtr<U>& aOther)
+ : mPtr(aOther.mPtr)
+#if defined(DEBUG) || defined(FUZZING)
+ ,
+ mRangeStart(aOther.mRangeStart),
+ mRangeEnd(aOther.mRangeEnd)
+#endif
+ {
+ checkSanity();
+ }
+
+ T* get() const { return mPtr; }
+
+ explicit operator bool() const { return mPtr != nullptr; }
+
+ void checkIdenticalRange(const RangedPtr<T>& aOther) const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mRangeStart == aOther.mRangeStart);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mRangeEnd == aOther.mRangeEnd);
+ }
+
+ template <typename U>
+ RangedPtr<U> ReinterpretCast() const {
+#if defined(DEBUG) || defined(FUZZING)
+ return {reinterpret_cast<U*>(mPtr), reinterpret_cast<U*>(mRangeStart),
+ reinterpret_cast<U*>(mRangeEnd)};
+#else
+ return {reinterpret_cast<U*>(mPtr), nullptr, nullptr};
+#endif
+ }
+
+ /*
+ * You can only assign one RangedPtr into another if the two pointers have
+ * the same valid range:
+ *
+ * char arr1[] = "hi";
+ * char arr2[] = "bye";
+ * RangedPtr<char> p1(arr1, 2);
+ * p1 = RangedPtr<char>(arr1 + 1, arr1, arr1 + 2); // works
+ * p1 = RangedPtr<char>(arr2, 3); // asserts
+ */
+ RangedPtr<T>& operator=(const RangedPtr<T>& aOther) {
+ checkIdenticalRange(aOther);
+ mPtr = aOther.mPtr;
+ checkSanity();
+ return *this;
+ }
+
+ RangedPtr<T> operator+(size_t aInc) const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aInc <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT_DEBUG_OR_FUZZING(asUintptr() + aInc * sizeof(T) >= asUintptr());
+ return create(mPtr + aInc);
+ }
+
+ RangedPtr<T> operator-(size_t aDec) const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aDec <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT_DEBUG_OR_FUZZING(asUintptr() - aDec * sizeof(T) <= asUintptr());
+ return create(mPtr - aDec);
+ }
+
+ /*
+ * You can assign a raw pointer into a RangedPtr if the raw pointer is
+ * within the range specified at creation.
+ */
+ template <typename U>
+ RangedPtr<T>& operator=(U* aPtr) {
+ *this = create(aPtr);
+ return *this;
+ }
+
+ template <typename U>
+ RangedPtr<T>& operator=(const RangedPtr<U>& aPtr) {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mRangeStart <= aPtr.mPtr);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(aPtr.mPtr <= mRangeEnd);
+ mPtr = aPtr.mPtr;
+ checkSanity();
+ return *this;
+ }
+
+ RangedPtr<T>& operator++() { return (*this += 1); }
+
+ RangedPtr<T> operator++(int) {
+ RangedPtr<T> rcp = *this;
+ ++*this;
+ return rcp;
+ }
+
+ RangedPtr<T>& operator--() { return (*this -= 1); }
+
+ RangedPtr<T> operator--(int) {
+ RangedPtr<T> rcp = *this;
+ --*this;
+ return rcp;
+ }
+
+ RangedPtr<T>& operator+=(size_t aInc) {
+ *this = *this + aInc;
+ return *this;
+ }
+
+ RangedPtr<T>& operator-=(size_t aDec) {
+ *this = *this - aDec;
+ return *this;
+ }
+
+ T& operator[](ptrdiff_t aIndex) const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(size_t(aIndex > 0 ? aIndex : -aIndex) <=
+ size_t(-1) / sizeof(T));
+ return *create(mPtr + aIndex);
+ }
+
+ T& operator*() const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr >= mRangeStart);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr < mRangeEnd);
+ return *mPtr;
+ }
+
+ T* operator->() const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr >= mRangeStart);
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr < mRangeEnd);
+ return mPtr;
+ }
+
+ template <typename U>
+ bool operator==(const RangedPtr<U>& aOther) const {
+ return mPtr == aOther.mPtr;
+ }
+ template <typename U>
+ bool operator!=(const RangedPtr<U>& aOther) const {
+ return !(*this == aOther);
+ }
+
+ template <typename U>
+ bool operator==(const U* u) const {
+ return mPtr == u;
+ }
+ template <typename U>
+ bool operator!=(const U* u) const {
+ return !(*this == u);
+ }
+
+ bool operator==(std::nullptr_t) const { return mPtr == nullptr; }
+ bool operator!=(std::nullptr_t) const { return mPtr != nullptr; }
+
+ template <typename U>
+ bool operator<(const RangedPtr<U>& aOther) const {
+ return mPtr < aOther.mPtr;
+ }
+ template <typename U>
+ bool operator<=(const RangedPtr<U>& aOther) const {
+ return mPtr <= aOther.mPtr;
+ }
+
+ template <typename U>
+ bool operator>(const RangedPtr<U>& aOther) const {
+ return mPtr > aOther.mPtr;
+ }
+ template <typename U>
+ bool operator>=(const RangedPtr<U>& aOther) const {
+ return mPtr >= aOther.mPtr;
+ }
+
+ size_t operator-(const RangedPtr<T>& aOther) const {
+ MOZ_ASSERT_DEBUG_OR_FUZZING(mPtr >= aOther.mPtr);
+ return PointerRangeSize(aOther.mPtr, mPtr);
+ }
+
+ private:
+ RangedPtr() = delete;
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_RangedPtr_h */
diff --git a/mfbt/ReentrancyGuard.h b/mfbt/ReentrancyGuard.h
new file mode 100644
index 0000000000..56c963b418
--- /dev/null
+++ b/mfbt/ReentrancyGuard.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Small helper class for asserting that uses of a class are non-reentrant. */
+
+#ifndef mozilla_ReentrancyGuard_h
+#define mozilla_ReentrancyGuard_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+/* Useful for implementing containers that assert non-reentrancy */
+class MOZ_RAII ReentrancyGuard {
+#ifdef DEBUG
+ bool& mEntered;
+#endif
+
+ public:
+ template <class T>
+#ifdef DEBUG
+ explicit ReentrancyGuard(T& aObj)
+ : mEntered(aObj.mEntered)
+#else
+ explicit ReentrancyGuard(T&)
+#endif
+ {
+#ifdef DEBUG
+ MOZ_ASSERT(!mEntered);
+ mEntered = true;
+#endif
+ }
+ ~ReentrancyGuard() {
+#ifdef DEBUG
+ mEntered = false;
+#endif
+ }
+
+ private:
+ ReentrancyGuard(const ReentrancyGuard&) = delete;
+ void operator=(const ReentrancyGuard&) = delete;
+};
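+
+// Illustrative sketch (|Container| is hypothetical): the guarded class
+// provides a DEBUG-only |mEntered| flag that the guard sets for its lifetime.
+//
+//   class Container {
+//     friend class mozilla::ReentrancyGuard;
+//   #ifdef DEBUG
+//     bool mEntered = false;
+//   #endif
+//
+//    public:
+//     void Mutate() {
+//       mozilla::ReentrancyGuard guard(*this);
+//       // Reentering Mutate() on this object here would assert.
+//     }
+//   };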
+
+} // namespace mozilla
+
+#endif /* mozilla_ReentrancyGuard_h */
diff --git a/mfbt/RefCountType.h b/mfbt/RefCountType.h
new file mode 100644
index 0000000000..e95a22a0ca
--- /dev/null
+++ b/mfbt/RefCountType.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_RefCountType_h
+#define mozilla_RefCountType_h
+
+#include <stdint.h>
+
+/**
+ * MozRefCountType is Mozilla's reference count type.
+ *
+ * We use the same type to represent the refcount of RefCounted objects
+ * as well, in order to be able to use the leak detection facilities
+ * that are implemented by XPCOM.
+ *
+ * Note that this type is not in the mozilla namespace so that it is
+ * usable for both C and C++ code.
+ */
+typedef uintptr_t MozRefCountType;
+
+/*
+ * This is the return type for AddRef() and Release() in nsISupports.
+ * COM's IUnknown returns an unsigned long from the equivalent functions.
+ *
+ * The following ifdef exists to maintain binary compatibility with
+ * IUnknown, the base interface in Microsoft COM.
+ */
+#ifdef XP_WIN
+typedef unsigned long MozExternalRefCountType;
+#else
+typedef uint32_t MozExternalRefCountType;
+#endif
+
+#endif
diff --git a/mfbt/RefCounted.h b/mfbt/RefCounted.h
new file mode 100644
index 0000000000..5c083f3524
--- /dev/null
+++ b/mfbt/RefCounted.h
@@ -0,0 +1,327 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* CRTP refcounting templates. Do not use unless you are an Expert. */
+
+#ifndef mozilla_RefCounted_h
+#define mozilla_RefCounted_h
+
+#include <utility>
+#include <type_traits>
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/RefCountType.h"
+
+#ifdef __wasi__
+# include "mozilla/WasiAtomic.h"
+#else
+# include <atomic>
+#endif // __wasi__
+
+#if defined(MOZILLA_INTERNAL_API)
+# include "nsXPCOM.h"
+#endif
+
+#if defined(MOZILLA_INTERNAL_API) && defined(NS_BUILD_REFCNT_LOGGING)
+# define MOZ_REFCOUNTED_LEAK_CHECKING
+#endif
+
+namespace mozilla {
+
+/**
+ * RefCounted<T> is a sort of a "mixin" for a class T. RefCounted
+ * manages, well, refcounting for T, and because RefCounted is
+ * parameterized on T, RefCounted<T> can call T's destructor directly.
+ * This means T doesn't need to have a virtual dtor and so doesn't
+ * need a vtable.
+ *
+ * RefCounted<T> is created with refcount == 0. Newly-allocated
+ * RefCounted<T> must immediately be assigned to a RefPtr to make the
+ * refcount > 0. It's an error to allocate and free a bare
+ * RefCounted<T>, i.e. outside of the RefPtr machinery. Attempts to
+ * do so will abort DEBUG builds.
+ *
+ * Live RefCounted<T> have refcount > 0. The lifetimes (refcounts) of
+ * live RefCounted<T> are controlled by RefPtr<T> and
+ * RefPtr<super/subclass of T>. Upon a transition from refcount==1
+ * to 0, the RefCounted<T> "dies" and is destroyed. The "destroyed"
+ * state is represented in DEBUG builds by refcount==0xffffdead. This
+ * state distinguishes use-before-ref (refcount==0) from
+ * use-after-destroy (refcount==0xffffdead).
+ *
+ * Note that when deriving from RefCounted or AtomicRefCounted, you
+ * should add MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public
+ * section of your class, where ClassName is the name of your class.
+ *
+ * Note: SpiderMonkey should use js::RefCounted instead since that type
+ * will use appropriate js_delete and also not break ref-count logging.
+ */
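+
+// Illustrative sketch (|MyType| is hypothetical; RefPtr comes from RefPtr.h):
+//
+//   class MyType : public mozilla::RefCounted<MyType> {
+//    public:
+//     MOZ_DECLARE_REFCOUNTED_TYPENAME(MyType)
+//     explicit MyType(int aValue) : mValue(aValue) {}
+//
+//    private:
+//     int mValue;
+//   };
+//
+//   RefPtr<MyType> obj = new MyType(42);  // refcount goes 0 -> 1 immediately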
+namespace detail {
+const MozRefCountType DEAD = 0xffffdead;
+
+// When building code that gets compiled into Gecko, try to use the
+// trace-refcount leak logging facilities.
+class RefCountLogger {
+ public:
+ // Called by `RefCounted`-like classes to log a successful AddRef call in the
+ // Gecko leak-logging system. This call is a no-op outside of Gecko. Should be
+  // called after incrementing the reference count.
+ template <class T>
+ static void logAddRef(const T* aPointer, MozRefCountType aRefCount) {
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ const void* pointer = aPointer;
+ const char* typeName = aPointer->typeName();
+ uint32_t typeSize = aPointer->typeSize();
+ NS_LogAddRef(const_cast<void*>(pointer), aRefCount, typeName, typeSize);
+#endif
+ }
+
+ // Created by `RefCounted`-like classes to log a successful Release call in
+ // the Gecko leak-logging system. The constructor should be invoked before the
+ // refcount is decremented to avoid invoking `typeName()` with a zero
+ // reference count. This call is a no-op outside of Gecko.
+ class MOZ_STACK_CLASS ReleaseLogger final {
+ public:
+ template <class T>
+ explicit ReleaseLogger(const T* aPointer)
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ : mPointer(aPointer),
+ mTypeName(aPointer->typeName())
+#endif
+ {
+ }
+
+ void logRelease(MozRefCountType aRefCount) {
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ MOZ_ASSERT(aRefCount != DEAD);
+ NS_LogRelease(const_cast<void*>(mPointer), aRefCount, mTypeName);
+#endif
+ }
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ const void* mPointer;
+ const char* mTypeName;
+#endif
+ };
+};
+
+// This is used in WeakPtr.h as well as in this file.
+enum RefCountAtomicity { AtomicRefCount, NonAtomicRefCount };
+
+template <typename T, RefCountAtomicity Atomicity>
+class RC {
+ public:
+ explicit RC(T aCount) : mValue(aCount) {}
+
+ RC(const RC&) = delete;
+ RC& operator=(const RC&) = delete;
+ RC(RC&&) = delete;
+ RC& operator=(RC&&) = delete;
+
+ T operator++() { return ++mValue; }
+ T operator--() { return --mValue; }
+
+#ifdef DEBUG
+ void operator=(const T& aValue) { mValue = aValue; }
+#endif
+
+ operator T() const { return mValue; }
+
+ private:
+ T mValue;
+};
+
+template <typename T>
+class RC<T, AtomicRefCount> {
+ public:
+ explicit RC(T aCount) : mValue(aCount) {}
+
+ RC(const RC&) = delete;
+ RC& operator=(const RC&) = delete;
+ RC(RC&&) = delete;
+ RC& operator=(RC&&) = delete;
+
+ T operator++() {
+ // Memory synchronization is not required when incrementing a
+ // reference count. The first increment of a reference count on a
+ // thread is not important, since the first use of the object on a
+ // thread can happen before it. What is important is the transfer
+ // of the pointer to that thread, which may happen prior to the
+ // first increment on that thread. The necessary memory
+ // synchronization is done by the mechanism that transfers the
+ // pointer between threads.
+ return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
+ }
+
+ T operator--() {
+ // Since this may be the last release on this thread, we need
+ // release semantics so that prior writes on this thread are visible
+ // to the thread that destroys the object when it reads mValue with
+ // acquire semantics.
+ T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
+ if (result == 0) {
+ // We're going to destroy the object on this thread, so we need
+ // acquire semantics to synchronize with the memory released by
+ // the last release on other threads, that is, to ensure that
+ // writes prior to that release are now visible on this thread.
+#if defined(MOZ_TSAN) || defined(__wasi__)
+ // TSan doesn't understand std::atomic_thread_fence, so in order
+ // to avoid a false positive for every time a refcounted object
+ // is deleted, we replace the fence with an atomic operation.
+ mValue.load(std::memory_order_acquire);
+#else
+ std::atomic_thread_fence(std::memory_order_acquire);
+#endif
+ }
+ return result;
+ }
+
+#ifdef DEBUG
+ // This method is only called in debug builds, so we're not too concerned
+ // about its performance.
+ void operator=(const T& aValue) {
+ mValue.store(aValue, std::memory_order_seq_cst);
+ }
+#endif
+
+ operator T() const {
+ // Use acquire semantics since we're not sure what the caller is
+ // doing.
+ return mValue.load(std::memory_order_acquire);
+ }
+
+ T IncrementIfNonzero() {
+ // This can be a relaxed load as any write of 0 that we observe will leave
+ // the field in a permanently zero (or `DEAD`) state (so a "stale" read of 0
+ // is fine), and any other value is confirmed by the CAS below.
+ //
+    // This roughly matches Rust's Arc::upgrade implementation as of Rust
+    // 1.49.0.
+ T prev = mValue.load(std::memory_order_relaxed);
+ while (prev != 0) {
+ MOZ_ASSERT(prev != detail::DEAD,
+ "Cannot IncrementIfNonzero if marked as dead!");
+ // TODO: It may be possible to use relaxed success ordering here?
+ if (mValue.compare_exchange_weak(prev, prev + 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ return prev + 1;
+ }
+ }
+ return 0;
+ }
+
+ private:
+ std::atomic<T> mValue;
+};
+
+template <typename T, RefCountAtomicity Atomicity>
+class RefCounted {
+ protected:
+ RefCounted() : mRefCnt(0) {}
+#ifdef DEBUG
+ ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD); }
+#endif
+
+ public:
+ // Compatibility with RefPtr.
+ void AddRef() const {
+ // Note: this method must be thread safe for AtomicRefCounted.
+ MOZ_ASSERT(int32_t(mRefCnt) >= 0);
+ MozRefCountType cnt = ++mRefCnt;
+ detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
+ }
+
+ void Release() const {
+ // Note: this method must be thread safe for AtomicRefCounted.
+ MOZ_ASSERT(int32_t(mRefCnt) > 0);
+ detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
+ MozRefCountType cnt = --mRefCnt;
+ // Note: it's not safe to touch |this| after decrementing the refcount,
+ // except for below.
+ logger.logRelease(cnt);
+ if (0 == cnt) {
+ // Because we have atomically decremented the refcount above, only
+ // one thread can get a 0 count here, so as long as we can assume that
+ // everything else in the system is accessing this object through
+ // RefPtrs, it's safe to access |this| here.
+#ifdef DEBUG
+ mRefCnt = detail::DEAD;
+#endif
+ delete static_cast<const T*>(this);
+ }
+ }
+
+ using HasThreadSafeRefCnt =
+ std::integral_constant<bool, Atomicity == AtomicRefCount>;
+
+ // Compatibility with wtf::RefPtr.
+ void ref() { AddRef(); }
+ void deref() { Release(); }
+ MozRefCountType refCount() const { return mRefCnt; }
+ bool hasOneRef() const {
+ MOZ_ASSERT(mRefCnt > 0);
+ return mRefCnt == 1;
+ }
+
+ private:
+ mutable RC<MozRefCountType, Atomicity> mRefCnt;
+};
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+// Passing override for the optional argument marks the typeName and
+// typeSize functions defined by this macro as overrides.
+# define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...) \
+ virtual const char* typeName() const __VA_ARGS__ { return #T; } \
+ virtual size_t typeSize() const __VA_ARGS__ { return sizeof(*this); }
+#else
+# define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)
+#endif
+
+// Note that this macro is expanded unconditionally because it declares only
+// two small inline functions which will hopefully get eliminated by the linker
+// in non-leak-checking builds.
+#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T) \
+ const char* typeName() const { return #T; } \
+ size_t typeSize() const { return sizeof(*this); }
+
+} // namespace detail
+
+template <typename T>
+class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount> {
+ public:
+ ~RefCounted() {
+ static_assert(std::is_base_of<RefCounted, T>::value,
+ "T must derive from RefCounted<T>");
+ }
+};
+
+namespace external {
+
+/**
+ * AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
+ * reference counter.
+ *
+ * NOTE: Please do not use this class, use NS_INLINE_DECL_THREADSAFE_REFCOUNTING
+ * instead.
+ */
+template <typename T>
+class AtomicRefCounted
+ : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount> {
+ public:
+ ~AtomicRefCounted() {
+ static_assert(std::is_base_of<AtomicRefCounted, T>::value,
+ "T must derive from AtomicRefCounted<T>");
+ }
+};
+
+} // namespace external
+
+} // namespace mozilla
+
+#endif // mozilla_RefCounted_h
diff --git a/mfbt/RefPtr.h b/mfbt/RefPtr.h
new file mode 100644
index 0000000000..343e78d61e
--- /dev/null
+++ b/mfbt/RefPtr.h
@@ -0,0 +1,646 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_RefPtr_h
+#define mozilla_RefPtr_h
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DbgMacro.h"
+
+#include <type_traits>
+
+/*****************************************************************************/
+
+// template <class T> class RefPtrGetterAddRefs;
+
+class nsQueryReferent;
+class nsCOMPtr_helper;
+class nsISupports;
+
+namespace mozilla {
+template <class T>
+class MovingNotNull;
+template <class T>
+class NotNull;
+template <class T>
+class OwningNonNull;
+template <class T>
+class StaticLocalRefPtr;
+template <class T>
+class StaticRefPtr;
+
+// Traditionally, RefPtr supports automatic refcounting of any pointer type
+// with AddRef() and Release() methods that follow the traditional semantics.
+//
+// This traits class can be specialized to operate on other pointer types. For
+// example, we specialize this trait for opaque FFI types that represent
+// refcounted objects in Rust.
+//
+// Given the use of ConstRemovingRefPtrTraits below, U should not be a const-
+// qualified type.
+template <class U>
+struct RefPtrTraits {
+ static void AddRef(U* aPtr) { aPtr->AddRef(); }
+ static void Release(U* aPtr) { aPtr->Release(); }
+};
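+
+// Illustrative sketch of such a specialization (|FfiObject|, |ffi_addref|,
+// and |ffi_release| are hypothetical FFI declarations):
+//
+//   extern "C" void ffi_addref(FfiObject* aPtr);
+//   extern "C" void ffi_release(FfiObject* aPtr);
+//
+//   template <>
+//   struct RefPtrTraits<FfiObject> {
+//     static void AddRef(FfiObject* aPtr) { ffi_addref(aPtr); }
+//     static void Release(FfiObject* aPtr) { ffi_release(aPtr); }
+//   };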
+
+} // namespace mozilla
+
+template <class T>
+class MOZ_IS_REFPTR RefPtr {
+ private:
+ void assign_with_AddRef(T* aRawPtr) {
+ if (aRawPtr) {
+ ConstRemovingRefPtrTraits<T>::AddRef(aRawPtr);
+ }
+ assign_assuming_AddRef(aRawPtr);
+ }
+
+ void assign_assuming_AddRef(T* aNewPtr) {
+ T* oldPtr = mRawPtr;
+ mRawPtr = aNewPtr;
+ if (oldPtr) {
+ ConstRemovingRefPtrTraits<T>::Release(oldPtr);
+ }
+ }
+
+ private:
+ T* MOZ_OWNING_REF mRawPtr;
+
+ public:
+ typedef T element_type;
+
+ ~RefPtr() {
+ if (mRawPtr) {
+ ConstRemovingRefPtrTraits<T>::Release(mRawPtr);
+ }
+ }
+
+ // Constructors
+
+ RefPtr()
+ : mRawPtr(nullptr)
+ // default constructor
+ {}
+
+ RefPtr(const RefPtr<T>& aSmartPtr)
+ : mRawPtr(aSmartPtr.mRawPtr)
+ // copy-constructor
+ {
+ if (mRawPtr) {
+ ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+ }
+ }
+
+ RefPtr(RefPtr<T>&& aRefPtr) : mRawPtr(aRefPtr.mRawPtr) {
+ aRefPtr.mRawPtr = nullptr;
+ }
+
+ // construct from a raw pointer (of the right type)
+
+ MOZ_IMPLICIT RefPtr(T* aRawPtr) : mRawPtr(aRawPtr) {
+ if (mRawPtr) {
+ ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+ }
+ }
+
+ MOZ_IMPLICIT RefPtr(decltype(nullptr)) : mRawPtr(nullptr) {}
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+ MOZ_IMPLICIT RefPtr(already_AddRefed<I>& aSmartPtr)
+ : mRawPtr(aSmartPtr.take())
+ // construct from |already_AddRefed|
+ {}
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+ MOZ_IMPLICIT RefPtr(already_AddRefed<I>&& aSmartPtr)
+ : mRawPtr(aSmartPtr.take())
+ // construct from |otherRefPtr.forget()|
+ {}
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+ MOZ_IMPLICIT RefPtr(const RefPtr<I>& aSmartPtr)
+ : mRawPtr(aSmartPtr.get())
+ // copy-construct from a smart pointer with a related pointer type
+ {
+ if (mRawPtr) {
+ ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+ }
+ }
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+ MOZ_IMPLICIT RefPtr(RefPtr<I>&& aSmartPtr)
+ : mRawPtr(aSmartPtr.forget().take())
+ // construct from |Move(RefPtr<SomeSubclassOfT>)|.
+ {}
+
+ template <typename I,
+ typename = std::enable_if_t<!std::is_same_v<I, RefPtr<T>> &&
+ std::is_convertible_v<I, RefPtr<T>>>>
+ MOZ_IMPLICIT RefPtr(const mozilla::NotNull<I>& aSmartPtr)
+ : mRawPtr(RefPtr<T>(aSmartPtr.get()).forget().take())
+ // construct from |mozilla::NotNull|.
+ {}
+
+ template <typename I,
+ typename = std::enable_if_t<!std::is_same_v<I, RefPtr<T>> &&
+ std::is_convertible_v<I, RefPtr<T>>>>
+ MOZ_IMPLICIT RefPtr(mozilla::MovingNotNull<I>&& aSmartPtr)
+ : mRawPtr(RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take())
+ // construct from |mozilla::MovingNotNull|.
+ {}
+
+ MOZ_IMPLICIT RefPtr(const nsQueryReferent& aHelper);
+ MOZ_IMPLICIT RefPtr(const nsCOMPtr_helper& aHelper);
+
+ // Defined in OwningNonNull.h
+ template <class U>
+ MOZ_IMPLICIT RefPtr(const mozilla::OwningNonNull<U>& aOther);
+
+ // Defined in StaticLocalPtr.h
+ template <class U>
+ MOZ_IMPLICIT RefPtr(const mozilla::StaticLocalRefPtr<U>& aOther);
+
+ // Defined in StaticPtr.h
+ template <class U>
+ MOZ_IMPLICIT RefPtr(const mozilla::StaticRefPtr<U>& aOther);
+
+ // Assignment operators
+
+ RefPtr<T>& operator=(decltype(nullptr)) {
+ assign_assuming_AddRef(nullptr);
+ return *this;
+ }
+
+ RefPtr<T>& operator=(const RefPtr<T>& aRhs)
+ // copy assignment operator
+ {
+ assign_with_AddRef(aRhs.mRawPtr);
+ return *this;
+ }
+
+ template <typename I>
+ RefPtr<T>& operator=(const RefPtr<I>& aRhs)
+ // assign from an RefPtr of a related pointer type
+ {
+ assign_with_AddRef(aRhs.get());
+ return *this;
+ }
+
+ RefPtr<T>& operator=(T* aRhs)
+ // assign from a raw pointer (of the right type)
+ {
+ assign_with_AddRef(aRhs);
+ return *this;
+ }
+
+ template <typename I>
+ RefPtr<T>& operator=(already_AddRefed<I>& aRhs)
+ // assign from |already_AddRefed|
+ {
+ assign_assuming_AddRef(aRhs.take());
+ return *this;
+ }
+
+ template <typename I>
+ RefPtr<T>& operator=(already_AddRefed<I>&& aRhs)
+ // assign from |otherRefPtr.forget()|
+ {
+ assign_assuming_AddRef(aRhs.take());
+ return *this;
+ }
+
+ RefPtr<T>& operator=(const nsQueryReferent& aQueryReferent);
+ RefPtr<T>& operator=(const nsCOMPtr_helper& aHelper);
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+ RefPtr<T>& operator=(RefPtr<I>&& aRefPtr) {
+ assign_assuming_AddRef(aRefPtr.forget().take());
+ return *this;
+ }
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I, RefPtr<T>>>>
+ RefPtr<T>& operator=(const mozilla::NotNull<I>& aSmartPtr)
+ // assign from |mozilla::NotNull|.
+ {
+ assign_assuming_AddRef(RefPtr<T>(aSmartPtr.get()).forget().take());
+ return *this;
+ }
+
+ template <typename I,
+ typename = std::enable_if_t<std::is_convertible_v<I, RefPtr<T>>>>
+ RefPtr<T>& operator=(mozilla::MovingNotNull<I>&& aSmartPtr)
+ // assign from |mozilla::MovingNotNull|.
+ {
+ assign_assuming_AddRef(
+ RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take());
+ return *this;
+ }
+
+ // Defined in OwningNonNull.h
+ template <class U>
+ RefPtr<T>& operator=(const mozilla::OwningNonNull<U>& aOther);
+
+ // Defined in StaticLocalPtr.h
+ template <class U>
+ RefPtr<T>& operator=(const mozilla::StaticLocalRefPtr<U>& aOther);
+
+ // Defined in StaticPtr.h
+ template <class U>
+ RefPtr<T>& operator=(const mozilla::StaticRefPtr<U>& aOther);
+
+ // Other pointer operators
+
+ void swap(RefPtr<T>& aRhs)
+ // ...exchange ownership with |aRhs|; can save a pair of refcount operations
+ {
+ T* temp = aRhs.mRawPtr;
+ aRhs.mRawPtr = mRawPtr;
+ mRawPtr = temp;
+ }
+
+ void swap(T*& aRhs)
+ // ...exchange ownership with |aRhs|; can save a pair of refcount operations
+ {
+ T* temp = aRhs;
+ aRhs = mRawPtr;
+ mRawPtr = temp;
+ }
+
+ already_AddRefed<T> MOZ_MAY_CALL_AFTER_MUST_RETURN forget()
+ // return the value of mRawPtr and null out mRawPtr. Useful for
+ // already_AddRefed return values.
+ {
+ T* temp = nullptr;
+ swap(temp);
+ return already_AddRefed<T>(temp);
+ }
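+
+  // Illustrative sketch (|Foo| and |CreateFoo| are hypothetical):
+  //
+  //   already_AddRefed<Foo> CreateFoo() {
+  //     RefPtr<Foo> foo = new Foo();
+  //     return foo.forget();  // hands the reference over to the caller
+  //   }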
+
+ template <typename I>
+ void forget(I** aRhs)
+ // Set the target of aRhs to the value of mRawPtr and null out mRawPtr.
+ // Useful to avoid unnecessary AddRef/Release pairs with "out"
+  // parameters where aRhs may be a T** or an I** where I is a base class
+ // of T.
+ {
+ MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
+ *aRhs = mRawPtr;
+ mRawPtr = nullptr;
+ }
+
+ void forget(nsISupports** aRhs) {
+ MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
+ *aRhs = ToSupports(mRawPtr);
+ mRawPtr = nullptr;
+ }
+
+ T* get() const
+ /*
+ Prefer the implicit conversion provided automatically by |operator T*()
+ const|. Use |get()| to resolve ambiguity or to get a castable pointer.
+ */
+ {
+ return const_cast<T*>(mRawPtr);
+ }
+
+ operator T*() const&
+ /*
+ ...makes an |RefPtr| act like its underlying raw pointer type whenever it
+ is used in a context where a raw pointer is expected. It is this operator
+ that makes an |RefPtr| substitutable for a raw pointer.
+
+ Prefer the implicit use of this operator to calling |get()|, except where
+ necessary to resolve ambiguity.
+ */
+ {
+ return get();
+ }
+
+ // Don't allow implicit conversion of temporary RefPtr to raw pointer,
+ // because the refcount might be one and the pointer will immediately become
+ // invalid.
+ operator T*() const&& = delete;
+
+ // These are needed to avoid the deleted operator above. XXX Why is operator!
+ // needed separately? Shouldn't the compiler prefer using the non-deleted
+ // operator bool instead of the deleted operator T*?
+ explicit operator bool() const { return !!mRawPtr; }
+ bool operator!() const { return !mRawPtr; }
+
+ T* operator->() const MOZ_NO_ADDREF_RELEASE_ON_RETURN {
+ MOZ_ASSERT(mRawPtr != nullptr,
+ "You can't dereference a NULL RefPtr with operator->().");
+ return get();
+ }
+
+ template <typename R, typename... Args>
+ class Proxy {
+ typedef R (T::*member_function)(Args...);
+ T* mRawPtr;
+ member_function mFunction;
+
+ public:
+ Proxy(T* aRawPtr, member_function aFunction)
+ : mRawPtr(aRawPtr), mFunction(aFunction) {}
+ template <typename... ActualArgs>
+ R operator()(ActualArgs&&... aArgs) {
+ return ((*mRawPtr).*mFunction)(std::forward<ActualArgs>(aArgs)...);
+ }
+ };
+
+ template <typename R, typename... Args>
+ Proxy<R, Args...> operator->*(R (T::*aFptr)(Args...)) const {
+ MOZ_ASSERT(mRawPtr != nullptr,
+ "You can't dereference a NULL RefPtr with operator->*().");
+ return Proxy<R, Args...>(get(), aFptr);
+ }
+
+ RefPtr<T>* get_address()
+ // This is not intended to be used by clients. See |address_of|
+ // below.
+ {
+ return this;
+ }
+
+ const RefPtr<T>* get_address() const
+ // This is not intended to be used by clients. See |address_of|
+ // below.
+ {
+ return this;
+ }
+
+ public:
+ T& operator*() const {
+ MOZ_ASSERT(mRawPtr != nullptr,
+ "You can't dereference a NULL RefPtr with operator*().");
+ return *get();
+ }
+
+ T** StartAssignment() {
+ assign_assuming_AddRef(nullptr);
+ return reinterpret_cast<T**>(&mRawPtr);
+ }
+
+ private:
+ // This helper class makes |RefPtr<const T>| possible by casting away
+ // the constness from the pointer when calling AddRef() and Release().
+ //
+ // This is necessary because AddRef() and Release() implementations can't
+  // generally be expected to be const themselves (without heavy use of
+  // |mutable| and |const_cast| in their own implementations).
+ //
+ // This should be sound because while |RefPtr<const T>| provides a
+ // const view of an object, the object itself should not be const (it
+ // would have to be allocated as |new const T| or similar to be const).
+ template <class U>
+ struct ConstRemovingRefPtrTraits {
+ static void AddRef(U* aPtr) { mozilla::RefPtrTraits<U>::AddRef(aPtr); }
+ static void Release(U* aPtr) { mozilla::RefPtrTraits<U>::Release(aPtr); }
+ };
+ template <class U>
+ struct ConstRemovingRefPtrTraits<const U> {
+ static void AddRef(const U* aPtr) {
+ mozilla::RefPtrTraits<U>::AddRef(const_cast<U*>(aPtr));
+ }
+ static void Release(const U* aPtr) {
+ mozilla::RefPtrTraits<U>::Release(const_cast<U*>(aPtr));
+ }
+ };
+};
+
+class nsCycleCollectionTraversalCallback;
+template <typename T>
+void CycleCollectionNoteChild(nsCycleCollectionTraversalCallback& aCallback,
+ T* aChild, const char* aName, uint32_t aFlags);
+
+template <typename T>
+inline void ImplCycleCollectionUnlink(RefPtr<T>& aField) {
+ aField = nullptr;
+}
+
+template <typename T>
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback, RefPtr<T>& aField,
+ const char* aName, uint32_t aFlags = 0) {
+ CycleCollectionNoteChild(aCallback, aField.get(), aName, aFlags);
+}
+
+template <class T>
+inline RefPtr<T>* address_of(RefPtr<T>& aPtr) {
+ return aPtr.get_address();
+}
+
+template <class T>
+inline const RefPtr<T>* address_of(const RefPtr<T>& aPtr) {
+ return aPtr.get_address();
+}
+
+template <class T>
+class RefPtrGetterAddRefs
+/*
+ ...
+
+ This class is designed to be used for anonymous temporary objects in the
+ argument list of calls that return COM interface pointers, e.g.,
+
+ RefPtr<IFoo> fooP;
+ ...->GetAddRefedPointer(getter_AddRefs(fooP))
+
+ DO NOT USE THIS TYPE DIRECTLY IN YOUR CODE. Use |getter_AddRefs()| instead.
+
+ When initialized with a |RefPtr|, as in the example above, it returns
+ a |void**|, a |T**|, or an |nsISupports**| as needed, that the
+ outer call (|GetAddRefedPointer| in this case) can fill in.
+
+ This type should be a nested class inside |RefPtr<T>|.
+*/
+{
+ public:
+ explicit RefPtrGetterAddRefs(RefPtr<T>& aSmartPtr)
+ : mTargetSmartPtr(aSmartPtr) {
+ // nothing else to do
+ }
+
+ operator void**() {
+ return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
+ }
+
+ operator T**() { return mTargetSmartPtr.StartAssignment(); }
+
+ T*& operator*() { return *(mTargetSmartPtr.StartAssignment()); }
+
+ private:
+ RefPtr<T>& mTargetSmartPtr;
+};
+
+template <class T>
+inline RefPtrGetterAddRefs<T> getter_AddRefs(RefPtr<T>& aSmartPtr)
+/*
+  Used around a |RefPtr| when passing it as an "out" parameter to a getter;
+  it makes the helper class |RefPtrGetterAddRefs<T>| invisible at call sites.
+*/
+{
+ return RefPtrGetterAddRefs<T>(aSmartPtr);
+}
+
+// Comparing two |RefPtr|s
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
+ return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
+ return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
+}
+
+// Comparing an |RefPtr| to a raw pointer
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, const U* aRhs) {
+ return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator==(const U* aLhs, const RefPtr<T>& aRhs) {
+ return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, const U* aRhs) {
+ return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator!=(const U* aLhs, const RefPtr<T>& aRhs) {
+ return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, U* aRhs) {
+ return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator==(U* aLhs, const RefPtr<T>& aRhs) {
+ return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, U* aRhs) {
+ return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator!=(U* aLhs, const RefPtr<T>& aRhs) {
+ return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
+}
+
+// Comparing an |RefPtr| to |nullptr|
+
+template <class T>
+inline bool operator==(const RefPtr<T>& aLhs, decltype(nullptr)) {
+ return aLhs.get() == nullptr;
+}
+
+template <class T>
+inline bool operator==(decltype(nullptr), const RefPtr<T>& aRhs) {
+ return nullptr == aRhs.get();
+}
+
+template <class T>
+inline bool operator!=(const RefPtr<T>& aLhs, decltype(nullptr)) {
+ return aLhs.get() != nullptr;
+}
+
+template <class T>
+inline bool operator!=(decltype(nullptr), const RefPtr<T>& aRhs) {
+ return nullptr != aRhs.get();
+}
+
+// MOZ_DBG support
+
+template <class T>
+std::ostream& operator<<(std::ostream& aOut, const RefPtr<T>& aObj) {
+ return mozilla::DebugValue(aOut, aObj.get());
+}
+
+/*****************************************************************************/
+
+template <class T>
+inline already_AddRefed<T> do_AddRef(T* aObj) {
+ RefPtr<T> ref(aObj);
+ return ref.forget();
+}
+
+template <class T>
+inline already_AddRefed<T> do_AddRef(const RefPtr<T>& aObj) {
+ RefPtr<T> ref(aObj);
+ return ref.forget();
+}
+
+namespace mozilla {
+
+template <typename T>
+class AlignmentFinder;
+
+// Provide a specialization of AlignmentFinder to allow MOZ_ALIGNOF(RefPtr<T>)
+// with an incomplete T.
+template <typename T>
+class AlignmentFinder<RefPtr<T>> {
+ public:
+ static const size_t alignment = alignof(T*);
+};
+
+/**
+ * Helper function to be able to conveniently write things like:
+ *
+ * already_AddRefed<T>
+ * f(...)
+ * {
+ * return MakeAndAddRef<T>(...);
+ * }
+ */
+template <typename T, typename... Args>
+already_AddRefed<T> MakeAndAddRef(Args&&... aArgs) {
+ RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
+ return p.forget();
+}
+
+/**
+ * Helper function to be able to conveniently write things like:
+ *
+ * auto runnable =
+ * MakeRefPtr<ErrorCallbackRunnable<nsIDOMGetUserMediaSuccessCallback>>(
+ * mOnSuccess, mOnFailure, *error, mWindowID);
+ */
+template <typename T, typename... Args>
+RefPtr<T> MakeRefPtr(Args&&... aArgs) {
+ RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
+ return p;
+}
+
+} // namespace mozilla
+
+/**
+ * Deduction guide to allow simple `RefPtr` definitions from an
+ * already_AddRefed<T> without repeating the type, e.g.:
+ *
+ * RefPtr ptr = MakeAndAddRef<SomeType>(...);
+ */
+template <typename T>
+RefPtr(already_AddRefed<T>) -> RefPtr<T>;
+
+#endif /* mozilla_RefPtr_h */
diff --git a/mfbt/Result.h b/mfbt/Result.h
new file mode 100644
index 0000000000..052920fdbf
--- /dev/null
+++ b/mfbt/Result.h
@@ -0,0 +1,873 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A type suitable for returning either a value or an error from a function. */
+
+#ifndef mozilla_Result_h
+#define mozilla_Result_h
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/CompactPair.h"
+#include "mozilla/MaybeStorageBase.h"
+
+namespace mozilla {
+
+/**
+ * Empty struct, indicating success for operations that have no return value.
+ * For example, if you declare another empty struct `struct OutOfMemory {};`,
+ * then `Result<Ok, OutOfMemory>` represents either success or OOM.
+ */
+struct Ok {};
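+
+// Illustrative sketch (|OutOfMemory| and |TryReserve| are hypothetical):
+//
+//   struct OutOfMemory {};
+//
+//   Result<Ok, OutOfMemory> Reserve(size_t aBytes) {
+//     if (!TryReserve(aBytes)) {
+//       return Err(OutOfMemory{});
+//     }
+//     return Ok();
+//   }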
+
+/**
+ * A tag used to differentiate between GenericErrorResult created by the Err
+ * function (completely new error) and GenericErrorResult created by the
+ * Result::propagateErr function (propagated error). This can be used to track
+ * error propagation and eventually produce error stacks for logging/debugging
+ * purposes.
+ */
+struct ErrorPropagationTag {};
+
+template <typename E>
+class GenericErrorResult;
+template <typename V, typename E>
+class Result;
+
+namespace detail {
+
+enum class PackingStrategy {
+ Variant,
+ NullIsOk,
+ LowBitTagIsError,
+ PackedVariant,
+ ZeroIsEmptyError,
+};
+
+template <typename T>
+struct UnusedZero;
+
+template <typename V, typename E, PackingStrategy Strategy>
+class ResultImplementation;
+
+template <typename V>
+struct EmptyWrapper : V {
+ constexpr EmptyWrapper() = default;
+ explicit constexpr EmptyWrapper(const V&) {}
+ explicit constexpr EmptyWrapper(std::in_place_t) {}
+
+ constexpr V* addr() { return this; }
+ constexpr const V* addr() const { return this; }
+};
+
+// The purpose of AlignedStorageOrEmpty is to make an empty class look like
+// std::aligned_storage_t for the purposes of the PackingStrategy::NullIsOk
+// specializations of ResultImplementation below. We can't use
+// std::aligned_storage_t itself with an empty class, since it would no longer
+// be empty.
+template <typename V>
+using AlignedStorageOrEmpty =
+ std::conditional_t<std::is_empty_v<V>, EmptyWrapper<V>,
+ MaybeStorageBase<V>>;
+
+template <typename V, typename E>
+class ResultImplementationNullIsOkBase {
+ protected:
+ using ErrorStorageType = typename UnusedZero<E>::StorageType;
+
+ static constexpr auto kNullValue = UnusedZero<E>::nullValue;
+
+ static_assert(std::is_trivially_copyable_v<ErrorStorageType>);
+
+ // XXX This can't be statically asserted in general, if ErrorStorageType is
+  // not a basic type. With C++20 bit_cast, we could probably re-add such an
+  // assertion: static_assert(kNullValue == decltype(kNullValue)(0));
+
+ CompactPair<AlignedStorageOrEmpty<V>, ErrorStorageType> mValue;
+
+ public:
+ explicit constexpr ResultImplementationNullIsOkBase(const V& aSuccessValue)
+ : mValue(aSuccessValue, kNullValue) {}
+ explicit constexpr ResultImplementationNullIsOkBase(V&& aSuccessValue)
+ : mValue(std::move(aSuccessValue), kNullValue) {}
+ template <typename... Args>
+ explicit constexpr ResultImplementationNullIsOkBase(std::in_place_t,
+ Args&&... aArgs)
+ : mValue(std::piecewise_construct,
+ std::tuple(std::in_place, std::forward<Args>(aArgs)...),
+ std::tuple(kNullValue)) {}
+ explicit constexpr ResultImplementationNullIsOkBase(E aErrorValue)
+ : mValue(std::piecewise_construct, std::tuple<>(),
+ std::tuple(UnusedZero<E>::Store(std::move(aErrorValue)))) {
+ MOZ_ASSERT(mValue.second() != kNullValue);
+ }
+
+ constexpr ResultImplementationNullIsOkBase(
+ ResultImplementationNullIsOkBase&& aOther)
+ : mValue(std::piecewise_construct, std::tuple<>(),
+ std::tuple(aOther.mValue.second())) {
+ if constexpr (!std::is_empty_v<V>) {
+ if (isOk()) {
+ new (mValue.first().addr()) V(std::move(*aOther.mValue.first().addr()));
+ }
+ }
+ }
+ ResultImplementationNullIsOkBase& operator=(
+ ResultImplementationNullIsOkBase&& aOther) {
+ if constexpr (!std::is_empty_v<V>) {
+ if (isOk()) {
+ mValue.first().addr()->~V();
+ }
+ }
+ mValue.second() = std::move(aOther.mValue.second());
+ if constexpr (!std::is_empty_v<V>) {
+ if (isOk()) {
+ new (mValue.first().addr()) V(std::move(*aOther.mValue.first().addr()));
+ }
+ }
+ return *this;
+ }
+
+ constexpr bool isOk() const { return mValue.second() == kNullValue; }
+
+ constexpr const V& inspect() const { return *mValue.first().addr(); }
+ constexpr V unwrap() { return std::move(*mValue.first().addr()); }
+ constexpr void updateAfterTracing(V&& aValue) {
+ MOZ_ASSERT(isOk());
+ if (!std::is_empty_v<V>) {
+ mValue.first().addr()->~V();
+ new (mValue.first().addr()) V(std::move(aValue));
+ }
+ }
+
+ constexpr decltype(auto) inspectErr() const {
+ return UnusedZero<E>::Inspect(mValue.second());
+ }
+ constexpr E unwrapErr() { return UnusedZero<E>::Unwrap(mValue.second()); }
+ constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+ mValue.second() = UnusedZero<E>::Store(std::move(aErrorValue));
+ }
+};
+
+template <typename V, typename E,
+ bool IsVTriviallyDestructible = std::is_trivially_destructible_v<V>>
+class ResultImplementationNullIsOk;
+
+template <typename V, typename E>
+class ResultImplementationNullIsOk<V, E, true>
+ : public ResultImplementationNullIsOkBase<V, E> {
+ public:
+ using ResultImplementationNullIsOkBase<V,
+ E>::ResultImplementationNullIsOkBase;
+};
+
+template <typename V, typename E>
+class ResultImplementationNullIsOk<V, E, false>
+ : public ResultImplementationNullIsOkBase<V, E> {
+ public:
+ using ResultImplementationNullIsOkBase<V,
+ E>::ResultImplementationNullIsOkBase;
+
+ ResultImplementationNullIsOk(ResultImplementationNullIsOk&&) = default;
+ ResultImplementationNullIsOk& operator=(ResultImplementationNullIsOk&&) =
+ default;
+
+ ~ResultImplementationNullIsOk() {
+ if (this->isOk()) {
+ this->mValue.first().addr()->~V();
+ }
+ }
+};
+
+/**
+ * Specialization for when the success type is an integral, pointer, or enum
+ * type in which the value 0 is unused, and the error type is an empty struct.
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::ZeroIsEmptyError> {
+ static_assert(std::is_integral_v<V> || std::is_pointer_v<V> ||
+ std::is_enum_v<V>);
+ static_assert(std::is_empty_v<E>);
+
+ V mValue;
+
+ public:
+ static constexpr PackingStrategy Strategy = PackingStrategy::ZeroIsEmptyError;
+
+ explicit constexpr ResultImplementation(V aValue) : mValue(aValue) {}
+ explicit constexpr ResultImplementation(E aErrorValue) : mValue(V(0)) {}
+
+ constexpr bool isOk() const { return mValue != V(0); }
+
+ constexpr V inspect() const { return mValue; }
+ constexpr V unwrap() { return inspect(); }
+
+ constexpr E inspectErr() const { return E(); }
+ constexpr E unwrapErr() { return inspectErr(); }
+
+ constexpr void updateAfterTracing(V&& aValue) {
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aValue));
+ }
+ constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aErrorValue));
+ }
+};
+
+/**
+ * Specialization for when the success type is default-constructible and the
+ * error type is a value type which can never have the value 0 (as determined by
+ * UnusedZero<>).
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::NullIsOk>
+ : public ResultImplementationNullIsOk<V, E> {
+ public:
+ static constexpr PackingStrategy Strategy = PackingStrategy::NullIsOk;
+ using ResultImplementationNullIsOk<V, E>::ResultImplementationNullIsOk;
+};
+
+template <size_t S>
+using UnsignedIntType = std::conditional_t<
+ S == 1, std::uint8_t,
+ std::conditional_t<
+ S == 2, std::uint16_t,
+ std::conditional_t<S == 3 || S == 4, std::uint32_t,
+ std::conditional_t<S <= 8, std::uint64_t, void>>>>;
+
+/**
+ * Specialization for when alignment permits using the least significant bit
+ * as a tag bit.
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::LowBitTagIsError> {
+ static_assert(std::is_trivially_copyable_v<V> &&
+ std::is_trivially_destructible_v<V>);
+ static_assert(std::is_trivially_copyable_v<E> &&
+ std::is_trivially_destructible_v<E>);
+
+ static constexpr size_t kRequiredSize = std::max(sizeof(V), sizeof(E));
+
+ using StorageType = UnsignedIntType<kRequiredSize>;
+
+#if defined(__clang__)
+ alignas(std::max(alignof(V), alignof(E))) StorageType mBits;
+#else
+  // Some gcc versions choke on using std::max with alignas, see
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94929 (and this seems to have
+  // regressed in some gcc 9.x version before being fixed again). We keep the
+  // code above since we can eventually drop this workaround once we no longer
+  // support gcc versions with the bug.
+ alignas(alignof(V) > alignof(E) ? alignof(V) : alignof(E)) StorageType mBits;
+#endif
+
+ public:
+ static constexpr PackingStrategy Strategy = PackingStrategy::LowBitTagIsError;
+
+ explicit constexpr ResultImplementation(V aValue) : mBits(0) {
+ if constexpr (!std::is_empty_v<V>) {
+ std::memcpy(&mBits, &aValue, sizeof(V));
+ MOZ_ASSERT((mBits & 1) == 0);
+ } else {
+ (void)aValue;
+ }
+ }
+ explicit constexpr ResultImplementation(E aErrorValue) : mBits(1) {
+ if constexpr (!std::is_empty_v<E>) {
+ std::memcpy(&mBits, &aErrorValue, sizeof(E));
+ MOZ_ASSERT((mBits & 1) == 0);
+ mBits |= 1;
+ } else {
+ (void)aErrorValue;
+ }
+ }
+
+ constexpr bool isOk() const { return (mBits & 1) == 0; }
+
+ constexpr V inspect() const {
+ V res;
+ std::memcpy(&res, &mBits, sizeof(V));
+ return res;
+ }
+ constexpr V unwrap() { return inspect(); }
+
+ constexpr E inspectErr() const {
+ const auto bits = mBits ^ 1;
+ E res;
+ std::memcpy(&res, &bits, sizeof(E));
+ return res;
+ }
+ constexpr E unwrapErr() { return inspectErr(); }
+
+ constexpr void updateAfterTracing(V&& aValue) {
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aValue));
+ }
+ constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aErrorValue));
+ }
+};
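+
+// Illustrative sketch (hypothetical types; assumes a 64-bit target): for a
+// pointer success type and an empty error type, both sides have a free LSB,
+// so the Result occupies a single word. Success stores the pointer bits
+// unchanged (the LSB is 0 since int is more than byte-aligned); an error
+// stores the bit pattern 0x1, and isOk() only tests (mBits & 1) == 0.
+//
+//   struct EmptyErr {};                        // empty => HasFreeLSB
+//   static_assert(sizeof(Result<int*, EmptyErr>) == sizeof(uintptr_t));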
+
+// Return true if the smaller of the two candidate layouts (VEbool or EVbool)
+// fits in a word.
+template <typename V, typename E>
+struct IsPackableVariant {
+ struct VEbool {
+ explicit constexpr VEbool(V&& aValue) : v(std::move(aValue)), ok(true) {}
+ explicit constexpr VEbool(E&& aErrorValue)
+ : e(std::move(aErrorValue)), ok(false) {}
+ V v;
+ E e;
+ bool ok;
+ };
+ struct EVbool {
+ explicit constexpr EVbool(V&& aValue) : v(std::move(aValue)), ok(true) {}
+ explicit constexpr EVbool(E&& aErrorValue)
+ : e(std::move(aErrorValue)), ok(false) {}
+ E e;
+ V v;
+ bool ok;
+ };
+
+ using Impl =
+ std::conditional_t<sizeof(VEbool) <= sizeof(EVbool), VEbool, EVbool>;
+
+ static const bool value = sizeof(Impl) <= sizeof(uintptr_t);
+};
+
+/**
+ * Specialization for when the success and error types, together with a
+ * one-byte tag, are small enough to be packed into a single word.
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::PackedVariant> {
+ using Impl = typename IsPackableVariant<V, E>::Impl;
+ Impl data;
+
+ public:
+ static constexpr PackingStrategy Strategy = PackingStrategy::PackedVariant;
+
+ explicit constexpr ResultImplementation(V aValue) : data(std::move(aValue)) {}
+ explicit constexpr ResultImplementation(E aErrorValue)
+ : data(std::move(aErrorValue)) {}
+
+ constexpr bool isOk() const { return data.ok; }
+
+ constexpr const V& inspect() const { return data.v; }
+ constexpr V unwrap() { return std::move(data.v); }
+
+ constexpr const E& inspectErr() const { return data.e; }
+ constexpr E unwrapErr() { return std::move(data.e); }
+
+ constexpr void updateAfterTracing(V&& aValue) {
+ MOZ_ASSERT(data.ok);
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aValue));
+ }
+ constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+ MOZ_ASSERT(!data.ok);
+ this->~ResultImplementation();
+ new (this) ResultImplementation(std::move(aErrorValue));
+ }
+};
+
+// To use nullptr as a special value, we need the counterpart type to exclude
+// zero from its range of valid representations.
+//
+// By default assume that zero can be represented.
+template <typename T>
+struct UnusedZero {
+ static const bool value = false;
+};
+
+// This template can be used as a helper for specializing UnusedZero for scoped
+// enum types which never use 0 as an error value, e.g.
+//
+// namespace mozilla::detail {
+//
+// template <>
+// struct UnusedZero<MyEnumType> : UnusedZeroEnum<MyEnumType> {};
+//
+// } // namespace mozilla::detail
+//
+template <typename T>
+struct UnusedZeroEnum {
+ using StorageType = std::underlying_type_t<T>;
+
+ static constexpr bool value = true;
+ static constexpr StorageType nullValue = 0;
+
+ static constexpr T Inspect(const StorageType& aValue) {
+ return static_cast<T>(aValue);
+ }
+ static constexpr T Unwrap(StorageType aValue) {
+ return static_cast<T>(aValue);
+ }
+ static constexpr StorageType Store(T aValue) {
+ return static_cast<StorageType>(aValue);
+ }
+};
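+
+// A sketch with a hypothetical enum: an error enumeration that deliberately
+// reserves 0 can opt in as shown above, after which Result<V, MyEnumType>
+// can use the NullIsOk packing, with 0 marking success:
+//
+//   enum class MyEnumType : uint8_t { ErrorA = 1, ErrorB = 2 };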
+
+// A bit of help figuring out which of the above specializations to use.
+//
+// We begin by safely assuming types don't have a spare bit, unless they are
+// empty.
+template <typename T>
+struct HasFreeLSB {
+ static const bool value = std::is_empty_v<T>;
+};
+
+// Since void is an incomplete type, the alignment of the pointee is unknown,
+// so void* does not have a spare bit.
+template <>
+struct HasFreeLSB<void*> {
+ static const bool value = false;
+};
+
+// The lowest bit of a properly-aligned pointer is always zero if the pointee
+// type is greater than byte-aligned. That bit is free to use if it's masked
+// out of such pointers before they're dereferenced.
+template <typename T>
+struct HasFreeLSB<T*> {
+ static const bool value = (alignof(T) & 1) == 0;
+};
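+
+// Other types can opt in by specializing this trait (see the Result
+// documentation below). A sketch, where MyWordAlignedHandle is a hypothetical
+// type whose valid bit patterns always keep the low bit clear:
+//
+//   template <>
+//   struct HasFreeLSB<MyWordAlignedHandle> {
+//     static const bool value = true;
+//   };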
+
+// Select one of the previous result implementation based on the properties of
+// the V and E types.
+template <typename V, typename E>
+struct SelectResultImpl {
+ static const PackingStrategy value =
+ (UnusedZero<V>::value && std::is_empty_v<E>)
+ ? PackingStrategy::ZeroIsEmptyError
+ : (HasFreeLSB<V>::value && HasFreeLSB<E>::value)
+ ? PackingStrategy::LowBitTagIsError
+ : (UnusedZero<E>::value && sizeof(E) <= sizeof(uintptr_t))
+ ? PackingStrategy::NullIsOk
+ : (std::is_default_constructible_v<V> &&
+ std::is_default_constructible_v<E> && IsPackableVariant<V, E>::value)
+ ? PackingStrategy::PackedVariant
+ : PackingStrategy::Variant;
+
+ using Type = ResultImplementation<V, E, value>;
+};
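+
+// A sketch of how the selection plays out (Empty is hypothetical; the
+// static_assert is illustrative, not part of this header):
+//
+//   struct Empty {};  // empty, so HasFreeLSB<Empty>::value is true
+//   static_assert(SelectResultImpl<int*, Empty>::value ==
+//                 PackingStrategy::LowBitTagIsError);
+//
+// Pairs that defeat every packed strategy fall through to
+// PackingStrategy::Variant; see ResultVariant.h for that case.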
+
+template <typename T>
+struct IsResult : std::false_type {};
+
+template <typename V, typename E>
+struct IsResult<Result<V, E>> : std::true_type {};
+
+} // namespace detail
+
+template <typename V, typename E>
+constexpr auto ToResult(Result<V, E>&& aValue)
+ -> decltype(std::forward<Result<V, E>>(aValue)) {
+ return std::forward<Result<V, E>>(aValue);
+}
+
+/**
+ * Result<V, E> represents the outcome of an operation that can either succeed
+ * or fail. It contains either a success value of type V or an error value of
+ * type E.
+ *
+ * All Result methods are const, so results are basically immutable.
+ * This is just like Variant<V, E> but with a slightly different API, and the
+ * following cases are optimized so Result can be stored more efficiently:
+ *
+ * - If the success and error types both leave their least significant bit
+ * unused and are trivially copyable and destructible, Result<V, E> is
+ * guaranteed to be as large as the larger type. This is determined via the
+ * HasFreeLSB trait. By
+ * default, empty classes (in particular Ok) and aligned pointer types are
+ * assumed to have a free LSB, but you can specialize this trait for other
+ * types. If the success type is empty, the representation is guaranteed to be
+ * all zero bits on success. Do not change this representation! There is JIT
+ * code that depends on it. (Implementation note: The lowest bit is used as a
+ * tag bit: 0 to indicate the Result's bits are a success value, 1 to indicate
+ * the Result's bits (with the 1 masked out) encode an error value)
+ *
+ * - Else, if the error type can't have an all-zero-bits representation and is
+ * not larger than a pointer, a CompactPair is used to represent this rather
+ * than a Variant. This has been shown to optimize better, and the template
+ * code is much simpler than that of Variant, so it should also compile faster.
+ * Whether an error type can't be all-zero bits is determined via the
+ * UnusedZero trait. MFBT doesn't declare any public type UnusedZero, but
+ * nsresult is declared UnusedZero in XPCOM.
+ *
+ * The purpose of Result is to reduce the screwups caused by using `false` or
+ * `nullptr` to indicate errors.
+ * What screwups? See <https://bugzilla.mozilla.org/show_bug.cgi?id=912928> for
+ * a partial list.
+ *
+ * Result<const V, E> or Result<V, const E> are not meaningful. The success or
+ * error values in a Result instance are non-modifiable in-place anyway. This
+ * guarantee must also be maintained when evolving Result. They can be
+ * unwrap()ped, but this loses const qualification. However, Result<const V, E>
+ * or Result<V, const E> may be misleading and prevent movability. Just use
+ * Result<V, E>. (Result<const V*, E> may make sense though, just Result<const
+ * V* const, E> is not possible.)
+ */
+template <typename V, typename E>
+class [[nodiscard]] Result final {
+ // See class comment on Result<const V, E> and Result<V, const E>.
+ static_assert(!std::is_const_v<V>);
+ static_assert(!std::is_const_v<E>);
+ static_assert(!std::is_reference_v<V>);
+ static_assert(!std::is_reference_v<E>);
+
+ using Impl = typename detail::SelectResultImpl<V, E>::Type;
+
+ Impl mImpl;
+ // Are you getting this error?
+ // > error: implicit instantiation of undefined template
+ // > 'mozilla::detail::ResultImplementation<$V,$E,
+ // > mozilla::detail::PackingStrategy::Variant>'
+ // You need to include "ResultVariant.h"!
+
+ public:
+ static constexpr detail::PackingStrategy Strategy = Impl::Strategy;
+ using ok_type = V;
+ using err_type = E;
+
+ /** Create a success result. */
+ MOZ_IMPLICIT constexpr Result(V&& aValue) : mImpl(std::move(aValue)) {
+ MOZ_ASSERT(isOk());
+ }
+
+ /** Create a success result. */
+ MOZ_IMPLICIT constexpr Result(const V& aValue) : mImpl(aValue) {
+ MOZ_ASSERT(isOk());
+ }
+
+ /** Create a success result in-place. */
+ template <typename... Args>
+ explicit constexpr Result(std::in_place_t, Args&&... aArgs)
+ : mImpl(std::in_place, std::forward<Args>(aArgs)...) {
+ MOZ_ASSERT(isOk());
+ }
+
+ /** Create an error result. */
+ explicit constexpr Result(const E& aErrorValue) : mImpl(aErrorValue) {
+ MOZ_ASSERT(isErr());
+ }
+ explicit constexpr Result(E&& aErrorValue) : mImpl(std::move(aErrorValue)) {
+ MOZ_ASSERT(isErr());
+ }
+
+ /**
+ * Create a (success/error) result from another (success/error) result with
+ * different but convertible value and error types.
+ */
+ template <typename V2, typename E2,
+ typename = std::enable_if_t<std::is_convertible_v<V2, V> &&
+ std::is_convertible_v<E2, E>>>
+ MOZ_IMPLICIT constexpr Result(Result<V2, E2>&& aOther)
+ : mImpl(aOther.isOk() ? Impl{aOther.unwrap()}
+ : Impl{aOther.unwrapErr()}) {}
+
+ /**
+ * Implementation detail of MOZ_TRY().
+ * Create an error result from another error result.
+ */
+ template <typename E2>
+ MOZ_IMPLICIT constexpr Result(GenericErrorResult<E2>&& aErrorResult)
+ : mImpl(std::move(aErrorResult.mErrorValue)) {
+ static_assert(std::is_convertible_v<E2, E>, "E2 must be convertible to E");
+ MOZ_ASSERT(isErr());
+ }
+
+ /**
+ * Implementation detail of MOZ_TRY().
+ * Create an error result from another error result.
+ */
+ template <typename E2>
+ MOZ_IMPLICIT constexpr Result(const GenericErrorResult<E2>& aErrorResult)
+ : mImpl(aErrorResult.mErrorValue) {
+ static_assert(std::is_convertible_v<E2, E>, "E2 must be convertible to E");
+ MOZ_ASSERT(isErr());
+ }
+
+ Result(const Result&) = delete;
+ Result(Result&&) = default;
+ Result& operator=(const Result&) = delete;
+ Result& operator=(Result&&) = default;
+
+ /** True if this Result is a success result. */
+ constexpr bool isOk() const { return mImpl.isOk(); }
+
+ /** True if this Result is an error result. */
+ constexpr bool isErr() const { return !mImpl.isOk(); }
+
+ /** Take the success value from this Result, which must be a success result.
+ */
+ constexpr V unwrap() {
+ MOZ_ASSERT(isOk());
+ return mImpl.unwrap();
+ }
+
+  /**
+   * Take the success value from this Result if it is a success result;
+   * otherwise return aValue.
+   */
+ constexpr V unwrapOr(V aValue) {
+ return MOZ_LIKELY(isOk()) ? mImpl.unwrap() : std::move(aValue);
+ }
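+
+  // Sketch (illustrative): unwrapOr supplies a fallback without branching at
+  // the call site:
+  //
+  //   Result<int, Error> res = ComputeSomething();
+  //   int value = res.unwrapOr(0);  // 0 if res was an error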
+
+ /** Take the error value from this Result, which must be an error result. */
+ constexpr E unwrapErr() {
+ MOZ_ASSERT(isErr());
+ return mImpl.unwrapErr();
+ }
+
+ /** Used only for GC tracing. If used in Rooted<Result<...>>, V must have a
+ * GCPolicy for tracing it. */
+ constexpr void updateAfterTracing(V&& aValue) {
+ mImpl.updateAfterTracing(std::move(aValue));
+ }
+
+ /** Used only for GC tracing. If used in Rooted<Result<...>>, E must have a
+ * GCPolicy for tracing it. */
+ constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+ mImpl.updateErrorAfterTracing(std::move(aErrorValue));
+ }
+
+ /** See the success value from this Result, which must be a success result. */
+ constexpr decltype(auto) inspect() const {
+ static_assert(!std::is_reference_v<
+ std::invoke_result_t<decltype(&Impl::inspect), Impl>> ||
+ std::is_const_v<std::remove_reference_t<
+ std::invoke_result_t<decltype(&Impl::inspect), Impl>>>);
+ MOZ_ASSERT(isOk());
+ return mImpl.inspect();
+ }
+
+ /** See the error value from this Result, which must be an error result. */
+ constexpr decltype(auto) inspectErr() const {
+ static_assert(
+ !std::is_reference_v<
+ std::invoke_result_t<decltype(&Impl::inspectErr), Impl>> ||
+ std::is_const_v<std::remove_reference_t<
+ std::invoke_result_t<decltype(&Impl::inspectErr), Impl>>>);
+ MOZ_ASSERT(isErr());
+ return mImpl.inspectErr();
+ }
+
+ /** Propagate the error value from this Result, which must be an error result.
+ *
+ * This can be used to propagate an error from a function call to the caller
+ * with a different value type, but the same error type:
+ *
+ * Result<T1, E> Func1() {
+ * Result<T2, E> res = Func2();
+ * if (res.isErr()) { return res.propagateErr(); }
+ * }
+ */
+ constexpr GenericErrorResult<E> propagateErr() {
+ MOZ_ASSERT(isErr());
+ return GenericErrorResult<E>{mImpl.unwrapErr(), ErrorPropagationTag{}};
+ }
+
+ /**
+ * Map a function V -> V2 over this result's success variant. If this result
+ * is an error, do not invoke the function and propagate the error.
+ *
+ * Mapping over success values invokes the function to produce a new success
+ * value:
+ *
+ * // Map Result<int, E> to another Result<int, E>
+ * Result<int, E> res(5);
+ * Result<int, E> res2 = res.map([](int x) { return x * x; });
+ * MOZ_ASSERT(res.isOk());
+ * MOZ_ASSERT(res2.unwrap() == 25);
+ *
+ * // Map Result<const char*, E> to Result<size_t, E>
+ * Result<const char*, E> res("hello, map!");
+ * Result<size_t, E> res2 = res.map(strlen);
+ * MOZ_ASSERT(res.isOk());
+ * MOZ_ASSERT(res2.unwrap() == 11);
+ *
+ * Mapping over an error does not invoke the function and propagates the
+ * error:
+ *
+ * Result<V, int> res(5);
+ * MOZ_ASSERT(res.isErr());
+ * Result<V2, int> res2 = res.map([](V v) { ... });
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res2.unwrapErr() == 5);
+ */
+ template <typename F>
+ constexpr auto map(F f) -> Result<std::invoke_result_t<F, V>, E> {
+ using RetResult = Result<std::invoke_result_t<F, V>, E>;
+ return MOZ_LIKELY(isOk()) ? RetResult(f(unwrap())) : RetResult(unwrapErr());
+ }
+
+ /**
+ * Map a function E -> E2 over this result's error variant. If this result is
+ * a success, do not invoke the function and move the success over.
+ *
+ * Mapping over error values invokes the function to produce a new error
+ * value:
+ *
+ * // Map Result<V, int> to another Result<V, int>
+ * Result<V, int> res(5);
+ * Result<V, int> res2 = res.mapErr([](int x) { return x * x; });
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res2.unwrapErr() == 25);
+ *
+ * // Map Result<V, const char*> to Result<V, size_t>
+ * Result<V, const char*> res("hello, mapErr!");
+ * Result<V, size_t> res2 = res.mapErr(strlen);
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res2.unwrapErr() == 14);
+ *
+ * Mapping over a success does not invoke the function and moves the success:
+ *
+ * Result<int, E> res(5);
+ * MOZ_ASSERT(res.isOk());
+ * Result<int, E2> res2 = res.mapErr([](E e) { ... });
+ * MOZ_ASSERT(res2.isOk());
+ * MOZ_ASSERT(res2.unwrap() == 5);
+ */
+ template <typename F>
+ constexpr auto mapErr(F f) {
+ using RetResult = Result<V, std::invoke_result_t<F, E>>;
+ return MOZ_UNLIKELY(isErr()) ? RetResult(f(unwrapErr()))
+ : RetResult(unwrap());
+ }
+
+ /**
+ * Map a function E -> Result<V, E2> over this result's error variant. If
+ * this result is a success, do not invoke the function and move the success
+ * over.
+ *
+ * `orElse`ing over error values invokes the function to produce a new
+ * result:
+ *
+ * // `orElse` Result<V, int> error variant to another Result<V, int>
+ * // error variant or Result<V, int> success variant
+ * auto orElse = [](int x) -> Result<V, int> {
+ * if (x != 6) {
+ * return Err(x * x);
+ * }
+ * return V(...);
+ * };
+ *
+ * Result<V, int> res(5);
+ * auto res2 = res.orElse(orElse);
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res2.unwrapErr() == 25);
+ *
+ * Result<V, int> res3(6);
+ * auto res4 = res3.orElse(orElse);
+ * MOZ_ASSERT(res4.isOk());
+ * MOZ_ASSERT(res4.unwrap() == ...);
+ *
+ * // `orElse` Result<V, const char*> error variant to Result<V, size_t>
+ * // error variant or Result<V, size_t> success variant
+ * auto orElse = [](const char* s) -> Result<V, size_t> {
+ * if (strcmp(s, "foo")) {
+ * return Err(strlen(s));
+ * }
+ * return V(...);
+ * };
+ *
+ * Result<V, const char*> res("hello, orElse!");
+ * auto res2 = res.orElse(orElse);
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res2.unwrapErr() == 14);
+ *
+ * Result<V, const char*> res3("foo");
+ *   auto res4 = res3.orElse(orElse);
+ * MOZ_ASSERT(res4.isOk());
+ * MOZ_ASSERT(res4.unwrap() == ...);
+ *
+ * `orElse`ing over a success does not invoke the function and moves the
+ * success:
+ *
+ * Result<int, E> res(5);
+ * MOZ_ASSERT(res.isOk());
+ * Result<int, E2> res2 = res.orElse([](E e) { ... });
+ * MOZ_ASSERT(res2.isOk());
+ * MOZ_ASSERT(res2.unwrap() == 5);
+ */
+ template <typename F>
+ auto orElse(F f) -> Result<V, typename std::invoke_result_t<F, E>::err_type> {
+ return MOZ_UNLIKELY(isErr()) ? f(unwrapErr()) : unwrap();
+ }
+
+ /**
+ * Given a function V -> Result<V2, E>, apply it to this result's success
+ * value and return its result. If this result is an error value, it is
+ * propagated.
+ *
+ * This is sometimes called "flatMap" or ">>=" in other contexts.
+ *
+ * `andThen`ing over success values invokes the function to produce a new
+ * result:
+ *
+ * Result<const char*, Error> res("hello, andThen!");
+ *   Result<HtmlFreeString, Error> res2 = res.andThen([](const char* s) {
+ *     return containsHtmlTag(s)
+ *                ? Result<HtmlFreeString, Error>(Error("Invalid: contains HTML"))
+ *                : Result<HtmlFreeString, Error>(HtmlFreeString(s));
+ *   });
+ *   MOZ_ASSERT(res2.isOk());
+ *   MOZ_ASSERT(res2.unwrap() == HtmlFreeString("hello, andThen!"));
+ *
+ * `andThen`ing over error results does not invoke the function, and just
+ * propagates the error result:
+ *
+ * Result<int, const char*> res("some error");
+ * auto res2 = res.andThen([](int x) { ... });
+ * MOZ_ASSERT(res2.isErr());
+ * MOZ_ASSERT(res.unwrapErr() == res2.unwrapErr());
+ */
+ template <typename F, typename = std::enable_if_t<detail::IsResult<
+ std::invoke_result_t<F, V&&>>::value>>
+ constexpr auto andThen(F f) -> std::invoke_result_t<F, V&&> {
+ return MOZ_LIKELY(isOk()) ? f(unwrap()) : propagateErr();
+ }
+};
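+
+// Basic usage sketch (ParseEven is hypothetical; note that this particular
+// V/E pair selects the Variant strategy, so ResultVariant.h would also have
+// to be included):
+//
+//   Result<int, const char*> ParseEven(int aValue) {
+//     if (aValue % 2 != 0) {
+//       return Err("odd value");
+//     }
+//     return aValue;
+//   }
+//
+//   auto res = ParseEven(4);
+//   MOZ_ASSERT(res.isOk() && res.unwrap() == 4);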
+
+/**
+ * A type that auto-converts to an error Result. This is like a Result without
+ * a success type. It's the best return type for functions that always return
+ * an error--functions designed to build and populate error objects. It's also
+ * useful in error-handling macros; see MOZ_TRY for an example.
+ */
+template <typename E>
+class [[nodiscard]] GenericErrorResult {
+ E mErrorValue;
+
+ template <typename V, typename E2>
+ friend class Result;
+
+ public:
+ explicit constexpr GenericErrorResult(const E& aErrorValue)
+ : mErrorValue(aErrorValue) {}
+
+ explicit constexpr GenericErrorResult(E&& aErrorValue)
+ : mErrorValue(std::move(aErrorValue)) {}
+
+ constexpr GenericErrorResult(const E& aErrorValue, const ErrorPropagationTag&)
+ : GenericErrorResult(aErrorValue) {}
+
+ constexpr GenericErrorResult(E&& aErrorValue, const ErrorPropagationTag&)
+ : GenericErrorResult(std::move(aErrorValue)) {}
+};
+
+template <typename E>
+inline constexpr auto Err(E&& aErrorValue) {
+ return GenericErrorResult<std::decay_t<E>>(std::forward<E>(aErrorValue));
+}
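+
+// Sketch: Err() deduces and decays the error type, so callers don't have to
+// spell out GenericErrorResult themselves:
+//
+//   Result<int, const char*> Fail() { return Err("reason"); }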
+
+} // namespace mozilla
+
+#endif // mozilla_Result_h
diff --git a/mfbt/ResultExtensions.h b/mfbt/ResultExtensions.h
new file mode 100644
index 0000000000..97f197d800
--- /dev/null
+++ b/mfbt/ResultExtensions.h
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Extensions to the Result type to enable simpler handling of XPCOM/NSPR
+ * results. */
+
+#ifndef mozilla_ResultExtensions_h
+#define mozilla_ResultExtensions_h
+
+#include "mozilla/Assertions.h"
+#include "nscore.h"
+#include "prtypes.h"
+#include "mozilla/dom/quota/RemoveParen.h"
+
+namespace mozilla {
+
+struct ErrorPropagationTag;
+
+// Allow nsresult errors to automatically convert to nsresult values, so
+// MOZ_TRY can be used in XPCOM methods with Result<T, nsresult> results.
+template <>
+class [[nodiscard]] GenericErrorResult<nsresult> {
+ nsresult mErrorValue;
+
+ template <typename V, typename E2>
+ friend class Result;
+
+ public:
+ explicit GenericErrorResult(nsresult aErrorValue) : mErrorValue(aErrorValue) {
+ MOZ_ASSERT(NS_FAILED(aErrorValue));
+ }
+
+ GenericErrorResult(nsresult aErrorValue, const ErrorPropagationTag&)
+ : GenericErrorResult(aErrorValue) {}
+
+ operator nsresult() const { return mErrorValue; }
+};
+
+// Allow MOZ_TRY to handle `PRStatus` values.
+template <typename E = nsresult>
+inline Result<Ok, E> ToResult(PRStatus aValue);
+
+} // namespace mozilla
+
+#include "mozilla/Result.h"
+
+namespace mozilla {
+
+template <typename ResultType>
+struct ResultTypeTraits;
+
+template <>
+struct ResultTypeTraits<nsresult> {
+ static nsresult From(nsresult aValue) { return aValue; }
+};
+
+template <typename E>
+inline Result<Ok, E> ToResult(nsresult aValue) {
+ if (NS_FAILED(aValue)) {
+ return Err(ResultTypeTraits<E>::From(aValue));
+ }
+ return Ok();
+}
+
+template <typename E>
+inline Result<Ok, E> ToResult(PRStatus aValue) {
+ if (aValue == PR_SUCCESS) {
+ return Ok();
+ }
+ return Err(ResultTypeTraits<E>::From(NS_ERROR_FAILURE));
+}
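+
+// Usage sketch (NS_OK, NS_ERROR_FAILURE and PR_SUCCESS are the usual
+// XPCOM/NSPR constants): both overloads turn a C-style status code into a
+// Result<Ok, E> that composes with Result-based control flow:
+//
+//   Result<Ok, nsresult> r1 = ToResult<nsresult>(NS_OK);             // isOk()
+//   Result<Ok, nsresult> r2 = ToResult<nsresult>(NS_ERROR_FAILURE);  // isErr()
+//   Result<Ok, nsresult> r3 = ToResult(PR_SUCCESS);                  // isOk()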
+
+namespace detail {
+template <typename R>
+auto ResultRefAsParam(R& aResult) {
+ return &aResult;
+}
+
+template <typename R, typename E, typename RArgMapper, typename Func,
+ typename... Args>
+Result<R, E> ToResultInvokeInternal(const Func& aFunc,
+ const RArgMapper& aRArgMapper,
+ Args&&... aArgs) {
+  // XXX Theoretically, if R is a pointer to a non-refcounted type, this might
+ // be a non-owning pointer, but unless we find a case where this actually is
+ // relevant, it's safe to forbid any raw pointer result.
+ static_assert(
+ !std::is_pointer_v<R>,
+ "Raw pointer results are not supported, please specify a smart pointer "
+ "result type explicitly, so that getter_AddRefs is used");
+
+ R res;
+ nsresult rv = aFunc(std::forward<Args>(aArgs)..., aRArgMapper(res));
+ if (NS_FAILED(rv)) {
+ return Err(ResultTypeTraits<E>::From(rv));
+ }
+ return res;
+}
+
+template <typename T>
+struct outparam_as_pointer;
+
+template <typename T>
+struct outparam_as_pointer<T*> {
+ using type = T*;
+};
+
+template <typename T>
+struct outparam_as_reference;
+
+template <typename T>
+struct outparam_as_reference<T*> {
+ using type = T&;
+};
+
+template <typename R, typename E, template <typename> typename RArg,
+ typename Func, typename... Args>
+using to_result_retval_t =
+ decltype(std::declval<Func&>()(
+ std::declval<Args&&>()...,
+ std::declval<typename RArg<decltype(ResultRefAsParam(
+ std::declval<R&>()))>::type>()),
+ Result<R, E>(Err(ResultTypeTraits<E>::From(NS_ERROR_FAILURE))));
+
+// There are two ToResultInvokeSelector overloads, which cover the cases of a) a
+// pointer-typed output parameter, and b) a reference-typed output parameter,
+// using to_result_retval_t in connection with outparam_as_pointer and
+// outparam_as_reference type traits. These type traits may be specialized for
+// types other than raw pointers to allow calling functions with argument types
+// that implicitly convert/bind to a raw pointer/reference. The overload that is
+// used is selected by expression SFINAE: the decltype expression in
+// to_result_retval_t is only valid in either case.
+template <typename R, typename E, typename Func, typename... Args>
+auto ToResultInvokeSelector(const Func& aFunc, Args&&... aArgs)
+ -> to_result_retval_t<R, E, outparam_as_pointer, Func, Args...> {
+ return ToResultInvokeInternal<R, E>(
+ aFunc, [](R& res) -> decltype(auto) { return ResultRefAsParam(res); },
+ std::forward<Args>(aArgs)...);
+}
+
+template <typename R, typename E, typename Func, typename... Args>
+auto ToResultInvokeSelector(const Func& aFunc, Args&&... aArgs)
+ -> to_result_retval_t<R, E, outparam_as_reference, Func, Args...> {
+ return ToResultInvokeInternal<R, E>(
+ aFunc, [](R& res) -> decltype(auto) { return *ResultRefAsParam(res); },
+ std::forward<Args>(aArgs)...);
+}
+
+} // namespace detail
+
+/**
+ * Adapts a function with a nsresult error type and an R* output parameter as
+ * the last parameter to a function returning a mozilla::Result<R, nsresult>
+ * object.
+ *
+ * This can also be used with member functions together with std::mem_fn, e.g.
+ *
+ * nsCOMPtr<nsIFile> file = ...;
+ * auto existsOrErr = ToResultInvoke<bool>(std::mem_fn(&nsIFile::Exists),
+ * *file);
+ *
+ * but it is more convenient to use the member function version, which has the
+ * additional benefit of enabling the deduction of the success result type:
+ *
+ * nsCOMPtr<nsIFile> file = ...;
+ * auto existsOrErr = ToResultInvokeMember(*file, &nsIFile::Exists);
+ */
+template <typename R, typename E = nsresult, typename Func, typename... Args>
+Result<R, E> ToResultInvoke(const Func& aFunc, Args&&... aArgs) {
+ return detail::ToResultInvokeSelector<R, E, Func, Args&&...>(
+ aFunc, std::forward<Args>(aArgs)...);
+}
+
+namespace detail {
+template <typename T>
+struct tag {
+ using type = T;
+};
+
+template <typename... Ts>
+struct select_last {
+ using type = typename decltype((tag<Ts>{}, ...))::type;
+};
+
+template <typename... Ts>
+using select_last_t = typename select_last<Ts...>::type;
+
+template <>
+struct select_last<> {
+ using type = void;
+};
+
+template <typename E, typename RArg, typename T, typename Func,
+ typename... Args>
+auto ToResultInvokeMemberInternal(T& aObj, const Func& aFunc, Args&&... aArgs) {
+ if constexpr (std::is_pointer_v<RArg> ||
+ (std::is_lvalue_reference_v<RArg> &&
+ !std::is_const_v<std::remove_reference_t<RArg>>)) {
+ auto lambda = [&](RArg res) {
+ return (aObj.*aFunc)(std::forward<Args>(aArgs)..., res);
+ };
+ return detail::ToResultInvokeSelector<
+ std::remove_reference_t<std::remove_pointer_t<RArg>>, E,
+ decltype(lambda)>(lambda);
+ } else {
+ // No output parameter present, return a Result<Ok, E>
+ return mozilla::ToResult<E>((aObj.*aFunc)(std::forward<Args>(aArgs)...));
+ }
+}
+
+// For use in MOZ_TO_RESULT_INVOKE_MEMBER/MOZ_TO_RESULT_INVOKE_MEMBER_TYPED.
+template <typename T>
+auto DerefHelper(const T&) -> T&;
+
+template <typename T>
+auto DerefHelper(T*) -> T&;
+
+template <template <class> class SmartPtr, typename T,
+ typename = decltype(*std::declval<const SmartPtr<T>>())>
+auto DerefHelper(const SmartPtr<T>&) -> T&;
+
+template <typename T>
+using DerefedType =
+ std::remove_reference_t<decltype(DerefHelper(std::declval<const T&>()))>;
+} // namespace detail
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>>
+auto ToResultInvokeMember(T& aObj, nsresult (U::*aFunc)(XArgs...),
+ Args&&... aArgs) {
+ return detail::ToResultInvokeMemberInternal<E,
+ detail::select_last_t<XArgs...>>(
+ aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>>
+auto ToResultInvokeMember(const T& aObj, nsresult (U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return detail::ToResultInvokeMemberInternal<E,
+ detail::select_last_t<XArgs...>>(
+ aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args>
+auto ToResultInvokeMember(T* const aObj, nsresult (U::*aFunc)(XArgs...),
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args>
+auto ToResultInvokeMember(const T* const aObj,
+ nsresult (U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, template <class> class SmartPtr, typename T,
+ typename U, typename... XArgs, typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>,
+ typename = decltype(*std::declval<const SmartPtr<T>>())>
+auto ToResultInvokeMember(const SmartPtr<T>& aObj,
+ nsresult (U::*aFunc)(XArgs...), Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, template <class> class SmartPtr, typename T,
+ typename U, typename... XArgs, typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>,
+ typename = decltype(*std::declval<const SmartPtr<T>>())>
+auto ToResultInvokeMember(const SmartPtr<const T>& aObj,
+ nsresult (U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+#if defined(XP_WIN) && !defined(_WIN64)
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>>
+auto ToResultInvokeMember(T& aObj, nsresult (__stdcall U::*aFunc)(XArgs...),
+ Args&&... aArgs) {
+ return detail::ToResultInvokeMemberInternal<E,
+ detail::select_last_t<XArgs...>>(
+ aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>>
+auto ToResultInvokeMember(const T& aObj,
+ nsresult (__stdcall U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return detail::ToResultInvokeMemberInternal<E,
+ detail::select_last_t<XArgs...>>(
+ aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args>
+auto ToResultInvokeMember(T* const aObj,
+ nsresult (__stdcall U::*aFunc)(XArgs...),
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, typename T, typename U, typename... XArgs,
+ typename... Args>
+auto ToResultInvokeMember(const T* const aObj,
+ nsresult (__stdcall U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, template <class> class SmartPtr, typename T,
+ typename U, typename... XArgs, typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>,
+ typename = decltype(*std::declval<const SmartPtr<T>>())>
+auto ToResultInvokeMember(const SmartPtr<T>& aObj,
+ nsresult (__stdcall U::*aFunc)(XArgs...),
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+
+template <typename E = nsresult, template <class> class SmartPtr, typename T,
+ typename U, typename... XArgs, typename... Args,
+ typename = std::enable_if_t<std::is_base_of_v<U, T>>,
+ typename = decltype(*std::declval<const SmartPtr<T>>())>
+auto ToResultInvokeMember(const SmartPtr<const T>& aObj,
+ nsresult (__stdcall U::*aFunc)(XArgs...) const,
+ Args&&... aArgs) {
+ return ToResultInvokeMember<E>(*aObj, aFunc, std::forward<Args>(aArgs)...);
+}
+#endif
+
+// Macro version of ToResultInvokeMember for member functions. The macro has
+// the advantage of not requiring spelling out the member function's declarator
+// type name, at the expense of having a non-standard syntax. It can be used
+// like this:
+//
+// nsCOMPtr<nsIFile> file;
+// auto existsOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(file, Exists);
+#define MOZ_TO_RESULT_INVOKE_MEMBER(obj, methodname, ...) \
+ ::mozilla::ToResultInvokeMember( \
+ (obj), &::mozilla::detail::DerefedType<decltype(obj)>::methodname, \
+ ##__VA_ARGS__)
+
+// Macro version of ToResultInvokeMember for member functions, where the result
+// type does not match the output parameter type. The macro has the advantage
+// of not requiring spelling out the member function's declarator type name, at
+// the expense of having a non-standard syntax. It can be used like this:
+//
+// nsCOMPtr<nsIFile> file;
+// auto existsOrErr =
+// MOZ_TO_RESULT_INVOKE_MEMBER_TYPED(nsCOMPtr<nsIFile>, file, Clone);
+#define MOZ_TO_RESULT_INVOKE_MEMBER_TYPED(resultType, obj, methodname, ...) \
+ ::mozilla::ToResultInvoke<MOZ_REMOVE_PAREN(resultType)>( \
+ ::std::mem_fn( \
+ &::mozilla::detail::DerefedType<decltype(obj)>::methodname), \
+ (obj), ##__VA_ARGS__)
+
+} // namespace mozilla
+
+#endif // mozilla_ResultExtensions_h
diff --git a/mfbt/ResultVariant.h b/mfbt/ResultVariant.h
new file mode 100644
index 0000000000..790ff8d642
--- /dev/null
+++ b/mfbt/ResultVariant.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A type suitable for returning either a value or an error from a function. */
+
+#ifndef mozilla_ResultVariant_h
+#define mozilla_ResultVariant_h
+
+#include "mozilla/MaybeStorageBase.h"
+#include "mozilla/Result.h"
+#include "mozilla/Variant.h"
+
+namespace mozilla::detail {
+
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::Variant> {
+ mozilla::Variant<V, E> mStorage;
+
+ public:
+ static constexpr PackingStrategy Strategy = PackingStrategy::Variant;
+
+ ResultImplementation(ResultImplementation&&) = default;
+ ResultImplementation(const ResultImplementation&) = delete;
+ ResultImplementation& operator=(const ResultImplementation&) = delete;
+ ResultImplementation& operator=(ResultImplementation&&) = default;
+
+ explicit ResultImplementation(V&& aValue) : mStorage(std::move(aValue)) {}
+ explicit ResultImplementation(const V& aValue) : mStorage(aValue) {}
+ template <typename... Args>
+ explicit ResultImplementation(std::in_place_t, Args&&... aArgs)
+ : mStorage(VariantType<V>{}, std::forward<Args>(aArgs)...) {}
+
+ explicit ResultImplementation(const E& aErrorValue) : mStorage(aErrorValue) {}
+ explicit ResultImplementation(E&& aErrorValue)
+ : mStorage(std::move(aErrorValue)) {}
+
+ bool isOk() const { return mStorage.template is<V>(); }
+
+ // The callers of these functions will assert isOk() has the proper value, so
+ // these functions (in all ResultImplementation specializations) don't need
+ // to do so.
+ V unwrap() { return std::move(mStorage.template as<V>()); }
+ const V& inspect() const { return mStorage.template as<V>(); }
+
+ E unwrapErr() { return std::move(mStorage.template as<E>()); }
+ const E& inspectErr() const { return mStorage.template as<E>(); }
+
+ void updateAfterTracing(V&& aValue) {
+ mStorage.template emplace<V>(std::move(aValue));
+ }
+ void updateErrorAfterTracing(E&& aErrorValue) {
+ mStorage.template emplace<E>(std::move(aErrorValue));
+ }
+};
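+
+// Sketch (BigV/BigE are hypothetical): two large, default-constructible
+// payloads defeat every packed strategy in Result.h, so such a Result
+// instantiates this Variant-based fallback, and the translation unit must
+// include this header:
+//
+//   struct BigV { uint64_t a, b; };
+//   struct BigE { uint64_t x, y; };
+//   Result<BigV, BigE> r(BigV{1, 2});  // PackingStrategy::Variant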
+
+} // namespace mozilla::detail
+
+#endif // mozilla_ResultVariant_h
diff --git a/mfbt/ReverseIterator.h b/mfbt/ReverseIterator.h
new file mode 100644
index 0000000000..c9e77ffc89
--- /dev/null
+++ b/mfbt/ReverseIterator.h
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* An iterator that acts like another iterator, but iterating in
+ * the negative direction. (Note that not all iterators can iterate
+ * in the negative direction.) */
+
+#ifndef mozilla_ReverseIterator_h
+#define mozilla_ReverseIterator_h
+
+#include <utility>
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+// This should only be used in cases where std::reverse_iterator cannot be used,
+// because the underlying iterator is not a proper bidirectional iterator, but
+// rather, e.g., a stashing iterator such as IntegerIterator. It is less
+// efficient than std::reverse_iterator for proper bidirectional iterators.
+template <typename IteratorT>
+class ReverseIterator {
+ public:
+ using value_type = typename IteratorT::value_type;
+ using pointer = typename IteratorT::pointer;
+ using reference = typename IteratorT::reference;
+ using difference_type = typename IteratorT::difference_type;
+ using iterator_category = typename IteratorT::iterator_category;
+
+ explicit ReverseIterator(IteratorT aIter) : mCurrent(std::move(aIter)) {}
+
+ // The return type is not reference, but rather the return type of
+ // Iterator::operator*(), which might be value_type, to allow this to work
+ // with stashing iterators such as IntegerIterator, see also Bug 1175485.
+ decltype(*std::declval<IteratorT>()) operator*() const {
+ IteratorT tmp = mCurrent;
+ return *--tmp;
+ }
+
+ /* Difference operator */
+ difference_type operator-(const ReverseIterator& aOther) const {
+ return aOther.mCurrent - mCurrent;
+ }
+
+ /* Increments and decrements operators */
+
+ ReverseIterator& operator++() {
+ --mCurrent;
+ return *this;
+ }
+ ReverseIterator& operator--() {
+ ++mCurrent;
+ return *this;
+ }
+ ReverseIterator operator++(int) {
+ auto ret = *this;
+ mCurrent--;
+ return ret;
+ }
+ ReverseIterator operator--(int) {
+ auto ret = *this;
+ mCurrent++;
+ return ret;
+ }
+
+ /* Comparison operators */
+
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator==(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator!=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator<(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator<=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator>(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+ template <typename Iterator1, typename Iterator2>
+ friend bool operator>=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2);
+
+ private:
+ IteratorT mCurrent;
+};
+
+template <typename Iterator1, typename Iterator2>
+bool operator==(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent == aIter2.mCurrent;
+}
+
+template <typename Iterator1, typename Iterator2>
+bool operator!=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent != aIter2.mCurrent;
+}
+
+template <typename Iterator1, typename Iterator2>
+bool operator<(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent > aIter2.mCurrent;
+}
+
+template <typename Iterator1, typename Iterator2>
+bool operator<=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent >= aIter2.mCurrent;
+}
+
+template <typename Iterator1, typename Iterator2>
+bool operator>(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent < aIter2.mCurrent;
+}
+
+template <typename Iterator1, typename Iterator2>
+bool operator>=(const ReverseIterator<Iterator1>& aIter1,
+ const ReverseIterator<Iterator2>& aIter2) {
+ return aIter1.mCurrent <= aIter2.mCurrent;
+}
+
+namespace detail {
+
+template <typename IteratorT,
+ typename ReverseIteratorT = ReverseIterator<IteratorT>>
+class IteratorRange {
+ public:
+ typedef IteratorT iterator;
+ typedef IteratorT const_iterator;
+ typedef ReverseIteratorT reverse_iterator;
+ typedef ReverseIteratorT const_reverse_iterator;
+
+ IteratorRange(IteratorT aIterBegin, IteratorT aIterEnd)
+ : mIterBegin(std::move(aIterBegin)), mIterEnd(std::move(aIterEnd)) {}
+
+ iterator begin() const { return mIterBegin; }
+ const_iterator cbegin() const { return begin(); }
+ iterator end() const { return mIterEnd; }
+ const_iterator cend() const { return end(); }
+ reverse_iterator rbegin() const { return reverse_iterator(mIterEnd); }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+ reverse_iterator rend() const { return reverse_iterator(mIterBegin); }
+ const_reverse_iterator crend() const { return rend(); }
+
+ IteratorT mIterBegin;
+ IteratorT mIterEnd;
+};
+
+} // namespace detail
+
+template <typename Range>
+detail::IteratorRange<typename Range::reverse_iterator> Reversed(
+ Range& aRange) {
+ return {aRange.rbegin(), aRange.rend()};
+}
+
+template <typename Range>
+detail::IteratorRange<typename Range::const_reverse_iterator> Reversed(
+ const Range& aRange) {
+ return {aRange.rbegin(), aRange.rend()};
+}
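+
+// Usage sketch (myInts is a hypothetical container that provides the
+// reverse_iterator typedef and rbegin()/rend(), e.g. an nsTArray):
+//
+//   for (int value : Reversed(myInts)) {
+//     ...  // visits the elements back to front
+//   }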
+
+} // namespace mozilla
+
+#endif // mozilla_ReverseIterator_h
diff --git a/mfbt/RollingMean.h b/mfbt/RollingMean.h
new file mode 100644
index 0000000000..f971b1fb13
--- /dev/null
+++ b/mfbt/RollingMean.h
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Calculate the rolling mean of a series of values. */
+
+#ifndef mozilla_RollingMean_h_
+#define mozilla_RollingMean_h_
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Vector.h"
+
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+/**
+ * RollingMean<T, S> calculates a rolling mean of the values it is given. It
+ * accumulates the total as values are added and removed. The second type
+ * argument S specifies the type of the total; S may need to be wider than T
+ * to guarantee that the sum of the values in the window cannot overflow.
+ *
+ * WARNING: Float types are not supported due to rounding errors.
+ */
+template <typename T, typename S>
+class RollingMean {
+ private:
+ size_t mInsertIndex;
+ size_t mMaxValues;
+ Vector<T> mValues;
+ S mTotal;
+
+ public:
+ static_assert(!std::is_floating_point_v<T>,
+ "floating-point types are unsupported due to rounding "
+ "errors");
+
+ explicit RollingMean(size_t aMaxValues)
+ : mInsertIndex(0), mMaxValues(aMaxValues), mTotal(0) {
+ MOZ_ASSERT(aMaxValues > 0);
+ }
+
+ RollingMean& operator=(RollingMean&& aOther) = default;
+
+ /**
+ * Insert a value into the rolling mean.
+ */
+ bool insert(T aValue) {
+ MOZ_ASSERT(mValues.length() <= mMaxValues);
+
+ if (mValues.length() == mMaxValues) {
+ mTotal = mTotal - mValues[mInsertIndex] + aValue;
+ mValues[mInsertIndex] = aValue;
+ } else {
+ if (!mValues.append(aValue)) {
+ return false;
+ }
+ mTotal = mTotal + aValue;
+ }
+
+ mInsertIndex = (mInsertIndex + 1) % mMaxValues;
+ return true;
+ }
+
+ /**
+ * Calculate the rolling mean.
+ */
+ T mean() const {
+ MOZ_ASSERT(!empty());
+ return T(mTotal / int64_t(mValues.length()));
+ }
+
+ bool empty() const { return mValues.empty(); }
+
+ /**
+ * Remove all values from the rolling mean.
+ */
+ void clear() {
+ mValues.clear();
+ mInsertIndex = 0;
+    mTotal = S(0);
+ }
+
+ size_t maxValues() const { return mMaxValues; }
+};
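+
+// Usage sketch (illustrative values): a rolling mean over a window of three
+// samples; S = uint64_t comfortably holds any sum of three uint32_t values.
+//
+//   RollingMean<uint32_t, uint64_t> mean(3);
+//   mean.insert(10);
+//   mean.insert(20);
+//   mean.insert(30);
+//   MOZ_ASSERT(mean.mean() == 20);
+//   mean.insert(40);  // evicts the oldest value (10)
+//   MOZ_ASSERT(mean.mean() == 30);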
+
+} // namespace mozilla
+
+#endif // mozilla_RollingMean_h_
diff --git a/mfbt/SHA1.cpp b/mfbt/SHA1.cpp
new file mode 100644
index 0000000000..2a315c5c06
--- /dev/null
+++ b/mfbt/SHA1.cpp
@@ -0,0 +1,405 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/SHA1.h"
+
+#include <string.h>
+
+using mozilla::NativeEndian;
+using mozilla::SHA1Sum;
+
+static inline uint32_t SHA_ROTL(uint32_t aT, uint32_t aN) {
+ MOZ_ASSERT(aN < 32);
+ return (aT << aN) | (aT >> (32 - aN));
+}
+
+static void shaCompress(volatile unsigned* aX, const uint32_t* aBuf);
+
+#define SHA_F1(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
+#define SHA_F2(X, Y, Z) ((X) ^ (Y) ^ (Z))
+#define SHA_F3(X, Y, Z) (((X) & (Y)) | ((Z) & ((X) | (Y))))
+#define SHA_F4(X, Y, Z) ((X) ^ (Y) ^ (Z))
+
+#define SHA_MIX(n, a, b, c) XW(n) = SHA_ROTL(XW(a) ^ XW(b) ^ XW(c) ^ XW(n), 1)
+
+SHA1Sum::SHA1Sum() : mSize(0), mDone(false) {
+ // Initialize H with constants from FIPS180-1.
+ mH[0] = 0x67452301L;
+ mH[1] = 0xefcdab89L;
+ mH[2] = 0x98badcfeL;
+ mH[3] = 0x10325476L;
+ mH[4] = 0xc3d2e1f0L;
+}
+
+/*
+ * Explanation of H array and index values:
+ *
+ * The context's H array is actually the concatenation of two arrays
+ * defined by SHA1, the H array of state variables (5 elements),
+ * and the W array of intermediate values, of which there are 16 elements.
+ * The W array starts at H[5], that is W[0] is H[5].
+ * Although these values are defined as 32-bit values, we use 64-bit
+ * variables to hold them because AMD64 stores 64-bit values in
+ * memory MUCH faster than it stores any smaller values.
+ *
+ * Rather than passing the context structure to shaCompress, we pass
+ * this combined array of H and W values. We do not pass the address
+ * of the first element of this array, but rather pass the address of an
+ * element in the middle of the array, element X. Presently X[0] is H[11].
+ * So we pass the address of H[11] as the address of array X to shaCompress.
+ * Then shaCompress accesses the members of the array using positive AND
+ * negative indexes.
+ *
+ * Pictorially: (each element is 8 bytes)
+ * H | H0 H1 H2 H3 H4 W0 W1 W2 W3 W4 W5 W6 W7 W8 W9 Wa Wb Wc Wd We Wf |
+ * X |-11-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 |
+ *
+ * The byte offset from X[0] to any member of H and W is always
+ * representable in a signed 8-bit value, which will be encoded
+ * as a single byte offset in the X86-64 instruction set.
+ * If we didn't pass the address of H[11], and instead passed the
+ * address of H[0], the offsets to elements H[16] and above would be
+ * greater than 127, not representable in a signed 8-bit value, and the
+ * x86-64 instruction set would encode every such offset as a 32-bit
+ * signed number in each instruction that accessed element H[16] or
+ * higher. This results in much bigger and slower code.
+ */
+#define H2X 11 /* X[0] is H[11], and H[0] is X[-11] */
+#define W2X 6 /* X[0] is W[6], and W[0] is X[-6] */
+
+/*
+ * SHA: Add data to context.
+ */
+void SHA1Sum::update(const void* aData, uint32_t aLen) {
+ MOZ_ASSERT(!mDone, "SHA1Sum can only be used to compute a single hash.");
+
+ const uint8_t* data = static_cast<const uint8_t*>(aData);
+
+ if (aLen == 0) {
+ return;
+ }
+
+ /* Accumulate the byte count. */
+ unsigned int lenB = static_cast<unsigned int>(mSize) & 63U;
+
+ mSize += aLen;
+
+ /* Read the data into W and process blocks as they get full. */
+ unsigned int togo;
+ if (lenB > 0) {
+ togo = 64U - lenB;
+ if (aLen < togo) {
+ togo = aLen;
+ }
+ memcpy(mU.mB + lenB, data, togo);
+ aLen -= togo;
+ data += togo;
+ lenB = (lenB + togo) & 63U;
+ if (!lenB) {
+ shaCompress(&mH[H2X], mU.mW);
+ }
+ }
+
+ while (aLen >= 64U) {
+ aLen -= 64U;
+ shaCompress(&mH[H2X], reinterpret_cast<const uint32_t*>(data));
+ data += 64U;
+ }
+
+ if (aLen > 0) {
+ memcpy(mU.mB, data, aLen);
+ }
+}
+
+/*
+ * SHA: Generate hash value
+ */
+void SHA1Sum::finish(SHA1Sum::Hash& aHashOut) {
+ MOZ_ASSERT(!mDone, "SHA1Sum can only be used to compute a single hash.");
+
+ uint64_t size = mSize;
+ uint32_t lenB = uint32_t(size) & 63;
+
+ static const uint8_t bulk_pad[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+  /* Pad with a binary 1 (i.e. 0x80), then zeroes, then the length in bits. */
+ update(bulk_pad, (((55 + 64) - lenB) & 63) + 1);
+ MOZ_ASSERT((uint32_t(mSize) & 63) == 56);
+
+ /* Convert size from bytes to bits. */
+ size <<= 3;
+ mU.mW[14] = NativeEndian::swapToBigEndian(uint32_t(size >> 32));
+ mU.mW[15] = NativeEndian::swapToBigEndian(uint32_t(size));
+ shaCompress(&mH[H2X], mU.mW);
+
+ /* Output hash. */
+ mU.mW[0] = NativeEndian::swapToBigEndian(mH[0]);
+ mU.mW[1] = NativeEndian::swapToBigEndian(mH[1]);
+ mU.mW[2] = NativeEndian::swapToBigEndian(mH[2]);
+ mU.mW[3] = NativeEndian::swapToBigEndian(mH[3]);
+ mU.mW[4] = NativeEndian::swapToBigEndian(mH[4]);
+ memcpy(aHashOut, mU.mW, 20);
+ mDone = true;
+}
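+
+/*
+ * Usage sketch (illustrative; the declarations live in SHA1.h): feed data
+ * through one or more update() calls, then call finish() exactly once.
+ *
+ *   SHA1Sum sum;
+ *   SHA1Sum::Hash hash;
+ *   sum.update(data, len);
+ *   sum.finish(hash);  // hash now holds the 20-byte SHA-1 digest
+ */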
+
+/*
+ * SHA: Compression function, unrolled.
+ *
+ * Some operations in shaCompress are done as 5 groups of 16 operations.
+ * Others are done as 4 groups of 20 operations.
+ * The code below shows that structure.
+ *
+ * The functions that compute the new values of the 5 state variables
+ * A-E are done in 4 groups of 20 operations (or you may also think
+ * of them as being done in 16 groups of 5 operations). They are
+ * done by the SHA_RNDx macros below, in the right column.
+ *
+ * The functions that set the 16 values of the W array are done in
+ * 5 groups of 16 operations. The first group is done by the
+ * LOAD macros below, the latter 4 groups are done by SHA_MIX below,
+ * in the left column.
+ *
+ * gcc's optimizer observes that each member of the W array is assigned
+ * a value 5 times in this code. It reduces the number of store
+ * operations done to the W array in the context (that is, in the X array)
+ * by creating a W array on the stack, and storing the W values there for
+ * the first 4 groups of operations on W, and storing the values in the
+ * context's W array only in the fifth group. This is undesirable.
+ * It is MUCH bigger code than simply using the context's W array, because
+ * all the offsets to the W array in the stack are 32-bit signed offsets,
+ * and it is no faster than storing the values in the context's W array.
+ *
+ * The original code for sha_fast.c prevented this creation of a separate
+ * W array in the stack by creating a W array of 80 members, each of
+ * whose elements is assigned only once. It also separated the computations
+ * of the W array values and the computations of the values for the 5
+ * state variables into two separate passes, W's, then A-E's so that the
+ * second pass could be done all in registers (except for accessing the W
+ * array) on machines with fewer registers. The method is suboptimal
+ * for machines with enough registers to do it all in one pass, and it
+ * necessitates using many instructions with 32-bit offsets.
+ *
+ * This code eliminates the separate W array on the stack by a completely
+ * different means: by declaring the X array volatile. This prevents
+ * the optimizer from trying to reduce the use of the X array by the
+ * creation of a MORE expensive W array on the stack. The result is
+ * that all instructions use signed 8-bit offsets and not 32-bit offsets.
+ *
+ * The combination of this code and the -O3 optimizer flag on GCC 3.4.3
+ * results in code that is 3 times faster than the previous NSS sha_fast
+ * code on AMD64.
+ */
+static void shaCompress(volatile unsigned* aX, const uint32_t* aBuf) {
+ unsigned A, B, C, D, E;
+
+#define XH(n) aX[n - H2X]
+#define XW(n) aX[n - W2X]
+
+#define K0 0x5a827999L
+#define K1 0x6ed9eba1L
+#define K2 0x8f1bbcdcL
+#define K3 0xca62c1d6L
+
+#define SHA_RND1(a, b, c, d, e, n) \
+ a = SHA_ROTL(b, 5) + SHA_F1(c, d, e) + a + XW(n) + K0; \
+ c = SHA_ROTL(c, 30)
+#define SHA_RND2(a, b, c, d, e, n) \
+ a = SHA_ROTL(b, 5) + SHA_F2(c, d, e) + a + XW(n) + K1; \
+ c = SHA_ROTL(c, 30)
+#define SHA_RND3(a, b, c, d, e, n) \
+ a = SHA_ROTL(b, 5) + SHA_F3(c, d, e) + a + XW(n) + K2; \
+ c = SHA_ROTL(c, 30)
+#define SHA_RND4(a, b, c, d, e, n) \
+ a = SHA_ROTL(b, 5) + SHA_F4(c, d, e) + a + XW(n) + K3; \
+ c = SHA_ROTL(c, 30)
+
+#define LOAD(n) XW(n) = NativeEndian::swapToBigEndian(aBuf[n])
+
+ A = XH(0);
+ B = XH(1);
+ C = XH(2);
+ D = XH(3);
+ E = XH(4);
+
+ LOAD(0);
+ SHA_RND1(E, A, B, C, D, 0);
+ LOAD(1);
+ SHA_RND1(D, E, A, B, C, 1);
+ LOAD(2);
+ SHA_RND1(C, D, E, A, B, 2);
+ LOAD(3);
+ SHA_RND1(B, C, D, E, A, 3);
+ LOAD(4);
+ SHA_RND1(A, B, C, D, E, 4);
+ LOAD(5);
+ SHA_RND1(E, A, B, C, D, 5);
+ LOAD(6);
+ SHA_RND1(D, E, A, B, C, 6);
+ LOAD(7);
+ SHA_RND1(C, D, E, A, B, 7);
+ LOAD(8);
+ SHA_RND1(B, C, D, E, A, 8);
+ LOAD(9);
+ SHA_RND1(A, B, C, D, E, 9);
+ LOAD(10);
+ SHA_RND1(E, A, B, C, D, 10);
+ LOAD(11);
+ SHA_RND1(D, E, A, B, C, 11);
+ LOAD(12);
+ SHA_RND1(C, D, E, A, B, 12);
+ LOAD(13);
+ SHA_RND1(B, C, D, E, A, 13);
+ LOAD(14);
+ SHA_RND1(A, B, C, D, E, 14);
+ LOAD(15);
+ SHA_RND1(E, A, B, C, D, 15);
+
+ SHA_MIX(0, 13, 8, 2);
+ SHA_RND1(D, E, A, B, C, 0);
+ SHA_MIX(1, 14, 9, 3);
+ SHA_RND1(C, D, E, A, B, 1);
+ SHA_MIX(2, 15, 10, 4);
+ SHA_RND1(B, C, D, E, A, 2);
+ SHA_MIX(3, 0, 11, 5);
+ SHA_RND1(A, B, C, D, E, 3);
+
+ SHA_MIX(4, 1, 12, 6);
+ SHA_RND2(E, A, B, C, D, 4);
+ SHA_MIX(5, 2, 13, 7);
+ SHA_RND2(D, E, A, B, C, 5);
+ SHA_MIX(6, 3, 14, 8);
+ SHA_RND2(C, D, E, A, B, 6);
+ SHA_MIX(7, 4, 15, 9);
+ SHA_RND2(B, C, D, E, A, 7);
+ SHA_MIX(8, 5, 0, 10);
+ SHA_RND2(A, B, C, D, E, 8);
+ SHA_MIX(9, 6, 1, 11);
+ SHA_RND2(E, A, B, C, D, 9);
+ SHA_MIX(10, 7, 2, 12);
+ SHA_RND2(D, E, A, B, C, 10);
+ SHA_MIX(11, 8, 3, 13);
+ SHA_RND2(C, D, E, A, B, 11);
+ SHA_MIX(12, 9, 4, 14);
+ SHA_RND2(B, C, D, E, A, 12);
+ SHA_MIX(13, 10, 5, 15);
+ SHA_RND2(A, B, C, D, E, 13);
+ SHA_MIX(14, 11, 6, 0);
+ SHA_RND2(E, A, B, C, D, 14);
+ SHA_MIX(15, 12, 7, 1);
+ SHA_RND2(D, E, A, B, C, 15);
+
+ SHA_MIX(0, 13, 8, 2);
+ SHA_RND2(C, D, E, A, B, 0);
+ SHA_MIX(1, 14, 9, 3);
+ SHA_RND2(B, C, D, E, A, 1);
+ SHA_MIX(2, 15, 10, 4);
+ SHA_RND2(A, B, C, D, E, 2);
+ SHA_MIX(3, 0, 11, 5);
+ SHA_RND2(E, A, B, C, D, 3);
+ SHA_MIX(4, 1, 12, 6);
+ SHA_RND2(D, E, A, B, C, 4);
+ SHA_MIX(5, 2, 13, 7);
+ SHA_RND2(C, D, E, A, B, 5);
+ SHA_MIX(6, 3, 14, 8);
+ SHA_RND2(B, C, D, E, A, 6);
+ SHA_MIX(7, 4, 15, 9);
+ SHA_RND2(A, B, C, D, E, 7);
+
+ SHA_MIX(8, 5, 0, 10);
+ SHA_RND3(E, A, B, C, D, 8);
+ SHA_MIX(9, 6, 1, 11);
+ SHA_RND3(D, E, A, B, C, 9);
+ SHA_MIX(10, 7, 2, 12);
+ SHA_RND3(C, D, E, A, B, 10);
+ SHA_MIX(11, 8, 3, 13);
+ SHA_RND3(B, C, D, E, A, 11);
+ SHA_MIX(12, 9, 4, 14);
+ SHA_RND3(A, B, C, D, E, 12);
+ SHA_MIX(13, 10, 5, 15);
+ SHA_RND3(E, A, B, C, D, 13);
+ SHA_MIX(14, 11, 6, 0);
+ SHA_RND3(D, E, A, B, C, 14);
+ SHA_MIX(15, 12, 7, 1);
+ SHA_RND3(C, D, E, A, B, 15);
+
+ SHA_MIX(0, 13, 8, 2);
+ SHA_RND3(B, C, D, E, A, 0);
+ SHA_MIX(1, 14, 9, 3);
+ SHA_RND3(A, B, C, D, E, 1);
+ SHA_MIX(2, 15, 10, 4);
+ SHA_RND3(E, A, B, C, D, 2);
+ SHA_MIX(3, 0, 11, 5);
+ SHA_RND3(D, E, A, B, C, 3);
+ SHA_MIX(4, 1, 12, 6);
+ SHA_RND3(C, D, E, A, B, 4);
+ SHA_MIX(5, 2, 13, 7);
+ SHA_RND3(B, C, D, E, A, 5);
+ SHA_MIX(6, 3, 14, 8);
+ SHA_RND3(A, B, C, D, E, 6);
+ SHA_MIX(7, 4, 15, 9);
+ SHA_RND3(E, A, B, C, D, 7);
+ SHA_MIX(8, 5, 0, 10);
+ SHA_RND3(D, E, A, B, C, 8);
+ SHA_MIX(9, 6, 1, 11);
+ SHA_RND3(C, D, E, A, B, 9);
+ SHA_MIX(10, 7, 2, 12);
+ SHA_RND3(B, C, D, E, A, 10);
+ SHA_MIX(11, 8, 3, 13);
+ SHA_RND3(A, B, C, D, E, 11);
+
+ SHA_MIX(12, 9, 4, 14);
+ SHA_RND4(E, A, B, C, D, 12);
+ SHA_MIX(13, 10, 5, 15);
+ SHA_RND4(D, E, A, B, C, 13);
+ SHA_MIX(14, 11, 6, 0);
+ SHA_RND4(C, D, E, A, B, 14);
+ SHA_MIX(15, 12, 7, 1);
+ SHA_RND4(B, C, D, E, A, 15);
+
+ SHA_MIX(0, 13, 8, 2);
+ SHA_RND4(A, B, C, D, E, 0);
+ SHA_MIX(1, 14, 9, 3);
+ SHA_RND4(E, A, B, C, D, 1);
+ SHA_MIX(2, 15, 10, 4);
+ SHA_RND4(D, E, A, B, C, 2);
+ SHA_MIX(3, 0, 11, 5);
+ SHA_RND4(C, D, E, A, B, 3);
+ SHA_MIX(4, 1, 12, 6);
+ SHA_RND4(B, C, D, E, A, 4);
+ SHA_MIX(5, 2, 13, 7);
+ SHA_RND4(A, B, C, D, E, 5);
+ SHA_MIX(6, 3, 14, 8);
+ SHA_RND4(E, A, B, C, D, 6);
+ SHA_MIX(7, 4, 15, 9);
+ SHA_RND4(D, E, A, B, C, 7);
+ SHA_MIX(8, 5, 0, 10);
+ SHA_RND4(C, D, E, A, B, 8);
+ SHA_MIX(9, 6, 1, 11);
+ SHA_RND4(B, C, D, E, A, 9);
+ SHA_MIX(10, 7, 2, 12);
+ SHA_RND4(A, B, C, D, E, 10);
+ SHA_MIX(11, 8, 3, 13);
+ SHA_RND4(E, A, B, C, D, 11);
+ SHA_MIX(12, 9, 4, 14);
+ SHA_RND4(D, E, A, B, C, 12);
+ SHA_MIX(13, 10, 5, 15);
+ SHA_RND4(C, D, E, A, B, 13);
+ SHA_MIX(14, 11, 6, 0);
+ SHA_RND4(B, C, D, E, A, 14);
+ SHA_MIX(15, 12, 7, 1);
+ SHA_RND4(A, B, C, D, E, 15);
+
+ XH(0) = XH(0) + A;
+ XH(1) = XH(1) + B;
+ XH(2) = XH(2) + C;
+ XH(3) = XH(3) + D;
+ XH(4) = XH(4) + E;
+}
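
The SHA_MIX rounds above unroll the standard SHA-1 message-schedule recurrence,
W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]), over a 16-word circular
buffer: each SHA_MIX(n, a, b, c) updates slot n == t % 16 in place. A minimal,
unoptimized sketch of the same recurrence, for reference only; the helper names
`Rotl32` and `MixScheduleWord` are illustrative and not part of this file:

    #include <stdint.h>

    // Rotate a 32-bit word left by aBits (0 < aBits < 32).
    static inline uint32_t Rotl32(uint32_t aValue, unsigned aBits) {
      return (aValue << aBits) | (aValue >> (32 - aBits));
    }

    // Compute schedule word t (16 <= t < 80) in a 16-word circular buffer.
    // Note (t - 16) % 16 == t % 16, so the old word at slot t % 16 is one of
    // the four XOR inputs, matching SHA_MIX(n, a, b, c) above.
    static void MixScheduleWord(uint32_t aW[16], unsigned aT) {
      aW[aT % 16] = Rotl32(aW[(aT - 3) % 16] ^ aW[(aT - 8) % 16] ^
                               aW[(aT - 14) % 16] ^ aW[aT % 16],
                           1);
    }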
diff --git a/mfbt/SHA1.h b/mfbt/SHA1.h
new file mode 100644
index 0000000000..1c1bd99a5c
--- /dev/null
+++ b/mfbt/SHA1.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Simple class for computing SHA1. */
+
+#ifndef mozilla_SHA1_h
+#define mozilla_SHA1_h
+
+#include "mozilla/Types.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * This class computes the SHA1 hash of a byte sequence, or of the concatenation
+ * of multiple sequences. For example, computing the SHA1 of two sequences of
+ * bytes could be done as follows:
+ *
+ * void SHA1(const uint8_t* buf1, uint32_t size1,
+ * const uint8_t* buf2, uint32_t size2,
+ * SHA1Sum::Hash& hash)
+ * {
+ * SHA1Sum s;
+ * s.update(buf1, size1);
+ * s.update(buf2, size2);
+ * s.finish(hash);
+ * }
+ *
+ * The finish method may only be called once and cannot be followed by calls
+ * to update.
+ */
+class SHA1Sum {
+ union {
+ uint32_t mW[16]; /* input buffer */
+ uint8_t mB[64];
+ } mU;
+ uint64_t mSize; /* count of hashed bytes. */
+ unsigned mH[22]; /* 5 state variables, 16 tmp values, 1 extra */
+ bool mDone;
+
+ public:
+ MFBT_API SHA1Sum();
+
+ static const size_t kHashSize = 20;
+ typedef uint8_t Hash[kHashSize];
+
+  /* Add aLength bytes of aData to the data sequence being hashed. */
+ MFBT_API void update(const void* aData, uint32_t aLength);
+
+  /* Compute the final hash of all data into aHashOut. */
+ MFBT_API void finish(SHA1Sum::Hash& aHashOut);
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_SHA1_h */
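
A minimal usage sketch for this header. The expected digest is the well-known
FIPS 180 test vector SHA-1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d;
the standalone main() is illustrative only:

    #include "mozilla/SHA1.h"

    #include <stdio.h>

    int main() {
      static const uint8_t kInput[] = {'a', 'b', 'c'};

      mozilla::SHA1Sum sum;
      sum.update(kInput, sizeof(kInput));

      mozilla::SHA1Sum::Hash hash;
      sum.finish(hash);  // No further update() calls are allowed after this.

      for (size_t i = 0; i < mozilla::SHA1Sum::kHashSize; i++) {
        printf("%02x", hash[i]);
      }
      printf("\n");  // a9993e364706816aba3e25717850c26c9cd0d89d
      return 0;
    }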
diff --git a/mfbt/SPSCQueue.h b/mfbt/SPSCQueue.h
new file mode 100644
index 0000000000..bd4223d70a
--- /dev/null
+++ b/mfbt/SPSCQueue.h
@@ -0,0 +1,420 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Single producer single consumer lock-free and wait-free queue. */
+
+#ifndef mozilla_LockFreeQueue_h
+#define mozilla_LockFreeQueue_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/PodOperations.h"
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <thread>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+template <typename T, bool IsPod = std::is_trivial<T>::value>
+struct MemoryOperations {
+ /**
+ * This allows zeroing (using memset) or default-constructing a number of
+   * elements, calling the constructors if necessary.
+ */
+ static void ConstructDefault(T* aDestination, size_t aCount);
+ /**
+ * This allows either moving (if T supports it) or copying a number of
+   * elements from `aSource` to `aDestination`. If it is safe to do so, the
+   * copy uses PodCopy; otherwise, the elements are moved or copied one at a
+   * time in a loop.
+ */
+ static void MoveOrCopy(T* aDestination, T* aSource, size_t aCount);
+};
+
+template <typename T>
+struct MemoryOperations<T, true> {
+ static void ConstructDefault(T* aDestination, size_t aCount) {
+ PodZero(aDestination, aCount);
+ }
+ static void MoveOrCopy(T* aDestination, T* aSource, size_t aCount) {
+ PodCopy(aDestination, aSource, aCount);
+ }
+};
+
+template <typename T>
+struct MemoryOperations<T, false> {
+ static void ConstructDefault(T* aDestination, size_t aCount) {
+ for (size_t i = 0; i < aCount; i++) {
+ aDestination[i] = T();
+ }
+ }
+ static void MoveOrCopy(T* aDestination, T* aSource, size_t aCount) {
+ std::move(aSource, aSource + aCount, aDestination);
+ }
+};
+} // namespace detail
+
+/**
+ * This data structure allows producing data from one thread, and consuming it
+ * on another thread, safely and without explicit synchronization.
+ *
+ * The roles of the producer and the consumer must be constant, i.e., the
+ * producer should always be on one thread and the consumer should always be on
+ * another thread.
+ *
+ * Some words about the inner workings of this class:
+ * - Capacity is fixed. Only one allocation is performed, in the constructor.
+ * When reading and writing, the return value of the method allows checking if
+ * the ring buffer is empty or full.
+ * - The storage has one extra slot beyond the requested capacity, so we can
+ *   distinguish between an empty and a full ring buffer: an empty ring buffer
+ *   is when the write index is at the same position as the read index. A full
+ *   buffer is when the write index is exactly one position before the read
+ *   index.
+ * - We synchronize updates to the read index after having read the data, and
+ *   the write index after having written the data. This means that each
+ * thread can only touch a portion of the buffer that is not touched by the
+ * other thread.
+ * - Callers are expected to provide buffers. When writing to the queue,
+ * elements are copied into the internal storage from the buffer passed in.
+ * When reading from the queue, the user is expected to provide a buffer.
+ * Because this is a ring buffer, data might not be contiguous in memory;
+ * providing an external buffer to copy into is an easy way to have linear
+ * data for further processing.
+ */
+template <typename T>
+class SPSCRingBufferBase {
+ public:
+ /**
+ * Constructor for a ring buffer.
+ *
+ * This performs an allocation on the heap, but is the only allocation that
+ * will happen for the life time of a `SPSCRingBufferBase`.
+ *
+   * @param aCapacity The maximum number of elements this ring buffer will
+   * hold.
+ */
+ explicit SPSCRingBufferBase(int aCapacity)
+ : mReadIndex(0),
+ mWriteIndex(0),
+ /* One more element to distinguish from empty and full buffer. */
+ mCapacity(aCapacity + 1) {
+ MOZ_RELEASE_ASSERT(aCapacity != std::numeric_limits<int>::max());
+ MOZ_RELEASE_ASSERT(mCapacity > 0);
+
+ mData = std::make_unique<T[]>(StorageCapacity());
+
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ }
+ /**
+   * Push `aCount` zero- or default-constructed elements into the ring buffer.
+ *
+ * Only safely called on the producer thread.
+ *
+   * @param aCount The number of elements to enqueue.
+   * @return The number of elements enqueued.
+ */
+ [[nodiscard]] int EnqueueDefault(int aCount) {
+ return Enqueue(nullptr, aCount);
+ }
+ /**
+ * @brief Put an element in the queue.
+ *
+ * Only safely called on the producer thread.
+ *
+   * @param aElement The element to put in the queue.
+ *
+ * @return 1 if the element was inserted, 0 otherwise.
+ */
+ [[nodiscard]] int Enqueue(T& aElement) { return Enqueue(&aElement, 1); }
+ /**
+ * Push `aCount` elements in the ring buffer.
+ *
+ * Only safely called on the producer thread.
+ *
+   * @param aElements A pointer to a buffer containing at least `aCount`
+   * elements. If `aElements` is nullptr, zero- or default-constructed elements
+   * are enqueued.
+   * @param aCount The number of elements to read from `aElements`.
+   * @return The number of elements successfully copied from `aElements` and
+   * inserted into the ring buffer.
+ */
+ [[nodiscard]] int Enqueue(T* aElements, int aCount) {
+#ifdef DEBUG
+ AssertCorrectThread(mProducerId);
+#endif
+
+ int rdIdx = mReadIndex.load(std::memory_order_acquire);
+ int wrIdx = mWriteIndex.load(std::memory_order_relaxed);
+
+ if (IsFull(rdIdx, wrIdx)) {
+ return 0;
+ }
+
+ int toWrite = std::min(AvailableWriteInternal(rdIdx, wrIdx), aCount);
+
+ /* First part, from the write index to the end of the array. */
+ int firstPart = std::min(StorageCapacity() - wrIdx, toWrite);
+ /* Second part, from the beginning of the array */
+ int secondPart = toWrite - firstPart;
+
+ if (aElements) {
+ detail::MemoryOperations<T>::MoveOrCopy(mData.get() + wrIdx, aElements,
+ firstPart);
+ detail::MemoryOperations<T>::MoveOrCopy(
+ mData.get(), aElements + firstPart, secondPart);
+ } else {
+ detail::MemoryOperations<T>::ConstructDefault(mData.get() + wrIdx,
+ firstPart);
+ detail::MemoryOperations<T>::ConstructDefault(mData.get(), secondPart);
+ }
+
+ mWriteIndex.store(IncrementIndex(wrIdx, toWrite),
+ std::memory_order_release);
+
+ return toWrite;
+ }
+ /**
+ * Retrieve at most `count` elements from the ring buffer, and copy them to
+ * `elements`, if non-null.
+ *
+ * Only safely called on the consumer side.
+ *
+ * @param elements A pointer to a buffer with space for at least `count`
+   * elements. If `elements` is `nullptr`, `count` elements will be discarded.
+   * @param count The maximum number of elements to dequeue.
+ * @return The number of elements written to `elements`.
+ */
+ [[nodiscard]] int Dequeue(T* elements, int count) {
+#ifdef DEBUG
+ AssertCorrectThread(mConsumerId);
+#endif
+
+ int wrIdx = mWriteIndex.load(std::memory_order_acquire);
+ int rdIdx = mReadIndex.load(std::memory_order_relaxed);
+
+ if (IsEmpty(rdIdx, wrIdx)) {
+ return 0;
+ }
+
+ int toRead = std::min(AvailableReadInternal(rdIdx, wrIdx), count);
+
+ int firstPart = std::min(StorageCapacity() - rdIdx, toRead);
+ int secondPart = toRead - firstPart;
+
+ if (elements) {
+ detail::MemoryOperations<T>::MoveOrCopy(elements, mData.get() + rdIdx,
+ firstPart);
+ detail::MemoryOperations<T>::MoveOrCopy(elements + firstPart, mData.get(),
+ secondPart);
+ }
+
+ mReadIndex.store(IncrementIndex(rdIdx, toRead), std::memory_order_release);
+
+ return toRead;
+ }
+ /**
+ * Get the number of available elements for consuming.
+ *
+   * This can be less than the actual number of elements in the queue, because
+   * mWriteIndex is only updated at the very end of the Enqueue method on the
+   * producer thread. Consequently, this always returns a number of elements
+   * such that a subsequent call to Dequeue can return this many elements.
+ *
+ * @return The number of available elements for reading.
+ */
+ int AvailableRead() const {
+ return AvailableReadInternal(mReadIndex.load(std::memory_order_relaxed),
+ mWriteIndex.load(std::memory_order_relaxed));
+ }
+ /**
+ * Get the number of available elements for writing.
+ *
+   * This can be less than the actual number of slots that are available,
+   * because mReadIndex is only updated at the very end of the Dequeue method.
+   * It always returns a number such that a call to Enqueue with this number
+   * will succeed in enqueuing this number of elements.
+ *
+ * @return The number of empty slots in the buffer, available for writing.
+ */
+ int AvailableWrite() const {
+ return AvailableWriteInternal(mReadIndex.load(std::memory_order_relaxed),
+ mWriteIndex.load(std::memory_order_relaxed));
+ }
+ /**
+   * Get the total capacity of this ring buffer.
+   *
+   * Can be called safely on any thread.
+   *
+   * @return The maximum capacity of this ring buffer.
+ */
+ int Capacity() const { return StorageCapacity() - 1; }
+
+ /**
+ * Reset the consumer thread id to the current thread. The caller must
+ * guarantee that the last call to Dequeue() on the previous consumer thread
+ * has completed, and subsequent calls to Dequeue() will only happen on the
+ * current thread.
+ */
+ void ResetConsumerThreadId() {
+#ifdef DEBUG
+ mConsumerId = std::this_thread::get_id();
+#endif
+
+ // When changing consumer from thread A to B, the last Dequeue on A (synced
+ // by mReadIndex.store with memory_order_release) must be picked up by B
+ // through an acquire operation.
+ std::ignore = mReadIndex.load(std::memory_order_acquire);
+ }
+
+ /**
+ * Reset the producer thread id to the current thread. The caller must
+   * guarantee that the last call to Enqueue() on the previous producer thread
+   * has completed, and subsequent calls to Enqueue() will only happen on the
+ * current thread.
+ */
+ void ResetProducerThreadId() {
+#ifdef DEBUG
+ mProducerId = std::this_thread::get_id();
+#endif
+
+ // When changing producer from thread A to B, the last Enqueue on A (synced
+ // by mWriteIndex.store with memory_order_release) must be picked up by B
+ // through an acquire operation.
+ std::ignore = mWriteIndex.load(std::memory_order_acquire);
+ }
+
+ private:
+ /** Return true if the ring buffer is empty.
+ *
+ * This can be called from the consumer or the producer thread.
+ *
+ * @param aReadIndex the read index to consider
+   * @param aWriteIndex the write index to consider
+ * @return true if the ring buffer is empty, false otherwise.
+ **/
+ bool IsEmpty(int aReadIndex, int aWriteIndex) const {
+ return aWriteIndex == aReadIndex;
+ }
+ /** Return true if the ring buffer is full.
+ *
+ * This happens if the write index is exactly one element behind the read
+ * index.
+ *
+   * This can be called from the consumer or the producer thread.
+   *
+   * @param aReadIndex the read index to consider
+   * @param aWriteIndex the write index to consider
+ * @return true if the ring buffer is full, false otherwise.
+ **/
+ bool IsFull(int aReadIndex, int aWriteIndex) const {
+ return (aWriteIndex + 1) % StorageCapacity() == aReadIndex;
+ }
+ /**
+ * Return the size of the storage. It is one more than the number of elements
+ * that can be stored in the buffer.
+ *
+ * This can be called from any thread.
+ *
+   * @return the size of the storage, in elements.
+ */
+ int StorageCapacity() const { return mCapacity; }
+ /**
+ * Returns the number of elements available for reading.
+ *
+   * This can be called from the consumer or producer thread, but see the
+ * comment in `AvailableRead`.
+ *
+ * @return the number of available elements for reading.
+ */
+ int AvailableReadInternal(int aReadIndex, int aWriteIndex) const {
+ if (aWriteIndex >= aReadIndex) {
+ return aWriteIndex - aReadIndex;
+ } else {
+ return aWriteIndex + StorageCapacity() - aReadIndex;
+ }
+ }
+ /**
+ * Returns the number of empty elements, available for writing.
+ *
+   * This can be called from the consumer or producer thread, but see the
+ * comment in `AvailableWrite`.
+ *
+ * @return the number of elements that can be written into the array.
+ */
+ int AvailableWriteInternal(int aReadIndex, int aWriteIndex) const {
+ /* We subtract one element here to always keep at least one sample
+ * free in the buffer, to distinguish between full and empty array. */
+ int rv = aReadIndex - aWriteIndex - 1;
+ if (aWriteIndex >= aReadIndex) {
+ rv += StorageCapacity();
+ }
+ return rv;
+ }
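+  /* Illustrative worked example: with StorageCapacity() == 8, aReadIndex == 2
+   * and aWriteIndex == 6, rv starts at 2 - 6 - 1 == -5; since the write index
+   * is at or past the read index we add 8, leaving 3 writable slots (indices
+   * 6, 7 and 0). Slot 1 stays free so that a full buffer never looks empty. */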
+ /**
+ * Increments an index, wrapping it around the storage.
+ *
+ * Incrementing `mWriteIndex` can be done on the producer thread.
+   * Incrementing `mReadIndex` can be done on the consumer thread.
+   *
+   * @param aIndex the index to increment.
+   * @param aIncrement the number by which `aIndex` is incremented.
+ * @return the new index.
+ */
+ int IncrementIndex(int aIndex, int aIncrement) const {
+ MOZ_ASSERT(aIncrement >= 0 && aIncrement < StorageCapacity() &&
+ aIndex < StorageCapacity());
+ return (aIndex + aIncrement) % StorageCapacity();
+ }
+ /**
+   * @brief This allows checking that Enqueue (resp. Dequeue) is always
+   * called by the right thread.
+   *
+   * The roles of the threads are assigned the first time they call Enqueue or
+   * Dequeue, and cannot change, except through the ResetProducerThreadId and
+   * ResetConsumerThreadId methods.
+   *
+   * @param aId the id of the thread that called the method first.
+ */
+#ifdef DEBUG
+ static void AssertCorrectThread(std::thread::id& aId) {
+ if (aId == std::thread::id()) {
+ aId = std::this_thread::get_id();
+ return;
+ }
+ MOZ_ASSERT(aId == std::this_thread::get_id());
+ }
+#endif
+ /** Index at which the oldest element is. */
+ std::atomic<int> mReadIndex;
+  /** Index at which to write new elements. At least one slot is always kept
+   * free, so writing never advances `mWriteIndex` onto `mReadIndex`. */
+  std::atomic<int> mWriteIndex;
+  /** Size of the storage: one more than the maximum number of elements that
+   * can be stored in the ring buffer. */
+  const int mCapacity;
+  /** Data storage of `mCapacity` elements, i.e. the requested capacity + 1. */
+ std::unique_ptr<T[]> mData;
+#ifdef DEBUG
+ /** The id of the only thread that is allowed to read from the queue. */
+ mutable std::thread::id mConsumerId;
+  /** The id of the only thread that is allowed to write to the queue. */
+ mutable std::thread::id mProducerId;
+#endif
+};
+
+/**
+ * Alias for the `SPSCRingBufferBase` type. This is safe to use
+ * from two threads, one producer, one consumer (that never change role),
+ * without explicit synchronization.
+ */
+template <typename T>
+using SPSCQueue = SPSCRingBufferBase<T>;
+
+} // namespace mozilla
+
+#endif // mozilla_LockFreeQueue_h
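
A minimal producer/consumer sketch for this header; the element type, capacity
and iteration count are illustrative. Note that Enqueue and Dequeue never
block: when the queue is full (resp. empty) they return 0 and the caller
decides what to do, here by yielding:

    #include "mozilla/SPSCQueue.h"

    #include <thread>

    int main() {
      mozilla::SPSCQueue<int> queue(128);

      std::thread producer([&] {
        for (int i = 0; i < 1000; i++) {
          int value = i;
          while (queue.Enqueue(value) == 0) {
            std::this_thread::yield();  // Queue full; let the consumer run.
          }
        }
      });

      std::thread consumer([&] {
        int received = 0;
        while (received < 1000) {
          int value;
          received += queue.Dequeue(&value, 1);  // 0 when the queue is empty.
        }
      });

      producer.join();
      consumer.join();
      return 0;
    }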
diff --git a/mfbt/STYLE b/mfbt/STYLE
new file mode 100644
index 0000000000..43bf809d52
--- /dev/null
+++ b/mfbt/STYLE
@@ -0,0 +1,11 @@
+MFBT uses standard Mozilla style, with the following exceptions.
+
+- Some of the files use a lower-case letter at the start of function names.
+ This is because MFBT used to use a different style, and was later converted
+ to standard Mozilla style. These functions have not been changed to use an
+ upper-case letter because it would cause a lot of churn in other parts of the
+ codebase. However, new files should follow standard Mozilla style and use an
+ upper-case letter at the start of function names.
+
+- Imported third-party code (such as decimal/*, double-conversion/source/*, and
+ lz4/*) remains in its original style.
diff --git a/mfbt/Saturate.h b/mfbt/Saturate.h
new file mode 100644
index 0000000000..777e326934
--- /dev/null
+++ b/mfbt/Saturate.h
@@ -0,0 +1,248 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Provides saturation arithmetic for scalar types. */
+
+#ifndef mozilla_Saturate_h
+#define mozilla_Saturate_h
+
+#include <limits>
+#include <stdint.h>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+namespace detail {
+
+/**
+ * |SaturateOp<T>| wraps scalar values for saturation arithmetic. Usage:
+ *
+ * uint32_t value = 1;
+ *
+ * ++SaturateOp<uint32_t>(value); // value is 2
+ * --SaturateOp<uint32_t>(value); // value is 1
+ * --SaturateOp<uint32_t>(value); // value is 0
+ * --SaturateOp<uint32_t>(value); // value is still 0
+ *
+ * Please add new operators when required.
+ *
+ * |SaturateOp<T>| will saturate at the minimum and maximum values of
+ * type T. If you need other bounds, implement a clamped-type class and
+ * specialize the type traits accordingly.
+ */
+template <typename T>
+class SaturateOp {
+ public:
+ explicit SaturateOp(T& aValue) : mValue(aValue) {
+ // We should actually check for |std::is_scalar<T>::value| to be
+ // true, but this type trait is not available everywhere. Relax
+ // this assertion if you want to use floating point values as well.
+ static_assert(std::is_integral_v<T>,
+ "Integral type required in instantiation");
+ }
+
+ // Add and subtract operators
+
+ T operator+(const T& aRhs) const { return T(mValue) += aRhs; }
+
+ T operator-(const T& aRhs) const { return T(mValue) -= aRhs; }
+
+ // Compound operators
+
+ const T& operator+=(const T& aRhs) const {
+ const T min = std::numeric_limits<T>::min();
+ const T max = std::numeric_limits<T>::max();
+
+ if (aRhs > static_cast<T>(0)) {
+ mValue = (max - aRhs) < mValue ? max : mValue + aRhs;
+ } else {
+ mValue = (min - aRhs) > mValue ? min : mValue + aRhs;
+ }
+ return mValue;
+ }
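+  /* Illustrative worked example for uint8_t: with mValue == 250 and
+   * aRhs == 10, max - aRhs == 245 < 250, so the sum would overflow and
+   * mValue saturates at max == 255 instead of wrapping around to 4. */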
+
+ const T& operator-=(const T& aRhs) const {
+ const T min = std::numeric_limits<T>::min();
+ const T max = std::numeric_limits<T>::max();
+
+ if (aRhs > static_cast<T>(0)) {
+ mValue = (min + aRhs) > mValue ? min : mValue - aRhs;
+ } else {
+ mValue = (max + aRhs) < mValue ? max : mValue - aRhs;
+ }
+ return mValue;
+ }
+
+ // Increment and decrement operators
+
+ const T& operator++() const // prefix
+ {
+ return operator+=(static_cast<T>(1));
+ }
+
+ T operator++(int) const // postfix
+ {
+ const T value(mValue);
+ operator++();
+ return value;
+ }
+
+ const T& operator--() const // prefix
+ {
+ return operator-=(static_cast<T>(1));
+ }
+
+ T operator--(int) const // postfix
+ {
+ const T value(mValue);
+ operator--();
+ return value;
+ }
+
+ private:
+ SaturateOp(const SaturateOp<T>&) = delete;
+ SaturateOp(SaturateOp<T>&&) = delete;
+ SaturateOp& operator=(const SaturateOp<T>&) = delete;
+ SaturateOp& operator=(SaturateOp<T>&&) = delete;
+
+ T& mValue;
+};
+
+/**
+ * |Saturate<T>| is a value type for saturation arithmetic. It's
+ * built on top of |SaturateOp<T>|.
+ */
+template <typename T>
+class Saturate {
+ public:
+ Saturate() = default;
+ MOZ_IMPLICIT Saturate(const Saturate<T>&) = default;
+
+ MOZ_IMPLICIT Saturate(Saturate<T>&& aValue) {
+ mValue = std::move(aValue.mValue);
+ }
+
+ explicit Saturate(const T& aValue) : mValue(aValue) {}
+
+ const T& value() const { return mValue; }
+
+ // Compare operators
+
+ bool operator==(const Saturate<T>& aRhs) const {
+ return mValue == aRhs.mValue;
+ }
+
+ bool operator!=(const Saturate<T>& aRhs) const { return !operator==(aRhs); }
+
+ bool operator==(const T& aRhs) const { return mValue == aRhs; }
+
+ bool operator!=(const T& aRhs) const { return !operator==(aRhs); }
+
+ // Assignment operators
+
+ Saturate<T>& operator=(const Saturate<T>&) = default;
+
+ Saturate<T>& operator=(Saturate<T>&& aRhs) {
+ mValue = std::move(aRhs.mValue);
+ return *this;
+ }
+
+ // Add and subtract operators
+
+ Saturate<T> operator+(const Saturate<T>& aRhs) const {
+ Saturate<T> lhs(mValue);
+ return lhs += aRhs.mValue;
+ }
+
+ Saturate<T> operator+(const T& aRhs) const {
+ Saturate<T> lhs(mValue);
+ return lhs += aRhs;
+ }
+
+ Saturate<T> operator-(const Saturate<T>& aRhs) const {
+ Saturate<T> lhs(mValue);
+ return lhs -= aRhs.mValue;
+ }
+
+ Saturate<T> operator-(const T& aRhs) const {
+ Saturate<T> lhs(mValue);
+ return lhs -= aRhs;
+ }
+
+ // Compound operators
+
+ Saturate<T>& operator+=(const Saturate<T>& aRhs) {
+ SaturateOp<T>(mValue) += aRhs.mValue;
+ return *this;
+ }
+
+ Saturate<T>& operator+=(const T& aRhs) {
+ SaturateOp<T>(mValue) += aRhs;
+ return *this;
+ }
+
+ Saturate<T>& operator-=(const Saturate<T>& aRhs) {
+ SaturateOp<T>(mValue) -= aRhs.mValue;
+ return *this;
+ }
+
+ Saturate<T>& operator-=(const T& aRhs) {
+ SaturateOp<T>(mValue) -= aRhs;
+ return *this;
+ }
+
+ // Increment and decrement operators
+
+ Saturate<T>& operator++() // prefix
+ {
+ ++SaturateOp<T>(mValue);
+ return *this;
+ }
+
+ Saturate<T> operator++(int) // postfix
+ {
+ return Saturate<T>(SaturateOp<T>(mValue)++);
+ }
+
+ Saturate<T>& operator--() // prefix
+ {
+ --SaturateOp<T>(mValue);
+ return *this;
+ }
+
+ Saturate<T> operator--(int) // postfix
+ {
+ return Saturate<T>(SaturateOp<T>(mValue)--);
+ }
+
+ private:
+ T mValue;
+};
+
+} // namespace detail
+
+typedef detail::Saturate<int8_t> SaturateInt8;
+typedef detail::Saturate<int16_t> SaturateInt16;
+typedef detail::Saturate<int32_t> SaturateInt32;
+typedef detail::Saturate<uint8_t> SaturateUint8;
+typedef detail::Saturate<uint16_t> SaturateUint16;
+typedef detail::Saturate<uint32_t> SaturateUint32;
+
+} // namespace mozilla
+
+template <typename LhsT, typename RhsT>
+bool operator==(LhsT aLhs, const mozilla::detail::Saturate<RhsT>& aRhs) {
+ return aRhs.operator==(static_cast<RhsT>(aLhs));
+}
+
+template <typename LhsT, typename RhsT>
+bool operator!=(LhsT aLhs, const mozilla::detail::Saturate<RhsT>& aRhs) {
+ return !(aLhs == aRhs);
+}
+
+#endif // mozilla_Saturate_h
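
A minimal usage sketch for the typedefs above; the values are illustrative:

    #include "mozilla/Saturate.h"

    #include <stdio.h>

    int main() {
      mozilla::SaturateUint8 u(250);
      u += 10;  // 250 + 10 would wrap to 4; saturates at 255 instead.
      printf("%u\n", unsigned(u.value()));  // 255

      mozilla::SaturateInt8 s(-120);
      s -= 20;  // -120 - 20 would underflow; saturates at -128 instead.
      printf("%d\n", int(s.value()));  // -128
      return 0;
    }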
diff --git a/mfbt/ScopeExit.h b/mfbt/ScopeExit.h
new file mode 100644
index 0000000000..9ddcd4b8f0
--- /dev/null
+++ b/mfbt/ScopeExit.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* RAII class for executing arbitrary actions at scope end. */
+
+#ifndef mozilla_ScopeExit_h
+#define mozilla_ScopeExit_h
+
+/*
+ * See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4189.pdf for a
+ * standards-track version of this.
+ *
+ * Error handling can be complex when various actions must be performed that
+ * need to be undone if an error occurs midway. This can be handled with a
+ * collection of boolean state variables and gotos, which can get clunky and
+ * error-prone:
+ *
+ * {
+ * if (!a.setup())
+ * goto fail;
+ * isASetup = true;
+ *
+ * if (!b.setup())
+ * goto fail;
+ * isBSetup = true;
+ *
+ * ...
+ * return true;
+ *
+ * fail:
+ * if (isASetup)
+ * a.teardown();
+ * if (isBSetup)
+ * b.teardown();
+ * return false;
+ * }
+ *
+ * ScopeExit is a mechanism to simplify this pattern by keeping an RAII guard
+ * class that will perform the teardown on destruction, unless released. So the
+ * above would become:
+ *
+ * {
+ * if (!a.setup()) {
+ * return false;
+ * }
+ * auto guardA = MakeScopeExit([&] {
+ * a.teardown();
+ * });
+ *
+ * if (!b.setup()) {
+ * return false;
+ * }
+ * auto guardB = MakeScopeExit([&] {
+ * b.teardown();
+ * });
+ *
+ * ...
+ * guardA.release();
+ * guardB.release();
+ * return true;
+ * }
+ *
+ * This header provides:
+ *
+ * - |ScopeExit| - a container for a cleanup call, automatically called at the
+ * end of the scope;
+ * - |MakeScopeExit| - a convenience function for constructing a |ScopeExit|
+ * with a given cleanup routine, commonly used with a lambda function.
+ *
+ * Note that the RAII classes defined in this header do _not_ perform any form
+ * of reference-counting or garbage-collection. These classes have exactly two
+ * behaviors:
+ *
+ * - if |release()| has not been called, the cleanup is always performed at
+ * the end of the scope;
+ * - if |release()| has been called, nothing will happen at the end of the
+ * scope.
+ */
+
+#include <utility>
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+template <typename ExitFunction>
+class MOZ_STACK_CLASS ScopeExit {
+ ExitFunction mExitFunction;
+ bool mExecuteOnDestruction;
+
+ public:
+ explicit ScopeExit(ExitFunction&& cleanup)
+ : mExitFunction(std::move(cleanup)), mExecuteOnDestruction(true) {}
+
+ ScopeExit(ScopeExit&& rhs)
+ : mExitFunction(std::move(rhs.mExitFunction)),
+ mExecuteOnDestruction(rhs.mExecuteOnDestruction) {
+ rhs.release();
+ }
+
+ ~ScopeExit() {
+ if (mExecuteOnDestruction) {
+ mExitFunction();
+ }
+ }
+
+ void release() { mExecuteOnDestruction = false; }
+
+ private:
+ explicit ScopeExit(const ScopeExit&) = delete;
+ ScopeExit& operator=(const ScopeExit&) = delete;
+ ScopeExit& operator=(ScopeExit&&) = delete;
+};
+
+template <typename ExitFunction>
+[[nodiscard]] ScopeExit<ExitFunction> MakeScopeExit(
+ ExitFunction&& exitFunction) {
+ return ScopeExit<ExitFunction>(std::move(exitFunction));
+}
+
+} /* namespace mozilla */
+
+#endif /* mozilla_ScopeExit_h */
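
A minimal sketch of the pattern described above; the file name and contents
are illustrative:

    #include "mozilla/ScopeExit.h"

    #include <stdio.h>

    bool WriteConfig() {
      FILE* f = fopen("config.tmp", "w");
      if (!f) {
        return false;
      }
      // Runs at every exit from this scope, including the early return below.
      auto closeFile = mozilla::MakeScopeExit([&] { fclose(f); });

      if (fputs("key=value\n", f) == EOF) {
        return false;  // closeFile's cleanup still closes the file.
      }
      // To keep the file open past this scope, call closeFile.release().
      return true;
    }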
diff --git a/mfbt/SegmentedVector.h b/mfbt/SegmentedVector.h
new file mode 100644
index 0000000000..c22c3e8d1f
--- /dev/null
+++ b/mfbt/SegmentedVector.h
@@ -0,0 +1,359 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// A simple segmented vector class.
+//
+// This class should be used in preference to mozilla::Vector or nsTArray when
+// you are simply gathering items in order to later iterate over them.
+//
+// - In the case where you don't know the final size in advance, using
+// SegmentedVector avoids the need to repeatedly allocate increasingly large
+// buffers and copy the data into them.
+//
+// - In the case where you know the final size in advance and so can set the
+// capacity appropriately, using SegmentedVector still avoids the need for
+// large allocations (which can trigger OOMs).
+
+#ifndef mozilla_SegmentedVector_h
+#define mozilla_SegmentedVector_h
+
+#include <new> // for placement new
+#include <utility>
+
+#include "mozilla/AllocPolicy.h"
+#include "mozilla/Array.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/OperatorNewExtensions.h"
+
+#ifdef IMPL_LIBXUL
+# include "mozilla/Likely.h"
+# include "mozilla/mozalloc_oom.h"
+#endif // IMPL_LIBXUL
+
+namespace mozilla {
+
+// |IdealSegmentSize| specifies how big each segment will be in bytes (or as
+// close as is possible). Use the following guidelines to choose a size.
+//
+// - It should be a power-of-two, to avoid slop.
+//
+// - It should not be too small, so that segment allocations are infrequent,
+// and so that per-segment bookkeeping overhead is low. Typically each
+// segment should be able to hold hundreds of elements, at least.
+//
+// - It should not be too large, so that OOMs are unlikely when allocating
+// segments, and so that not too much space is wasted when the final segment
+// is not full.
+//
+// The ideal size depends on how the SegmentedVector is used and the size of
+// |T|, but reasonable sizes include 1024, 4096 (the default), 8192, and 16384.
+//
+template <typename T, size_t IdealSegmentSize = 4096,
+ typename AllocPolicy = MallocAllocPolicy>
+class SegmentedVector : private AllocPolicy {
+ template <size_t SegmentCapacity>
+ struct SegmentImpl
+ : public mozilla::LinkedListElement<SegmentImpl<SegmentCapacity>> {
+ private:
+ uint32_t mLength;
+ alignas(T) MOZ_INIT_OUTSIDE_CTOR
+ unsigned char mData[sizeof(T) * SegmentCapacity];
+
+ // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+ // -Werror compile error) to reinterpret_cast<> |mData| to |T*|, even
+ // through |void*|. Placing the latter cast in these separate functions
+ // breaks the chain such that affected GCC versions no longer warn/error.
+ void* RawData() { return mData; }
+
+ public:
+ SegmentImpl() : mLength(0) {}
+
+ ~SegmentImpl() {
+ for (uint32_t i = 0; i < mLength; i++) {
+ (*this)[i].~T();
+ }
+ }
+
+ uint32_t Length() const { return mLength; }
+
+ T* Elems() { return reinterpret_cast<T*>(RawData()); }
+
+ T& operator[](size_t aIndex) {
+ MOZ_ASSERT(aIndex < mLength);
+ return Elems()[aIndex];
+ }
+
+ const T& operator[](size_t aIndex) const {
+ MOZ_ASSERT(aIndex < mLength);
+ return Elems()[aIndex];
+ }
+
+ template <typename U>
+ void Append(U&& aU) {
+ MOZ_ASSERT(mLength < SegmentCapacity);
+ // Pre-increment mLength so that the bounds-check in operator[] passes.
+ mLength++;
+ T* elem = &(*this)[mLength - 1];
+ new (KnownNotNull, elem) T(std::forward<U>(aU));
+ }
+
+ void PopLast() {
+ MOZ_ASSERT(mLength > 0);
+ (*this)[mLength - 1].~T();
+ mLength--;
+ }
+ };
+
+  // See how many elements we can fit in a segment of IdealSegmentSize. If
+ // IdealSegmentSize is too small, it'll be just one. The +1 is because
+ // kSingleElementSegmentSize already accounts for one element.
+ static const size_t kSingleElementSegmentSize = sizeof(SegmentImpl<1>);
+ static const size_t kSegmentCapacity =
+ kSingleElementSegmentSize <= IdealSegmentSize
+ ? (IdealSegmentSize - kSingleElementSegmentSize) / sizeof(T) + 1
+ : 1;
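+  // Illustrative example (exact sizes vary by platform): with T = uint64_t
+  // and the default IdealSegmentSize of 4096, if sizeof(SegmentImpl<1>) is
+  // 32 bytes, then kSegmentCapacity == (4096 - 32) / 8 + 1 == 509.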
+
+ public:
+ typedef SegmentImpl<kSegmentCapacity> Segment;
+
+ // The |aIdealSegmentSize| is only for sanity checking. If it's specified, we
+ // check that the actual segment size is as close as possible to it. This
+ // serves as a sanity check for SegmentedVectorCapacity's capacity
+ // computation.
+ explicit SegmentedVector(size_t aIdealSegmentSize = 0) {
+ // The difference between the actual segment size and the ideal segment
+ // size should be less than the size of a single element... unless the
+ // ideal size was too small, in which case the capacity should be one.
+ MOZ_ASSERT_IF(
+ aIdealSegmentSize != 0,
+ (sizeof(Segment) > aIdealSegmentSize && kSegmentCapacity == 1) ||
+ aIdealSegmentSize - sizeof(Segment) < sizeof(T));
+ }
+
+ SegmentedVector(SegmentedVector&& aOther)
+ : mSegments(std::move(aOther.mSegments)) {}
+ SegmentedVector& operator=(SegmentedVector&& aOther) {
+ if (&aOther != this) {
+ this->~SegmentedVector();
+ new (this) SegmentedVector(std::move(aOther));
+ }
+ return *this;
+ }
+
+ ~SegmentedVector() { Clear(); }
+
+ bool IsEmpty() const { return !mSegments.getFirst(); }
+
+ // Note that this is O(n) rather than O(1), but the constant factor is very
+ // small because it only has to do one addition per segment.
+ size_t Length() const {
+ size_t n = 0;
+ for (auto segment = mSegments.getFirst(); segment;
+ segment = segment->getNext()) {
+ n += segment->Length();
+ }
+ return n;
+ }
+
+ // Returns false if the allocation failed. (If you are using an infallible
+ // allocation policy, use InfallibleAppend() instead.)
+ template <typename U>
+ [[nodiscard]] bool Append(U&& aU) {
+ Segment* last = mSegments.getLast();
+ if (!last || last->Length() == kSegmentCapacity) {
+ last = this->template pod_malloc<Segment>(1);
+ if (!last) {
+ return false;
+ }
+ new (KnownNotNull, last) Segment();
+ mSegments.insertBack(last);
+ }
+ last->Append(std::forward<U>(aU));
+ return true;
+ }
+
+ // You should probably only use this instead of Append() if you are using an
+ // infallible allocation policy. It will crash if the allocation fails.
+ template <typename U>
+ void InfallibleAppend(U&& aU) {
+ bool ok = Append(std::forward<U>(aU));
+
+#ifdef IMPL_LIBXUL
+ if (MOZ_UNLIKELY(!ok)) {
+ mozalloc_handle_oom(sizeof(Segment));
+ }
+#else
+ MOZ_RELEASE_ASSERT(ok);
+#endif  // IMPL_LIBXUL
+ }
+
+ void Clear() {
+ Segment* segment;
+ while ((segment = mSegments.popFirst())) {
+ segment->~Segment();
+ this->free_(segment, 1);
+ }
+ }
+
+ T& GetLast() {
+ MOZ_ASSERT(!IsEmpty());
+ Segment* last = mSegments.getLast();
+ return (*last)[last->Length() - 1];
+ }
+
+ const T& GetLast() const {
+ MOZ_ASSERT(!IsEmpty());
+ Segment* last = mSegments.getLast();
+ return (*last)[last->Length() - 1];
+ }
+
+ void PopLast() {
+ MOZ_ASSERT(!IsEmpty());
+ Segment* last = mSegments.getLast();
+ last->PopLast();
+ if (!last->Length()) {
+ mSegments.popLast();
+ last->~Segment();
+ this->free_(last, 1);
+ }
+ }
+
+ // Equivalent to calling |PopLast| |aNumElements| times, but potentially
+ // more efficient.
+ void PopLastN(uint32_t aNumElements) {
+ MOZ_ASSERT(aNumElements <= Length());
+
+ Segment* last;
+
+ // Pop full segments for as long as we can. Note that this loop
+ // cleanly handles the case when the initial last segment is not
+ // full and we are popping more elements than said segment contains.
+ do {
+ last = mSegments.getLast();
+
+ // The list is empty. We're all done.
+ if (!last) {
+ return;
+ }
+
+ // Check to see if the list contains too many elements. Handle
+ // that in the epilogue.
+ uint32_t segmentLen = last->Length();
+ if (segmentLen > aNumElements) {
+ break;
+ }
+
+ // Destroying the segment destroys all elements contained therein.
+ mSegments.popLast();
+ last->~Segment();
+ this->free_(last, 1);
+
+ MOZ_ASSERT(aNumElements >= segmentLen);
+ aNumElements -= segmentLen;
+ if (aNumElements == 0) {
+ return;
+ }
+ } while (true);
+
+ // Handle the case where the last segment contains more elements
+ // than we want to pop.
+ MOZ_ASSERT(last);
+ MOZ_ASSERT(last == mSegments.getLast());
+ MOZ_ASSERT(aNumElements < last->Length());
+ for (uint32_t i = 0; i < aNumElements; ++i) {
+ last->PopLast();
+ }
+ MOZ_ASSERT(last->Length() != 0);
+ }
+
+ // Use this class to iterate over a SegmentedVector, like so:
+ //
+ // for (auto iter = v.Iter(); !iter.Done(); iter.Next()) {
+ // MyElem& elem = iter.Get();
+ // f(elem);
+ // }
+ //
+ // Note, adding new entries to the SegmentedVector while using iterators
+ // is supported, but removing is not!
+ // If an iterator has entered Done() state, adding more entries to the
+ // vector doesn't affect it.
+ class IterImpl {
+ friend class SegmentedVector;
+
+ Segment* mSegment;
+ size_t mIndex;
+
+ explicit IterImpl(SegmentedVector* aVector, bool aFromFirst)
+ : mSegment(aFromFirst ? aVector->mSegments.getFirst()
+ : aVector->mSegments.getLast()),
+ mIndex(aFromFirst ? 0 : (mSegment ? mSegment->Length() - 1 : 0)) {
+ MOZ_ASSERT_IF(mSegment, mSegment->Length() > 0);
+ }
+
+ public:
+ bool Done() const {
+ MOZ_ASSERT_IF(mSegment, mSegment->isInList());
+ MOZ_ASSERT_IF(mSegment, mIndex < mSegment->Length());
+ return !mSegment;
+ }
+
+ T& Get() {
+ MOZ_ASSERT(!Done());
+ return (*mSegment)[mIndex];
+ }
+
+ const T& Get() const {
+ MOZ_ASSERT(!Done());
+ return (*mSegment)[mIndex];
+ }
+
+ void Next() {
+ MOZ_ASSERT(!Done());
+ mIndex++;
+ if (mIndex == mSegment->Length()) {
+ mSegment = mSegment->getNext();
+ mIndex = 0;
+ }
+ }
+
+ void Prev() {
+ MOZ_ASSERT(!Done());
+ if (mIndex == 0) {
+ mSegment = mSegment->getPrevious();
+ if (mSegment) {
+ mIndex = mSegment->Length() - 1;
+ }
+ } else {
+ --mIndex;
+ }
+ }
+ };
+
+ IterImpl Iter() { return IterImpl(this, true); }
+ IterImpl IterFromLast() { return IterImpl(this, false); }
+
+ // Measure the memory consumption of the vector excluding |this|. Note that
+ // it only measures the vector itself. If the vector elements contain
+ // pointers to other memory blocks, those blocks must be measured separately
+ // during a subsequent iteration over the vector.
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ return mSegments.sizeOfExcludingThis(aMallocSizeOf);
+ }
+
+  // Like SizeOfExcludingThis(), but measures |this| as well.
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ private:
+ mozilla::LinkedList<Segment> mSegments;
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_SegmentedVector_h */
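
A minimal usage sketch; the element count is illustrative. With the default
(fallible) MallocAllocPolicy, the result of Append() must be checked:

    #include "mozilla/SegmentedVector.h"

    #include <stdio.h>

    int main() {
      mozilla::SegmentedVector<int> v;  // Default 4096-byte segments.
      for (int i = 0; i < 10000; i++) {
        if (!v.Append(i)) {
          return 1;  // Allocation of a new segment failed.
        }
      }

      long sum = 0;
      for (auto iter = v.Iter(); !iter.Done(); iter.Next()) {
        sum += iter.Get();
      }
      printf("%ld\n", sum);  // 49995000
      return 0;
    }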
diff --git a/mfbt/SharedLibrary.h b/mfbt/SharedLibrary.h
new file mode 100644
index 0000000000..8879f033a1
--- /dev/null
+++ b/mfbt/SharedLibrary.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Path charset agnostic wrappers for prlink.h. */
+
+#ifndef mozilla_SharedLibrary_h
+#define mozilla_SharedLibrary_h
+
+#ifdef MOZILLA_INTERNAL_API
+
+# include "prlink.h"
+# include "mozilla/Char16.h"
+
+namespace mozilla {
+
+//
+// Load the specified library.
+//
+// @param aPath path to the library
+// @param aFlags takes PR_LD_* flags (see prlink.h)
+//
+inline PRLibrary*
+# ifdef XP_WIN
+LoadLibraryWithFlags(char16ptr_t aPath, PRUint32 aFlags = 0)
+# else
+LoadLibraryWithFlags(const char* aPath, PRUint32 aFlags = 0)
+# endif
+{
+ PRLibSpec libSpec;
+# ifdef XP_WIN
+ libSpec.type = PR_LibSpec_PathnameU;
+ libSpec.value.pathname_u = aPath;
+# else
+ libSpec.type = PR_LibSpec_Pathname;
+ libSpec.value.pathname = aPath;
+# endif
+ return PR_LoadLibraryWithFlags(libSpec, aFlags);
+}
+
+} /* namespace mozilla */
+
+#endif /* MOZILLA_INTERNAL_API */
+
+#endif /* mozilla_SharedLibrary_h */
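
A minimal sketch, assuming NSPR is available and MOZILLA_INTERNAL_API is
defined; the library and symbol names are illustrative:

    #include "mozilla/SharedLibrary.h"

    void LoadExample() {
      // On Windows the path parameter is char16ptr_t instead of const char*.
      PRLibrary* lib = mozilla::LoadLibraryWithFlags("libexample.so");
      if (!lib) {
        return;
      }
      PRFuncPtr fn = PR_FindFunctionSymbol(lib, "example_entry_point");
      if (fn) {
        // ... cast fn to the appropriate function type and call it ...
      }
      PR_UnloadLibrary(lib);
    }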
diff --git a/mfbt/SmallPointerArray.h b/mfbt/SmallPointerArray.h
new file mode 100644
index 0000000000..c63e3980f9
--- /dev/null
+++ b/mfbt/SmallPointerArray.h
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A vector of pointers space-optimized for a small number of elements. */
+
+#ifndef mozilla_SmallPointerArray_h
+#define mozilla_SmallPointerArray_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/PodOperations.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <new>
+#include <vector>
+
+namespace mozilla {
+
+// Array class for situations where a small number of NON-NULL elements (<= 2)
+// is expected, a large number of elements must be accommodated if necessary,
+// and the size of the class must be minimal. Typical vector implementations
+// will fulfill the first two requirements by simply adding inline storage
+// alongside the rest of their member variables. While this strategy works,
+// it brings unnecessary storage overhead for vectors with an expected small
+// number of elements. This class is intended to deal with that problem.
+//
+// This class is similar in performance to a vector class. Accessing its
+// elements when it has not grown over a size of 2 does not require an extra
+// level of indirection and will therefore be faster.
+//
+// The minimum (inline) size is 2 * sizeof(void*).
+//
+// Any modification of the array invalidates any outstanding iterators.
+template <typename T>
+class SmallPointerArray {
+ public:
+ SmallPointerArray() {
+ // List-initialization would be nicer, but it only lets you initialize the
+ // first union member.
+ mArray[0].mValue = nullptr;
+ mArray[1].mVector = nullptr;
+ }
+
+ ~SmallPointerArray() {
+ if (!first()) {
+ delete maybeVector();
+ }
+ }
+
+ SmallPointerArray(SmallPointerArray&& aOther) {
+ PodCopy(mArray, aOther.mArray, 2);
+ aOther.mArray[0].mValue = nullptr;
+ aOther.mArray[1].mVector = nullptr;
+ }
+
+ SmallPointerArray& operator=(SmallPointerArray&& aOther) {
+ std::swap(mArray, aOther.mArray);
+ return *this;
+ }
+
+ void Clear() {
+ if (first()) {
+ first() = nullptr;
+ new (&mArray[1].mValue) std::vector<T*>*(nullptr);
+ return;
+ }
+
+ delete maybeVector();
+ mArray[1].mVector = nullptr;
+ }
+
+ void AppendElement(T* aElement) {
+ // Storing nullptr as an element is not permitted, but we do check for it
+ // to avoid corruption issues in non-debug builds.
+
+ // In addition to this we assert in debug builds to point out mistakes to
+ // users of the class.
+ MOZ_ASSERT(aElement != nullptr);
+ if (aElement == nullptr) {
+ return;
+ }
+
+ if (!first()) {
+ auto* vec = maybeVector();
+ if (!vec) {
+ first() = aElement;
+ new (&mArray[1].mValue) T*(nullptr);
+ return;
+ }
+
+ vec->push_back(aElement);
+ return;
+ }
+
+ if (!second()) {
+ second() = aElement;
+ return;
+ }
+
+ auto* vec = new std::vector<T*>({first(), second(), aElement});
+ first() = nullptr;
+ new (&mArray[1].mVector) std::vector<T*>*(vec);
+ }
+
+ bool RemoveElement(T* aElement) {
+ MOZ_ASSERT(aElement != nullptr);
+ if (aElement == nullptr) {
+ return false;
+ }
+
+ if (first() == aElement) {
+ // Expected case.
+ T* maybeSecond = second();
+ first() = maybeSecond;
+ if (maybeSecond) {
+ second() = nullptr;
+ } else {
+ new (&mArray[1].mVector) std::vector<T*>*(nullptr);
+ }
+
+ return true;
+ }
+
+ if (first()) {
+ if (second() == aElement) {
+ second() = nullptr;
+ return true;
+ }
+ return false;
+ }
+
+ if (auto* vec = maybeVector()) {
+ for (auto iter = vec->begin(); iter != vec->end(); iter++) {
+ if (*iter == aElement) {
+ vec->erase(iter);
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ bool Contains(T* aElement) const {
+ MOZ_ASSERT(aElement != nullptr);
+ if (aElement == nullptr) {
+ return false;
+ }
+
+ if (T* v = first()) {
+ return v == aElement || second() == aElement;
+ }
+
+ if (auto* vec = maybeVector()) {
+ return std::find(vec->begin(), vec->end(), aElement) != vec->end();
+ }
+
+ return false;
+ }
+
+ size_t Length() const {
+ if (first()) {
+ return second() ? 2 : 1;
+ }
+
+ if (auto* vec = maybeVector()) {
+ return vec->size();
+ }
+
+ return 0;
+ }
+
+ bool IsEmpty() const { return Length() == 0; }
+
+ T* ElementAt(size_t aIndex) const {
+ MOZ_ASSERT(aIndex < Length());
+ if (first()) {
+ return mArray[aIndex].mValue;
+ }
+
+ auto* vec = maybeVector();
+ MOZ_ASSERT(vec, "must have backing vector if accessing an element");
+ return (*vec)[aIndex];
+ }
+
+ T* operator[](size_t aIndex) const { return ElementAt(aIndex); }
+
+ using iterator = T**;
+ using const_iterator = const T**;
+
+ // Methods for range-based for loops. Manipulation invalidates these.
+ iterator begin() { return beginInternal(); }
+ const_iterator begin() const { return beginInternal(); }
+ const_iterator cbegin() const { return begin(); }
+ iterator end() { return beginInternal() + Length(); }
+ const_iterator end() const { return beginInternal() + Length(); }
+ const_iterator cend() const { return end(); }
+
+ private:
+ T** beginInternal() const {
+ if (first()) {
+ static_assert(sizeof(T*) == sizeof(Element),
+ "pointer ops on &first() must produce adjacent "
+ "Element::mValue arms");
+ return &first();
+ }
+
+ auto* vec = maybeVector();
+ if (!vec) {
+ return &first();
+ }
+
+ if (vec->empty()) {
+ return nullptr;
+ }
+
+ return &(*vec)[0];
+ }
+
+ // Accessors for |mArray| element union arms.
+
+ T*& first() const { return const_cast<T*&>(mArray[0].mValue); }
+
+ T*& second() const {
+ MOZ_ASSERT(first(), "first() must be non-null to have a T* second pointer");
+ return const_cast<T*&>(mArray[1].mValue);
+ }
+
+ std::vector<T*>* maybeVector() const {
+ MOZ_ASSERT(!first(),
+ "function must only be called when this is either empty or has "
+ "std::vector-backed elements");
+ return mArray[1].mVector;
+ }
+
+ // In C++ active-union-arm terms:
+ //
+ // - mArray[0].mValue is always active: a possibly null T*;
+ // - if mArray[0].mValue is null, mArray[1].mVector is active: a possibly
+ // null std::vector<T*>*; if mArray[0].mValue isn't null, mArray[1].mValue
+ // is active: a possibly null T*.
+ //
+ // SmallPointerArray begins empty, with mArray[1].mVector active and null.
+ // Code that makes mArray[0].mValue non-null, i.e. assignments to first(),
+ // must placement-new mArray[1].mValue with the proper value; code that goes
+ // the opposite direction, making mArray[0].mValue null, must placement-new
+ // mArray[1].mVector with the proper value.
+ //
+ // When !mArray[0].mValue && !mArray[1].mVector, the array is empty.
+ //
+ // When mArray[0].mValue && !mArray[1].mValue, the array has size 1 and
+ // contains mArray[0].mValue.
+ //
+  // When mArray[0].mValue && mArray[1].mValue, the array has size 2 and
+  // contains
+ // mArray[0].mValue and mArray[1].mValue.
+ //
+ // When !mArray[0].mValue && mArray[1].mVector, mArray[1].mVector contains
+ // the contents of an array of arbitrary size (even less than two if it ever
+ // contained three elements and elements were removed).
+ union Element {
+ T* mValue;
+ std::vector<T*>* mVector;
+ } mArray[2];
+};
+
+} // namespace mozilla
+
+#endif // mozilla_SmallPointerArray_h
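
A minimal usage sketch; the values are illustrative. The array stays inline
for up to two pointers and transparently moves to a heap-backed std::vector on
the third append:

    #include "mozilla/SmallPointerArray.h"

    #include <stdio.h>

    int main() {
      int a = 1, b = 2, c = 3;
      mozilla::SmallPointerArray<int> arr;

      arr.AppendElement(&a);
      arr.AppendElement(&b);  // Both pointers still fit inline in mArray.
      arr.AppendElement(&c);  // Storage switches to a heap vector.

      for (int* p : arr) {  // Range-based iteration via begin()/end().
        printf("%d ", *p);
      }
      printf("(length %zu)\n", arr.Length());

      arr.RemoveElement(&b);  // Returns true; arr now holds &a and &c.
      return 0;
    }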
diff --git a/mfbt/Span.h b/mfbt/Span.h
new file mode 100644
index 0000000000..d9ba1af220
--- /dev/null
+++ b/mfbt/Span.h
@@ -0,0 +1,973 @@
+///////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
+//
+// This code is licensed under the MIT License (MIT).
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// Adapted from
+// https://github.com/Microsoft/GSL/blob/3819df6e378ffccf0e29465afe99c3b324c2aa70/include/gsl/span
+// and
+// https://github.com/Microsoft/GSL/blob/3819df6e378ffccf0e29465afe99c3b324c2aa70/include/gsl/gsl_util
+
+#ifndef mozilla_Span_h
+#define mozilla_Span_h
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+
+template <typename T, size_t Length>
+class Array;
+
+// Stuff from gsl_util
+
+// narrow_cast(): a searchable way to do narrowing casts of values
+template <class T, class U>
+inline constexpr T narrow_cast(U&& u) {
+ return static_cast<T>(std::forward<U>(u));
+}
+
+// end gsl_util
+
+// [views.constants], constants
+// This was -1 in gsl::span, but using size_t for sizes instead of ptrdiff_t
+// and reserving a magic value that realistically doesn't occur in
+// compile-time-constant Span sizes makes things a lot less messy in terms of
+// comparison between signed and unsigned.
+constexpr const size_t dynamic_extent = std::numeric_limits<size_t>::max();
+
+template <class ElementType, size_t Extent = dynamic_extent>
+class Span;
+
+// implementation details
+namespace span_details {
+
+template <class T>
+struct is_span_oracle : std::false_type {};
+
+template <class ElementType, size_t Extent>
+struct is_span_oracle<mozilla::Span<ElementType, Extent>> : std::true_type {};
+
+template <class T>
+struct is_span : public is_span_oracle<std::remove_cv_t<T>> {};
+
+template <class T>
+struct is_std_array_oracle : std::false_type {};
+
+template <class ElementType, size_t Extent>
+struct is_std_array_oracle<std::array<ElementType, Extent>> : std::true_type {};
+
+template <class T>
+struct is_std_array : public is_std_array_oracle<std::remove_cv_t<T>> {};
+
+template <size_t From, size_t To>
+struct is_allowed_extent_conversion
+ : public std::integral_constant<bool, From == To ||
+ From == mozilla::dynamic_extent ||
+ To == mozilla::dynamic_extent> {};
+
+template <class From, class To>
+struct is_allowed_element_type_conversion
+ : public std::integral_constant<
+ bool, std::is_convertible_v<From (*)[], To (*)[]>> {};
+
+struct SpanKnownBounds {};
+
+template <class SpanT, bool IsConst>
+class span_iterator {
+ using element_type_ = typename SpanT::element_type;
+
+ template <class ElementType, size_t Extent>
+ friend class ::mozilla::Span;
+
+ public:
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = std::remove_const_t<element_type_>;
+ using difference_type = ptrdiff_t;
+
+ using reference =
+ std::conditional_t<IsConst, const element_type_, element_type_>&;
+ using pointer = std::add_pointer_t<reference>;
+
+ constexpr span_iterator() : span_iterator(nullptr, 0, SpanKnownBounds{}) {}
+
+ constexpr span_iterator(const SpanT* span, typename SpanT::index_type index)
+ : span_(span), index_(index) {
+ MOZ_RELEASE_ASSERT(span == nullptr ||
+ (index_ >= 0 && index <= span_->Length()));
+ }
+
+ private:
+ // For whatever reason, the compiler doesn't like optimizing away the above
+ // MOZ_RELEASE_ASSERT when `span_iterator` is constructed for
+ // obviously-correct cases like `span.begin()` or `span.end()`. We provide
+ // this private constructor for such cases.
+ constexpr span_iterator(const SpanT* span, typename SpanT::index_type index,
+ SpanKnownBounds)
+ : span_(span), index_(index) {}
+
+ public:
+ // `other` is already correct by construction; we do not need to go through
+ // the release assert above. Put differently, this constructor is effectively
+ // a copy constructor and therefore needs no assertions.
+ friend class span_iterator<SpanT, true>;
+ constexpr MOZ_IMPLICIT span_iterator(const span_iterator<SpanT, false>& other)
+ : span_(other.span_), index_(other.index_) {}
+
+ constexpr span_iterator<SpanT, IsConst>& operator=(
+ const span_iterator<SpanT, IsConst>&) = default;
+
+ constexpr reference operator*() const {
+ MOZ_RELEASE_ASSERT(span_);
+ return (*span_)[index_];
+ }
+
+ constexpr pointer operator->() const {
+ MOZ_RELEASE_ASSERT(span_);
+ return &((*span_)[index_]);
+ }
+
+ constexpr span_iterator& operator++() {
+ ++index_;
+ return *this;
+ }
+
+ constexpr span_iterator operator++(int) {
+ auto ret = *this;
+ ++(*this);
+ return ret;
+ }
+
+ constexpr span_iterator& operator--() {
+ --index_;
+ return *this;
+ }
+
+ constexpr span_iterator operator--(int) {
+ auto ret = *this;
+ --(*this);
+ return ret;
+ }
+
+ constexpr span_iterator operator+(difference_type n) const {
+ auto ret = *this;
+ return ret += n;
+ }
+
+ constexpr span_iterator& operator+=(difference_type n) {
+ MOZ_RELEASE_ASSERT(span_ && (index_ + n) >= 0 &&
+ (index_ + n) <= span_->Length());
+ index_ += n;
+ return *this;
+ }
+
+ constexpr span_iterator operator-(difference_type n) const {
+ auto ret = *this;
+ return ret -= n;
+ }
+
+ constexpr span_iterator& operator-=(difference_type n) { return *this += -n; }
+
+ constexpr difference_type operator-(const span_iterator& rhs) const {
+ MOZ_RELEASE_ASSERT(span_ == rhs.span_);
+ return index_ - rhs.index_;
+ }
+
+ constexpr reference operator[](difference_type n) const {
+ return *(*this + n);
+ }
+
+ constexpr friend bool operator==(const span_iterator& lhs,
+ const span_iterator& rhs) {
+    // Iterators from different spans are not comparable. A diagnostic assertion
+ // should be enough to check this, though. To ensure that no iterators from
+ // different spans are ever considered equal, still compare them in release
+ // builds.
+ MOZ_DIAGNOSTIC_ASSERT(lhs.span_ == rhs.span_);
+ return lhs.index_ == rhs.index_ && lhs.span_ == rhs.span_;
+ }
+
+ constexpr friend bool operator!=(const span_iterator& lhs,
+ const span_iterator& rhs) {
+ return !(lhs == rhs);
+ }
+
+ constexpr friend bool operator<(const span_iterator& lhs,
+ const span_iterator& rhs) {
+ MOZ_DIAGNOSTIC_ASSERT(lhs.span_ == rhs.span_);
+ return lhs.index_ < rhs.index_;
+ }
+
+ constexpr friend bool operator<=(const span_iterator& lhs,
+ const span_iterator& rhs) {
+ return !(rhs < lhs);
+ }
+
+ constexpr friend bool operator>(const span_iterator& lhs,
+ const span_iterator& rhs) {
+ return rhs < lhs;
+ }
+
+ constexpr friend bool operator>=(const span_iterator& lhs,
+ const span_iterator& rhs) {
+ return !(rhs > lhs);
+ }
+
+ void swap(span_iterator& rhs) {
+ std::swap(index_, rhs.index_);
+ std::swap(span_, rhs.span_);
+ }
+
+ protected:
+ const SpanT* span_;
+ size_t index_;
+};
+
+template <class Span, bool IsConst>
+inline constexpr span_iterator<Span, IsConst> operator+(
+ typename span_iterator<Span, IsConst>::difference_type n,
+ const span_iterator<Span, IsConst>& rhs) {
+ return rhs + n;
+}
+
+template <size_t Ext>
+class extent_type {
+ public:
+ using index_type = size_t;
+
+ static_assert(Ext >= 0, "A fixed-size Span must be >= 0 in size.");
+
+ constexpr extent_type() = default;
+
+ template <index_type Other>
+ constexpr MOZ_IMPLICIT extent_type(extent_type<Other> ext) {
+ static_assert(
+ Other == Ext || Other == dynamic_extent,
+ "Mismatch between fixed-size extent and size of initializing data.");
+ MOZ_RELEASE_ASSERT(ext.size() == Ext);
+ }
+
+ constexpr MOZ_IMPLICIT extent_type(index_type length) {
+ MOZ_RELEASE_ASSERT(length == Ext);
+ }
+
+ constexpr index_type size() const { return Ext; }
+};
+
+template <>
+class extent_type<dynamic_extent> {
+ public:
+ using index_type = size_t;
+
+ template <index_type Other>
+ explicit constexpr extent_type(extent_type<Other> ext) : size_(ext.size()) {}
+
+ explicit constexpr extent_type(index_type length) : size_(length) {}
+
+ constexpr index_type size() const { return size_; }
+
+ private:
+ index_type size_;
+};
+} // namespace span_details
+
+/**
+ * Span - slices for C++
+ *
+ * Span implements Rust's slice concept for C++. It's called "Span" instead of
+ * "Slice" to follow the naming used in C++ Core Guidelines.
+ *
+ * A Span wraps a pointer and a length that identify a non-owning view to a
+ * contiguous block of memory of objects of the same type. Various types,
+ * including (pre-decay) C arrays, XPCOM strings, nsTArray, mozilla::Array,
+ * mozilla::Range and contiguous standard-library containers, auto-convert
+ * into Spans when attempting to pass them as arguments to methods that take
+ * Spans. (Span itself autoconverts into mozilla::Range.)
+ *
+ * Like Rust's slices, Span provides safety against out-of-bounds access by
+ * performing run-time bound checks. However, unlike Rust's slices, Span
+ * cannot provide safety against use-after-free.
+ *
+ * (Note: Span is like Rust's slice only conceptually. Due to the lack of
+ * ABI guarantees, you should still decompose spans/slices to raw pointer
+ * and length parts when crossing the FFI. The Elements() and data() methods
+ * are guaranteed to return a non-null pointer even for zero-length spans,
+ * so the pointer can be used as a raw part of a Rust slice without further
+ * checks.)
+ *
+ * In addition to having constructors (with the support of deduction guides)
+ * that take various well-known types, a Span for an arbitrary type can be
+ * constructed from a pointer and a length or a pointer and another pointer
+ * pointing just past the last element.
+ *
+ * A Span<const char> or Span<const char16_t> can be obtained for a
+ * const char* or const char16_t* pointing to a zero-terminated string using
+ * the MakeStringSpan() function (which treats a nullptr argument as
+ * equivalent to the empty string). The corresponding implicit constructors
+ * do not exist, in order to avoid accidental construction in cases where a
+ * const char* or const char16_t* does not point to a zero-terminated string.
+ *
+ * Span has methods that follow the Mozilla naming style and methods that
+ * don't. The methods that follow the Mozilla naming style are meant to be
+ * used directly from Mozilla code. The methods that don't are meant for
+ * integration with C++11 range-based loops and with meta-programming that
+ * expects the same methods that are found on the standard-library
+ * containers. For example, to decompose a Span into its parts in Mozilla
+ * code, use Elements() and Length() (as with nsTArray) instead of data()
+ * and size() (as with std::vector).
+ *
+ * The pointer and length wrapped by a Span cannot be changed after a Span has
+ * been created. When new values are required, simply create a new Span. Span
+ * has a method called Subspan() that works analogously to the Substring()
+ * method of XPCOM strings taking a start index and an optional length. As a
+ * Mozilla extension (relative to Microsoft's gsl::span that mozilla::Span is
+ * based on), Span has methods From(start), To(end) and FromTo(start, end)
+ * that correspond to Rust's &slice[start..], &slice[..end] and
+ * &slice[start..end], respectively. (That is, the end index is the index of
+ * the first element not to be included in the new subspan.)
+ *
+ * When indicating a Span that's only read from, const goes inside the type
+ * parameter. Don't put const in front of Span. That is:
+ * size_t ReadsFromOneSpanAndWritesToAnother(Span<const uint8_t> aReadFrom,
+ * Span<uint8_t> aWrittenTo);
+ *
+ * Any Span<const T> can be viewed as Span<const uint8_t> using the function
+ * AsBytes(). Any Span<T> can be viewed as Span<uint8_t> using the function
+ * AsWritableBytes().
+ *
+ * Note that iterators from different Span instances are not comparable, even if
+ * they refer to the same memory. This also applies to any spans derived via
+ * Subspan etc.
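+ *
+ * A brief usage sketch (variable names are illustrative):
+ *
+ *   uint8_t buf[16];
+ *   Span<uint8_t> all(buf);                 // view over the whole array
+ *   Span<uint8_t> tail = all.From(8);       // elements 8..15
+ *   Span<uint8_t> mid = all.FromTo(4, 12);  // elements 4..11
+ *   Span<const uint8_t> ro = all;           // read-only view of the same data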
+ */
+template <class ElementType, size_t Extent /* = dynamic_extent */>
+class Span {
+ public:
+ // constants and types
+ using element_type = ElementType;
+ using value_type = std::remove_cv_t<element_type>;
+ using index_type = size_t;
+ using pointer = element_type*;
+ using reference = element_type&;
+
+ using iterator =
+ span_details::span_iterator<Span<ElementType, Extent>, false>;
+ using const_iterator =
+ span_details::span_iterator<Span<ElementType, Extent>, true>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ constexpr static const index_type extent = Extent;
+
+ // [Span.cons], Span constructors, copy, assignment, and destructor
+ // "Dependent" is needed to make "std::enable_if_t<(Dependent ||
+ // Extent == 0 || Extent == dynamic_extent)>" SFINAE,
+ // since
+ // "std::enable_if_t<(Extent == 0 || Extent == dynamic_extent)>" is
+ // ill-formed when Extent is neither of the extreme values.
+ /**
+ * Constructor with no args.
+ */
+ template <bool Dependent = false,
+ class = std::enable_if_t<(Dependent || Extent == 0 ||
+ Extent == dynamic_extent)>>
+ constexpr Span() : storage_(nullptr, span_details::extent_type<0>()) {}
+
+ /**
+ * Constructor for nullptr.
+ */
+ constexpr MOZ_IMPLICIT Span(std::nullptr_t) : Span() {}
+
+ /**
+ * Constructor for pointer and length.
+ */
+ constexpr Span(pointer aPtr, index_type aLength) : storage_(aPtr, aLength) {}
+
+ /**
+ * Constructor for start pointer and pointer past end.
+ */
+ constexpr Span(pointer aStartPtr, pointer aEndPtr)
+ : storage_(aStartPtr, std::distance(aStartPtr, aEndPtr)) {}
+
+ /**
+ * Constructor for pair of Span iterators.
+ */
+ template <typename OtherElementType, size_t OtherExtent, bool IsConst>
+ constexpr Span(
+ span_details::span_iterator<Span<OtherElementType, OtherExtent>, IsConst>
+ aBegin,
+ span_details::span_iterator<Span<OtherElementType, OtherExtent>, IsConst>
+ aEnd)
+ : storage_(aBegin == aEnd ? nullptr : &*aBegin, aEnd - aBegin) {}
+
+ /**
+ * Constructor for {iterator,size_t}
+ */
+ template <typename OtherElementType, size_t OtherExtent, bool IsConst>
+ constexpr Span(
+ span_details::span_iterator<Span<OtherElementType, OtherExtent>, IsConst>
+ aBegin,
+ index_type aLength)
+ : storage_(!aLength ? nullptr : &*aBegin, aLength) {}
+
+ /**
+ * Constructor for C array.
+ */
+ template <size_t N>
+ constexpr MOZ_IMPLICIT Span(element_type (&aArr)[N])
+ : storage_(&aArr[0], span_details::extent_type<N>()) {}
+
+ // Implicit constructors for char* and char16_t* pointers are deleted in order
+ // to avoid accidental construction in cases where a pointer does not point to
+ // a zero-terminated string. A Span<const char> or Span<const char16_t> can be
+ // obtained for const char* or const char16_t pointing to a zero-terminated
+ // string using the MakeStringSpan() function.
+  // (This must be a template, because otherwise it would prevent the previous
+  // array constructor from matching, since an array decays to a pointer. This
+  // overload exists only to point to the above explanation; no other
+  // constructor would match anyway.)
+ template <
+ typename T,
+ typename = std::enable_if_t<
+ std::is_pointer_v<T> &&
+ (std::is_same_v<std::remove_const_t<std::decay_t<T>>, char> ||
+ std::is_same_v<std::remove_const_t<std::decay_t<T>>, char16_t>)>>
+ Span(T& aStr) = delete;
+
+ /**
+ * Constructor for std::array.
+ */
+ template <size_t N,
+ class ArrayElementType = std::remove_const_t<element_type>>
+ constexpr MOZ_IMPLICIT Span(std::array<ArrayElementType, N>& aArr)
+ : storage_(&aArr[0], span_details::extent_type<N>()) {}
+
+ /**
+ * Constructor for const std::array.
+ */
+ template <size_t N>
+ constexpr MOZ_IMPLICIT Span(
+ const std::array<std::remove_const_t<element_type>, N>& aArr)
+ : storage_(&aArr[0], span_details::extent_type<N>()) {}
+
+ /**
+ * Constructor for mozilla::Array.
+ */
+ template <size_t N,
+ class ArrayElementType = std::remove_const_t<element_type>>
+ constexpr MOZ_IMPLICIT Span(mozilla::Array<ArrayElementType, N>& aArr)
+ : storage_(&aArr[0], span_details::extent_type<N>()) {}
+
+ /**
+ * Constructor for const mozilla::Array.
+ */
+ template <size_t N>
+ constexpr MOZ_IMPLICIT Span(
+ const mozilla::Array<std::remove_const_t<element_type>, N>& aArr)
+ : storage_(&aArr[0], span_details::extent_type<N>()) {}
+
+ /**
+ * Constructor for mozilla::UniquePtr holding an array and length.
+ */
+ template <class ArrayElementType = std::add_pointer<element_type>,
+ class DeleterType>
+ constexpr Span(const mozilla::UniquePtr<ArrayElementType, DeleterType>& aPtr,
+ index_type aLength)
+ : storage_(aPtr.get(), aLength) {}
+
+  // NB: the SFINAE here uses .data() as an incomplete/imperfect proxy for the
+ // requirement on Container to be a contiguous sequence container.
+ /**
+ * Constructor for standard-library containers.
+ */
+ template <
+ class Container,
+ class Dummy = std::enable_if_t<
+ !std::is_const_v<Container> &&
+ !span_details::is_span<Container>::value &&
+ !span_details::is_std_array<Container>::value &&
+ std::is_convertible_v<typename Container::pointer, pointer> &&
+ std::is_convertible_v<typename Container::pointer,
+ decltype(std::declval<Container>().data())>,
+ Container>>
+ constexpr MOZ_IMPLICIT Span(Container& cont, Dummy* = nullptr)
+ : Span(cont.data(), ReleaseAssertedCast<index_type>(cont.size())) {}
+
+ /**
+ * Constructor for standard-library containers (const version).
+ */
+ template <
+ class Container,
+ class = std::enable_if_t<
+ std::is_const_v<element_type> &&
+ !span_details::is_span<Container>::value &&
+ std::is_convertible_v<typename Container::pointer, pointer> &&
+ std::is_convertible_v<typename Container::pointer,
+ decltype(std::declval<Container>().data())>>>
+ constexpr MOZ_IMPLICIT Span(const Container& cont)
+ : Span(cont.data(), ReleaseAssertedCast<index_type>(cont.size())) {}
+
+  // NB: the SFINAE here uses .Elements() as an incomplete/imperfect proxy for
+ // the requirement on Container to be a contiguous sequence container.
+ /**
+ * Constructor for contiguous Mozilla containers.
+ */
+ template <
+ class Container,
+ class = std::enable_if_t<
+ !std::is_const_v<Container> &&
+ !span_details::is_span<Container>::value &&
+ !span_details::is_std_array<Container>::value &&
+ std::is_convertible_v<typename Container::value_type*, pointer> &&
+ std::is_convertible_v<
+ typename Container::value_type*,
+ decltype(std::declval<Container>().Elements())>>>
+ constexpr MOZ_IMPLICIT Span(Container& cont, void* = nullptr)
+ : Span(cont.Elements(), ReleaseAssertedCast<index_type>(cont.Length())) {}
+
+ /**
+ * Constructor for contiguous Mozilla containers (const version).
+ */
+ template <
+ class Container,
+ class = std::enable_if_t<
+ std::is_const_v<element_type> &&
+ !span_details::is_span<Container>::value &&
+ std::is_convertible_v<typename Container::value_type*, pointer> &&
+ std::is_convertible_v<
+ typename Container::value_type*,
+ decltype(std::declval<Container>().Elements())>>>
+ constexpr MOZ_IMPLICIT Span(const Container& cont, void* = nullptr)
+ : Span(cont.Elements(), ReleaseAssertedCast<index_type>(cont.Length())) {}
+
+ /**
+ * Constructor from other Span.
+ */
+ constexpr Span(const Span& other) = default;
+
+ /**
+ * Constructor from other Span.
+ */
+ constexpr Span(Span&& other) = default;
+
+ /**
+ * Constructor from other Span with conversion of element type.
+ */
+ template <
+ class OtherElementType, size_t OtherExtent,
+ class = std::enable_if_t<span_details::is_allowed_extent_conversion<
+ OtherExtent, Extent>::value &&
+ span_details::is_allowed_element_type_conversion<
+ OtherElementType, element_type>::value>>
+ constexpr MOZ_IMPLICIT Span(const Span<OtherElementType, OtherExtent>& other)
+ : storage_(other.data(),
+ span_details::extent_type<OtherExtent>(other.size())) {}
+
+ /**
+ * Constructor from other Span with conversion of element type.
+ */
+ template <
+ class OtherElementType, size_t OtherExtent,
+ class = std::enable_if_t<span_details::is_allowed_extent_conversion<
+ OtherExtent, Extent>::value &&
+ span_details::is_allowed_element_type_conversion<
+ OtherElementType, element_type>::value>>
+ constexpr MOZ_IMPLICIT Span(Span<OtherElementType, OtherExtent>&& other)
+ : storage_(other.data(),
+ span_details::extent_type<OtherExtent>(other.size())) {}
+
+ ~Span() = default;
+ constexpr Span& operator=(const Span& other) = default;
+
+ constexpr Span& operator=(Span&& other) = default;
+
+ // [Span.sub], Span subviews
+ /**
+ * Subspan with first N elements with compile-time N.
+ */
+ template <size_t Count>
+ constexpr Span<element_type, Count> First() const {
+ MOZ_RELEASE_ASSERT(Count <= size());
+ return {data(), Count};
+ }
+
+ /**
+ * Subspan with last N elements with compile-time N.
+ */
+ template <size_t Count>
+ constexpr Span<element_type, Count> Last() const {
+ const size_t len = size();
+ MOZ_RELEASE_ASSERT(Count <= len);
+ return {data() + (len - Count), Count};
+ }
+
+ /**
+ * Subspan with compile-time start index and length.
+ */
+ template <size_t Offset, size_t Count = dynamic_extent>
+ constexpr Span<element_type, Count> Subspan() const {
+ const size_t len = size();
+ MOZ_RELEASE_ASSERT(Offset <= len &&
+ (Count == dynamic_extent || (Offset + Count <= len)));
+ return {data() + Offset, Count == dynamic_extent ? len - Offset : Count};
+ }
+
+ /**
+ * Subspan with first N elements with run-time N.
+ */
+ constexpr Span<element_type, dynamic_extent> First(index_type aCount) const {
+ MOZ_RELEASE_ASSERT(aCount <= size());
+ return {data(), aCount};
+ }
+
+ /**
+ * Subspan with last N elements with run-time N.
+ */
+ constexpr Span<element_type, dynamic_extent> Last(index_type aCount) const {
+ const size_t len = size();
+ MOZ_RELEASE_ASSERT(aCount <= len);
+ return {data() + (len - aCount), aCount};
+ }
+
+ /**
+ * Subspan with run-time start index and length.
+ */
+ constexpr Span<element_type, dynamic_extent> Subspan(
+ index_type aStart, index_type aLength = dynamic_extent) const {
+ const size_t len = size();
+ MOZ_RELEASE_ASSERT(aStart <= len && (aLength == dynamic_extent ||
+ (aStart + aLength <= len)));
+ return {data() + aStart,
+ aLength == dynamic_extent ? len - aStart : aLength};
+ }
+
+ /**
+ * Subspan with run-time start index. (Rust's &foo[start..])
+ */
+ constexpr Span<element_type, dynamic_extent> From(index_type aStart) const {
+ return Subspan(aStart);
+ }
+
+ /**
+ * Subspan with run-time exclusive end index. (Rust's &foo[..end])
+ */
+ constexpr Span<element_type, dynamic_extent> To(index_type aEnd) const {
+ return Subspan(0, aEnd);
+ }
+
+ /// std::span-compatible method name
+ constexpr auto subspan(index_type aStart,
+ index_type aLength = dynamic_extent) const {
+ return Subspan(aStart, aLength);
+ }
+ /// std::span-compatible method name
+ constexpr auto from(index_type aStart) const { return From(aStart); }
+ /// std::span-compatible method name
+ constexpr auto to(index_type aEnd) const { return To(aEnd); }
+
+ /**
+ * Subspan with run-time start index and exclusive end index.
+ * (Rust's &foo[start..end])
+ */
+ constexpr Span<element_type, dynamic_extent> FromTo(index_type aStart,
+ index_type aEnd) const {
+ MOZ_RELEASE_ASSERT(aStart <= aEnd);
+ return Subspan(aStart, aEnd - aStart);
+ }
+
+ // [Span.obs], Span observers
+ /**
+ * Number of elements in the span.
+ */
+ constexpr index_type Length() const { return size(); }
+
+ /**
+   * Number of elements in the span (standard-library duck typing version).
+ */
+ constexpr index_type size() const { return storage_.size(); }
+
+ /**
+ * Size of the span in bytes.
+ */
+ constexpr index_type LengthBytes() const { return size_bytes(); }
+
+ /**
+ * Size of the span in bytes (standard-library naming style version).
+ */
+ constexpr index_type size_bytes() const {
+ return size() * narrow_cast<index_type>(sizeof(element_type));
+ }
+
+ /**
+   * Checks if the length of the span is zero.
+ */
+ constexpr bool IsEmpty() const { return empty(); }
+
+ /**
+   * Checks if the length of the span is zero (standard-library duck
+ * typing version).
+ */
+ constexpr bool empty() const { return size() == 0; }
+
+ // [Span.elem], Span element access
+ constexpr reference operator[](index_type idx) const {
+ MOZ_RELEASE_ASSERT(idx < storage_.size());
+ return data()[idx];
+ }
+
+ /**
+ * Access element of span by index (standard-library duck typing version).
+ */
+ constexpr reference at(index_type idx) const { return this->operator[](idx); }
+
+ constexpr reference operator()(index_type idx) const {
+ return this->operator[](idx);
+ }
+
+ /**
+ * Pointer to the first element of the span. The return value is never
+   * nullptr, not even for zero-length spans, so it can be passed as-is
+ * to std::slice::from_raw_parts() in Rust.
+ */
+ constexpr pointer Elements() const { return data(); }
+
+ /**
+   * Pointer to the first element of the span (standard-library duck typing
+   * version). The return value is never nullptr, not even for zero-length
+ * spans, so it can be passed as-is to std::slice::from_raw_parts() in Rust.
+ */
+ constexpr pointer data() const { return storage_.data(); }
+
+ // [Span.iter], Span iterator support
+ iterator begin() const { return {this, 0, span_details::SpanKnownBounds{}}; }
+ iterator end() const {
+ return {this, Length(), span_details::SpanKnownBounds{}};
+ }
+
+ const_iterator cbegin() const {
+ return {this, 0, span_details::SpanKnownBounds{}};
+ }
+ const_iterator cend() const {
+ return {this, Length(), span_details::SpanKnownBounds{}};
+ }
+
+ reverse_iterator rbegin() const { return reverse_iterator{end()}; }
+ reverse_iterator rend() const { return reverse_iterator{begin()}; }
+
+ const_reverse_iterator crbegin() const {
+ return const_reverse_iterator{cend()};
+ }
+ const_reverse_iterator crend() const {
+ return const_reverse_iterator{cbegin()};
+ }
+
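+  /**
+   * Splits the span in two at the given split point: the first subspan holds
+   * the elements before SplitPoint and the second holds the rest. This
+   * compile-time overload requires a fixed-extent span; the run-time overload
+   * below works on any span.
+   */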
+ template <size_t SplitPoint>
+ constexpr std::pair<Span<ElementType, SplitPoint>,
+ Span<ElementType, Extent - SplitPoint>>
+ SplitAt() const {
+ static_assert(Extent != dynamic_extent);
+ static_assert(SplitPoint <= Extent);
+ return {First<SplitPoint>(), Last<Extent - SplitPoint>()};
+ }
+
+ constexpr std::pair<Span<ElementType, dynamic_extent>,
+ Span<ElementType, dynamic_extent>>
+ SplitAt(const index_type aSplitPoint) const {
+ MOZ_RELEASE_ASSERT(aSplitPoint <= Length());
+ return {First(aSplitPoint), Last(Length() - aSplitPoint)};
+ }
+
+ constexpr Span<std::add_const_t<ElementType>, Extent> AsConst() const {
+ return {Elements(), Length()};
+ }
+
+ private:
+ // this implementation detail class lets us take advantage of the
+ // empty base class optimization to pay for only storage of a single
+ // pointer in the case of fixed-size Spans
+ template <class ExtentType>
+ class storage_type : public ExtentType {
+ public:
+ template <class OtherExtentType>
+ constexpr storage_type(pointer elements, OtherExtentType ext)
+ : ExtentType(ext)
+ // Replace nullptr with aligned bogus pointer for Rust slice
+ // compatibility. See
+ // https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
+ ,
+ data_(elements ? elements
+ : reinterpret_cast<pointer>(alignof(element_type))) {
+ const size_t extentSize = ExtentType::size();
+ MOZ_RELEASE_ASSERT((!elements && extentSize == 0) ||
+ (elements && extentSize != dynamic_extent));
+ }
+
+ constexpr pointer data() const { return data_; }
+
+ private:
+ pointer data_;
+ };
+
+ storage_type<span_details::extent_type<Extent>> storage_;
+};
+
+template <typename T, size_t OtherExtent, bool IsConst>
+Span(span_details::span_iterator<Span<T, OtherExtent>, IsConst> aBegin,
+ span_details::span_iterator<Span<T, OtherExtent>, IsConst> aEnd)
+ -> Span<std::conditional_t<IsConst, std::add_const_t<T>, T>>;
+
+template <typename T, size_t Extent>
+Span(T (&)[Extent]) -> Span<T, Extent>;
+
+template <class Container>
+Span(Container&) -> Span<typename Container::value_type>;
+
+template <class Container>
+Span(const Container&) -> Span<const typename Container::value_type>;
+
+template <typename T, size_t Extent>
+Span(mozilla::Array<T, Extent>&) -> Span<T, Extent>;
+
+template <typename T, size_t Extent>
+Span(const mozilla::Array<T, Extent>&) -> Span<const T, Extent>;
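+
+// For example (illustrative): given int arr[4], Span(arr) deduces
+// Span<int, 4>; for an lvalue std::vector<int> v, Span(v) deduces Span<int>.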
+
+// [Span.comparison], Span comparison operators
+template <class ElementType, size_t FirstExtent, size_t SecondExtent>
+inline constexpr bool operator==(const Span<ElementType, FirstExtent>& l,
+ const Span<ElementType, SecondExtent>& r) {
+ return (l.size() == r.size()) &&
+ std::equal(l.data(), l.data() + l.size(), r.data());
+}
+
+template <class ElementType, size_t Extent>
+inline constexpr bool operator!=(const Span<ElementType, Extent>& l,
+ const Span<ElementType, Extent>& r) {
+ return !(l == r);
+}
+
+template <class ElementType, size_t Extent>
+inline constexpr bool operator<(const Span<ElementType, Extent>& l,
+ const Span<ElementType, Extent>& r) {
+ return std::lexicographical_compare(l.data(), l.data() + l.size(), r.data(),
+ r.data() + r.size());
+}
+
+template <class ElementType, size_t Extent>
+inline constexpr bool operator<=(const Span<ElementType, Extent>& l,
+ const Span<ElementType, Extent>& r) {
+ return !(l > r);
+}
+
+template <class ElementType, size_t Extent>
+inline constexpr bool operator>(const Span<ElementType, Extent>& l,
+ const Span<ElementType, Extent>& r) {
+ return r < l;
+}
+
+template <class ElementType, size_t Extent>
+inline constexpr bool operator>=(const Span<ElementType, Extent>& l,
+ const Span<ElementType, Extent>& r) {
+ return !(l < r);
+}
+
+namespace span_details {
+// If we only supported compilers with good constexpr support, this pair of
+// classes could collapse down to a single constexpr function.
+
+// We should use a narrow_cast<> to go to size_t, but older compilers may not
+// see it as constexpr and so would fail to compile the template.
+template <class ElementType, size_t Extent>
+struct calculate_byte_size
+ : std::integral_constant<size_t,
+ static_cast<size_t>(sizeof(ElementType) *
+ static_cast<size_t>(Extent))> {
+};
+
+template <class ElementType>
+struct calculate_byte_size<ElementType, dynamic_extent>
+ : std::integral_constant<size_t, dynamic_extent> {};
+} // namespace span_details
+
+// [Span.objectrep], views of object representation
+/**
+ * View span as Span<const uint8_t>.
+ */
+template <class ElementType, size_t Extent>
+Span<const uint8_t,
+ span_details::calculate_byte_size<ElementType, Extent>::value>
+AsBytes(Span<ElementType, Extent> s) {
+ return {reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()};
+}
+
+/**
+ * View span as Span<uint8_t>.
+ */
+template <class ElementType, size_t Extent,
+ class = std::enable_if_t<!std::is_const_v<ElementType>>>
+Span<uint8_t, span_details::calculate_byte_size<ElementType, Extent>::value>
+AsWritableBytes(Span<ElementType, Extent> s) {
+ return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
+}
+
+/**
+ * View a span of uint8_t as a span of char.
+ */
+inline Span<const char> AsChars(Span<const uint8_t> s) {
+ return {reinterpret_cast<const char*>(s.data()), s.size()};
+}
+
+/**
+ * View a writable span of uint8_t as a span of char.
+ */
+inline Span<char> AsWritableChars(Span<uint8_t> s) {
+ return {reinterpret_cast<char*>(s.data()), s.size()};
+}
+
+/**
+ * Create span from a zero-terminated C string. nullptr is
+ * treated as the empty string.
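+ *
+ * For example, MakeStringSpan("abc") yields a Span<const char> of length 3;
+ * the zero terminator is not included.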
+ */
+constexpr Span<const char> MakeStringSpan(const char* aZeroTerminated) {
+ if (!aZeroTerminated) {
+ return Span<const char>();
+ }
+ return Span<const char>(aZeroTerminated,
+ std::char_traits<char>::length(aZeroTerminated));
+}
+
+/**
+ * Create span from a zero-terminated UTF-16 C string. nullptr is
+ * treated as the empty string.
+ */
+constexpr Span<const char16_t> MakeStringSpan(const char16_t* aZeroTerminated) {
+ if (!aZeroTerminated) {
+ return Span<const char16_t>();
+ }
+ return Span<const char16_t>(
+ aZeroTerminated, std::char_traits<char16_t>::length(aZeroTerminated));
+}
+
+} // namespace mozilla
+
+#endif // mozilla_Span_h
diff --git a/mfbt/SplayTree.h b/mfbt/SplayTree.h
new file mode 100644
index 0000000000..08765c0b11
--- /dev/null
+++ b/mfbt/SplayTree.h
@@ -0,0 +1,305 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * A sorted tree with optimal access times, where recently-accessed elements
+ * are faster to access again.
+ */
+
+#ifndef mozilla_SplayTree_h
+#define mozilla_SplayTree_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+template <class T, class C>
+class SplayTree;
+
+template <typename T>
+class SplayTreeNode {
+ public:
+ template <class A, class B>
+ friend class SplayTree;
+
+ SplayTreeNode() : mLeft(nullptr), mRight(nullptr), mParent(nullptr) {}
+
+ private:
+ T* mLeft;
+ T* mRight;
+ T* mParent;
+};
+
+/**
+ * Class which represents a splay tree.
+ * Splay trees are balanced binary search trees for which search, insert and
+ * remove are all amortized O(log n), but where accessing a node makes it
+ * faster to access that node in the future.
+ *
+ * T indicates the type of tree elements; Comparator must have a static
+ * compare(const T&, const T&) method ordering the elements. The compare
+ * method must be free from side effects.
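+ *
+ * A minimal sketch of a conforming element type and comparator (names are
+ * illustrative):
+ *
+ *   struct IntNode : SplayTreeNode<IntNode> {
+ *     explicit IntNode(int aValue) : mValue(aValue) {}
+ *     int mValue;
+ *   };
+ *   struct IntComparator {
+ *     static int compare(const IntNode& aA, const IntNode& aB) {
+ *       return (aA.mValue > aB.mValue) - (aA.mValue < aB.mValue);
+ *     }
+ *   };
+ *   SplayTree<IntNode, IntComparator> tree;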
+ */
+template <typename T, class Comparator>
+class SplayTree {
+ T* mRoot;
+
+ public:
+ constexpr SplayTree() : mRoot(nullptr) {}
+
+ bool empty() const { return !mRoot; }
+
+ T* find(const T& aValue) {
+ if (empty()) {
+ return nullptr;
+ }
+
+ T* last = lookup(aValue);
+ splay(last);
+ return Comparator::compare(aValue, *last) == 0 ? last : nullptr;
+ }
+
+ void insert(T* aValue) {
+ MOZ_ASSERT(!find(*aValue), "Duplicate elements are not allowed.");
+
+ if (!mRoot) {
+ mRoot = aValue;
+ return;
+ }
+ T* last = lookup(*aValue);
+ int cmp = Comparator::compare(*aValue, *last);
+
+ finishInsertion(last, cmp, aValue);
+ }
+
+ T* findOrInsert(const T& aValue);
+
+ T* remove(const T& aValue) {
+ T* last = lookup(aValue);
+ MOZ_ASSERT(last, "This tree must contain the element being removed.");
+ MOZ_ASSERT(Comparator::compare(aValue, *last) == 0);
+
+ // Splay the tree so that the item to remove is the root.
+ splay(last);
+ MOZ_ASSERT(last == mRoot);
+
+ // Find another node which can be swapped in for the root: either the
+ // rightmost child of the root's left, or the leftmost child of the
+ // root's right.
+ T* swap;
+ T* swapChild;
+ if (mRoot->mLeft) {
+ swap = mRoot->mLeft;
+ while (swap->mRight) {
+ swap = swap->mRight;
+ }
+ swapChild = swap->mLeft;
+ } else if (mRoot->mRight) {
+ swap = mRoot->mRight;
+ while (swap->mLeft) {
+ swap = swap->mLeft;
+ }
+ swapChild = swap->mRight;
+ } else {
+ T* result = mRoot;
+ mRoot = nullptr;
+ return result;
+ }
+
+ // The selected node has at most one child, in swapChild. Detach it
+ // from the subtree by replacing it with that child.
+ if (swap == swap->mParent->mLeft) {
+ swap->mParent->mLeft = swapChild;
+ } else {
+ swap->mParent->mRight = swapChild;
+ }
+ if (swapChild) {
+ swapChild->mParent = swap->mParent;
+ }
+
+ // Make the selected node the new root.
+ mRoot = swap;
+ mRoot->mParent = nullptr;
+ mRoot->mLeft = last->mLeft;
+ mRoot->mRight = last->mRight;
+ if (mRoot->mLeft) {
+ mRoot->mLeft->mParent = mRoot;
+ }
+ if (mRoot->mRight) {
+ mRoot->mRight->mParent = mRoot;
+ }
+
+ last->mLeft = nullptr;
+ last->mRight = nullptr;
+ return last;
+ }
+
+ T* removeMin() {
+ MOZ_ASSERT(mRoot, "No min to remove!");
+
+ T* min = mRoot;
+ while (min->mLeft) {
+ min = min->mLeft;
+ }
+ return remove(*min);
+ }
+
+ // For testing purposes only.
+ void checkCoherency() { checkCoherency(mRoot, nullptr); }
+
+ private:
+ /**
+   * Returns the node in this tree that compares equal to |aValue|, or a node
+   * just greater or just less than |aValue| if there is no such node.
+ */
+ T* lookup(const T& aValue) {
+ MOZ_ASSERT(!empty());
+
+ T* node = mRoot;
+ T* parent;
+ do {
+ parent = node;
+ int c = Comparator::compare(aValue, *node);
+ if (c == 0) {
+ return node;
+ } else if (c < 0) {
+ node = node->mLeft;
+ } else {
+ node = node->mRight;
+ }
+ } while (node);
+ return parent;
+ }
+
+ void finishInsertion(T* aLast, int32_t aCmp, T* aNew) {
+ MOZ_ASSERT(aCmp, "Nodes shouldn't be equal!");
+
+ T** parentPointer = (aCmp < 0) ? &aLast->mLeft : &aLast->mRight;
+ MOZ_ASSERT(!*parentPointer);
+ *parentPointer = aNew;
+ aNew->mParent = aLast;
+
+ splay(aNew);
+ }
+
+ /**
+ * Rotate the tree until |node| is at the root of the tree. Performing
+ * the rotations in this fashion preserves the amortized balancing of
+ * the tree.
+ */
+ void splay(T* aNode) {
+ MOZ_ASSERT(aNode);
+
+ while (aNode != mRoot) {
+ T* parent = aNode->mParent;
+ if (parent == mRoot) {
+ // Zig rotation.
+ rotate(aNode);
+ MOZ_ASSERT(aNode == mRoot);
+ return;
+ }
+ T* grandparent = parent->mParent;
+ if ((parent->mLeft == aNode) == (grandparent->mLeft == parent)) {
+ // Zig-zig rotation.
+ rotate(parent);
+ rotate(aNode);
+ } else {
+ // Zig-zag rotation.
+ rotate(aNode);
+ rotate(aNode);
+ }
+ }
+ }
+
+ void rotate(T* aNode) {
+ // Rearrange nodes so that aNode becomes the parent of its current
+ // parent, while preserving the sortedness of the tree.
+ T* parent = aNode->mParent;
+ if (parent->mLeft == aNode) {
+ // x y
+ // y c ==> a x
+ // a b b c
+ parent->mLeft = aNode->mRight;
+ if (aNode->mRight) {
+ aNode->mRight->mParent = parent;
+ }
+ aNode->mRight = parent;
+ } else {
+ MOZ_ASSERT(parent->mRight == aNode);
+ // x y
+ // a y ==> x c
+ // b c a b
+ parent->mRight = aNode->mLeft;
+ if (aNode->mLeft) {
+ aNode->mLeft->mParent = parent;
+ }
+ aNode->mLeft = parent;
+ }
+ aNode->mParent = parent->mParent;
+ parent->mParent = aNode;
+ if (T* grandparent = aNode->mParent) {
+ if (grandparent->mLeft == parent) {
+ grandparent->mLeft = aNode;
+ } else {
+ grandparent->mRight = aNode;
+ }
+ } else {
+ mRoot = aNode;
+ }
+ }
+
+ T* checkCoherency(T* aNode, T* aMinimum) {
+ if (mRoot) {
+ MOZ_RELEASE_ASSERT(!mRoot->mParent);
+ }
+ if (!aNode) {
+ MOZ_RELEASE_ASSERT(!mRoot);
+ return nullptr;
+ }
+ if (!aNode->mParent) {
+ MOZ_RELEASE_ASSERT(aNode == mRoot);
+ }
+ if (aMinimum) {
+ MOZ_RELEASE_ASSERT(Comparator::compare(*aMinimum, *aNode) < 0);
+ }
+ if (aNode->mLeft) {
+ MOZ_RELEASE_ASSERT(aNode->mLeft->mParent == aNode);
+ T* leftMaximum = checkCoherency(aNode->mLeft, aMinimum);
+ MOZ_RELEASE_ASSERT(Comparator::compare(*leftMaximum, *aNode) < 0);
+ }
+ if (aNode->mRight) {
+ MOZ_RELEASE_ASSERT(aNode->mRight->mParent == aNode);
+ return checkCoherency(aNode->mRight, aNode);
+ }
+ return aNode;
+ }
+
+ SplayTree(const SplayTree&) = delete;
+ void operator=(const SplayTree&) = delete;
+};
+
+template <typename T, class Comparator>
+T* SplayTree<T, Comparator>::findOrInsert(const T& aValue) {
+ if (!mRoot) {
+ mRoot = new T(aValue);
+ return mRoot;
+ }
+
+ T* last = lookup(aValue);
+ int cmp = Comparator::compare(aValue, *last);
+ if (!cmp) {
+ return last;
+ }
+
+ T* t = new T(aValue);
+ finishInsertion(last, cmp, t);
+ return t;
+}
+
+} /* namespace mozilla */
+
+#endif /* mozilla_SplayTree_h */
diff --git a/mfbt/StaticAnalysisFunctions.h b/mfbt/StaticAnalysisFunctions.h
new file mode 100644
index 0000000000..b073055c05
--- /dev/null
+++ b/mfbt/StaticAnalysisFunctions.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_StaticAnalysisFunctions_h
+#define mozilla_StaticAnalysisFunctions_h
+
+#ifndef __cplusplus
+# ifndef bool
+# include <stdbool.h>
+# endif
+# define MOZ_CONSTEXPR
+#else // __cplusplus
+# include "mozilla/Attributes.h"
+# define MOZ_CONSTEXPR constexpr
+#endif
+/*
+ * Functions that are used as markers in Gecko code for static analysis. Their
+ * purpose is to have different AST nodes generated during compile time and to
+ * match them based on different checkers implemented in build/clang-plugin
+ */
+
+#ifdef MOZ_CLANG_PLUGIN
+
+# ifdef __cplusplus
+/**
+ * MOZ_KnownLive - used to opt an argument out of the CanRunScript checker so
+ * that we don't check it if it is a strong ref.
+ *
+ * Example:
+ * canRunScript(MOZ_KnownLive(rawPointer));
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T* MOZ_KnownLive(T* ptr) {
+ return ptr;
+}
+
+/**
+ * Ditto, but for references.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T& MOZ_KnownLive(T& ref) {
+ return ref;
+}
+
+# endif
+
+/**
+ * MOZ_AssertAssignmentTest - used in MOZ_ASSERT in order to test the possible
+ * presence of assignment instead of logical comparisons.
+ *
+ * Example:
+ * MOZ_ASSERT(retVal = true);
+ */
+static MOZ_ALWAYS_INLINE MOZ_CONSTEXPR bool MOZ_AssertAssignmentTest(
+ bool exprResult) {
+ return exprResult;
+}
+
+# define MOZ_CHECK_ASSERT_ASSIGNMENT(expr) MOZ_AssertAssignmentTest(!!(expr))
+
+#else
+
+# define MOZ_CHECK_ASSERT_ASSIGNMENT(expr) (!!(expr))
+# define MOZ_KnownLive(expr) (expr)
+
+#endif /* MOZ_CLANG_PLUGIN */
+#endif /* StaticAnalysisFunctions_h */
diff --git a/mfbt/TaggedAnonymousMemory.cpp b/mfbt/TaggedAnonymousMemory.cpp
new file mode 100644
index 0000000000..382b9cef7a
--- /dev/null
+++ b/mfbt/TaggedAnonymousMemory.cpp
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef XP_LINUX
+
+# include "mozilla/TaggedAnonymousMemory.h"
+
+# include <sys/types.h>
+# include <sys/mman.h>
+# include <sys/prctl.h>
+# include <sys/syscall.h>
+# include <unistd.h>
+
+# include "mozilla/Assertions.h"
+
+// These constants are copied from <sys/prctl.h>, because the headers
+// used for building may not have them even though the running kernel
+// supports them.
+# ifndef PR_SET_VMA
+# define PR_SET_VMA 0x53564d41
+# endif
+# ifndef PR_SET_VMA_ANON_NAME
+# define PR_SET_VMA_ANON_NAME 0
+# endif
+
+namespace mozilla {
+
+// Returns 0 for success and -1 (with errno) for error.
+static int TagAnonymousMemoryAligned(const void* aPtr, size_t aLength,
+ const char* aTag) {
+ return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+ reinterpret_cast<unsigned long>(aPtr), aLength,
+ reinterpret_cast<unsigned long>(aTag));
+}
+
+// On some architectures, it's possible for the page size to be larger
+// than the PAGE_SIZE we were compiled with. This computes the
+// equivalent of PAGE_MASK.
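+// For example, with a 4 KiB page size the computed mask is ~uintptr_t(0xFFF).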
+static uintptr_t GetPageMask() {
+ static uintptr_t mask = 0;
+
+ if (mask == 0) {
+ uintptr_t pageSize = sysconf(_SC_PAGESIZE);
+ mask = ~(pageSize - 1);
+ MOZ_ASSERT((pageSize & (pageSize - 1)) == 0,
+ "Page size must be a power of 2!");
+ }
+ return mask;
+}
+
+} // namespace mozilla
+
+void MozTagAnonymousMemory(const void* aPtr, size_t aLength, const char* aTag) {
+ // The kernel will round up the end of the range to the next page
+ // boundary if it's not aligned (comments indicate this behavior is
+ // based on that of madvise), but it will reject the request if the
+ // start is not aligned. We therefore round down the start address
+ // and adjust the length accordingly.
+ uintptr_t addr = reinterpret_cast<uintptr_t>(aPtr);
+ uintptr_t end = addr + aLength;
+ uintptr_t addrRounded = addr & mozilla::GetPageMask();
+ const void* ptrRounded = reinterpret_cast<const void*>(addrRounded);
+
+ // Ignore the return value. TagAnonymousMemoryAligned will harmlessly fail on
+ // kernels without CONFIG_ANON_VMA_NAME.
+ mozilla::TagAnonymousMemoryAligned(ptrRounded, end - addrRounded, aTag);
+}
+
+void* MozTaggedAnonymousMmap(void* aAddr, size_t aLength, int aProt, int aFlags,
+ int aFd, off_t aOffset, const char* aTag) {
+ void* mapped = mmap(aAddr, aLength, aProt, aFlags, aFd, aOffset);
+ if ((aFlags & MAP_ANONYMOUS) == MAP_ANONYMOUS && mapped != MAP_FAILED) {
+ // Ignore the return value. TagAnonymousMemoryAligned will harmlessly fail
+ // on kernels without CONFIG_ANON_VMA_NAME.
+ mozilla::TagAnonymousMemoryAligned(mapped, aLength, aTag);
+ }
+ return mapped;
+}
+
+#endif // XP_LINUX
diff --git a/mfbt/TaggedAnonymousMemory.h b/mfbt/TaggedAnonymousMemory.h
new file mode 100644
index 0000000000..7ca5e60c9d
--- /dev/null
+++ b/mfbt/TaggedAnonymousMemory.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Linux kernels since 5.17 have a feature for assigning names to
+// ranges of anonymous memory (i.e., memory that doesn't have a "name"
+// in the form of an underlying mapped file). These names are
+// reported in /proc/<pid>/smaps alongside system-level memory usage
+// information such as Proportional Set Size (memory usage adjusted
+// for sharing between processes), which allows reporting this
+// information at a finer granularity than would otherwise be possible
+// (e.g., separating malloc() heap from JS heap).
+//
+// Existing memory can be tagged with MozTagAnonymousMemory(); it will
+// tag the range of complete pages containing the given interval, so
+// the results may be inexact if the range isn't page-aligned.
+// MozTaggedAnonymousMmap() can be used like mmap() with an extra
+// parameter, and will tag the returned memory if the mapping was
+// successful (and if it was in fact anonymous).
+//
+// NOTE: The pointer given as the "tag" argument MUST remain valid as
+// long as the mapping exists. The referenced string is read when
+// /proc/<pid>/smaps or /proc/<pid>/maps is read, not when the tag is
+// established, so freeing it or changing its contents will have
+// unexpected results. Using a static string is probably best.
+//
+// Also note that this header can be used by both C and C++ code.
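+//
+// A brief usage sketch (the tag string and length are illustrative):
+//
+//   void* p = MozTaggedAnonymousMmap(nullptr, 4096, PROT_READ | PROT_WRITE,
+//                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0,
+//                                    "example-region");
+//   MozTagAnonymousMemory(p, 4096, "example-region");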
+
+#ifndef mozilla_TaggedAnonymousMemory_h
+#define mozilla_TaggedAnonymousMemory_h
+
+#ifndef XP_WIN
+
+# ifdef __wasi__
+# include <stdlib.h>
+# else
+# include <sys/types.h>
+# include <sys/mman.h>
+# endif // __wasi__
+
+# include "mozilla/Types.h"
+
+# ifdef XP_LINUX
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+MFBT_API void MozTagAnonymousMemory(const void* aPtr, size_t aLength,
+ const char* aTag);
+
+MFBT_API void* MozTaggedAnonymousMmap(void* aAddr, size_t aLength, int aProt,
+ int aFlags, int aFd, off_t aOffset,
+ const char* aTag);
+
+# ifdef __cplusplus
+} // extern "C"
+# endif
+
+# else // XP_LINUX
+
+static inline void MozTagAnonymousMemory(const void* aPtr, size_t aLength,
+ const char* aTag) {}
+
+static inline void* MozTaggedAnonymousMmap(void* aAddr, size_t aLength,
+ int aProt, int aFlags, int aFd,
+ off_t aOffset, const char* aTag) {
+# ifdef __wasi__
+ MOZ_CRASH("We don't use this memory for WASI right now.");
+ return nullptr;
+# else
+ return mmap(aAddr, aLength, aProt, aFlags, aFd, aOffset);
+# endif
+}
+
+# endif // XP_LINUX
+
+#endif // !XP_WIN
+
+#endif // mozilla_TaggedAnonymousMemory_h
diff --git a/mfbt/Tainting.h b/mfbt/Tainting.h
new file mode 100644
index 0000000000..2df6176f89
--- /dev/null
+++ b/mfbt/Tainting.h
@@ -0,0 +1,348 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Creates a Tainted<> wrapper to enforce data validation before use.
+ */
+
+#ifndef mozilla_Tainting_h
+#define mozilla_Tainting_h
+
+#include <utility>
+#include "mozilla/MacroArgs.h"
+
+namespace mozilla {
+
+template <typename T>
+class Tainted;
+
+namespace ipc {
+template <typename>
+struct IPDLParamTraits;
+}
+
+/*
+ * The Tainted<> class allows data to be wrapped and considered 'tainted', which
+ * requires explicit validation of the data before it can be used for
+ * comparisons or in arithmetic.
+ *
+ * Tainted<> objects are intended to be passed down callstacks (still in
+ * Tainted<> form) to whatever location is appropriate to validate (or complete
+ * validation) of the data before finally unwrapping it.
+ *
+ * Tainting data ensures that validation actually occurs and is not forgotten,
+ * increases consideration of validation so it can be as strict as possible,
+ * and makes it clear from a code point of view where and what validation is
+ * performed.
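+ *
+ * An illustrative flow (type and method names here are hypothetical):
+ *
+ *   void SomeParentActor::RecvMessage(Tainted<int> aIndex) {
+ *     int index = MOZ_VALIDATE_AND_GET(aIndex, aIndex >= 0 && aIndex < 16);
+ *     // ... index is now safe to use ...
+ *   }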
+ */
+
+// ====================================================================
+// ====================================================================
+/*
+ * Simple Tainted<foo> class
+ *
+ * The class should not support any dereference or comparison operators, and
+ * should instead force all access to the member variable through the
+ * MOZ_VALIDATE macros.
+ *
+ * While the Coerce() function is publicly accessible on the class, it should
+ * only be used by the MOZ_VALIDATE macros, and static analysis will prevent
+ * it from being used elsewhere.
+ */
+
+template <typename T>
+class Tainted {
+ private:
+ T mValue;
+
+ public:
+ explicit Tainted() = default;
+
+ template <typename U>
+ explicit Tainted(U&& aValue) : mValue(std::forward<U>(aValue)) {}
+
+ T& Coerce() { return this->mValue; }
+ const T& Coerce() const { return this->mValue; }
+
+ friend struct mozilla::ipc::IPDLParamTraits<Tainted<T>>;
+};
+
+// ====================================================================
+// ====================================================================
+/*
+ * This section contains obscure, non-user-facing C++ to support
+ * variable-argument macros.
+ */
+#define MOZ_TAINT_GLUE(a, b) a b
+
+// We use the same variable name in the nested scope, shadowing the outer
+// scope - this allows the user to write the same variable name in the
+// macro's condition without using a magic name like 'value'.
+//
+// We explicitly do not mark it MOZ_MAYBE_UNUSED because the condition
+// should always make use of tainted_value; not doing so should cause an
+// unused variable warning. That would only happen when we are bypassing
+// validation.
+//
+// The separate bool variable is required to allow condition to be a lambda
+// expression; lambdas cannot be placed directly inside ASSERTs.
+#define MOZ_VALIDATE_AND_GET_HELPER3(tainted_value, condition, \
+ assertionstring) \
+ [&]() { \
+ auto& tmp = tainted_value.Coerce(); \
+ auto& tainted_value = tmp; \
+ bool test = (condition); \
+ MOZ_RELEASE_ASSERT(test, assertionstring); \
+ return tmp; \
+ }()
+
+#define MOZ_VALIDATE_AND_GET_HELPER2(tainted_value, condition) \
+ MOZ_VALIDATE_AND_GET_HELPER3(tainted_value, condition, \
+ "MOZ_VALIDATE_AND_GET(" #tainted_value \
+ ", " #condition ") has failed")
+
+// ====================================================================
+// ====================================================================
+/*
+ * Macros to validate and un-taint a value.
+ *
+ * All macros accept the tainted variable as the first argument, and a
+ * condition as the second argument. If the condition is satisfied,
+ * then the value is considered valid.
+ *
+ * This file contains documentation and examples for the functions;
+ * more usage examples are present in mfbt/tests/gtest/TestTainting.cpp
+ */
+
+/*
+ * MOZ_VALIDATE_AND_GET is the bread-and-butter validation function.
+ * It confirms the value abides by the condition specified and then
+ * returns the untainted value.
+ *
+ * If the condition is not satisfied, we RELEASE_ASSERT.
+ *
+ * Examples:
+ *
+ * int bar;
+ * Tainted<int> foo;
+ * int comparisonVariable = 20;
+ *
+ * bar = MOZ_VALIDATE_AND_GET(foo, foo < 20);
+ * bar = MOZ_VALIDATE_AND_GET(foo, foo < comparisonVariable);
+ *
+ * Note that while the comparison of foo < 20 works inside the macro,
+ * doing so outside the macro (such as with `if (foo < 20)` will
+ * (intentionally) fail during compilation. We do this to ensure that
+ * all validation logic is self-contained inside the macro.
+ *
+ *
+ * The macro also supports supplying a custom string to the
+ * MOZ_RELEASE_ASSERT. This is strongly encouraged because it
+ * provides the author the opportunity to explain, by way of an
+ * English comment, what is happening.
+ *
+ * Good things to include in the comment:
+ * - What the validation is doing or what it means
+ * - The impact that could occur if validation was bypassed.
+ * e.g. 'This value is used to allocate memory, so sane values
+ *      should be enforced.'
+ * - How validation could change in the future to be more or less
+ * restrictive.
+ *
+ * Example:
+ *
+ * bar = MOZ_VALIDATE_AND_GET(
+ * foo, foo < 20,
+ * "foo must be less than 20 because higher values represent decibel"
+ * "levels greater than a a jet engine inside your ear.");
+ *
+ *
+ * The condition can also be a lambda function if you need to
+ * define temporary variables or perform more complex validation.
+ *
+ * Square brackets represent the capture group - local variables
+ * can be specified here to capture them and use them inside the
+ * lambda. Prefacing the variable with '&' means the variable is
+ * captured by-reference. It is typically better to capture
+ * variables by reference rather than making them parameters.
+ *
+ * When using this technique:
+ * - the tainted value must be present and should be captured
+ * by reference. (You could make it a parameter if you wish, but
+ * it's more typing.)
+ * - the entire lambda function must be enclosed in parens
+ * (if you omit this, you might get errors of the form:
+ * 'use of undeclared identifier 'MOZ_VALIDATE_AND_GET_HELPER4')
+ *
+ * Example:
+ *
+ * bar = MOZ_VALIDATE_AND_GET(foo, ([&foo, &comparisonVariable]() {
+ * bool intermediateResult = externalFunction(foo);
+ * if (intermediateResult || comparisonVariable < 4) {
+ * return true;
+ * }
+ * return false;
+ * }()));
+ *
+ *
+ * You can also define a lambda external to the macro if you prefer
+ * this over a static function.
+ *
+ * This is possible, and supported, but requires a different syntax.
+ * Instead of specifying the tainted value in the capture group [&foo],
+ * it must be provided as an argument of the unwrapped type.
+ * (The argument name can be anything you choose of course.)
+ *
+ * Example:
+ *
+ * auto lambda1 = [](int foo) {
+ * bool intermediateResult = externalFunction(foo);
+ * if (intermediateResult) {
+ * return true;
+ * }
+ * return false;
+ * };
+ * bar = MOZ_VALIDATE_AND_GET(foo, lambda1(foo));
+ *
+ *
+ * Arguments:
+ * tainted_value - the name of the Tainted<> variable
+ * condition - a comparison involving the tainted value
+ * assertionstring [optional] - A string to include in the RELEASE_ASSERT
+ */
+#define MOZ_VALIDATE_AND_GET(...) \
+ MOZ_TAINT_GLUE(MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_VALIDATE_AND_GET_HELPER, \
+ __VA_ARGS__), \
+ (__VA_ARGS__))
+
+/*
+ * MOZ_IS_VALID is the other most common use; it allows one to test
+ * validity without asserting, for use in an if/else statement.
+ *
+ * It supports the same lambda behavior, but does not support a
+ * comment explaining the validation.
+ *
+ * Example:
+ *
+ * if (MOZ_IS_VALID(foo, foo < 20)) {
+ * ...
+ * }
+ *
+ *
+ * Arguments:
+ * tainted_value - the name of the Tainted<> variable
+ * condition - a comparison involving the tainted value
+ */
+#define MOZ_IS_VALID(tainted_value, condition) \
+ [&]() { \
+ auto& tmp = tainted_value.Coerce(); \
+ auto& tainted_value = tmp; \
+ return (condition); \
+ }()
+
+/*
+ * MOZ_VALIDATE_OR is a shortcut that tests validity and, if invalid,
+ * returns an alternate value.
+ *
+ * Note that the following will not work:
+ * MOZ_RELEASE_ASSERT(MOZ_VALIDATE_OR(foo, foo < 20, 100) == EXPECTED_VALUE);
+ * MOZ_ASSERT(MOZ_VALIDATE_OR(foo, foo < 20, 100) == EXPECTED_VALUE);
+ * This is because internally, many MOZ_VALIDATE macros use lambda
+ * expressions (for variable shadowing purposes), and lambdas cannot be
+ * expressions in (potentially) unevaluated operands.
+ *
+ * Example:
+ *
+ * bar = MOZ_VALIDATE_OR(foo, foo < 20, 100);
+ *
+ *
+ * Arguments:
+ * tainted_value - the name of the Tainted<> variable
+ * condition - a comparison involving the tainted value
+ * alternate_value - the value to use if the condition is false
+ */
+#define MOZ_VALIDATE_OR(tainted_value, condition, alternate_value) \
+ (MOZ_IS_VALID(tainted_value, condition) ? tainted_value.Coerce() \
+ : alternate_value)
+
+/*
+ * MOZ_FIND_AND_VALIDATE is for testing validity of a tainted value by comparing
+ * it against a list of known safe values. Returns a pointer to the matched
+ * safe value or nullptr if none was found.
+ *
+ * Note that for the comparison the macro will loop over the list and that the
+ * current element being tested against is provided as list_item.
+ *
+ * Example:
+ *
+ * Tainted<int> aId;
+ *    nsTArray<Person> list;
+ * const Person* foo = MOZ_FIND_AND_VALIDATE(aId, list_item.id == aId, list);
+ *
+ * // Typically you would do nothing if invalid data is passed:
+ * if (MOZ_UNLIKELY(!foo)) {
+ * return;
+ * }
+ *
+ * // Or alternately you can crash on invalid data
+ * MOZ_RELEASE_ASSERT(foo != nullptr, "Invalid person id sent from content
+ * process.");
+ *
+ * Arguments:
+ * tainted_value - the name of the Tainted<> variable
+ * condition - a condition involving the tainted value and list_item
+ * validation_list - a list of known safe values to compare against
+ */
+#define MOZ_FIND_AND_VALIDATE(tainted_value, condition, validation_list) \
+ [&]() { \
+ auto& tmp = tainted_value.Coerce(); \
+ auto& tainted_value = tmp; \
+ const auto macro_find_it = \
+ std::find_if(validation_list.cbegin(), validation_list.cend(), \
+ [&](const auto& list_item) { return condition; }); \
+ return macro_find_it != validation_list.cend() ? &*macro_find_it \
+ : nullptr; \
+ }()
+
+/*
+ * MOZ_NO_VALIDATE allows unsafe removal of the Taint wrapper.
+ * A justification string is required to explain why this is acceptable.
+ *
+ * Example:
+ *
+ * bar = MOZ_NO_VALIDATE(
+ * foo,
+ * "Value is used to match against a dictionary key in the parent."
+ * "If there's no key present, there won't be a match."
+ * "There is no risk of grabbing a cross-origin value from the dictionary,"
+ * "because the IPC actor is instatiated per-content-process and the "
+ * "dictionary is not shared between actors.");
+ *
+ *
+ * Arguments:
+ * tainted_value - the name of the Tainted<> variable
+ * justification - a human-understandable string explaining why it is
+ * permissible to omit validation
+ */
+#define MOZ_NO_VALIDATE(tainted_value, justification) \
+ [&tainted_value] { \
+ static_assert(sizeof(justification) > 3, \
+ "Must provide a justification string."); \
+ return tainted_value.Coerce(); \
+ }()
+
+/*
+ TODO:
+
+ - Figure out if there are helpers that would be useful for Strings and
+ Principals
+ - Write static analysis to enforce invariants:
+ - No use of .Coerce() except in the header file.
+ - No constant passed to the condition of MOZ_VALIDATE_AND_GET
+ */
+
+} // namespace mozilla
+
+#endif /* mozilla_Tainting_h */
diff --git a/mfbt/TemplateLib.h b/mfbt/TemplateLib.h
new file mode 100644
index 0000000000..8c620390b3
--- /dev/null
+++ b/mfbt/TemplateLib.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Reusable template meta-functions on types and compile-time values. Meta-
+ * functions are placed inside the 'tl' namespace to avoid conflict with non-
+ * meta functions of the same name (e.g., mozilla::tl::FloorLog2 vs.
+ * mozilla::FloorLog2).
+ *
+ * When constexpr support becomes universal, we should probably use that instead
+ * of some of these templates, for simplicity.
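+ *
+ * A few sample values, derivable from the definitions in this header:
+ *
+ *   static_assert(mozilla::tl::FloorLog2<8>::value == 3);
+ *   static_assert(mozilla::tl::CeilingLog2<5>::value == 3);
+ *   static_assert(mozilla::tl::RoundUpPow2<5>::value == 8);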
+ */
+
+#ifndef mozilla_TemplateLib_h
+#define mozilla_TemplateLib_h
+
+#include <limits.h>
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace tl {
+
+/** Compute min/max. */
+template <size_t Size, size_t... Rest>
+struct Min {
+ static constexpr size_t value =
+ Size < Min<Rest...>::value ? Size : Min<Rest...>::value;
+};
+
+template <size_t Size>
+struct Min<Size> {
+ static constexpr size_t value = Size;
+};
+
+template <size_t Size, size_t... Rest>
+struct Max {
+ static constexpr size_t value =
+ Size > Max<Rest...>::value ? Size : Max<Rest...>::value;
+};
+
+template <size_t Size>
+struct Max<Size> {
+ static constexpr size_t value = Size;
+};
+
+/** Compute floor(log2(i)). */
+template <size_t I>
+struct FloorLog2 {
+ static const size_t value = 1 + FloorLog2<I / 2>::value;
+};
+template <>
+struct FloorLog2<0> { /* Error */
+};
+template <>
+struct FloorLog2<1> {
+ static const size_t value = 0;
+};
+
+/** Compute ceiling(log2(i)). */
+template <size_t I>
+struct CeilingLog2 {
+ static const size_t value = FloorLog2<2 * I - 1>::value;
+};
+
+/** Round up to the nearest power of 2. */
+template <size_t I>
+struct RoundUpPow2 {
+ static const size_t value = size_t(1) << CeilingLog2<I>::value;
+};
+template <>
+struct RoundUpPow2<0> {
+ static const size_t value = 1;
+};
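+
+// For orientation, a few sample values (illustrative only):
+//
+//   FloorLog2<8>::value == 3     FloorLog2<9>::value == 3
+//   CeilingLog2<8>::value == 3   CeilingLog2<9>::value == 4
+//   RoundUpPow2<9>::value == 16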
+
+/** Compute the number of bits in the given unsigned type. */
+template <typename T>
+struct BitSize {
+ static const size_t value = sizeof(T) * CHAR_BIT;
+};
+
+/**
+ * Produce an N-bit mask, where N <= BitSize<size_t>::value. Handle the
+ * language-undefined edge case when N = BitSize<size_t>::value.
+ */
+template <size_t N>
+struct NBitMask {
+ // Assert the precondition. On success this evaluates to 0. Otherwise it
+ // triggers divide-by-zero at compile time: a guaranteed compile error in
+  // C++11, and usually one in C++98. Add this value to |value| to ensure
+  // it is actually computed.
+ static const size_t checkPrecondition =
+ 0 / size_t(N < BitSize<size_t>::value);
+ static const size_t value = (size_t(1) << N) - 1 + checkPrecondition;
+};
+template <>
+struct NBitMask<BitSize<size_t>::value> {
+ static const size_t value = size_t(-1);
+};
+
+/**
+ * For the unsigned integral type size_t, compute a mask M for N such that
+ * for all X, !(X & M) implies X * N will not overflow (w.r.t size_t)
+ */
+template <size_t N>
+struct MulOverflowMask {
+ static const size_t value =
+ ~NBitMask<BitSize<size_t>::value - CeilingLog2<N>::value>::value;
+};
+template <>
+struct MulOverflowMask<0> { /* Error */
+};
+template <>
+struct MulOverflowMask<1> {
+ static const size_t value = 0;
+};
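+
+// As a concrete illustration (assuming a 64-bit size_t): NBitMask<4>::value
+// is 0xf, and MulOverflowMask<8>::value has only the top three bits set, so
+// !(x & MulOverflowMask<8>::value) guarantees that x * 8 cannot overflow.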
+
+} // namespace tl
+
+} // namespace mozilla
+
+#endif /* mozilla_TemplateLib_h */
diff --git a/mfbt/TextUtils.h b/mfbt/TextUtils.h
new file mode 100644
index 0000000000..ec497c52ee
--- /dev/null
+++ b/mfbt/TextUtils.h
@@ -0,0 +1,295 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Character/text operations. */
+
+#ifndef mozilla_TextUtils_h
+#define mozilla_TextUtils_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Latin1.h"
+
+#ifdef MOZ_HAS_JSRUST
+// Can't include mozilla/Encoding.h here.
+extern "C" {
+// Declared as uint8_t instead of char to match declaration in another header.
+size_t encoding_ascii_valid_up_to(uint8_t const* buffer, size_t buffer_len);
+}
+#endif
+
+namespace mozilla {
+
+// See Utf8.h for IsUtf8() and conversions between UTF-8 and UTF-16.
+// See Latin1.h for testing UTF-16 and UTF-8 for Latin1ness and
+// for conversions to and from Latin1.
+
+// The overloads below are deliberately not templated, so that implicit
+// conversions to Span work as expected for the Span overloads.
+
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(unsigned char aChar) { return aChar < 0x80; }
+
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(signed char aChar) {
+ return IsAscii(static_cast<unsigned char>(aChar));
+}
+
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(char aChar) {
+ return IsAscii(static_cast<unsigned char>(aChar));
+}
+
+#ifdef __cpp_char8_t
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(char8_t aChar) {
+ return IsAscii(static_cast<unsigned char>(aChar));
+}
+#endif
+
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(char16_t aChar) { return aChar < 0x80; }
+
+/** Returns true iff |aChar| is ASCII, i.e. in the range [0, 0x80). */
+inline constexpr bool IsAscii(char32_t aChar) { return aChar < 0x80; }
+
+/**
+ * Returns |true| iff |aString| contains only ASCII characters, that is,
+ * characters in the range [0x00, 0x80).
+ *
+ * @param aString an 8-bit wide string to scan
+ */
+inline bool IsAscii(mozilla::Span<const char> aString) {
+#if MOZ_HAS_JSRUST()
+ size_t length = aString.Length();
+ const char* ptr = aString.Elements();
+  // For short strings, avoid the function call, since the SIMD
+ // code won't have a chance to kick in anyway.
+ if (length < mozilla::detail::kShortStringLimitForInlinePaths) {
+ const uint8_t* uptr = reinterpret_cast<const uint8_t*>(ptr);
+ uint8_t accu = 0;
+ for (size_t i = 0; i < length; i++) {
+ accu |= uptr[i];
+ }
+ return accu < 0x80;
+ }
+ return encoding_mem_is_ascii(ptr, length);
+#else
+ for (char c : aString) {
+ if (!IsAscii(c)) {
+ return false;
+ }
+ }
+ return true;
+#endif
+}
+
+/**
+ * Returns |true| iff |aString| contains only ASCII characters, that is,
+ * characters in the range [0x00, 0x80).
+ *
+ * @param aString a 16-bit wide string to scan
+ */
+inline bool IsAscii(mozilla::Span<const char16_t> aString) {
+#if MOZ_HAS_JSRUST()
+ size_t length = aString.Length();
+ const char16_t* ptr = aString.Elements();
+ // For short strings, calling into Rust is a pessimization, and the SIMD
+ // code won't have a chance to kick in anyway.
+ // 16 is a bit larger than logically necessary for this function alone,
+ // but it's important that the limit here matches the limit used in
+ // LossyConvertUtf16toLatin1!
+ if (length < mozilla::detail::kShortStringLimitForInlinePaths) {
+ char16_t accu = 0;
+ for (size_t i = 0; i < length; i++) {
+ accu |= ptr[i];
+ }
+ return accu < 0x80;
+ }
+ return encoding_mem_is_basic_latin(ptr, length);
+#else
+ for (char16_t c : aString) {
+ if (!IsAscii(c)) {
+ return false;
+ }
+ }
+ return true;
+#endif
+}
+
+/**
+ * Returns true iff every character in the null-terminated string pointed to by
+ * |aChar| is ASCII, i.e. in the range [0, 0x80).
+ */
+template <typename Char>
+constexpr bool IsAsciiNullTerminated(const Char* aChar) {
+ while (Char c = *aChar++) {
+ if (!IsAscii(c)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#if MOZ_HAS_JSRUST()
+/**
+ * Returns the index of the first non-ASCII byte or
+ * the length of the string if there are none.
+ */
+inline size_t AsciiValidUpTo(mozilla::Span<const char> aString) {
+ return encoding_ascii_valid_up_to(
+ reinterpret_cast<const uint8_t*>(aString.Elements()), aString.Length());
+}
+
+/**
+ * Returns the index of the first unpaired surrogate or
+ * the length of the string if there are none.
+ */
+inline size_t Utf16ValidUpTo(mozilla::Span<const char16_t> aString) {
+ return encoding_mem_utf16_valid_up_to(aString.Elements(), aString.Length());
+}
+
+/**
+ * Replaces unpaired surrogates with U+FFFD in the argument.
+ *
+ * Note: If you have an nsAString, use EnsureUTF16Validity() from
+ * nsReadableUtils.h instead to avoid unsharing a valid shared
+ * string.
+ */
+inline void EnsureUtf16ValiditySpan(mozilla::Span<char16_t> aString) {
+ encoding_mem_ensure_utf16_validity(aString.Elements(), aString.Length());
+}
+
+/**
+ * Convert ASCII to UTF-16. In debug builds, assert that the input is
+ * ASCII.
+ *
+ * The length of aDest must not be less than the length of aSource.
+ */
+inline void ConvertAsciitoUtf16(mozilla::Span<const char> aSource,
+ mozilla::Span<char16_t> aDest) {
+ MOZ_ASSERT(IsAscii(aSource));
+ ConvertLatin1toUtf16(aSource, aDest);
+}
+
+#endif // MOZ_HAS_JSRUST
+
+/**
+ * Returns true iff |aChar| matches ASCII whitespace.
+ *
+ * This function is intended to match the Infra standard
+ * (https://infra.spec.whatwg.org/#ascii-whitespace)
+ */
+template <typename Char>
+constexpr bool IsAsciiWhitespace(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return uc == 0x9 || uc == 0xA || uc == 0xC || uc == 0xD || uc == 0x20;
+}
+
+/**
+ * Returns true iff |aChar| matches [a-z].
+ *
+ * This function is basically what you thought islower was, except its behavior
+ * doesn't depend on the user's current locale.
+ */
+template <typename Char>
+constexpr bool IsAsciiLowercaseAlpha(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return 'a' <= uc && uc <= 'z';
+}
+
+/**
+ * Returns true iff |aChar| matches [A-Z].
+ *
+ * This function is basically what you thought isupper was, except its behavior
+ * doesn't depend on the user's current locale.
+ */
+template <typename Char>
+constexpr bool IsAsciiUppercaseAlpha(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return 'A' <= uc && uc <= 'Z';
+}
+
+/**
+ * Returns true iff |aChar| matches [a-zA-Z].
+ *
+ * This function is basically what you thought isalpha was, except its behavior
+ * doesn't depend on the user's current locale.
+ */
+template <typename Char>
+constexpr bool IsAsciiAlpha(Char aChar) {
+ return IsAsciiLowercaseAlpha(aChar) || IsAsciiUppercaseAlpha(aChar);
+}
+
+/**
+ * Returns true iff |aChar| matches [0-9].
+ *
+ * This function is basically what you thought isdigit was, except its behavior
+ * doesn't depend on the user's current locale.
+ */
+template <typename Char>
+constexpr bool IsAsciiDigit(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return '0' <= uc && uc <= '9';
+}
+
+/**
+ * Returns true iff |aChar| matches [0-9a-fA-F].
+ *
+ * This function is basically isxdigit, but guaranteed to be only for ASCII.
+ */
+template <typename Char>
+constexpr bool IsAsciiHexDigit(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+ return ('0' <= uc && uc <= '9') || ('a' <= uc && uc <= 'f') ||
+ ('A' <= uc && uc <= 'F');
+}
+
+/**
+ * Returns true iff |aChar| matches [a-zA-Z0-9].
+ *
+ * This function is basically what you thought isalnum was, except its behavior
+ * doesn't depend on the user's current locale.
+ */
+template <typename Char>
+constexpr bool IsAsciiAlphanumeric(Char aChar) {
+ return IsAsciiDigit(aChar) || IsAsciiAlpha(aChar);
+}
+
+/**
+ * Converts an ASCII alphanumeric digit [0-9a-zA-Z] to a number as if in
+ * base 36. (This function therefore works for decimal, hexadecimal, etc.)
+ */
+template <typename Char>
+uint8_t AsciiAlphanumericToNumber(Char aChar) {
+ using UnsignedChar = typename detail::MakeUnsignedChar<Char>::Type;
+ auto uc = static_cast<UnsignedChar>(aChar);
+
+ if ('0' <= uc && uc <= '9') {
+ return uc - '0';
+ }
+
+ if ('A' <= uc && uc <= 'Z') {
+ return uc - 'A' + 10;
+ }
+
+ // Ideally this function would be constexpr, but unfortunately gcc at least as
+ // of 6.4 forbids non-constexpr function calls in unevaluated constexpr
+ // function calls. See bug 1453456. So for now, just assert and leave the
+ // entire function non-constexpr.
+ MOZ_ASSERT('a' <= uc && uc <= 'z',
+ "non-ASCII alphanumeric character can't be converted to number");
+ return uc - 'a' + 10;
+}
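+
+// For example (illustrative): AsciiAlphanumericToNumber('7') == 7, and both
+// AsciiAlphanumericToNumber('b') and AsciiAlphanumericToNumber('B') yield 11.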
+
+} // namespace mozilla
+
+#endif /* mozilla_TextUtils_h */
diff --git a/mfbt/ThreadLocal.h b/mfbt/ThreadLocal.h
new file mode 100644
index 0000000000..55c9fbcac6
--- /dev/null
+++ b/mfbt/ThreadLocal.h
@@ -0,0 +1,256 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Cross-platform lightweight thread local data wrappers. */
+
+#ifndef mozilla_ThreadLocal_h
+#define mozilla_ThreadLocal_h
+
+#if !defined(XP_WIN) && !defined(__wasi__)
+# include <pthread.h>
+#endif
+
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+namespace detail {
+
+#ifdef XP_MACOSX
+# if defined(__has_feature)
+# if __has_feature(cxx_thread_local)
+# define MACOSX_HAS_THREAD_LOCAL
+# endif
+# endif
+#endif
+
+/*
+ * Thread Local Storage helpers.
+ *
+ * Usage:
+ *
+ * Do not directly instantiate this class. Instead, use the
+ * MOZ_THREAD_LOCAL macro to declare or define instances. The macro
+ * takes a type name as its argument.
+ *
+ * Declare like this:
+ * extern MOZ_THREAD_LOCAL(int) tlsInt;
+ * Define like this:
+ * MOZ_THREAD_LOCAL(int) tlsInt;
+ * or:
+ * static MOZ_THREAD_LOCAL(int) tlsInt;
+ *
+ * Only static-storage-duration (e.g. global variables, or static class members)
+ * objects of this class should be instantiated. This class relies on
+ * zero-initialization, which is implicit for static-storage-duration objects.
+ * It doesn't have a custom default constructor, to avoid static initializers.
+ *
+ * API usage:
+ *
+ * // Create a TLS item.
+ * //
+ * // Note that init() should be invoked before the first use of set()
+ * // or get(). It is ok to call it multiple times. This must be
+ * // called in a way that avoids possible races with other threads.
+ * MOZ_THREAD_LOCAL(int) tlsKey;
+ * if (!tlsKey.init()) {
+ * // deal with the error
+ * }
+ *
+ * // Set the TLS value
+ * tlsKey.set(123);
+ *
+ * // Get the TLS value
+ * int value = tlsKey.get();
+ */
+
+// Integral types narrower than void* must be extended to avoid
+// warnings from valgrind on some platforms. This helper type
+// achieves that without penalizing the common case of ThreadLocals
+// instantiated using a pointer type.
+template <typename S>
+struct Helper {
+ typedef uintptr_t Type;
+};
+
+template <typename S>
+struct Helper<S*> {
+ typedef S* Type;
+};
+
+#if defined(XP_WIN)
+/*
+ * ThreadLocalKeyStorage uses Thread Local APIs that are declared in
+ * processthreadsapi.h. To use this class on Windows, include that file
+ * (or windows.h) before including ThreadLocal.h.
+ *
+ * TLS_OUT_OF_INDEXES is a #define that is used to detect whether
+ * an appropriate header has been included prior to this file
+ */
+# if defined(TLS_OUT_OF_INDEXES)
+/* Despite not being used for MOZ_THREAD_LOCAL, we expose an implementation for
+ * Windows for cases where it's not desirable to use thread_local */
+template <typename T>
+class ThreadLocalKeyStorage {
+ public:
+ ThreadLocalKeyStorage() : mKey(TLS_OUT_OF_INDEXES) {}
+
+ inline bool initialized() const { return mKey != TLS_OUT_OF_INDEXES; }
+
+ inline void init() { mKey = TlsAlloc(); }
+
+ inline T get() const {
+ void* h = TlsGetValue(mKey);
+ return static_cast<T>(reinterpret_cast<typename Helper<T>::Type>(h));
+ }
+
+ inline bool set(const T aValue) {
+ void* h = const_cast<void*>(reinterpret_cast<const void*>(
+ static_cast<typename Helper<T>::Type>(aValue)));
+ return TlsSetValue(mKey, h);
+ }
+
+ private:
+ unsigned long mKey;
+};
+# endif
+#elif defined(__wasi__)
+// There are no threads on WASI, so we just use a global variable.
+template <typename T>
+class ThreadLocalKeyStorage {
+ public:
+ constexpr ThreadLocalKeyStorage() : mInited(false) {}
+
+ inline bool initialized() const { return mInited; }
+
+ inline void init() { mInited = true; }
+
+ inline T get() const { return mVal; }
+
+ inline bool set(const T aValue) {
+ mVal = aValue;
+ return true;
+ }
+
+ private:
+ bool mInited;
+ T mVal;
+};
+#else
+template <typename T>
+class ThreadLocalKeyStorage {
+ public:
+ constexpr ThreadLocalKeyStorage() : mKey(0), mInited(false) {}
+
+ inline bool initialized() const { return mInited; }
+
+ inline void init() { mInited = !pthread_key_create(&mKey, nullptr); }
+
+ inline T get() const {
+ void* h = pthread_getspecific(mKey);
+ return static_cast<T>(reinterpret_cast<typename Helper<T>::Type>(h));
+ }
+
+ inline bool set(const T aValue) {
+ const void* h = reinterpret_cast<const void*>(
+ static_cast<typename Helper<T>::Type>(aValue));
+ return !pthread_setspecific(mKey, h);
+ }
+
+ private:
+ pthread_key_t mKey;
+ bool mInited;
+};
+#endif
+
+template <typename T>
+class ThreadLocalNativeStorage {
+ public:
+ // __thread does not allow non-trivial constructors, but we can
+ // instead rely on zero-initialization.
+ inline bool initialized() const { return true; }
+
+ inline void init() {}
+
+ inline T get() const { return mValue; }
+
+ inline bool set(const T aValue) {
+ mValue = aValue;
+ return true;
+ }
+
+ private:
+ T mValue;
+};
+
+template <typename T, template <typename U> class Storage>
+class ThreadLocal : public Storage<T> {
+ public:
+ [[nodiscard]] inline bool init();
+
+ void infallibleInit() {
+ MOZ_RELEASE_ASSERT(init(), "Infallible TLS initialization failed");
+ }
+
+ inline T get() const;
+
+ inline void set(const T aValue);
+
+ using Type = T;
+};
+
+template <typename T, template <typename U> class Storage>
+inline bool ThreadLocal<T, Storage>::init() {
+ static_assert(std::is_pointer_v<T> || std::is_integral_v<T>,
+ "mozilla::ThreadLocal must be used with a pointer or "
+ "integral type");
+ static_assert(sizeof(T) <= sizeof(void*),
+ "mozilla::ThreadLocal can't be used for types larger than "
+ "a pointer");
+
+ if (!Storage<T>::initialized()) {
+ Storage<T>::init();
+ }
+ return Storage<T>::initialized();
+}
+
+template <typename T, template <typename U> class Storage>
+inline T ThreadLocal<T, Storage>::get() const {
+ MOZ_ASSERT(Storage<T>::initialized());
+ return Storage<T>::get();
+}
+
+template <typename T, template <typename U> class Storage>
+inline void ThreadLocal<T, Storage>::set(const T aValue) {
+ MOZ_ASSERT(Storage<T>::initialized());
+ bool succeeded = Storage<T>::set(aValue);
+ if (!succeeded) {
+ MOZ_CRASH();
+ }
+}
+
+#if (defined(XP_WIN) || defined(MACOSX_HAS_THREAD_LOCAL)) && \
+ !defined(__MINGW32__)
+# define MOZ_THREAD_LOCAL(TYPE) \
+ thread_local ::mozilla::detail::ThreadLocal< \
+ TYPE, ::mozilla::detail::ThreadLocalNativeStorage>
+#elif defined(HAVE_THREAD_TLS_KEYWORD)
+# define MOZ_THREAD_LOCAL(TYPE) \
+ __thread ::mozilla::detail::ThreadLocal< \
+ TYPE, ::mozilla::detail::ThreadLocalNativeStorage>
+#else
+# define MOZ_THREAD_LOCAL(TYPE) \
+ ::mozilla::detail::ThreadLocal<TYPE, \
+ ::mozilla::detail::ThreadLocalKeyStorage>
+#endif
+
+} // namespace detail
+} // namespace mozilla
+
+#endif /* mozilla_ThreadLocal_h */
diff --git a/mfbt/ThreadSafeWeakPtr.h b/mfbt/ThreadSafeWeakPtr.h
new file mode 100644
index 0000000000..d5176f5ffa
--- /dev/null
+++ b/mfbt/ThreadSafeWeakPtr.h
@@ -0,0 +1,309 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A thread-safe weak pointer */
+
+/**
+ * Derive from SupportsThreadSafeWeakPtr to allow thread-safe weak pointers to
+ * an atomically refcounted derived class. These thread-safe weak pointers may
+ * be safely accessed and converted to strong pointers on multiple threads.
+ *
+ * Note that SupportsThreadSafeWeakPtr defines the same member functions as
+ * AtomicRefCounted, so you should not separately inherit from it.
+ *
+ * ThreadSafeWeakPtr and its implementation are distinct from the normal
+ * WeakPtr, which is not thread-safe. The interface discipline and
+ * implementation details are different enough that the two implementations
+ * are kept separate for now, for efficiency reasons. If you don't actually
+ * need to use weak pointers on
+ * multiple threads, you can just use WeakPtr instead.
+ *
+ * When deriving from SupportsThreadSafeWeakPtr, you should add
+ * MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public section of your
+ * class, where ClassName is the name of your class.
+ *
+ * Example usage:
+ *
+ * class C : public SupportsThreadSafeWeakPtr<C>
+ * {
+ * public:
+ * MOZ_DECLARE_REFCOUNTED_TYPENAME(C)
+ * void doStuff();
+ * };
+ *
+ * ThreadSafeWeakPtr<C> weak;
+ * {
+ * RefPtr<C> strong = new C;
+ * if (strong) {
+ * strong->doStuff();
+ * }
+ * // Make a new weak reference to the object from the strong reference.
+ * weak = strong;
+ * }
+ *   MOZ_ASSERT(weak.IsDead(), "Weak pointers are dead after all "
+ * "strong references are released.");
+ *
+ * // Convert the weak reference to a strong reference for usage.
+ * RefPtr<C> other(weak);
+ * if (other) {
+ * other->doStuff();
+ * }
+ */
+
+#ifndef mozilla_ThreadSafeWeakPtr_h
+#define mozilla_ThreadSafeWeakPtr_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/RefCountType.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
+
+namespace mozilla {
+
+template <typename T>
+class ThreadSafeWeakPtr;
+
+template <typename T>
+class SupportsThreadSafeWeakPtr;
+
+namespace detail {
+
+class SupportsThreadSafeWeakPtrBase {};
+
+// A shared weak reference that is used to track a SupportsThreadSafeWeakPtr
+// object. This object owns the reference count for the tracked object, and can
+// perform atomic refcount upgrades.
+class ThreadSafeWeakReference
+ : public external::AtomicRefCounted<ThreadSafeWeakReference> {
+ public:
+ explicit ThreadSafeWeakReference(SupportsThreadSafeWeakPtrBase* aPtr)
+ : mPtr(aPtr) {}
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ const char* typeName() const { return "ThreadSafeWeakReference"; }
+ size_t typeSize() const { return sizeof(*this); }
+#endif
+
+ private:
+ template <typename U>
+ friend class mozilla::SupportsThreadSafeWeakPtr;
+ template <typename U>
+ friend class mozilla::ThreadSafeWeakPtr;
+
+ // Number of strong references to the underlying data structure.
+ //
+ // Other than the initial strong `AddRef` call incrementing this value to 1,
+ // which must occur before any weak references are taken, once this value
+ // reaches `0` again it cannot be changed.
+ RC<MozRefCountType, AtomicRefCount> mStrongCnt{0};
+
+ // Raw pointer to the tracked object. It is never valid to read this value
+ // outside of `ThreadSafeWeakPtr::getRefPtr()`.
+ SupportsThreadSafeWeakPtrBase* MOZ_NON_OWNING_REF mPtr;
+};
+
+} // namespace detail
+
+// For usage documentation for SupportsThreadSafeWeakPtr, see the header-level
+// documentation.
+//
+// To understand the layout of SupportsThreadSafeWeakPtr, consider the following
+// simplified declaration:
+//
+// class MyType: SupportsThreadSafeWeakPtr { uint32_t mMyData; ... }
+//
+// Which will result in the following layout:
+//
+// +--------------------+
+// | MyType | <===============================================+
+// +--------------------+ I
+// | RefPtr mWeakRef o======> +-------------------------------------+ I
+// | uint32_t mMyData | | ThreadSafeWeakReference | I
+// +--------------------+ +-------------------------------------+ I
+// | RC mRefCount | I
+// | RC mStrongCount | I
+// | SupportsThreadSafeWeakPtrBase* mPtr o====+
+// +-------------------------------------+
+//
+// The mRefCount inherited from AtomicRefCounted<ThreadSafeWeakReference> is the
+// weak count. This means MyType implicitly holds a weak reference, so if the
+// weak count ever hits 0, we know all strong *and* weak references are gone,
+// and it's safe to free the ThreadSafeWeakReference. MyType's AddRef and
+// Release implementations otherwise only manipulate mStrongCount.
+//
+// It's necessary to keep the counts in a separate allocation because we need
+// to be able to delete MyType while weak references still exist. This ensures
+// that weak references can still access all the state necessary to check if
+// they can be upgraded (mStrongCount).
+template <typename T>
+class SupportsThreadSafeWeakPtr : public detail::SupportsThreadSafeWeakPtrBase {
+ protected:
+ using ThreadSafeWeakReference = detail::ThreadSafeWeakReference;
+
+ // The `this` pointer will not have subclasses initialized yet, but it will
+ // also not be read until a weak pointer is upgraded, which should be after
+ // this point.
+ SupportsThreadSafeWeakPtr() : mWeakRef(new ThreadSafeWeakReference(this)) {
+ static_assert(std::is_base_of_v<SupportsThreadSafeWeakPtr, T>,
+ "T must derive from SupportsThreadSafeWeakPtr");
+ }
+
+ public:
+ // Compatibility with RefPtr
+ MozExternalRefCountType AddRef() const {
+ auto& refCnt = mWeakRef->mStrongCnt;
+ MOZ_ASSERT(int32_t(refCnt) >= 0);
+ MozRefCountType cnt = ++refCnt;
+ detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
+ return cnt;
+ }
+
+ MozExternalRefCountType Release() const {
+ auto& refCnt = mWeakRef->mStrongCnt;
+ MOZ_ASSERT(int32_t(refCnt) > 0);
+ detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
+ MozRefCountType cnt = --refCnt;
+ logger.logRelease(cnt);
+ if (0 == cnt) {
+ // Because we have atomically decremented the refcount above, only one
+ // thread can get a 0 count here. Thus, it is safe to access and destroy
+ // |this| here.
+ // No other thread can acquire a strong reference to |this| anymore
+ // through our weak pointer, as upgrading a weak pointer always uses
+ // |IncrementIfNonzero|, meaning the refcount can't leave a zero reference
+ // state.
+ // NOTE: We can't update our refcount to the marker `DEAD` value here, as
+ // it may still be read by mWeakRef.
+ delete static_cast<const T*>(this);
+ }
+ return cnt;
+ }
+
+ using HasThreadSafeRefCnt = std::true_type;
+
+ // Compatibility with wtf::RefPtr
+ void ref() { AddRef(); }
+ void deref() { Release(); }
+ MozRefCountType refCount() const { return mWeakRef->mStrongCnt; }
+ bool hasOneRef() const { return refCount() == 1; }
+
+ private:
+ template <typename U>
+ friend class ThreadSafeWeakPtr;
+
+ ThreadSafeWeakReference* getThreadSafeWeakReference() const {
+ return mWeakRef;
+ }
+
+ const RefPtr<ThreadSafeWeakReference> mWeakRef;
+};
+
+// A thread-safe variant of a weak pointer
+template <typename T>
+class ThreadSafeWeakPtr {
+ using ThreadSafeWeakReference = detail::ThreadSafeWeakReference;
+
+ public:
+ ThreadSafeWeakPtr() = default;
+
+ ThreadSafeWeakPtr& operator=(const ThreadSafeWeakPtr& aOther) = default;
+ ThreadSafeWeakPtr(const ThreadSafeWeakPtr& aOther) = default;
+
+ ThreadSafeWeakPtr& operator=(ThreadSafeWeakPtr&& aOther) = default;
+ ThreadSafeWeakPtr(ThreadSafeWeakPtr&& aOther) = default;
+
+ ThreadSafeWeakPtr& operator=(const RefPtr<T>& aOther) {
+ if (aOther) {
+ // Get the underlying shared weak reference to the object.
+ mRef = aOther->getThreadSafeWeakReference();
+ } else {
+ mRef = nullptr;
+ }
+ return *this;
+ }
+
+ explicit ThreadSafeWeakPtr(const RefPtr<T>& aOther) { *this = aOther; }
+
+ ThreadSafeWeakPtr& operator=(decltype(nullptr)) {
+ mRef = nullptr;
+ return *this;
+ }
+
+ explicit ThreadSafeWeakPtr(decltype(nullptr)) {}
+
+ // Use the explicit `IsNull()` or `IsDead()` methods instead.
+ explicit operator bool() const = delete;
+
+ // Check if the ThreadSafeWeakPtr was created wrapping a null pointer.
+ bool IsNull() const { return !mRef; }
+
+ // Check if the managed object is nullptr or has already been destroyed. Once
+ // IsDead returns true, this ThreadSafeWeakPtr can never be upgraded again
+ // (until it has been re-assigned), but a false return value does NOT imply
+ // that any future upgrade will be successful.
+ bool IsDead() const { return IsNull() || size_t(mRef->mStrongCnt) == 0; }
+
+ bool operator==(const ThreadSafeWeakPtr& aOther) const {
+ return mRef == aOther.mRef;
+ }
+
+ bool operator==(const RefPtr<T>& aOther) const {
+ return *this == aOther.get();
+ }
+
+ friend bool operator==(const RefPtr<T>& aStrong,
+ const ThreadSafeWeakPtr& aWeak) {
+ return aWeak == aStrong.get();
+ }
+
+ bool operator==(const T* aOther) const {
+ if (!mRef) {
+ return !aOther;
+ }
+ return aOther && aOther->getThreadSafeWeakReference() == mRef;
+ }
+
+ template <typename U>
+ bool operator!=(const U& aOther) const {
+ return !(*this == aOther);
+ }
+
+ // Convert the weak pointer to a strong RefPtr.
+ explicit operator RefPtr<T>() const { return getRefPtr(); }
+
+ private:
+ // Gets a new strong reference of the proper type T to the tracked object.
+ already_AddRefed<T> getRefPtr() const {
+ if (!mRef) {
+ return nullptr;
+ }
+ // Increment our strong reference count only if it is nonzero, meaning that
+ // the object is still alive.
+ MozRefCountType cnt = mRef->mStrongCnt.IncrementIfNonzero();
+ if (cnt == 0) {
+ return nullptr;
+ }
+
+ RefPtr<T> ptr = already_AddRefed<T>(static_cast<T*>(mRef->mPtr));
+ detail::RefCountLogger::logAddRef(ptr.get(), cnt);
+ return ptr.forget();
+ }
+
+ // A shared weak reference to an object. Note that this may be null so as to
+ // save memory (at the slight cost of an extra null check) if no object is
+ // being tracked.
+ RefPtr<ThreadSafeWeakReference> mRef;
+};
+
+} // namespace mozilla
+
+template <typename T>
+inline already_AddRefed<T> do_AddRef(
+ const mozilla::ThreadSafeWeakPtr<T>& aObj) {
+ RefPtr<T> ref(aObj);
+ return ref.forget();
+}
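+
+// For example (an illustrative sketch, reusing the class C from the
+// header-level documentation above):
+//
+//   mozilla::ThreadSafeWeakPtr<C> weak = ...;
+//   RefPtr<C> strong = do_AddRef(weak);
+//   if (strong) {  // null if the object has already been destroyed
+//     strong->doStuff();
+//   }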
+
+#endif /* mozilla_ThreadSafeWeakPtr_h */
diff --git a/mfbt/ThreadSafety.h b/mfbt/ThreadSafety.h
new file mode 100644
index 0000000000..9b18c71bd0
--- /dev/null
+++ b/mfbt/ThreadSafety.h
@@ -0,0 +1,140 @@
+// Note: the file is largely imported directly from WebRTC upstream, so
+// comments may not completely apply to Mozilla's usage.
+//
+// Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+// Borrowed from
+// https://code.google.com/p/gperftools/source/browse/src/base/thread_annotations.h
+// but adapted for clang attributes instead of the gcc ones.
+//
+// This header file contains the macro definitions for thread safety
+// annotations that allow the developers to document the locking policies
+// of their multi-threaded code. The annotations can also help program
+// analysis tools to identify potential thread safety issues.
+
+#ifndef mozilla_ThreadSafety_h
+#define mozilla_ThreadSafety_h
+#include "mozilla/Attributes.h"
+
+#if defined(__clang__) && (__clang_major__ >= 11) && !defined(SWIG)
+# define MOZ_THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+// Allow for localized suppression of thread-safety warnings; finer-grained
+// than MOZ_NO_THREAD_SAFETY_ANALYSIS
+# define MOZ_PUSH_IGNORE_THREAD_SAFETY \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wthread-safety\"")
+# define MOZ_POP_THREAD_SAFETY _Pragma("GCC diagnostic pop")
+
+#else
+# define MOZ_THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+# define MOZ_PUSH_IGNORE_THREAD_SAFETY
+# define MOZ_POP_THREAD_SAFETY
+#endif
+
+// Document if a shared variable/field needs to be protected by a lock.
+// MOZ_GUARDED_BY allows the user to specify a particular lock that should be
+// held when accessing the annotated variable, while MOZ_GUARDED_VAR only
+// indicates a shared variable should be guarded (by any lock). MOZ_GUARDED_VAR
+// is primarily used when the client cannot express the name of the lock.
+#define MOZ_GUARDED_BY(x) MOZ_THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define MOZ_GUARDED_VAR MOZ_THREAD_ANNOTATION_ATTRIBUTE__(guarded_var)
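+
+// A minimal sketch of MOZ_GUARDED_BY in use (the Mutex type and the member
+// names are hypothetical):
+//
+//   class Counter {
+//     Mutex mLock;
+//     int mCount MOZ_GUARDED_BY(mLock);  // analysis flags unlocked access
+//   };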
+
+// Document if the memory location pointed to by a pointer should be guarded
+// by a lock when dereferencing the pointer. Similar to MOZ_GUARDED_VAR,
+// MOZ_PT_GUARDED_VAR is primarily used when the client cannot express the
+// name of the lock. Note that a pointer variable to a shared memory location
+// could itself be a shared variable. For example, if a shared global pointer
+// q, which is guarded by mu1, points to a shared memory location that is
+// guarded by mu2, q should be annotated as follows:
+// int *q MOZ_GUARDED_BY(mu1) MOZ_PT_GUARDED_BY(mu2);
+#define MOZ_PT_GUARDED_BY(x) MOZ_THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define MOZ_PT_GUARDED_VAR MOZ_THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_var)
+
+// Document the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both MOZ_ACQUIRED_AFTER
+// and MOZ_ACQUIRED_BEFORE.)
+#define MOZ_ACQUIRED_AFTER(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#define MOZ_ACQUIRED_BEFORE(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// The following three annotations document the lock requirements for
+// functions/methods.
+
+// Document if a function expects certain locks to be held before it is called
+#define MOZ_REQUIRES(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define MOZ_REQUIRES_SHARED(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// Document the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as google3's Mutex locks are
+// non-reentrant).
+#define MOZ_EXCLUDES(x) MOZ_THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x))
+
+// Document the lock the annotated function returns without acquiring it.
+#define MOZ_RETURN_CAPABILITY(x) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// Document if a class/type is a lockable type (such as the Mutex class).
+#define MOZ_CAPABILITY(x) MOZ_THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+
+// Document if a class is a scoped lockable type (such as the MutexLock class).
+#define MOZ_SCOPED_CAPABILITY MOZ_THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// The following annotations specify lock and unlock primitives.
+#define MOZ_CAPABILITY_ACQUIRE(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+#define MOZ_EXCLUSIVE_RELEASE(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+
+#define MOZ_ACQUIRE_SHARED(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+#define MOZ_TRY_ACQUIRE(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define MOZ_SHARED_TRYLOCK_FUNCTION(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+#define MOZ_CAPABILITY_RELEASE(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
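+// Putting several of these together, a minimal annotated lock type might look
+// like this (an illustrative sketch only, not Gecko's real Mutex):
+//
+//   class MOZ_CAPABILITY("mutex") MyMutex {
+//    public:
+//     void Lock() MOZ_CAPABILITY_ACQUIRE();
+//     void Unlock() MOZ_CAPABILITY_RELEASE();
+//   };
+//
+//   void Frob() MOZ_REQUIRES(gMyMutex);  // caller must hold gMyMutex
+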
+// An escape hatch for thread safety analysis to ignore the annotated function.
+#define MOZ_NO_THREAD_SAFETY_ANALYSIS \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+// Newer capabilities
+#define MOZ_ASSERT_CAPABILITY(x) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
+
+#define MOZ_ASSERT_SHARED_CAPABILITY(x) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
+
+// Additions from current clang assertions.
+// Note: new-style definitions, since these didn't exist in the old style
+#define MOZ_RELEASE_SHARED(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+
+#define MOZ_RELEASE_GENERIC(...) \
+ MOZ_THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__))
+
+// Mozilla additions:
+
+// AutoUnlock is supported by clang currently, but oddly you must use
+// MOZ_EXCLUSIVE_RELEASE() for both the RAII constructor *and* the destructor.
+// This hides the ugliness until they fix it upstream.
+#define MOZ_SCOPED_UNLOCK_RELEASE(...) MOZ_EXCLUSIVE_RELEASE(__VA_ARGS__)
+#define MOZ_SCOPED_UNLOCK_REACQUIRE(...) MOZ_EXCLUSIVE_RELEASE(__VA_ARGS__)
+
+#endif /* mozilla_ThreadSafety_h */
diff --git a/mfbt/ToString.h b/mfbt/ToString.h
new file mode 100644
index 0000000000..a184d870b1
--- /dev/null
+++ b/mfbt/ToString.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Utilities for converting an object to a string representation. */
+
+#ifndef mozilla_ToString_h
+#define mozilla_ToString_h
+
+#include <string>
+#include <sstream>
+
+namespace mozilla {
+
+/**
+ * A convenience function for converting an object to a string representation.
+ * Supports any object which can be streamed to an std::ostream.
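+ *
+ * For example (illustrative):
+ *
+ *   std::string s = mozilla::ToString(42);  // "42"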
+ */
+template <typename T>
+std::string ToString(const T& aValue) {
+ std::ostringstream stream;
+ stream << aValue;
+ return stream.str();
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_ToString_h */
diff --git a/mfbt/Try.h b/mfbt/Try.h
new file mode 100644
index 0000000000..a650a33ea2
--- /dev/null
+++ b/mfbt/Try.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Try_h
+#define mozilla_Try_h
+
+#include "mozilla/Result.h"
+
+/**
+ * MOZ_TRY(expr) is the C++ equivalent of Rust's `try!(expr);`. First, it
+ * evaluates expr, which must produce a Result value. On success, it
+ * discards the result altogether. On error, it immediately returns an error
+ * Result from the enclosing function.
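+ *
+ * For example (an illustrative sketch; |Init| is a hypothetical function
+ * returning Result<Ok, nsresult>):
+ *
+ *   Result<Ok, nsresult> Startup() {
+ *     MOZ_TRY(Init());  // on error, returns Init()'s error immediately
+ *     return Ok();
+ *   }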
+ */
+#define MOZ_TRY(expr) \
+ do { \
+ auto mozTryTempResult_ = ::mozilla::ToResult(expr); \
+ if (MOZ_UNLIKELY(mozTryTempResult_.isErr())) { \
+ return mozTryTempResult_.propagateErr(); \
+ } \
+ } while (0)
+
+/**
+ * MOZ_TRY_VAR(target, expr) is the C++ equivalent of Rust's `target =
+ * try!(expr);`. First, it evaluates expr, which must produce a Result value. On
+ * success, the result's success value is assigned to target. On error, it
+ * immediately returns the error result. |target| must be an lvalue.
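+ *
+ * For example (illustrative; |ComputeValue| is a hypothetical function
+ * returning Result<int32_t, nsresult>):
+ *
+ *   int32_t value;
+ *   MOZ_TRY_VAR(value, ComputeValue());  // assigns on success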
+ */
+#define MOZ_TRY_VAR(target, expr) \
+ do { \
+ auto mozTryVarTempResult_ = (expr); \
+ if (MOZ_UNLIKELY(mozTryVarTempResult_.isErr())) { \
+ return mozTryVarTempResult_.propagateErr(); \
+ } \
+ (target) = mozTryVarTempResult_.unwrap(); \
+ } while (0)
+
+#endif // mozilla_Try_h
diff --git a/mfbt/TsanOptions.h b/mfbt/TsanOptions.h
new file mode 100644
index 0000000000..f276251038
--- /dev/null
+++ b/mfbt/TsanOptions.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Default options for ThreadSanitizer. */
+
+#ifndef mozilla_TsanOptions_h
+#define mozilla_TsanOptions_h
+
+#include "mozilla/Compiler.h"
+
+#ifndef _MSC_VER // Not supported by clang-cl yet
+
+//
+// When running with ThreadSanitizer, we need to explicitly set some
+// options specific to our codebase to prevent errors during runtime.
+// To override these, set the TSAN_OPTIONS environment variable.
+//
+// Currently, these are:
+//
+// abort_on_error=1 - Causes TSan to abort instead of using exit().
+// halt_on_error=1 - Causes TSan to stop on the first race detected.
+//
+// report_signal_unsafe=0 - Required to avoid TSan deadlocks when
+// receiving external signals (e.g. SIGINT manually on console).
+//
+// allocator_may_return_null=1 - Tell TSan to return NULL when an allocation
+// fails instead of aborting the program. This allows us to handle failing
+// allocations the same way we would handle them with a regular allocator and
+// also uncovers potential bugs that might occur in these situations.
+//
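+// These defaults can be overridden at run time through the TSAN_OPTIONS
+// environment variable, e.g. (illustrative shell invocation):
+//
+//   TSAN_OPTIONS="halt_on_error=0:abort_on_error=0" ./firefox
+//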
+extern "C" const char* __tsan_default_options() {
+ return "halt_on_error=1:abort_on_error=1:report_signal_unsafe=0"
+ ":allocator_may_return_null=1";
+}
+
+// These are default suppressions for external libraries that probably
+// every application would want to include if it potentially loads external
+// libraries like GTK/X and hence their dependencies.
+# define MOZ_TSAN_DEFAULT_EXTLIB_SUPPRESSIONS \
+ "called_from_lib:libappmenu-gtk3-parser\n" \
+ "called_from_lib:libatk-1\n" \
+ "called_from_lib:libcairo.so\n" \
+ "called_from_lib:libcairo-gobject\n" \
+ "called_from_lib:libdconfsettings\n" \
+ "called_from_lib:libEGL_nvidia\n" \
+ "called_from_lib:libfontconfig.so\n" \
+ "called_from_lib:libfontconfig1\n" \
+ "called_from_lib:libgdk-3\n" \
+ "called_from_lib:libgdk_pixbuf\n" \
+ "called_from_lib:libgdk-x11\n" \
+ "called_from_lib:libgio-2\n" \
+ "called_from_lib:libglib-1\n" \
+ "called_from_lib:libglib-2\n" \
+ "called_from_lib:libgobject\n" \
+ "called_from_lib:libgtk-3\n" \
+ "called_from_lib:libgtk-x11\n" \
+ "called_from_lib:libgvfscommon\n" \
+ "called_from_lib:libgvfsdbus\n" \
+ "called_from_lib:libibus-1\n" \
+ "called_from_lib:libnvidia-eglcore\n" \
+ "called_from_lib:libnvidia-glsi\n" \
+ "called_from_lib:libogg.so\n" \
+ "called_from_lib:libpango-1\n" \
+ "called_from_lib:libpangocairo\n" \
+ "called_from_lib:libpangoft2\n" \
+ "called_from_lib:pango-basic-fc\n" \
+ "called_from_lib:libpixman-1\n" \
+ "called_from_lib:libpulse.so\n" \
+ "called_from_lib:libpulsecommon\n" \
+ "called_from_lib:libsecret-1\n" \
+ "called_from_lib:libunity-gtk3-parser\n" \
+ "called_from_lib:libvorbis.so\n" \
+ "called_from_lib:libvorbisfile\n" \
+ "called_from_lib:libwayland-client\n" \
+ "called_from_lib:libX11.so\n" \
+ "called_from_lib:libX11-xcb\n" \
+ "called_from_lib:libXau\n" \
+ "called_from_lib:libxcb.so\n" \
+ "called_from_lib:libXcomposite\n" \
+ "called_from_lib:libXcursor\n" \
+ "called_from_lib:libXdamage\n" \
+ "called_from_lib:libXdmcp\n" \
+ "called_from_lib:libXext\n" \
+ "called_from_lib:libXfixes\n" \
+ "called_from_lib:libXi.so\n" \
+ "called_from_lib:libXrandr\n" \
+ "called_from_lib:libXrender\n" \
+ "called_from_lib:libXss\n"
+
+#endif // _MSC_VER
+
+#endif /* mozilla_TsanOptions_h */
diff --git a/mfbt/TypedEnumBits.h b/mfbt/TypedEnumBits.h
new file mode 100644
index 0000000000..4a415d0600
--- /dev/null
+++ b/mfbt/TypedEnumBits.h
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS allows using a typed enum as bit flags.
+ */
+
+#ifndef mozilla_TypedEnumBits_h
+#define mozilla_TypedEnumBits_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/IntegerTypeTraits.h"
+
+namespace mozilla {
+
+/*
+ * The problem that CastableTypedEnumResult aims to solve is that
+ * typed enums are not convertible to bool, and there is no way to make them
+ * be, yet user code wants to be able to write
+ *
+ * if (myFlags & Flags::SOME_PARTICULAR_FLAG) (1)
+ *
+ * There are different approaches to solving this. Most of them require
+ * adapting user code. For example, we could implement operator! and have
+ * the user write
+ *
+ * if (!!(myFlags & Flags::SOME_PARTICULAR_FLAG)) (2)
+ *
+ * Or we could supply a IsNonZero() or Any() function returning whether
+ * an enum value is nonzero, and have the user write
+ *
+ * if (Any(Flags & Flags::SOME_PARTICULAR_FLAG)) (3)
+ *
+ * But instead, we choose to preserve the original user syntax (1) as it
+ * is inherently more readable, and to ease porting existing code to typed
+ * enums. We achieve this by having operator& and other binary bitwise
+ * operators have as return type a class, CastableTypedEnumResult,
+ * that wraps a typed enum but adds bool convertibility.
+ */
+template <typename E>
+class CastableTypedEnumResult {
+ private:
+ const E mValue;
+
+ public:
+ explicit constexpr CastableTypedEnumResult(E aValue) : mValue(aValue) {}
+
+ constexpr operator E() const { return mValue; }
+
+ template <typename DestinationType>
+ explicit constexpr operator DestinationType() const {
+ return DestinationType(mValue);
+ }
+
+ constexpr bool operator!() const { return !bool(mValue); }
+};
+
+#define MOZ_CASTABLETYPEDENUMRESULT_BINOP(Op, OtherType, ReturnType) \
+ template <typename E> \
+ constexpr ReturnType operator Op(const OtherType& aE, \
+ const CastableTypedEnumResult<E>& aR) { \
+ return ReturnType(aE Op OtherType(aR)); \
+ } \
+ template <typename E> \
+ constexpr ReturnType operator Op(const CastableTypedEnumResult<E>& aR, \
+ const OtherType& aE) { \
+ return ReturnType(OtherType(aR) Op aE); \
+ } \
+ template <typename E> \
+ constexpr ReturnType operator Op(const CastableTypedEnumResult<E>& aR1, \
+ const CastableTypedEnumResult<E>& aR2) { \
+ return ReturnType(OtherType(aR1) Op OtherType(aR2)); \
+ }
+
+MOZ_CASTABLETYPEDENUMRESULT_BINOP(|, E, CastableTypedEnumResult<E>)
+MOZ_CASTABLETYPEDENUMRESULT_BINOP(&, E, CastableTypedEnumResult<E>)
+MOZ_CASTABLETYPEDENUMRESULT_BINOP(^, E, CastableTypedEnumResult<E>)
+MOZ_CASTABLETYPEDENUMRESULT_BINOP(==, E, bool)
+MOZ_CASTABLETYPEDENUMRESULT_BINOP(!=, E, bool)
+
+template <typename E>
+constexpr CastableTypedEnumResult<E> operator~(
+ const CastableTypedEnumResult<E>& aR) {
+ return CastableTypedEnumResult<E>(~(E(aR)));
+}
+
+#define MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(Op) \
+ template <typename E> \
+ E& operator Op(E & aR1, const CastableTypedEnumResult<E>& aR2) { \
+ return aR1 Op E(aR2); \
+ }
+
+MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(&=)
+MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(|=)
+MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(^=)
+
+#undef MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP
+
+#undef MOZ_CASTABLETYPEDENUMRESULT_BINOP
+
+namespace detail {
+template <typename E>
+struct UnsignedIntegerTypeForEnum : UnsignedStdintTypeForSize<sizeof(E)> {};
+} // namespace detail
+
+} // namespace mozilla
+
+#define MOZ_MAKE_ENUM_CLASS_BINOP_IMPL(Name, Op) \
+ inline constexpr mozilla::CastableTypedEnumResult<Name> operator Op( \
+ Name a, Name b) { \
+ typedef mozilla::CastableTypedEnumResult<Name> Result; \
+ typedef mozilla::detail::UnsignedIntegerTypeForEnum<Name>::Type U; \
+ return Result(Name(U(a) Op U(b))); \
+ } \
+ \
+ inline Name& operator Op##=(Name & a, Name b) { return a = a Op b; }
+
+/**
+ * MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS generates standard bitwise operators
+ * for the given enum type. Use this to enable using an enum type as bit-field.
+ */
+#define MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Name) \
+ MOZ_MAKE_ENUM_CLASS_BINOP_IMPL(Name, |) \
+ MOZ_MAKE_ENUM_CLASS_BINOP_IMPL(Name, &) \
+ MOZ_MAKE_ENUM_CLASS_BINOP_IMPL(Name, ^) \
+ inline constexpr mozilla::CastableTypedEnumResult<Name> operator~(Name a) { \
+ typedef mozilla::CastableTypedEnumResult<Name> Result; \
+ typedef mozilla::detail::UnsignedIntegerTypeForEnum<Name>::Type U; \
+ return Result(Name(~(U(a)))); \
+ }
+
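+// Example usage (an illustrative sketch):
+//
+//   enum class Flags : uint8_t { A = 1 << 0, B = 1 << 1 };
+//   MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Flags)
+//
+//   Flags f = Flags::A | Flags::B;
+//   if (f & Flags::A) {  // boolean test via CastableTypedEnumResult
+//     // ...
+//   }
+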
+#endif // mozilla_TypedEnumBits_h
diff --git a/mfbt/Types.h b/mfbt/Types.h
new file mode 100644
index 0000000000..47a9be2cc4
--- /dev/null
+++ b/mfbt/Types.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* mfbt foundational types and macros. */
+
+#ifndef mozilla_Types_h
+#define mozilla_Types_h
+
+/*
+ * This header must be valid C and C++, includable by code embedding either
+ * SpiderMonkey or Gecko.
+ */
+
+/* Expose all <stdint.h> types and size_t. */
+#include <stddef.h>
+#include <stdint.h>
+
+/* Implement compiler and linker macros needed for APIs. */
+
+/*
+ * MOZ_EXPORT is used to declare and define a symbol or type which is externally
+ * visible to users of the current library. It encapsulates various decorations
+ * needed to properly export the method's symbol.
+ *
+ * api.h:
+ * extern MOZ_EXPORT int MeaningOfLife(void);
+ * extern MOZ_EXPORT int LuggageCombination;
+ *
+ * api.c:
+ * int MeaningOfLife(void) { return 42; }
+ * int LuggageCombination = 12345;
+ *
+ * If you are merely sharing a method across files, just use plain |extern|.
+ * These macros are designed for use by library interfaces -- not for normal
+ * methods or data used cross-file.
+ */
+#if defined(WIN32)
+# define MOZ_EXPORT __declspec(dllexport)
+#else /* Unix */
+# ifdef HAVE_VISIBILITY_ATTRIBUTE
+# define MOZ_EXPORT __attribute__((visibility("default")))
+# elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+# define MOZ_EXPORT __global
+# else
+# define MOZ_EXPORT /* nothing */
+# endif
+#endif
+
+/*
+ * Whereas implementers use MOZ_EXPORT to declare and define library symbols,
+ * users use MOZ_IMPORT_API and MOZ_IMPORT_DATA to access them. Most often the
+ * implementer of the library will expose an API macro which expands to either
+ * the export or import version of the macro, depending upon the compilation
+ * mode.
+ */
+#ifdef _WIN32
+# if defined(__MWERKS__)
+# define MOZ_IMPORT_API /* nothing */
+# else
+# define MOZ_IMPORT_API __declspec(dllimport)
+# endif
+#else
+# define MOZ_IMPORT_API MOZ_EXPORT
+#endif
+
+#if defined(_WIN32) && !defined(__MWERKS__)
+# define MOZ_IMPORT_DATA __declspec(dllimport)
+#else
+# define MOZ_IMPORT_DATA MOZ_EXPORT
+#endif
+
+/*
+ * Consistent with the above comment, the MFBT_API and MFBT_DATA macros expose
+ * export mfbt declarations when building mfbt, and they expose import mfbt
+ * declarations when using mfbt.
+ */
+#if defined(IMPL_MFBT) || \
+ (defined(JS_STANDALONE) && !defined(MOZ_MEMORY) && \
+ (defined(EXPORT_JS_API) || defined(STATIC_EXPORTABLE_JS_API)))
+# define MFBT_API MOZ_EXPORT
+# define MFBT_DATA MOZ_EXPORT
+#else
+# if defined(JS_STANDALONE) && !defined(MOZ_MEMORY) && defined(STATIC_JS_API)
+# define MFBT_API
+# define MFBT_DATA
+# else
+/*
+ * On Linux, mozglue is linked into the program and we link libxul.so with
+ * -z,defs. Normally that causes the linker to reject undefined references in
+ * libxul.so, but as a loophole it allows undefined references to weak
+ * symbols. We add the weak attribute to the import version of the MFBT API
+ * macros to exploit this.
+ */
+# if defined(MOZ_GLUE_IN_PROGRAM)
+# define MFBT_API __attribute__((weak)) MOZ_IMPORT_API
+# define MFBT_DATA __attribute__((weak)) MOZ_IMPORT_DATA
+# else
+# define MFBT_API MOZ_IMPORT_API
+# define MFBT_DATA MOZ_IMPORT_DATA
+# endif
+# endif
+#endif
+
+/*
+ * C symbols in C++ code must be declared immediately within |extern "C"|
+ * blocks. However, in C code, they need not be declared specially. This
+ * difference is abstracted behind the MOZ_BEGIN_EXTERN_C and MOZ_END_EXTERN_C
+ * macros, so that the user need not know whether the code is being used in C
+ * or C++.
+ *
+ * MOZ_BEGIN_EXTERN_C
+ *
+ * extern MOZ_EXPORT int MostRandomNumber(void);
+ * ...other declarations...
+ *
+ * MOZ_END_EXTERN_C
+ *
+ * This said, it is preferable to just use |extern "C"| in C++ header files for
+ * its greater clarity.
+ */
+#ifdef __cplusplus
+# define MOZ_BEGIN_EXTERN_C extern "C" {
+# define MOZ_END_EXTERN_C }
+#else
+# define MOZ_BEGIN_EXTERN_C
+# define MOZ_END_EXTERN_C
+#endif
+
+/*
+ * GCC's typeof is available when decltype is not.
+ */
+#if defined(__GNUC__) && defined(__cplusplus) && \
+ !defined(__GXX_EXPERIMENTAL_CXX0X__) && __cplusplus < 201103L
+# define decltype __typeof__
+#endif
+
+#endif /* mozilla_Types_h */
diff --git a/mfbt/UniquePtr.h b/mfbt/UniquePtr.h
new file mode 100644
index 0000000000..9b51e58db3
--- /dev/null
+++ b/mfbt/UniquePtr.h
@@ -0,0 +1,737 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Smart pointer managing sole ownership of a resource. */
+
+#ifndef mozilla_UniquePtr_h
+#define mozilla_UniquePtr_h
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/CompactPair.h"
+#include "mozilla/Compiler.h"
+
+namespace mozilla {
+
+template <typename T>
+class DefaultDelete;
+template <typename T, class D = DefaultDelete<T>>
+class UniquePtr;
+
+} // namespace mozilla
+
+namespace mozilla {
+
+namespace detail {
+
+struct HasPointerTypeHelper {
+ template <class U>
+ static double Test(...);
+ template <class U>
+ static char Test(typename U::pointer* = 0);
+};
+
+template <class T>
+class HasPointerType
+ : public std::integral_constant<bool, sizeof(HasPointerTypeHelper::Test<T>(
+ 0)) == 1> {};
+
+template <class T, class D, bool = HasPointerType<D>::value>
+struct PointerTypeImpl {
+ typedef typename D::pointer Type;
+};
+
+template <class T, class D>
+struct PointerTypeImpl<T, D, false> {
+ typedef T* Type;
+};
+
+template <class T, class D>
+struct PointerType {
+ typedef typename PointerTypeImpl<T, std::remove_reference_t<D>>::Type Type;
+};
+
+} // namespace detail
+
+/**
+ * UniquePtr is a smart pointer that wholly owns a resource. Ownership may be
+ * transferred out of a UniquePtr through explicit action, but otherwise the
+ * resource is destroyed when the UniquePtr is destroyed.
+ *
+ * UniquePtr is similar to C++98's std::auto_ptr, but it improves upon auto_ptr
+ * in one crucial way: it's impossible to copy a UniquePtr. Copying an auto_ptr
+ * obviously *can't* copy ownership of its singly-owned resource. So what
+ * happens if you try to copy one? Bizarrely, ownership is implicitly
+ * *transferred*, preserving single ownership but breaking code that assumes a
+ * copy of an object is identical to the original. (This is why auto_ptr is
+ * prohibited in STL containers.)
+ *
+ * UniquePtr solves this problem by being *movable* rather than copyable.
+ * Instead of passing a |UniquePtr u| directly to the constructor or assignment
+ * operator, you pass |std::move(u)|. In doing so you indicate that you're
+ * *moving* ownership out of |u|, into the target of the construction or
+ * assignment. After the transfer completes, |u| contains |nullptr| and may be
+ * safely destroyed. This preserves single ownership but also allows UniquePtr
+ * to be moved by algorithms that have been made move-safe. (Note: if |u| is
+ * instead a temporary expression, don't use |std::move()|: just pass the
+ * expression, because it's already move-ready.)
+ *
+ * UniquePtr is also better than std::auto_ptr in that the deletion operation is
+ * customizable. An optional second template parameter specifies a class that
+ * (through its operator()(T*)) implements the desired deletion policy. If no
+ * policy is specified, mozilla::DefaultDelete<T> is used -- which will either
+ * |delete| or |delete[]| the resource, depending whether the resource is an
+ * array. Custom deletion policies ideally should be empty classes (no member
+ * fields, no member fields in base classes, no virtual methods/inheritance),
+ * because then UniquePtr can be just as efficient as a raw pointer.
+ *
+ * Use of UniquePtr proceeds like so:
+ *
+ * UniquePtr<int> g1; // initializes to nullptr
+ * g1.reset(new int); // switch resources using reset()
+ * g1 = nullptr; // clears g1, deletes the int
+ *
+ * UniquePtr<int> g2(new int); // owns that int
+ * int* p = g2.release(); // g2 leaks its int -- still requires deletion
+ * delete p; // now freed
+ *
+ * struct S { int x; S(int x) : x(x) {} };
+ * UniquePtr<S> g3, g4(new S(5));
+ * g3 = std::move(g4); // g3 owns the S, g4 cleared
+ *   S* s = g3.get();              // g3 still owns |s|
+ * assert(g3->x == 5); // operator-> works (if .get() != nullptr)
+ * assert((*g3).x == 5); // also operator* (again, if not cleared)
+ * std::swap(g3, g4); // g4 now owns the S, g3 cleared
+ * g3.swap(g4); // g3 now owns the S, g4 cleared
+ * UniquePtr<S> g5(std::move(g3)); // g5 owns the S, g3 cleared
+ * g5.reset(); // deletes the S, g5 cleared
+ *
+ * struct FreePolicy { void operator()(void* p) { free(p); } };
+ * UniquePtr<int, FreePolicy> g6(static_cast<int*>(malloc(sizeof(int))));
+ * int* ptr = g6.get();
+ * g6 = nullptr; // calls free(ptr)
+ *
+ * Now, carefully note a few things you *can't* do:
+ *
+ * UniquePtr<int> b1;
+ * b1 = new int; // BAD: can only assign another UniquePtr
+ * int* ptr = b1; // BAD: no auto-conversion to pointer, use get()
+ *
+ * UniquePtr<int> b2(b1); // BAD: can't copy a UniquePtr
+ * UniquePtr<int> b3 = b1; // BAD: can't copy-assign a UniquePtr
+ *
+ * (Note that changing a UniquePtr to store a direct |new| expression is
+ * permitted, but usually you should use MakeUnique, defined at the end of this
+ * header.)
+ *
+ * A few miscellaneous notes:
+ *
+ * UniquePtr, when not instantiated for an array type, can be move-constructed
+ * and move-assigned, not only from itself but from "derived" UniquePtr<U, E>
+ * instantiations where U converts to T and E converts to D. Deletion then
+ * occurs through D, so T pretty much has to have a virtual destructor for this
+ * to be safe. The conversion only goes from derived to base; the reverse
+ * doesn't work:
+ *
+ *   struct Base { virtual ~Base() {} };
+ *   struct Derived : Base {};
+ *
+ *   UniquePtr<Derived> d1(new Derived);
+ *   // okay: Derived* converts to Base*, and DefaultDelete<Derived>
+ *   // converts to DefaultDelete<Base>
+ *   UniquePtr<Base> b1(std::move(d1));
+ *
+ *   UniquePtr<Base> b2;
+ *   UniquePtr<Derived> d2(std::move(b2)); // BAD: Base* doesn't convert
+ *
+ * UniquePtr is specialized for array types. Specializing with an array type
+ * creates a smart-pointer version of that array -- not a pointer to such an
+ * array.
+ *
+ * UniquePtr<int[]> arr(new int[5]);
+ * arr[0] = 4;
+ *
+ * What else is different? Deletion of course uses |delete[]|. An operator[]
+ * is provided. Functionality that doesn't make sense for arrays is removed.
+ * The constructors and mutating methods only accept array pointers (not T*, U*
+ * that converts to T*, or UniquePtr<U[]> or UniquePtr<U>) or |nullptr|.
+ *
+ * It's perfectly okay for a function to return a UniquePtr. This transfers
+ * the UniquePtr's sole ownership of the data to the fresh UniquePtr created
+ * in the calling function, which will then solely own that data. Such
+ * functions can return a local variable UniquePtr, |nullptr|,
+ * |UniquePtr(ptr)| where |ptr| is a |T*|, or a UniquePtr |std::move()|'d from
+ * elsewhere.
+ *
+ * UniquePtr will commonly be a member of a class, with lifetime equivalent to
+ * that of that class. If you want to expose the related resource, you could
+ * expose a raw pointer via |get()|, but ownership of a raw pointer is
+ * inherently unclear. So it's better to expose a |const UniquePtr&| instead.
+ * This prohibits mutation but still allows use of |get()| when needed (but
+ * operator-> is preferred). Of course, you can only use this smart pointer as
+ * long as the enclosing class instance remains live -- no different than if you
+ * exposed the |get()| raw pointer.
+ *
+ * To pass a UniquePtr-managed resource as a pointer, use a |const UniquePtr&|
+ * argument. To specify an inout parameter (where the method may or may not
+ * take ownership of the resource, or reset it), or to specify an out parameter
+ * (where simply returning a |UniquePtr| isn't possible), use a |UniquePtr&|
+ * argument. To unconditionally transfer ownership of a UniquePtr
+ * into a method, use a |UniquePtr| argument. To conditionally transfer
+ * ownership of a resource into a method, should the method want it, use a
+ * |UniquePtr&&| argument.
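+ *
+ * For example, a sketch of those conventions (|Widget| and these function
+ * names are hypothetical):
+ *
+ *   void Inspect(const UniquePtr<Widget>& aWidget);  // borrows, never owns
+ *   void MaybeReplace(UniquePtr<Widget>& aWidget);   // inout: may reset it
+ *   void Consume(UniquePtr<Widget> aWidget);         // always takes ownership
+ *   void MaybeConsume(UniquePtr<Widget>&& aWidget);  // takes it only if wanted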
+ */
+template <typename T, class D>
+class UniquePtr {
+ public:
+ typedef T ElementType;
+ typedef D DeleterType;
+ typedef typename detail::PointerType<T, DeleterType>::Type Pointer;
+
+ private:
+ mozilla::CompactPair<Pointer, DeleterType> mTuple;
+
+ Pointer& ptr() { return mTuple.first(); }
+ const Pointer& ptr() const { return mTuple.first(); }
+
+ DeleterType& del() { return mTuple.second(); }
+ const DeleterType& del() const { return mTuple.second(); }
+
+ public:
+ /**
+ * Construct a UniquePtr containing |nullptr|.
+ */
+ constexpr UniquePtr() : mTuple(static_cast<Pointer>(nullptr), DeleterType()) {
+ static_assert(!std::is_pointer_v<D>, "must provide a deleter instance");
+ static_assert(!std::is_reference_v<D>, "must provide a deleter instance");
+ }
+
+ /**
+ * Construct a UniquePtr containing |aPtr|.
+ */
+ explicit UniquePtr(Pointer aPtr) : mTuple(aPtr, DeleterType()) {
+ static_assert(!std::is_pointer_v<D>, "must provide a deleter instance");
+ static_assert(!std::is_reference_v<D>, "must provide a deleter instance");
+ }
+
+ UniquePtr(Pointer aPtr,
+ std::conditional_t<std::is_reference_v<D>, D, const D&> aD1)
+ : mTuple(aPtr, aD1) {}
+
+ UniquePtr(Pointer aPtr, std::remove_reference_t<D>&& aD2)
+ : mTuple(aPtr, std::move(aD2)) {
+ static_assert(!std::is_reference_v<D>,
+ "rvalue deleter can't be stored by reference");
+ }
+
+ UniquePtr(UniquePtr&& aOther)
+ : mTuple(aOther.release(),
+ std::forward<DeleterType>(aOther.get_deleter())) {}
+
+ MOZ_IMPLICIT constexpr UniquePtr(decltype(nullptr)) : UniquePtr() {}
+
+ template <typename U, class E>
+ MOZ_IMPLICIT UniquePtr(
+ UniquePtr<U, E>&& aOther,
+ std::enable_if_t<
+ std::is_convertible_v<typename UniquePtr<U, E>::Pointer, Pointer> &&
+ !std::is_array_v<U> &&
+ (std::is_reference_v<D> ? std::is_same_v<D, E>
+ : std::is_convertible_v<E, D>),
+ int>
+ aDummy = 0)
+ : mTuple(aOther.release(), std::forward<E>(aOther.get_deleter())) {}
+
+ ~UniquePtr() { reset(nullptr); }
+
+ UniquePtr& operator=(UniquePtr&& aOther) {
+ reset(aOther.release());
+ get_deleter() = std::forward<DeleterType>(aOther.get_deleter());
+ return *this;
+ }
+
+ template <typename U, typename E>
+ UniquePtr& operator=(UniquePtr<U, E>&& aOther) {
+ static_assert(
+ std::is_convertible_v<typename UniquePtr<U, E>::Pointer, Pointer>,
+ "incompatible UniquePtr pointees");
+ static_assert(!std::is_array_v<U>,
+ "can't assign from UniquePtr holding an array");
+
+ reset(aOther.release());
+ get_deleter() = std::forward<E>(aOther.get_deleter());
+ return *this;
+ }
+
+ UniquePtr& operator=(decltype(nullptr)) {
+ reset(nullptr);
+ return *this;
+ }
+
+ std::add_lvalue_reference_t<T> operator*() const {
+ MOZ_ASSERT(get(), "dereferencing a UniquePtr containing nullptr with *");
+ return *get();
+ }
+ Pointer operator->() const {
+ MOZ_ASSERT(get(), "dereferencing a UniquePtr containing nullptr with ->");
+ return get();
+ }
+
+ explicit operator bool() const { return get() != nullptr; }
+
+ Pointer get() const { return ptr(); }
+
+ DeleterType& get_deleter() { return del(); }
+ const DeleterType& get_deleter() const { return del(); }
+
+ [[nodiscard]] Pointer release() {
+ Pointer p = ptr();
+ ptr() = nullptr;
+ return p;
+ }
+
+ void reset(Pointer aPtr = Pointer()) {
+ Pointer old = ptr();
+ ptr() = aPtr;
+ if (old != nullptr) {
+ get_deleter()(old);
+ }
+ }
+
+ void swap(UniquePtr& aOther) { mTuple.swap(aOther.mTuple); }
+
+ UniquePtr(const UniquePtr& aOther) = delete; // construct using std::move()!
+ void operator=(const UniquePtr& aOther) =
+ delete; // assign using std::move()!
+};
+
+// In case you didn't read the comment by the main definition (you should!): the
+// UniquePtr<T[]> specialization exists to manage array pointers. It deletes
+// such pointers using delete[], and it rejects construction and modification
+// attempts using U* or U[]. Otherwise it works like the normal UniquePtr.
+template <typename T, class D>
+class UniquePtr<T[], D> {
+ public:
+ typedef T* Pointer;
+ typedef T ElementType;
+ typedef D DeleterType;
+
+ private:
+ mozilla::CompactPair<Pointer, DeleterType> mTuple;
+
+ public:
+ /**
+ * Construct a UniquePtr containing nullptr.
+ */
+ constexpr UniquePtr() : mTuple(static_cast<Pointer>(nullptr), DeleterType()) {
+ static_assert(!std::is_pointer_v<D>, "must provide a deleter instance");
+ static_assert(!std::is_reference_v<D>, "must provide a deleter instance");
+ }
+
+ /**
+ * Construct a UniquePtr containing |aPtr|.
+ */
+ explicit UniquePtr(Pointer aPtr) : mTuple(aPtr, DeleterType()) {
+ static_assert(!std::is_pointer_v<D>, "must provide a deleter instance");
+ static_assert(!std::is_reference_v<D>, "must provide a deleter instance");
+ }
+
+ // delete[] knows how to handle *only* an array of a single class type. For
+ // delete[] to work correctly, it must know the size of each element, the
+ // fields and base classes of each element requiring destruction, and so on.
+ // So forbid all overloads which would end up invoking delete[] on a pointer
+ // of the wrong type.
+ template <typename U>
+ UniquePtr(U&& aU,
+ std::enable_if_t<
+ std::is_pointer_v<U> && std::is_convertible_v<U, Pointer>, int>
+ aDummy = 0) = delete;
+
+ UniquePtr(Pointer aPtr,
+ std::conditional_t<std::is_reference_v<D>, D, const D&> aD1)
+ : mTuple(aPtr, aD1) {}
+
+ UniquePtr(Pointer aPtr, std::remove_reference_t<D>&& aD2)
+ : mTuple(aPtr, std::move(aD2)) {
+ static_assert(!std::is_reference_v<D>,
+ "rvalue deleter can't be stored by reference");
+ }
+
+ // Forbidden for the same reasons as stated above.
+ template <typename U, typename V>
+ UniquePtr(U&& aU, V&& aV,
+ std::enable_if_t<
+ std::is_pointer_v<U> && std::is_convertible_v<U, Pointer>, int>
+ aDummy = 0) = delete;
+
+ UniquePtr(UniquePtr&& aOther)
+ : mTuple(aOther.release(),
+ std::forward<DeleterType>(aOther.get_deleter())) {}
+
+ MOZ_IMPLICIT
+ UniquePtr(decltype(nullptr)) : mTuple(nullptr, DeleterType()) {
+ static_assert(!std::is_pointer_v<D>, "must provide a deleter instance");
+ static_assert(!std::is_reference_v<D>, "must provide a deleter instance");
+ }
+
+ ~UniquePtr() { reset(nullptr); }
+
+ UniquePtr& operator=(UniquePtr&& aOther) {
+ reset(aOther.release());
+ get_deleter() = std::forward<DeleterType>(aOther.get_deleter());
+ return *this;
+ }
+
+ UniquePtr& operator=(decltype(nullptr)) {
+ reset();
+ return *this;
+ }
+
+ explicit operator bool() const { return get() != nullptr; }
+
+ T& operator[](decltype(sizeof(int)) aIndex) const { return get()[aIndex]; }
+ Pointer get() const { return mTuple.first(); }
+
+ DeleterType& get_deleter() { return mTuple.second(); }
+ const DeleterType& get_deleter() const { return mTuple.second(); }
+
+ [[nodiscard]] Pointer release() {
+ Pointer p = mTuple.first();
+ mTuple.first() = nullptr;
+ return p;
+ }
+
+ void reset(Pointer aPtr = Pointer()) {
+ Pointer old = mTuple.first();
+ mTuple.first() = aPtr;
+ if (old != nullptr) {
+ mTuple.second()(old);
+ }
+ }
+
+ void reset(decltype(nullptr)) {
+ Pointer old = mTuple.first();
+ mTuple.first() = nullptr;
+ if (old != nullptr) {
+ mTuple.second()(old);
+ }
+ }
+
+ template <typename U>
+ void reset(U) = delete;
+
+ void swap(UniquePtr& aOther) { mTuple.swap(aOther.mTuple); }
+
+ UniquePtr(const UniquePtr& aOther) = delete; // construct using std::move()!
+ void operator=(const UniquePtr& aOther) =
+ delete; // assign using std::move()!
+};
+
+/**
+ * A default deletion policy using plain old operator delete.
+ *
+ * Note that this type can be specialized, but authors should beware of the risk
+ * that the specialization may at some point cease to match (either because it
+ * gets moved to a different compilation unit or the signature changes). If the
+ * non-specialized (|delete|-based) version compiles for that type but does the
+ * wrong thing, bad things could happen.
+ *
+ * This is a non-issue for types which are always incomplete (i.e. opaque handle
+ * types), since |delete|-ing such a type will always trigger a compilation
+ * error.
+ */
+template <typename T>
+class DefaultDelete {
+ public:
+ constexpr DefaultDelete() = default;
+
+ template <typename U>
+ MOZ_IMPLICIT DefaultDelete(
+ const DefaultDelete<U>& aOther,
+ std::enable_if_t<std::is_convertible_v<U*, T*>, int> aDummy = 0) {}
+
+ void operator()(T* aPtr) const {
+ static_assert(sizeof(T) > 0, "T must be complete");
+ delete aPtr;
+ }
+};
+
+/** A default deletion policy using operator delete[]. */
+template <typename T>
+class DefaultDelete<T[]> {
+ public:
+ constexpr DefaultDelete() = default;
+
+ void operator()(T* aPtr) const {
+ static_assert(sizeof(T) > 0, "T must be complete");
+ delete[] aPtr;
+ }
+
+ template <typename U>
+ void operator()(U* aPtr) const = delete;
+};
+
+template <typename T, class D, typename U, class E>
+bool operator==(const UniquePtr<T, D>& aX, const UniquePtr<U, E>& aY) {
+ return aX.get() == aY.get();
+}
+
+template <typename T, class D, typename U, class E>
+bool operator!=(const UniquePtr<T, D>& aX, const UniquePtr<U, E>& aY) {
+ return aX.get() != aY.get();
+}
+
+template <typename T, class D>
+bool operator==(const UniquePtr<T, D>& aX, const T* aY) {
+ return aX.get() == aY;
+}
+
+template <typename T, class D>
+bool operator==(const T* aY, const UniquePtr<T, D>& aX) {
+ return aY == aX.get();
+}
+
+template <typename T, class D>
+bool operator!=(const UniquePtr<T, D>& aX, const T* aY) {
+ return aX.get() != aY;
+}
+
+template <typename T, class D>
+bool operator!=(const T* aY, const UniquePtr<T, D>& aX) {
+ return aY != aX.get();
+}
+
+template <typename T, class D>
+bool operator==(const UniquePtr<T, D>& aX, decltype(nullptr)) {
+ return !aX;
+}
+
+template <typename T, class D>
+bool operator==(decltype(nullptr), const UniquePtr<T, D>& aX) {
+ return !aX;
+}
+
+template <typename T, class D>
+bool operator!=(const UniquePtr<T, D>& aX, decltype(nullptr)) {
+ return bool(aX);
+}
+
+template <typename T, class D>
+bool operator!=(decltype(nullptr), const UniquePtr<T, D>& aX) {
+ return bool(aX);
+}
+
+// No operator<, operator>, operator<=, operator>= for now, for simplicity.
+
+namespace detail {
+
+template <typename T>
+struct UniqueSelector {
+ typedef UniquePtr<T> SingleObject;
+};
+
+template <typename T>
+struct UniqueSelector<T[]> {
+ typedef UniquePtr<T[]> UnknownBound;
+};
+
+template <typename T, decltype(sizeof(int)) N>
+struct UniqueSelector<T[N]> {
+ typedef UniquePtr<T[N]> KnownBound;
+};
+
+} // namespace detail
+
+/**
+ * MakeUnique is a helper function for allocating new'd objects and arrays,
+ * returning a UniquePtr containing the resulting pointer. The semantics of
+ * MakeUnique<Type>(...) are as follows.
+ *
+ * If Type is an array T[n]:
+ * Disallowed, deleted, no overload for you!
+ * If Type is an array T[]:
+ * MakeUnique<T[]>(size_t) is the only valid overload. The pointer returned
+ * is as if by |new T[n]()|, which value-initializes each element. (If T
+ * isn't a class type, this will zero each element. If T is a class type,
+ * then roughly speaking, each element will be constructed using its default
+ * constructor. See C++11 [dcl.init]p7 for the full gory details.)
+ * If Type is non-array T:
+ * The arguments passed to MakeUnique<T>(...) are forwarded into a
+ * |new T(...)| call, initializing the T as would happen if executing
+ * |T(...)|.
+ *
+ * There are various benefits to using MakeUnique instead of |new| expressions.
+ *
+ * First, MakeUnique eliminates use of |new| from code entirely. If objects are
+ * only created through UniquePtr, then (assuming all explicit release() calls
+ * are safe, including transitively, and no type-safety casting funniness)
+ * correctly maintained ownership of the UniquePtr guarantees no leaks are
+ * possible. (This pays off best if a class is only ever created through a
+ * factory method on the class, using a private constructor.)
+ *
+ * Second, initializing a UniquePtr using a |new| expression requires repeating
+ * the name of the new'd type, whereas MakeUnique in concert with the |auto|
+ * keyword names it only once:
+ *
+ * UniquePtr<char> ptr1(new char()); // repetitive
+ * auto ptr2 = MakeUnique<char>(); // shorter
+ *
+ * Of course this assumes the reader understands the operation MakeUnique
+ * performs. In the long run this is probably a reasonable assumption. In the
+ * short run you'll have to use your judgment about what readers can be expected
+ * to know, or to quickly look up.
+ *
+ * Third, a call to MakeUnique can be assigned directly to a UniquePtr. In
+ * contrast you can't assign a pointer into a UniquePtr without using the
+ * cumbersome reset().
+ *
+ * UniquePtr<char> p;
+ * p = new char; // ERROR
+ * p.reset(new char); // works, but fugly
+ * p = MakeUnique<char>(); // preferred
+ *
+ * (And fourth, although not relevant to Mozilla: MakeUnique is exception-safe.
+ * An exception thrown after |new T| succeeds will leak that memory, unless the
+ * pointer is assigned to an object that will manage its ownership. UniquePtr
+ * ably serves this function.)
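+ *
+ * For example, a brief sketch of the array form described above:
+ *
+ *   auto zeroed = MakeUnique<int[]>(8);  // as if by |new int[8]()|: all zero
+ *   zeroed[3] = 17;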
+ */
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::SingleObject MakeUnique(Args&&... aArgs) {
+ return UniquePtr<T>(new T(std::forward<Args>(aArgs)...));
+}
+
+template <typename T>
+typename detail::UniqueSelector<T>::UnknownBound MakeUnique(
+ decltype(sizeof(int)) aN) {
+ using ArrayType = std::remove_extent_t<T>;
+ return UniquePtr<T>(new ArrayType[aN]());
+}
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::KnownBound MakeUnique(Args&&... aArgs) =
+ delete;
+
+/**
+ * WrapUnique is a helper function to transfer ownership from a raw pointer
+ * into a UniquePtr<T>. It can only be used with a single non-array type.
+ *
+ * It is generally used this way:
+ *
+ * auto p = WrapUnique(new char);
+ *
+ * It can be used when MakeUnique is not usable, for example, when the
+ * constructor you are using is private, or you want to use aggregate
+ * initialization.
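+ *
+ * For example (a sketch; |Point| is a hypothetical aggregate, so before C++20
+ * |MakeUnique<Point>(1, 2)| wouldn't compile):
+ *
+ *   struct Point { int x; int y; };
+ *   auto p = WrapUnique(new Point{1, 2});  // UniquePtr<Point>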
+ */
+
+template <typename T>
+typename detail::UniqueSelector<T>::SingleObject WrapUnique(T* aPtr) {
+ return UniquePtr<T>(aPtr);
+}
+
+} // namespace mozilla
+
+namespace std {
+
+template <typename T, class D>
+void swap(mozilla::UniquePtr<T, D>& aX, mozilla::UniquePtr<T, D>& aY) {
+ aX.swap(aY);
+}
+
+} // namespace std
+
+/**
+TempPtrToSetter(UniquePtr<T>*) -> T**-ish
+TempPtrToSetter(std::unique_ptr<T>*) -> T**-ish
+
+Make a temporary class to support assigning to UniquePtr/unique_ptr via passing
+a pointer to the callee.
+
+Often, APIs will be shaped like this trivial example:
+```
+nsresult Foo::NewChildBar(Bar** out) {
+ if (!IsOk()) return NS_ERROR_FAILURE;
+ *out = new Bar(this);
+ return NS_OK;
+}
+```
+
+Making this work with unique pointers is often either risky or overwrought:
+```
+Bar* bar = nullptr;
+const auto cleanup = MakeScopeExit([&]() {
+ if (bar) {
+ delete bar;
+ }
+});
+if (FAILED(foo->NewChildBar(&bar))) {
+ // handle it
+}
+```
+
+```
+UniquePtr<Bar> bar;
+{
+ Bar* raw = nullptr;
+  const auto res = foo->NewChildBar(&raw);
+  bar.reset(raw);
+  if (FAILED(res)) {
+ // handle it
+ }
+}
+```
+TempPtrToSetter is a shorthand for the latter approach, allowing something
+cleaner but also safe:
+
+```
+UniquePtr<Bar> bar;
+if (FAILED(foo->NewChildBar(TempPtrToSetter(&bar)))) {
+ // handle it
+}
+```
+*/
+
+namespace mozilla {
+namespace detail {
+
+template <class T, class UniquePtrT>
+class MOZ_TEMPORARY_CLASS TempPtrToSetterT final {
+ private:
+ UniquePtrT* const mDest;
+ T* mNewVal;
+
+ public:
+ explicit TempPtrToSetterT(UniquePtrT* dest)
+ : mDest(dest), mNewVal(mDest->get()) {}
+
+ operator T**() { return &mNewVal; }
+
+ ~TempPtrToSetterT() {
+ if (mDest->get() != mNewVal) {
+ mDest->reset(mNewVal);
+ }
+ }
+};
+
+} // namespace detail
+
+template <class T, class Deleter>
+auto TempPtrToSetter(UniquePtr<T, Deleter>* const p) {
+ return detail::TempPtrToSetterT<T, UniquePtr<T, Deleter>>{p};
+}
+
+template <class T, class Deleter>
+auto TempPtrToSetter(std::unique_ptr<T, Deleter>* const p) {
+ return detail::TempPtrToSetterT<T, std::unique_ptr<T, Deleter>>{p};
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_UniquePtr_h */
diff --git a/mfbt/UniquePtrExtensions.cpp b/mfbt/UniquePtrExtensions.cpp
new file mode 100644
index 0000000000..229c942196
--- /dev/null
+++ b/mfbt/UniquePtrExtensions.cpp
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "UniquePtrExtensions.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/DebugOnly.h"
+
+#ifdef XP_WIN
+# include <windows.h>
+#else
+# include <errno.h>
+# include <unistd.h>
+#endif
+
+namespace mozilla {
+namespace detail {
+
+void FileHandleDeleter::operator()(FileHandleHelper aHelper) {
+ if (aHelper != nullptr) {
+ DebugOnly<bool> ok;
+#ifdef XP_WIN
+ ok = CloseHandle(aHelper);
+#else
+ ok = close(aHelper) == 0 || errno == EINTR;
+#endif
+ MOZ_ASSERT(ok, "failed to close file handle");
+ }
+}
+
+} // namespace detail
+} // namespace mozilla
diff --git a/mfbt/UniquePtrExtensions.h b/mfbt/UniquePtrExtensions.h
new file mode 100644
index 0000000000..2679440e50
--- /dev/null
+++ b/mfbt/UniquePtrExtensions.h
@@ -0,0 +1,315 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Useful extensions to UniquePtr. */
+
+#ifndef mozilla_UniquePtrExtensions_h
+#define mozilla_UniquePtrExtensions_h
+
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/fallible.h"
+#include "mozilla/UniquePtr.h"
+
+#ifdef XP_WIN
+# include <cstdint>
+#endif
+#if defined(XP_DARWIN) && !defined(RUST_BINDGEN)
+# include <mach/mach.h>
+#endif
+
+namespace mozilla {
+
+/**
+ * MakeUniqueFallible works exactly like MakeUnique, except that the memory
+ * allocation performed is done fallibly, i.e. it can return nullptr.
+ */
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::SingleObject MakeUniqueFallible(
+ Args&&... aArgs) {
+ return UniquePtr<T>(new (fallible) T(std::forward<Args>(aArgs)...));
+}
+
+template <typename T>
+typename detail::UniqueSelector<T>::UnknownBound MakeUniqueFallible(
+ decltype(sizeof(int)) aN) {
+ using ArrayType = std::remove_extent_t<T>;
+ return UniquePtr<T>(new (fallible) ArrayType[aN]());
+}
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::KnownBound MakeUniqueFallible(
+ Args&&... aArgs) = delete;
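+
+// For example, a minimal sketch of fallible allocation (|aLen| is a
+// hypothetical size; unlike MakeUnique, the result must be null-checked):
+//
+//   UniquePtr<uint8_t[]> buf = MakeUniqueFallible<uint8_t[]>(aLen);
+//   if (!buf) {
+//     return false;  // allocation failed
+//   }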
+
+/**
+ * MakeUniqueForOverwrite and MakeUniqueForOverwriteFallible are like MakeUnique
+ * and MakeUniqueFallible except they use default-initialization. This is
+ * useful, for example, when you have a POD type array that will be overwritten
+ * directly after construction and so zero-initialization is a waste.
+ */
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::SingleObject MakeUniqueForOverwrite() {
+ return UniquePtr<T>(new T);
+}
+
+template <typename T>
+typename detail::UniqueSelector<T>::UnknownBound MakeUniqueForOverwrite(
+ decltype(sizeof(int)) aN) {
+ using ArrayType = std::remove_extent_t<T>;
+ return UniquePtr<T>(new ArrayType[aN]);
+}
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::KnownBound MakeUniqueForOverwrite(
+ Args&&... aArgs) = delete;
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::SingleObject
+MakeUniqueForOverwriteFallible() {
+ return UniquePtr<T>(new (fallible) T);
+}
+
+template <typename T>
+typename detail::UniqueSelector<T>::UnknownBound MakeUniqueForOverwriteFallible(
+ decltype(sizeof(int)) aN) {
+ using ArrayType = std::remove_extent_t<T>;
+ return UniquePtr<T>(new (fallible) ArrayType[aN]);
+}
+
+template <typename T, typename... Args>
+typename detail::UniqueSelector<T>::KnownBound MakeUniqueForOverwriteFallible(
+ Args&&... aArgs) = delete;
+
+namespace detail {
+
+template <typename T>
+struct FreePolicy {
+ void operator()(const void* ptr) { free(const_cast<void*>(ptr)); }
+};
+
+#if defined(XP_WIN)
+// Can't include <windows.h> to get the actual definition of HANDLE
+// because of namespace pollution.
+typedef void* FileHandleType;
+#elif defined(XP_UNIX)
+typedef int FileHandleType;
+#else
+# error "Unsupported OS?"
+#endif
+
+struct FileHandleHelper {
+ MOZ_IMPLICIT FileHandleHelper(FileHandleType aHandle) : mHandle(aHandle) {
+#if defined(XP_UNIX) && (defined(DEBUG) || defined(FUZZING))
+ MOZ_RELEASE_ASSERT(aHandle == kInvalidHandle || aHandle > 2);
+#endif
+ }
+
+ MOZ_IMPLICIT constexpr FileHandleHelper(std::nullptr_t)
+ : mHandle(kInvalidHandle) {}
+
+ bool operator!=(std::nullptr_t) const {
+#ifdef XP_WIN
+ // Windows uses both nullptr and INVALID_HANDLE_VALUE (-1 cast to
+ // HANDLE) in different situations, but nullptr is more reliably
+ // null while -1 is also valid input to some calls that take
+    // handles. So this class considers both to be null (since neither
+ // should be closed) but default-constructs as nullptr.
+ if (mHandle == (void*)-1) {
+ return false;
+ }
+#endif
+ return mHandle != kInvalidHandle;
+ }
+
+ operator FileHandleType() const { return mHandle; }
+
+#ifdef XP_WIN
+ // NSPR uses an integer type for PROsfd, so this conversion is
+ // provided for working with it without needing reinterpret casts
+ // everywhere.
+ operator std::intptr_t() const {
+ return reinterpret_cast<std::intptr_t>(mHandle);
+ }
+#endif
+
+ // When there's only one user-defined conversion operator, the
+ // compiler will use that to derive equality, but that doesn't work
+  // when the conversion is ambiguous (the XP_WIN case above).
+ bool operator==(const FileHandleHelper& aOther) const {
+ return mHandle == aOther.mHandle;
+ }
+
+ private:
+ FileHandleType mHandle;
+
+#ifdef XP_WIN
+ // See above for why this is nullptr. (Also, INVALID_HANDLE_VALUE
+ // can't be expressed as a constexpr.)
+ static constexpr FileHandleType kInvalidHandle = nullptr;
+#else
+ static constexpr FileHandleType kInvalidHandle = -1;
+#endif
+};
+
+struct FileHandleDeleter {
+ using pointer = FileHandleHelper;
+ using receiver = FileHandleType;
+ MFBT_API void operator()(FileHandleHelper aHelper);
+};
+
+#if defined(XP_DARWIN) && !defined(RUST_BINDGEN)
+struct MachPortHelper {
+ MOZ_IMPLICIT MachPortHelper(mach_port_t aPort) : mPort(aPort) {}
+
+ MOZ_IMPLICIT constexpr MachPortHelper(std::nullptr_t)
+ : mPort(MACH_PORT_NULL) {}
+
+ bool operator!=(std::nullptr_t) const { return mPort != MACH_PORT_NULL; }
+
+ operator const mach_port_t&() const { return mPort; }
+ operator mach_port_t&() { return mPort; }
+
+ private:
+ mach_port_t mPort;
+};
+
+struct MachSendRightDeleter {
+ using pointer = MachPortHelper;
+ using receiver = mach_port_t;
+ MFBT_API void operator()(MachPortHelper aHelper) {
+ DebugOnly<kern_return_t> kr =
+ mach_port_deallocate(mach_task_self(), aHelper);
+ MOZ_ASSERT(kr == KERN_SUCCESS, "failed to deallocate mach send right");
+ }
+};
+
+struct MachReceiveRightDeleter {
+ using pointer = MachPortHelper;
+ using receiver = mach_port_t;
+ MFBT_API void operator()(MachPortHelper aHelper) {
+ DebugOnly<kern_return_t> kr = mach_port_mod_refs(
+ mach_task_self(), aHelper, MACH_PORT_RIGHT_RECEIVE, -1);
+ MOZ_ASSERT(kr == KERN_SUCCESS, "failed to release mach receive right");
+ }
+};
+
+struct MachPortSetDeleter {
+ using pointer = MachPortHelper;
+ using receiver = mach_port_t;
+ MFBT_API void operator()(MachPortHelper aHelper) {
+ DebugOnly<kern_return_t> kr = mach_port_mod_refs(
+ mach_task_self(), aHelper, MACH_PORT_RIGHT_PORT_SET, -1);
+ MOZ_ASSERT(kr == KERN_SUCCESS, "failed to release mach port set");
+ }
+};
+#endif
+
+} // namespace detail
+
+template <typename T>
+using UniqueFreePtr = UniquePtr<T, detail::FreePolicy<T>>;
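+
+// For example, a minimal sketch (strdup() returns malloc()'d memory, so the
+// copy must be released with free(), which UniqueFreePtr arranges):
+//
+//   UniqueFreePtr<char> copy(strdup("example"));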
+
+// A RAII class for the OS construct used for open files and similar
+// objects: a file descriptor on Unix or a handle on Windows.
+using UniqueFileHandle =
+ UniquePtr<detail::FileHandleType, detail::FileHandleDeleter>;
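+
+// For example, a minimal sketch on POSIX (open(2) assumed; on Windows the
+// handle would come from an API like CreateFileW instead):
+//
+//   UniqueFileHandle fd(open("/tmp/example", O_RDONLY));
+//   if (!fd) {
+//     // open failed; nothing will be closed
+//   }
+//   // close() runs automatically when |fd| goes out of scope.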
+
+#if defined(XP_DARWIN) && !defined(RUST_BINDGEN)
+// A RAII class for a Mach port that names a send right.
+using UniqueMachSendRight =
+ UniquePtr<mach_port_t, detail::MachSendRightDeleter>;
+// A RAII class for a Mach port that names a receive right.
+using UniqueMachReceiveRight =
+ UniquePtr<mach_port_t, detail::MachReceiveRightDeleter>;
+// A RAII class for a Mach port set.
+using UniqueMachPortSet = UniquePtr<mach_port_t, detail::MachPortSetDeleter>;
+
+// Increases the user reference count for MACH_PORT_RIGHT_SEND by 1 and returns
+// a new UniqueMachSendRight to manage the additional right.
+inline UniqueMachSendRight RetainMachSendRight(mach_port_t aPort) {
+ kern_return_t kr =
+ mach_port_mod_refs(mach_task_self(), aPort, MACH_PORT_RIGHT_SEND, 1);
+ if (kr == KERN_SUCCESS) {
+ return UniqueMachSendRight(aPort);
+ }
+ return nullptr;
+}
+#endif
+
+namespace detail {
+
+struct HasReceiverTypeHelper {
+ template <class U>
+ static double Test(...);
+ template <class U>
+ static char Test(typename U::receiver* = 0);
+};
+
+template <class T>
+class HasReceiverType
+ : public std::integral_constant<bool, sizeof(HasReceiverTypeHelper::Test<T>(
+ 0)) == 1> {};
+
+template <class T, class D, bool = HasReceiverType<D>::value>
+struct ReceiverTypeImpl {
+ using Type = typename D::receiver;
+};
+
+template <class T, class D>
+struct ReceiverTypeImpl<T, D, false> {
+ using Type = typename PointerType<T, D>::Type;
+};
+
+template <class T, class D>
+struct ReceiverType {
+ using Type = typename ReceiverTypeImpl<T, std::remove_reference_t<D>>::Type;
+};
+
+template <typename T, typename D>
+class MOZ_TEMPORARY_CLASS UniquePtrGetterTransfers {
+ public:
+ using Ptr = UniquePtr<T, D>;
+ using Receiver = typename detail::ReceiverType<T, D>::Type;
+
+ explicit UniquePtrGetterTransfers(Ptr& p)
+ : mPtr(p), mReceiver(typename Ptr::Pointer(nullptr)) {}
+ ~UniquePtrGetterTransfers() { mPtr.reset(mReceiver); }
+
+ operator Receiver*() { return &mReceiver; }
+ Receiver& operator*() { return mReceiver; }
+
+ // operator void** is conditionally enabled if `Receiver` is a pointer.
+ template <typename U = Receiver,
+ std::enable_if_t<
+ std::is_pointer_v<U> && std::is_same_v<U, Receiver>, int> = 0>
+ operator void**() {
+ return reinterpret_cast<void**>(&mReceiver);
+ }
+
+ private:
+ Ptr& mPtr;
+ Receiver mReceiver;
+};
+
+} // namespace detail
+
+// Helper for passing a UniquePtr to an old-style function that uses raw
+// pointers for out params. Example usage:
+//
+// void AllocateFoo(Foo** out) { *out = new Foo(); }
+// UniquePtr<Foo> foo;
+// AllocateFoo(getter_Transfers(foo));
+template <typename T, typename D>
+auto getter_Transfers(UniquePtr<T, D>& up) {
+ return detail::UniquePtrGetterTransfers<T, D>(up);
+}
+
+} // namespace mozilla
+
+#endif // mozilla_UniquePtrExtensions_h
diff --git a/mfbt/Unused.cpp b/mfbt/Unused.cpp
new file mode 100644
index 0000000000..e6c5f66997
--- /dev/null
+++ b/mfbt/Unused.cpp
@@ -0,0 +1,13 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Unused.h"
+
+namespace mozilla {
+
+const unused_t Unused = unused_t();
+
+} // namespace mozilla
diff --git a/mfbt/Unused.h b/mfbt/Unused.h
new file mode 100644
index 0000000000..6c4ed4baac
--- /dev/null
+++ b/mfbt/Unused.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_unused_h
+#define mozilla_unused_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Types.h"
+
+#ifdef __cplusplus
+
+namespace mozilla {
+
+//
+// Suppress GCC warnings about unused return values with
+// Unused << SomeFuncDeclaredWarnUnusedReturnValue();
+//
+struct unused_t {
+ template <typename T>
+ MOZ_ALWAYS_INLINE_EVEN_DEBUG void operator<<(const T& /*unused*/) const {}
+};
+
+extern MFBT_DATA const unused_t Unused;
+
+} // namespace mozilla
+
+#endif // __cplusplus
+
+// An alternative to mozilla::Unused for use in (a) C code and (b) code where
+// linking with unused.o is difficult.
+#define MOZ_UNUSED(expr) \
+ do { \
+ if (expr) { \
+ (void)0; \
+ } \
+ } while (0)
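+
+// For example, a minimal sketch (|fd|, |buf|, and |len| are hypothetical; in
+// C++ code prefer |Unused <<|):
+//
+//   MOZ_UNUSED(write(fd, buf, len));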
+
+#endif // mozilla_unused_h
diff --git a/mfbt/Utf8.cpp b/mfbt/Utf8.cpp
new file mode 100644
index 0000000000..1a10c7a011
--- /dev/null
+++ b/mfbt/Utf8.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Maybe.h"
+#include "mozilla/TextUtils.h"
+#include "mozilla/Types.h"
+#include "mozilla/Utf8.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+MFBT_API bool mozilla::detail::IsValidUtf8(const void* aCodeUnits,
+ size_t aCount) {
+ const auto* s = reinterpret_cast<const unsigned char*>(aCodeUnits);
+ const auto* const limit = s + aCount;
+
+ while (s < limit) {
+ unsigned char c = *s++;
+
+ // If the first byte is ASCII, it's the only one in the code point. Have a
+ // fast path that avoids all the rest of the work and looping in that case.
+ if (IsAscii(c)) {
+ continue;
+ }
+
+ Maybe<char32_t> maybeCodePoint =
+ DecodeOneUtf8CodePoint(Utf8Unit(c), &s, limit);
+ if (maybeCodePoint.isNothing()) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(s == limit);
+ return true;
+}
diff --git a/mfbt/Utf8.h b/mfbt/Utf8.h
new file mode 100644
index 0000000000..31a94d0714
--- /dev/null
+++ b/mfbt/Utf8.h
@@ -0,0 +1,596 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * UTF-8-related functionality, including a type-safe structure representing a
+ * UTF-8 code unit.
+ */
+
+#ifndef mozilla_Utf8_h
+#define mozilla_Utf8_h
+
+#include "mozilla/Casting.h" // for mozilla::AssertedCast
+#include "mozilla/Likely.h" // for MOZ_UNLIKELY
+#include "mozilla/Maybe.h" // for mozilla::Maybe
+#include "mozilla/Span.h" // for mozilla::Span
+#include "mozilla/TextUtils.h" // for mozilla::IsAscii and via Latin1.h for
+ // encoding_rs_mem.h and MOZ_HAS_JSRUST.
+#include "mozilla/Types.h" // for MFBT_API
+
+#include <limits> // for std::numeric_limits
+#include <limits.h> // for CHAR_BIT
+#include <stddef.h> // for size_t
+#include <stdint.h> // for uint8_t
+
+#if MOZ_HAS_JSRUST()
+// Can't include mozilla/Encoding.h here.
+extern "C" {
+// Declared as uint8_t instead of char to match declaration in another header.
+size_t encoding_utf8_valid_up_to(uint8_t const* buffer, size_t buffer_len);
+}
+#else
+namespace mozilla {
+namespace detail {
+extern MFBT_API bool IsValidUtf8(const void* aCodeUnits, size_t aCount);
+}; // namespace detail
+}; // namespace mozilla
+#endif // MOZ_HAS_JSRUST
+
+namespace mozilla {
+
+union Utf8Unit;
+
+static_assert(CHAR_BIT == 8,
+ "Utf8Unit won't work so well with non-octet chars");
+
+/**
+ * A code unit within a UTF-8 encoded string. (A code unit is the smallest
+ * unit within the Unicode encoding of a string. For UTF-8 this is an 8-bit
+ * number; for UTF-16 it would be a 16-bit number.)
+ *
+ * This is *not* the same as a single code point: in UTF-8, non-ASCII code
+ * points are constituted by multiple code units.
+ */
+union Utf8Unit {
+ private:
+ // Utf8Unit is a union wrapping a raw |char|. The C++ object model and C++
+ // requirements as to how objects may be accessed with respect to their actual
+ // types (almost?) uniquely compel this choice.
+ //
+ // Our requirements for a UTF-8 code unit representation are:
+ //
+ // 1. It must be "compatible" with C++ character/string literals that use
+ // the UTF-8 encoding. Given a properly encoded C++ literal, you should
+ // be able to use |Utf8Unit| and friends to access it; given |Utf8Unit|
+ // and friends (particularly UnicodeData), you should be able to access
+ // C++ character types for their contents.
+ // 2. |Utf8Unit| and friends must convert to/from |char| and |char*| only by
+ // explicit operation.
+ // 3. |Utf8Unit| must participate in overload resolution and template type
+ // equivalence (that is, given |template<class> class X|, when |X<T>| and
+ // |X<U>| are the same type) distinctly from the C++ character types.
+ //
+ // And a few nice-to-haves (at least for the moment):
+ //
+ // 4. The representation should use unsigned numbers, to avoid undefined
+ // behavior that can arise with signed types, and because Unicode code
+ // points and code units are unsigned.
+ // 5. |Utf8Unit| and friends should be convertible to/from |unsigned char|
+ // and |unsigned char*|, for APIs that (because of #4 above) use those
+ // types as the "natural" choice for UTF-8 data.
+ //
+ // #1 requires that |Utf8Unit| "incorporate" a C++ character type: one of
+ // |{,{un,}signed} char|.[0] |uint8_t| won't work because it might not be a
+ // C++ character type.
+ //
+ // #2 and #3 mean that |Utf8Unit| can't *be* such a type (or a typedef to one:
+ // typedefs don't generate *new* types, just type aliases). This requires a
+ // compound type.
+ //
+ // The ultimate representation (and character type in it) is constrained by
+ // C++14 [basic.lval]p10 that defines how objects may be accessed, with
+ // respect to the dynamic type in memory and the actual type used to access
+ // them. It reads:
+ //
+ // If a program attempts to access the stored value of an object
+ // through a glvalue of other than one of the following types the
+ // behavior is undefined:
+ //
+ // 1. the dynamic type of the object,
+ // 2. a cv-qualified version of the dynamic type of the object,
+ // ...other types irrelevant here...
+ // 3. an aggregate or union type that includes one of the
+ // aforementioned types among its elements or non-static data
+ // members (including, recursively, an element or non-static
+ // data member of a subaggregate or contained union),
+ // ...more irrelevant types...
+ // 4. a char or unsigned char type.
+ //
+ // Accessing (wrapped) UTF-8 data as |char|/|unsigned char| is allowed no
+ // matter the representation by #4. (Briefly set aside what values are seen.)
+ // (And #2 allows |const| on either the dynamic type or the accessing type.)
+ // (|signed char| is really only useful for small signed numbers, not
+ // characters, so we ignore it.)
+ //
+ // If we interpret contents as |char|/|unsigned char| contrary to the actual
+ // type stored there, what happens? C++14 [basic.fundamental]p1 requires
+ // character types be identically aligned/sized; C++14 [basic.fundamental]p3
+ // requires |signed char| and |unsigned char| have the same value
+ // representation. C++ doesn't require identical bitwise representation, tho.
+ // Practically we could assume it, but this verges on C++ spec bits best not
+ // *relied* on for correctness, if possible.
+ //
+ // So we don't expose |Utf8Unit|'s contents as |unsigned char*|: only |char|
+ // and |char*|. Instead we safely expose |unsigned char| by fully-defined
+ // *integral conversion* (C++14 [conv.integral]p2). Integral conversion from
+ // |unsigned char| → |char| has only implementation-defined behavior. It'd be
+ // better not to depend on that, but given twos-complement won, it should be
+ // okay. (Also |unsigned char*| is awkward enough to work with for strings
+ // that it probably doesn't appear in string manipulation much anyway, only in
+ // places that should really use |Utf8Unit| directly.)
+ //
+ // The opposite direction -- interpreting |char| or |char*| data through
+ // |Utf8Unit| -- isn't tricky as long as |Utf8Unit| contains a |char| as
+ // decided above, using #3. An "aggregate or union" will work that contains a
+ // |char|. Oddly, an aggregate won't work: C++14 [dcl.init.aggr]p1 says
+ // aggregates must have "no private or protected non-static data members", and
+ // we want to keep the inner |char| hidden. So a |struct| is out, and only
+ // |union| remains.
+ //
+ // (Enums are not "an aggregate or union type", so [maybe surprisingly] we
+ // can't make |Utf8Unit| an enum class with |char| underlying type, because we
+ // are given no license to treat |char| memory as such an |enum|'s memory.)
+ //
+ // Therefore |Utf8Unit| is a union type with a |char| non-static data member.
+ // This satisfies all our requirements. It also supports the nice-to-haves of
+ // creating a |Utf8Unit| from an |unsigned char|, and being convertible to
+ // |unsigned char|. It doesn't satisfy the nice-to-haves of using an
+ // |unsigned char| internally, nor of letting us wrap an existing
+ // |unsigned char| or pointer to one. We probably *could* do these, if we
+ // were willing to rely harder on implementation-defined behaviors, but for
+ // now we privilege C++'s main character type over some conceptual purity.
+ //
+ // 0. There's a proposal for a UTF-8 character type distinct from the existing
+ // C++ narrow character types:
+ //
+ // http://open-std.org/JTC1/SC22/WG21/docs/papers/2016/p0482r0.html
+ //
+ // but it hasn't been standardized (and might never be), and none of the
+ // compilers we really care about have implemented it. Maybe someday we
+ // can change our implementation to it without too much trouble, if we're
+ // lucky...
+ char mValue = '\0';
+
+ public:
+ Utf8Unit() = default;
+
+ explicit constexpr Utf8Unit(char aUnit) : mValue(aUnit) {}
+
+ explicit constexpr Utf8Unit(unsigned char aUnit)
+ : mValue(static_cast<char>(aUnit)) {
+ // Per the above comment, the prior cast is integral conversion with
+ // implementation-defined semantics, and we regretfully but unavoidably
+ // assume the conversion does what we want it to.
+ }
+
+#ifdef __cpp_char8_t
+ explicit constexpr Utf8Unit(char8_t aUnit)
+ : mValue(static_cast<char>(aUnit)) {}
+#endif
+
+ constexpr bool operator==(const Utf8Unit& aOther) const {
+ return mValue == aOther.mValue;
+ }
+
+ constexpr bool operator!=(const Utf8Unit& aOther) const {
+ return !(*this == aOther);
+ }
+
+ /** Convert a UTF-8 code unit to a raw char. */
+ constexpr char toChar() const {
+ // Only a |char| is ever permitted to be written into this location, so this
+ // is both permissible and returns the desired value.
+ return mValue;
+ }
+
+ /** Convert a UTF-8 code unit to a raw unsigned char. */
+ constexpr unsigned char toUnsignedChar() const {
+ // Per the above comment, this is well-defined integral conversion.
+ return static_cast<unsigned char>(mValue);
+ }
+
+ /** Convert a UTF-8 code unit to a uint8_t. */
+ constexpr uint8_t toUint8() const {
+ // Per the above comment, this is well-defined integral conversion.
+ return static_cast<uint8_t>(mValue);
+ }
+
+ // We currently don't expose |&mValue|. |UnicodeData| sort of does, but
+ // that's a somewhat separate concern, justified in different comments in
+ // that other code.
+};
+
+/**
+ * Reinterpret the address of a UTF-8 code unit as |const unsigned char*|.
+ *
+ * Assuming proper backing has been set up, the resulting |const unsigned char*|
+ * may validly be dereferenced.
+ *
+ * No access is provided to mutate this underlying memory as |unsigned char|.
+ * Presently memory inside |Utf8Unit| is *only* stored as |char|, and we are
+ * loath to offer a way to write non-|char| data until absolutely necessary.
+ */
+inline const unsigned char* Utf8AsUnsignedChars(const Utf8Unit* aUnits) {
+ static_assert(sizeof(Utf8Unit) == sizeof(unsigned char),
+ "sizes must match to permissibly reinterpret_cast<>");
+ static_assert(alignof(Utf8Unit) == alignof(unsigned char),
+ "alignment must match to permissibly reinterpret_cast<>");
+
+ // The static_asserts above only enable the reinterpret_cast<> to occur.
+ //
+ // Dereferencing the resulting pointer is a separate question. Any object's
+ // memory may be interpreted as |unsigned char| per C++11 [basic.lval]p10, but
+ // this doesn't guarantee what values will be observed. If |char| is
+ // implemented to act like |unsigned char|, we're good to go: memory for the
+ // |char| in |Utf8Unit| acts as we need. But if |char| is implemented to act
+ // like |signed char|, dereferencing produces the right value only if the
+ // |char| types all use two's-complement representation. Every modern
+ // compiler does this, and there's a C++ proposal to standardize it.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0907r0.html So
+ // *technically* this is implementation-defined -- but everyone does it and
+ // this behavior is being standardized.
+ return reinterpret_cast<const unsigned char*>(aUnits);
+}
+
+/** Returns true iff |aUnit| is an ASCII value. */
+constexpr bool IsAscii(Utf8Unit aUnit) {
+ return IsAscii(aUnit.toUnsignedChar());
+}
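+
+// For example, a minimal sketch:
+//
+//   Utf8Unit unit('a');
+//   MOZ_ASSERT(IsAscii(unit));
+//   MOZ_ASSERT(unit.toChar() == 'a');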
+
+/**
+ * Return true if the given span of memory consists of a valid UTF-8
+ * string and false otherwise.
+ *
+ * The string *may* contain U+0000 NULL code points.
+ */
+inline bool IsUtf8(mozilla::Span<const char> aString) {
+#if MOZ_HAS_JSRUST()
+ size_t length = aString.Length();
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(aString.Elements());
+ // For short strings, the function call is a pessimization, and the SIMD
+ // code won't have a chance to kick in anyway.
+ if (length < 16) {
+ for (size_t i = 0; i < length; i++) {
+ if (ptr[i] >= 0x80U) {
+ ptr += i;
+ length -= i;
+ goto end;
+ }
+ }
+ return true;
+ }
+end:
+ return length == encoding_utf8_valid_up_to(ptr, length);
+#else
+ return detail::IsValidUtf8(aString.Elements(), aString.Length());
+#endif
+}
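+
+// For example, a minimal sketch (MakeStringSpan comes from mozilla/Span.h):
+//
+//   MOZ_ASSERT(IsUtf8(MakeStringSpan("caf\xC3\xA9")));  // valid two-byte seq
+//   MOZ_ASSERT(!IsUtf8(MakeStringSpan("\xFF")));        // invalid lead byte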
+
+#if MOZ_HAS_JSRUST()
+
+// See Latin1.h for conversions between Latin1 and UTF-8.
+
+/**
+ * Returns the index of the start of the first malformed byte
+ * sequence or the length of the string if there are none.
+ */
+inline size_t Utf8ValidUpTo(mozilla::Span<const char> aString) {
+ return encoding_utf8_valid_up_to(
+ reinterpret_cast<const uint8_t*>(aString.Elements()), aString.Length());
+}
+
+/**
+ * Converts potentially-invalid UTF-16 to UTF-8 replacing lone surrogates
+ * with the REPLACEMENT CHARACTER.
+ *
+ * The length of aDest must be at least the length of aSource times three.
+ *
+ * Returns the number of code units written.
+ */
+inline size_t ConvertUtf16toUtf8(mozilla::Span<const char16_t> aSource,
+ mozilla::Span<char> aDest) {
+ return encoding_mem_convert_utf16_to_utf8(
+ aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length());
+}
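+
+// For example, a minimal sketch sizing the output to the worst case of three
+// bytes per UTF-16 code unit:
+//
+//   const char16_t src[] = u"caf\u00E9";  // 4 code units plus the terminator
+//   char dst[4 * 3];
+//   size_t written = ConvertUtf16toUtf8(Span<const char16_t>(src, 4),
+//                                       Span<char>(dst, sizeof(dst)));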
+
+/**
+ * Converts potentially-invalid UTF-16 to UTF-8, replacing lone surrogates
+ * with the REPLACEMENT CHARACTER, with potentially insufficient output
+ * space.
+ *
+ * Returns the number of code units read and the number of bytes written.
+ *
+ * If the output isn't large enough, not all input is consumed.
+ *
+ * The conversion is guaranteed to be complete if the length of aDest is
+ * at least the length of aSource times three.
+ *
+ * The output is always valid UTF-8 ending on scalar value boundary
+ * even in the case of partial conversion.
+ *
+ * The semantics of this function match the semantics of
+ * TextEncoder.encodeInto.
+ * https://encoding.spec.whatwg.org/#dom-textencoder-encodeinto
+ */
+inline std::tuple<size_t, size_t> ConvertUtf16toUtf8Partial(
+ mozilla::Span<const char16_t> aSource, mozilla::Span<char> aDest) {
+ size_t srcLen = aSource.Length();
+ size_t dstLen = aDest.Length();
+ encoding_mem_convert_utf16_to_utf8_partial(aSource.Elements(), &srcLen,
+ aDest.Elements(), &dstLen);
+ return std::make_tuple(srcLen, dstLen);
+}
+
+/**
+ * Converts potentially-invalid UTF-8 to UTF-16 replacing malformed byte
+ * sequences with the REPLACEMENT CHARACTER.
+ *
+ * Returns the number of code units written.
+ *
+ * The length of aDest must be at least one greater than the length of aSource
+ * even though the last slot isn't written to.
+ *
+ * If you know that the input is valid for sure, use
+ * UnsafeConvertValidUtf8toUtf16() instead.
+ */
+inline size_t ConvertUtf8toUtf16(mozilla::Span<const char> aSource,
+ mozilla::Span<char16_t> aDest) {
+ return encoding_mem_convert_utf8_to_utf16(
+ aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length());
+}
+
+/**
+ * Converts known-valid UTF-8 to UTF-16. If the input might be invalid,
+ * use ConvertUtf8toUtf16() or ConvertUtf8toUtf16WithoutReplacement() instead.
+ *
+ * Returns the number of code units written.
+ *
+ * The length of aDest must be at least the length of aSource.
+ */
+inline size_t UnsafeConvertValidUtf8toUtf16(mozilla::Span<const char> aSource,
+ mozilla::Span<char16_t> aDest) {
+ return encoding_mem_convert_str_to_utf16(aSource.Elements(), aSource.Length(),
+ aDest.Elements(), aDest.Length());
+}
+
+/**
+ * Converts potentially-invalid UTF-8 to valid UTF-16 signaling on error.
+ *
+ * Returns the number of code units written or `mozilla::Nothing` if the
+ * input was invalid.
+ *
+ * The length of the destination buffer must be at least the length of the
+ * source buffer.
+ *
+ * When the input was invalid, some output may have been written.
+ *
+ * If you know that the input is valid for sure, use
+ * UnsafeConvertValidUtf8toUtf16() instead.
+ */
+inline mozilla::Maybe<size_t> ConvertUtf8toUtf16WithoutReplacement(
+ mozilla::Span<const char> aSource, mozilla::Span<char16_t> aDest) {
+ size_t written = encoding_mem_convert_utf8_to_utf16_without_replacement(
+ aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length());
+ if (MOZ_UNLIKELY(written == std::numeric_limits<size_t>::max())) {
+ return mozilla::Nothing();
+ }
+ return mozilla::Some(written);
+}
+
+#endif // MOZ_HAS_JSRUST
+
+/**
+ * Returns true iff |aUnit| is a UTF-8 trailing code unit matching the pattern
+ * 0b10xx'xxxx.
+ */
+inline bool IsTrailingUnit(Utf8Unit aUnit) {
+ return (aUnit.toUint8() & 0b1100'0000) == 0b1000'0000;
+}
+
+/**
+ * Given |aLeadUnit| that is a non-ASCII code unit, a pointer to an |Iter aIter|
+ * that (initially) itself points one unit past |aLeadUnit|, and
+ * |const EndIter& aEnd| that denotes the end of the UTF-8 data when compared
+ * against |*aIter| using |aEnd - *aIter|:
+ *
+ * If |aLeadUnit| and subsequent code units computed using |*aIter| (up to
+ * |aEnd|) encode a valid code point -- not exceeding Unicode's range, not a
+ * surrogate, in shortest form -- then return Some(that code point) and advance
+ * |*aIter| past those code units.
+ *
+ * Otherwise decrement |*aIter| (so that it points at |aLeadUnit|) and return
+ * Nothing().
+ *
+ * |Iter| and |EndIter| are generalized concepts most easily understood as if
+ * they were |const char*|, |const unsigned char*|, or |const Utf8Unit*|:
+ * iterators that when dereferenced can be used to construct a |Utf8Unit| and
+ * that can be compared and modified in certain limited ways. (Carefully note
+ * that this function mutates |*aIter|.) |Iter| and |EndIter| are template
+ * parameters to support more-complicated adaptor iterators.
+ *
+ * The template parameters after |Iter| allow users to implement custom handling
+ * for various forms of invalid UTF-8. A version of this function that defaults
+ * all such handling to no-ops is defined below this function. To learn how to
+ * define your own custom handling, consult the implementation of that function,
+ * which documents exactly how custom handler functors are invoked.
+ *
+ * This function is MOZ_ALWAYS_INLINE: if you don't need that, use the version
+ * of this function without the "Inline" suffix on the name.
+ */
+template <typename Iter, typename EndIter, class OnBadLeadUnit,
+ class OnNotEnoughUnits, class OnBadTrailingUnit, class OnBadCodePoint,
+ class OnNotShortestForm>
+MOZ_ALWAYS_INLINE Maybe<char32_t> DecodeOneUtf8CodePointInline(
+ const Utf8Unit aLeadUnit, Iter* aIter, const EndIter& aEnd,
+ OnBadLeadUnit aOnBadLeadUnit, OnNotEnoughUnits aOnNotEnoughUnits,
+ OnBadTrailingUnit aOnBadTrailingUnit, OnBadCodePoint aOnBadCodePoint,
+ OnNotShortestForm aOnNotShortestForm) {
+ MOZ_ASSERT(Utf8Unit((*aIter)[-1]) == aLeadUnit);
+
+ char32_t n = aLeadUnit.toUint8();
+ MOZ_ASSERT(!IsAscii(n));
+
+ // |aLeadUnit| determines the number of trailing code units in the code point
+ // and the bits of |aLeadUnit| that contribute to the code point's value.
+ uint8_t remaining;
+ uint32_t min;
+ if ((n & 0b1110'0000) == 0b1100'0000) {
+ remaining = 1;
+ min = 0x80;
+ n &= 0b0001'1111;
+ } else if ((n & 0b1111'0000) == 0b1110'0000) {
+ remaining = 2;
+ min = 0x800;
+ n &= 0b0000'1111;
+ } else if ((n & 0b1111'1000) == 0b1111'0000) {
+ remaining = 3;
+ min = 0x10000;
+ n &= 0b0000'0111;
+ } else {
+ *aIter -= 1;
+ aOnBadLeadUnit();
+ return Nothing();
+ }
+
+ // If the code point would require more code units than remain, the encoding
+ // is invalid.
+ auto actual = aEnd - *aIter;
+ if (MOZ_UNLIKELY(actual < remaining)) {
+ *aIter -= 1;
+ aOnNotEnoughUnits(AssertedCast<uint8_t>(actual + 1), remaining + 1);
+ return Nothing();
+ }
+
+ for (uint8_t i = 0; i < remaining; i++) {
+ const Utf8Unit unit(*(*aIter)++);
+
+ // Every non-leading code unit in properly encoded UTF-8 has its high
+ // bit set and the next-highest bit unset.
+ if (MOZ_UNLIKELY(!IsTrailingUnit(unit))) {
+ uint8_t unitsObserved = i + 1 + 1;
+ *aIter -= unitsObserved;
+ aOnBadTrailingUnit(unitsObserved);
+ return Nothing();
+ }
+
+ // The code point being encoded is the concatenation of all the
+ // unconstrained bits.
+ n = (n << 6) | (unit.toUint8() & 0b0011'1111);
+ }
+
+ // UTF-16 surrogates and values outside the Unicode range are invalid.
+ if (MOZ_UNLIKELY(n > 0x10FFFF || (0xD800 <= n && n <= 0xDFFF))) {
+ uint8_t unitsObserved = remaining + 1;
+ *aIter -= unitsObserved;
+ aOnBadCodePoint(n, unitsObserved);
+ return Nothing();
+ }
+
+ // Overlong code points are also invalid.
+ if (MOZ_UNLIKELY(n < min)) {
+ uint8_t unitsObserved = remaining + 1;
+ *aIter -= unitsObserved;
+ aOnNotShortestForm(n, unitsObserved);
+ return Nothing();
+ }
+
+ return Some(n);
+}
+
+/**
+ * Identical to the above function, but not forced to be instantiated inline --
+ * the compiler is permitted to common up separate invocations if it chooses.
+ */
+template <typename Iter, typename EndIter, class OnBadLeadUnit,
+ class OnNotEnoughUnits, class OnBadTrailingUnit, class OnBadCodePoint,
+ class OnNotShortestForm>
+inline Maybe<char32_t> DecodeOneUtf8CodePoint(
+ const Utf8Unit aLeadUnit, Iter* aIter, const EndIter& aEnd,
+ OnBadLeadUnit aOnBadLeadUnit, OnNotEnoughUnits aOnNotEnoughUnits,
+ OnBadTrailingUnit aOnBadTrailingUnit, OnBadCodePoint aOnBadCodePoint,
+ OnNotShortestForm aOnNotShortestForm) {
+ return DecodeOneUtf8CodePointInline(aLeadUnit, aIter, aEnd, aOnBadLeadUnit,
+ aOnNotEnoughUnits, aOnBadTrailingUnit,
+ aOnBadCodePoint, aOnNotShortestForm);
+}
+
+/**
+ * Like the always-inlined function above, but with no-op behavior from all
+ * trailing if-invalid notifier functors.
+ *
+ * This function is MOZ_ALWAYS_INLINE: if you don't need that, use the version
+ * of this function without the "Inline" suffix on the name.
+ */
+template <typename Iter, typename EndIter>
+MOZ_ALWAYS_INLINE Maybe<char32_t> DecodeOneUtf8CodePointInline(
+ const Utf8Unit aLeadUnit, Iter* aIter, const EndIter& aEnd) {
+ // aOnBadLeadUnit is called when |aLeadUnit| itself is an invalid lead unit in
+ // a multi-unit code point. It is passed no arguments: the caller already has
+ // |aLeadUnit| on hand, so no need to provide it again.
+ auto onBadLeadUnit = []() {};
+
+ // aOnNotEnoughUnits is called when |aLeadUnit| properly indicates a code
+ // point length, but there aren't enough units from |*aIter| to |aEnd| to
+ // satisfy that length. It is passed the number of code units actually
+ // available (according to |aEnd - *aIter|) and the number of code units that
+ // |aLeadUnit| indicates are needed. Both numbers include the contribution
+ // of |aLeadUnit| itself: so |aUnitsAvailable <= 3|, |aUnitsNeeded <= 4|, and
+ // |aUnitsAvailable < aUnitsNeeded|. As above, it also is not passed the lead
+ // code unit.
+ auto onNotEnoughUnits = [](uint8_t aUnitsAvailable, uint8_t aUnitsNeeded) {};
+
+ // aOnBadTrailingUnit is called when one of the trailing code units implied by
+ // |aLeadUnit| doesn't match the 0b10xx'xxxx bit pattern that all UTF-8
+ // trailing code units must satisfy. It is passed the total count of units
+ // observed (including |aLeadUnit|). The bad trailing code unit will
+ // conceptually be at |(*aIter)[aUnitsObserved - 1]| if this functor is
+ // called, and so |aUnitsObserved <= 4|.
+ auto onBadTrailingUnit = [](uint8_t aUnitsObserved) {};
+
+ // aOnBadCodePoint is called when a structurally-correct code point encoding
+ // is found, but the *value* that is encoded is not a valid code point: either
+ // because it exceeded the U+10FFFF Unicode maximum code point, or because it
+ // was a UTF-16 surrogate. It is passed the non-code point value and the
+ // number of code units used to encode it.
+ auto onBadCodePoint = [](char32_t aBadCodePoint, uint8_t aUnitsObserved) {};
+
+ // aOnNotShortestForm is called when structurally-correct encoding is found,
+ // but the encoded value should have been encoded in fewer code units (e.g.
+ // mis-encoding U+0000 as 0b1100'0000 0b1000'0000 in two code units instead of
+ // as 0b0000'0000). It is passed the mis-encoded code point (which will be
+ // valid and not a surrogate) and the count of code units that mis-encoded it.
+ auto onNotShortestForm = [](char32_t aBadCodePoint, uint8_t aUnitsObserved) {
+ };
+
+ return DecodeOneUtf8CodePointInline(aLeadUnit, aIter, aEnd, onBadLeadUnit,
+ onNotEnoughUnits, onBadTrailingUnit,
+ onBadCodePoint, onNotShortestForm);
+}
+
+/**
+ * Identical to the above function, but not forced to be instantiated inline --
+ * the compiler/linker are allowed to common up separate invocations.
+ */
+template <typename Iter, typename EndIter>
+inline Maybe<char32_t> DecodeOneUtf8CodePoint(const Utf8Unit aLeadUnit,
+ Iter* aIter,
+ const EndIter& aEnd) {
+ return DecodeOneUtf8CodePointInline(aLeadUnit, aIter, aEnd);
+}
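+
+/*
+ * A minimal usage sketch (|CountCodePoints| is a hypothetical helper, shown
+ * for illustration only): count the code points in a buffer, stopping at the
+ * first invalid sequence.
+ *
+ *   size_t CountCodePoints(const Utf8Unit* aBegin, const Utf8Unit* aEnd) {
+ *     size_t count = 0;
+ *     const Utf8Unit* iter = aBegin;
+ *     while (iter < aEnd) {
+ *       Utf8Unit lead = *iter++;
+ *       if (lead.toUint8() < 0b1000'0000) {
+ *         count++;  // ASCII is always a single-unit code point
+ *         continue;
+ *       }
+ *       if (DecodeOneUtf8CodePoint(lead, &iter, aEnd).isNothing()) {
+ *         break;  // |iter| was rewound to point at |lead|
+ *       }
+ *       count++;
+ *     }
+ *     return count;
+ *   }
+ */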
+
+} // namespace mozilla
+
+#endif /* mozilla_Utf8_h */
diff --git a/mfbt/Variant.h b/mfbt/Variant.h
new file mode 100644
index 0000000000..d1db3a2cc9
--- /dev/null
+++ b/mfbt/Variant.h
@@ -0,0 +1,928 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A template class for tagged unions. */
+
+#include <new>
+#include <stdint.h>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/TemplateLib.h"
+#include <type_traits>
+#include <utility>
+
+#ifndef mozilla_Variant_h
+# define mozilla_Variant_h
+
+namespace IPC {
+template <typename T>
+struct ParamTraits;
+} // namespace IPC
+
+namespace mozilla {
+
+namespace ipc {
+template <typename T>
+struct IPDLParamTraits;
+} // namespace ipc
+
+template <typename... Ts>
+class Variant;
+
+namespace detail {
+
+// Nth<N, types...>::Type is the Nth type (0-based) in the list of types Ts.
+template <size_t N, typename... Ts>
+struct Nth;
+
+template <typename T, typename... Ts>
+struct Nth<0, T, Ts...> {
+ using Type = T;
+};
+
+template <size_t N, typename T, typename... Ts>
+struct Nth<N, T, Ts...> {
+ using Type = typename Nth<N - 1, Ts...>::Type;
+};
+
+/// SelectVariantTypeHelper is used in the implementation of SelectVariantType.
+template <typename T, typename... Variants>
+struct SelectVariantTypeHelper;
+
+template <typename T>
+struct SelectVariantTypeHelper<T> {
+ static constexpr size_t count = 0;
+};
+
+template <typename T, typename... Variants>
+struct SelectVariantTypeHelper<T, T, Variants...> {
+ typedef T Type;
+ static constexpr size_t count =
+ 1 + SelectVariantTypeHelper<T, Variants...>::count;
+};
+
+template <typename T, typename... Variants>
+struct SelectVariantTypeHelper<T, const T, Variants...> {
+ typedef const T Type;
+ static constexpr size_t count =
+ 1 + SelectVariantTypeHelper<T, Variants...>::count;
+};
+
+template <typename T, typename... Variants>
+struct SelectVariantTypeHelper<T, const T&, Variants...> {
+ typedef const T& Type;
+ static constexpr size_t count =
+ 1 + SelectVariantTypeHelper<T, Variants...>::count;
+};
+
+template <typename T, typename... Variants>
+struct SelectVariantTypeHelper<T, T&&, Variants...> {
+ typedef T&& Type;
+ static constexpr size_t count =
+ 1 + SelectVariantTypeHelper<T, Variants...>::count;
+};
+
+template <typename T, typename Head, typename... Variants>
+struct SelectVariantTypeHelper<T, Head, Variants...>
+ : public SelectVariantTypeHelper<T, Variants...> {};
+
+/**
+ * SelectVariantType takes a type T and a list of variant types Variants and
+ * yields a type Type, selected from Variants, that can store a value of type T
+ * or a reference to type T. If no such type is found, Type is not defined.
+ * SelectVariantType also has a `count` member that contains the total number
+ * of selectable types (which is used to check that a requested type is not
+ * ambiguously present twice).
+ */
+template <typename T, typename... Variants>
+struct SelectVariantType
+ : public SelectVariantTypeHelper<
+ std::remove_const_t<std::remove_reference_t<T>>, Variants...> {};
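+
+// For example (illustrative only):
+//
+//   SelectVariantType<int, char, int, double>::Type  is |int|, with count == 1;
+//   SelectVariantType<int, char, double>             defines no Type, count == 0;
+//   SelectVariantType<int, int, char, int>           has count == 2 (ambiguous).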
+
+// Compute a fast, compact type that can be used to hold integral values that
+// distinctly map to every type in Ts.
+template <typename... Ts>
+struct VariantTag {
+ private:
+ static const size_t TypeCount = sizeof...(Ts);
+
+ public:
+ using Type = std::conditional_t<
+ (TypeCount <= 2), bool,
+ std::conditional_t<(TypeCount <= size_t(UINT_FAST8_MAX)), uint_fast8_t,
+ size_t // stop caring past a certain
+ // point :-)
+ >>;
+};
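+
+// For example, |VariantTag<bool, char>::Type| is |bool| (two types need only
+// the tag values 0 and 1), while |VariantTag<bool, char, int>::Type| is
+// |uint_fast8_t|.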
+
+// TagHelper gets the given sentinel tag value for the given type T. This has to
+// be split out from VariantImplementation because you can't nest a partial
+// template specialization within a template class.
+
+template <typename Tag, size_t N, typename T, typename U, typename Next,
+ bool isMatch>
+struct TagHelper;
+
+// In the case where T != U, we continue recursion.
+template <typename Tag, size_t N, typename T, typename U, typename Next>
+struct TagHelper<Tag, N, T, U, Next, false> {
+ static Tag tag() { return Next::template tag<U>(); }
+};
+
+// In the case where T == U, return the tag number.
+template <typename Tag, size_t N, typename T, typename U, typename Next>
+struct TagHelper<Tag, N, T, U, Next, true> {
+ static Tag tag() { return Tag(N); }
+};
+
+// The VariantImplementation template provides the guts of mozilla::Variant. We
+// create a VariantImplementation for each T in Ts... which handles
+// construction, destruction, etc for when the Variant's type is T. If the
+// Variant's type isn't T, it punts the request on to the next
+// VariantImplementation.
+
+template <typename Tag, size_t N, typename... Ts>
+struct VariantImplementation;
+
+// The singly typed Variant / recursion base case.
+template <typename Tag, size_t N, typename T>
+struct VariantImplementation<Tag, N, T> {
+ template <typename U>
+ static Tag tag() {
+ static_assert(std::is_same_v<T, U>, "mozilla::Variant: tag: bad type!");
+ return Tag(N);
+ }
+
+ template <typename Variant>
+ static void copyConstruct(void* aLhs, const Variant& aRhs) {
+ ::new (KnownNotNull, aLhs) T(aRhs.template as<N>());
+ }
+
+ template <typename Variant>
+ static void moveConstruct(void* aLhs, Variant&& aRhs) {
+ ::new (KnownNotNull, aLhs) T(aRhs.template extract<N>());
+ }
+
+ template <typename Variant>
+ static void destroy(Variant& aV) {
+ aV.template as<N>().~T();
+ }
+
+ template <typename Variant>
+ static bool equal(const Variant& aLhs, const Variant& aRhs) {
+ return aLhs.template as<N>() == aRhs.template as<N>();
+ }
+
+ template <typename Matcher, typename ConcreteVariant>
+ static decltype(auto) match(Matcher&& aMatcher, ConcreteVariant&& aV) {
+ if constexpr (std::is_invocable_v<Matcher, Tag,
+ decltype(std::forward<ConcreteVariant>(aV)
+ .template as<N>())>) {
+ return std::forward<Matcher>(aMatcher)(
+ Tag(N), std::forward<ConcreteVariant>(aV).template as<N>());
+ } else {
+ return std::forward<Matcher>(aMatcher)(
+ std::forward<ConcreteVariant>(aV).template as<N>());
+ }
+ }
+
+ template <typename ConcreteVariant, typename Matcher>
+ static decltype(auto) matchN(ConcreteVariant&& aV, Matcher&& aMatcher) {
+ if constexpr (std::is_invocable_v<Matcher, Tag,
+ decltype(std::forward<ConcreteVariant>(aV)
+ .template as<N>())>) {
+ return std::forward<Matcher>(aMatcher)(
+ Tag(N), std::forward<ConcreteVariant>(aV).template as<N>());
+ } else {
+ return std::forward<Matcher>(aMatcher)(
+ std::forward<ConcreteVariant>(aV).template as<N>());
+ }
+ }
+};
+
+// VariantImplementation for some variant type T.
+template <typename Tag, size_t N, typename T, typename... Ts>
+struct VariantImplementation<Tag, N, T, Ts...> {
+ // The next recursive VariantImplementation.
+ using Next = VariantImplementation<Tag, N + 1, Ts...>;
+
+ template <typename U>
+ static Tag tag() {
+ return TagHelper<Tag, N, T, U, Next, std::is_same_v<T, U>>::tag();
+ }
+
+ template <typename Variant>
+ static void copyConstruct(void* aLhs, const Variant& aRhs) {
+ if (aRhs.template is<N>()) {
+ ::new (KnownNotNull, aLhs) T(aRhs.template as<N>());
+ } else {
+ Next::copyConstruct(aLhs, aRhs);
+ }
+ }
+
+ template <typename Variant>
+ static void moveConstruct(void* aLhs, Variant&& aRhs) {
+ if (aRhs.template is<N>()) {
+ ::new (KnownNotNull, aLhs) T(aRhs.template extract<N>());
+ } else {
+ Next::moveConstruct(aLhs, std::move(aRhs));
+ }
+ }
+
+ template <typename Variant>
+ static void destroy(Variant& aV) {
+ if (aV.template is<N>()) {
+ aV.template as<N>().~T();
+ } else {
+ Next::destroy(aV);
+ }
+ }
+
+ template <typename Variant>
+ static bool equal(const Variant& aLhs, const Variant& aRhs) {
+ if (aLhs.template is<N>()) {
+ MOZ_ASSERT(aRhs.template is<N>());
+ return aLhs.template as<N>() == aRhs.template as<N>();
+ } else {
+ return Next::equal(aLhs, aRhs);
+ }
+ }
+
+ template <typename Matcher, typename ConcreteVariant>
+ static decltype(auto) match(Matcher&& aMatcher, ConcreteVariant&& aV) {
+ if (aV.template is<N>()) {
+ if constexpr (std::is_invocable_v<Matcher, Tag,
+ decltype(std::forward<ConcreteVariant>(
+ aV)
+ .template as<N>())>) {
+ return std::forward<Matcher>(aMatcher)(
+ Tag(N), std::forward<ConcreteVariant>(aV).template as<N>());
+ } else {
+ return std::forward<Matcher>(aMatcher)(
+ std::forward<ConcreteVariant>(aV).template as<N>());
+ }
+ } else {
+ // If you're seeing compilation errors here like "no matching
+ // function for call to 'match'" then that means that the
+ // Matcher doesn't exhaust all variant types. There must exist a
+ // Matcher::operator()(T&) for every variant type T.
+ //
+ // If you're seeing compilation errors here like "cannot initialize
+ // return object of type <...> with an rvalue of type <...>" then that
+ // means that the Matcher::operator()(T&) overloads are returning
+ // different types. They must all return the same type.
+ return Next::match(std::forward<Matcher>(aMatcher),
+ std::forward<ConcreteVariant>(aV));
+ }
+ }
+
+ template <typename ConcreteVariant, typename Mi, typename... Ms>
+ static decltype(auto) matchN(ConcreteVariant&& aV, Mi&& aMi, Ms&&... aMs) {
+ if (aV.template is<N>()) {
+ if constexpr (std::is_invocable_v<Mi, Tag,
+ decltype(std::forward<ConcreteVariant>(
+ aV)
+ .template as<N>())>) {
+ static_assert(
+ std::is_same_v<
+ decltype(std::forward<Mi>(aMi)(
+ Tag(N),
+ std::forward<ConcreteVariant>(aV).template as<N>())),
+ decltype(Next::matchN(std::forward<ConcreteVariant>(aV),
+ std::forward<Ms>(aMs)...))>,
+ "all matchers must have the same return type");
+ return std::forward<Mi>(aMi)(
+ Tag(N), std::forward<ConcreteVariant>(aV).template as<N>());
+ } else {
+ static_assert(
+ std::is_same_v<
+ decltype(std::forward<Mi>(aMi)(
+ std::forward<ConcreteVariant>(aV).template as<N>())),
+ decltype(Next::matchN(std::forward<ConcreteVariant>(aV),
+ std::forward<Ms>(aMs)...))>,
+ "all matchers must have the same return type");
+ return std::forward<Mi>(aMi)(
+ std::forward<ConcreteVariant>(aV).template as<N>());
+ }
+ } else {
+ // If you're seeing compilation errors here like "no matching
+ // function for call to 'match'" then that means that the
+ // Matchers don't exhaust all variant types. There must exist a
+ // Matcher (with its operator()(T&)) for every variant type T, in the
+ // exact same order.
+ return Next::matchN(std::forward<ConcreteVariant>(aV),
+ std::forward<Ms>(aMs)...);
+ }
+ }
+};
+
+/**
+ * AsVariantTemporary stores a value of type T to allow construction of a
+ * Variant value via type inference. Because T is copied and there's no
+ * guarantee that the copy can be elided, AsVariantTemporary is best used with
+ * primitive or very small types.
+ */
+template <typename T>
+struct AsVariantTemporary {
+ explicit AsVariantTemporary(const T& aValue) : mValue(aValue) {}
+
+ template <typename U>
+ explicit AsVariantTemporary(U&& aValue) : mValue(std::forward<U>(aValue)) {}
+
+ AsVariantTemporary(const AsVariantTemporary& aOther)
+ : mValue(aOther.mValue) {}
+
+ AsVariantTemporary(AsVariantTemporary&& aOther)
+ : mValue(std::move(aOther.mValue)) {}
+
+ AsVariantTemporary() = delete;
+ void operator=(const AsVariantTemporary&) = delete;
+ void operator=(AsVariantTemporary&&) = delete;
+
+ std::remove_const_t<std::remove_reference_t<T>> mValue;
+};
+
+} // namespace detail
+
+// Used to unambiguously specify one of the Variant's types.
+template <typename T>
+struct VariantType {
+ using Type = T;
+};
+
+// Used to specify one of the Variant's types by index.
+template <size_t N>
+struct VariantIndex {
+ static constexpr size_t index = N;
+};
+
+/**
+ * # mozilla::Variant
+ *
+ * A variant / tagged union / heterogeneous disjoint union / sum-type template
+ * class. Similar in concept to (but not derived from) `boost::variant`.
+ *
+ * Sometimes, you may wish to use a C union with non-POD types. However, this is
+ * forbidden in C++ because it is not clear which type in the union should have
+ * its constructor and destructor run on creation and deletion
+ * respectively. This is the problem that `mozilla::Variant` solves.
+ *
+ * ## Usage
+ *
+ * A `mozilla::Variant` instance is constructed (via move or copy) from one of
+ * its variant types (ignoring const and references). It does *not* support
+ * construction from subclasses of variant types or types that coerce to one of
+ * the variant types.
+ *
+ * Variant<char, uint32_t> v1('a');
+ * Variant<UniquePtr<A>, B, C> v2(MakeUnique<A>());
+ * Variant<bool, char> v3(VariantType<char>, 0); // disambiguation needed
+ * Variant<int, int> v4(VariantIndex<1>, 0); // 2nd int
+ *
+ * Because specifying the full type of a Variant value is often verbose,
+ * there are two easier ways to construct values:
+ *
+ * A. AsVariant() can be used to construct a Variant value using type inference
+ * in contexts such as expressions or when returning values from functions.
+ * Because AsVariant() must copy or move the value into a temporary and this
+ * cannot necessarily be elided by the compiler, it's mostly appropriate only
+ * for use with primitive or very small types.
+ *
+ * Variant<char, uint32_t> Foo() { return AsVariant('x'); }
+ * // ...
+ * Variant<char, uint32_t> v1 = Foo(); // v1 holds char('x').
+ *
+ * B. Brace-construction with VariantType or VariantIndex; this also allows
+ * in-place construction with any number of arguments.
+ *
+ * struct AB { AB(int, int){...} };
+ * static Variant<AB, bool> Foo()
+ * {
+ * return {VariantIndex<0>{}, 1, 2};
+ * }
+ * // ...
+ * Variant<AB, bool> v0 = Foo(); // v0 holds AB(1,2).
+ *
+ * All access to the contained value goes through type-safe accessors.
+ * Either the stored type, or the type index may be provided.
+ *
+ * void
+ * Foo(Variant<A, B, C> v)
+ * {
+ * if (v.is<A>()) {
+ * A& ref = v.as<A>();
+ * ...
+ * } else if (v.is<1>()) { // Instead of v.is<B>().
+ * ...
+ * } else {
+ * ...
+ * }
+ * }
+ *
+ * In some situations, a Variant may be constructed from templated types, in
+ * which case the same type could be given multiple times by an external
+ * developer, or seemingly-different types could be aliases of each other.
+ * In this case, repeated types can only be accessed through their index, to
+ * prevent ambiguous access by type.
+ *
+ * // Bad!
+ * template <typename T>
+ * struct ResultOrError
+ * {
+ * Variant<T, int> m;
+ * ResultOrError() : m(int(0)) {} // Error '0' by default
+ * ResultOrError(const T& r) : m(r) {}
+ * bool IsResult() const { return m.is<T>(); }
+ * bool IsError() const { return m.is<int>(); }
+ * };
+ * // Now instantiate with the result being an int too:
+ * ResultOrError<int> myResult(123); // Fail!
+ * // In Variant<int, int>, which 'int' are we referring to, from inside
+ * // ResultOrError functions?
+ *
+ * // Good!
+ * template <typename T>
+ * struct ResultOrError
+ * {
+ * Variant<T, int> m;
+ * ResultOrError() : m(VariantIndex<1>{}, 0) {} // Error '0' by default
+ * ResultOrError(const T& r) : m(VariantIndex<0>{}, r) {}
+ * bool IsResult() const { return m.is<0>(); } // 0 -> T
+ * bool IsError() const { return m.is<1>(); } // 1 -> int
+ * };
+ * // Now instantiate with the result being an int too:
+ * ResultOrError<int> myResult(123); // It now works!
+ *
+ * Attempting to use the contained value as type `T1` when the `Variant`
+ * instance contains a value of type `T2` causes an assertion failure.
+ *
+ * A a;
+ * Variant<A, B, C> v(a);
+ * v.as<B>(); // <--- Assertion failure!
+ *
+ * Trying to use a `Variant<Ts...>` instance as some type `U` that is not a
+ * member of the set of `Ts...` is a compiler error.
+ *
+ * A a;
+ * Variant<A, B, C> v(a);
+ * v.as<SomeRandomType>(); // <--- Compiler error!
+ *
+ * Additionally, you can turn a `Variant` that `is<T>` into a `T` by moving it
+ * out of the containing `Variant` instance with the `extract<T>` method:
+ *
+ * Variant<UniquePtr<A>, B, C> v(MakeUnique<A>());
+ * auto ptr = v.extract<UniquePtr<A>>();
+ *
+ * Finally, you can exhaustively match on the contained variant and branch into
+ * different code paths depending on which type is contained. This is preferred
+ * to manually checking every variant type T with is<T>() because it provides
+ * compile-time checking that you handled every type, rather than runtime
+ * assertion failures.
+ *
+ * // Bad!
+ * char* foo(Variant<A, B, C, D>& v) {
+ * if (v.is<A>()) {
+ * return ...;
+ * } else if (v.is<B>()) {
+ * return ...;
+ * } else {
+ * return doSomething(v.as<C>()); // Forgot about case D!
+ * }
+ * }
+ *
+ * // Instead, a single function object (that can deal with all possible
+ * // options) may be provided:
+ * struct FooMatcher
+ * {
+ * // The return type of all matchers must be identical.
+ * char* operator()(A& a) { ... }
+ * char* operator()(B& b) { ... }
+ * char* operator()(C& c) { ... }
+ * char* operator()(D& d) { ... } // Compile-time error to forget D!
+ * }
+ * char* foo(Variant<A, B, C, D>& v) {
+ * return v.match(FooMatcher());
+ * }
+ *
+ * // In some situations, a single generic lambda may also be appropriate:
+ * char* foo(Variant<A, B, C, D>& v) {
+ * return v.match([](auto&) {...});
+ * }
+ *
+ * // Alternatively, multiple function objects may be provided, each one
+ * // corresponding to an option, in the same order:
+ * char* foo(Variant<A, B, C, D>& v) {
+ * return v.match([](A&) { ... },
+ * [](B&) { ... },
+ * [](C&) { ... },
+ * [](D&) { ... });
+ * }
+ *
+ * // In rare cases where the index of the currently-active alternative is
+ * // needed, it may be obtained by adding a first parameter to the matcher
+ * // callback, which will receive the index in its most compact type (just
+ * // use `size_t` if the exact type is not important), e.g.:
+ * char* foo(Variant<A, B, C, D>& v) {
+ * return v.match([](auto aIndex, auto& aAlternative) {...});
+ * // --OR--
+ * return v.match([](size_t aIndex, auto& aAlternative) {...});
+ * }
+ *
+ * ## Examples
+ *
+ * A tree is either an empty leaf, or a node with a value and two children:
+ *
+ * struct Leaf { };
+ *
+ * template<typename T>
+ * struct Node
+ * {
+ * T value;
+ * Tree<T>* left;
+ * Tree<T>* right;
+ * };
+ *
+ * template<typename T>
+ * using Tree = Variant<Leaf, Node<T>>;
+ *
+ * A copy-on-write string is either a non-owning reference to some existing
+ * string, or an owning reference to our copy:
+ *
+ * class CopyOnWriteString
+ * {
+ * Variant<const char*, UniquePtr<char[]>> string;
+ *
+ * ...
+ * };
+ *
+ * Because Variant must be aligned suitably to hold any value stored within it,
+ * and because |alignas| requirements don't affect platform ABI with respect to
+ * how parameters are laid out in memory, Variant can't be used as the type of a
+ * function parameter. Pass Variant to functions by pointer or reference
+ * instead.
+ */
+template <typename... Ts>
+class MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS MOZ_NON_PARAM Variant {
+ friend struct IPC::ParamTraits<mozilla::Variant<Ts...>>;
+ friend struct mozilla::ipc::IPDLParamTraits<mozilla::Variant<Ts...>>;
+
+ using Tag = typename detail::VariantTag<Ts...>::Type;
+ using Impl = detail::VariantImplementation<Tag, 0, Ts...>;
+
+ static constexpr size_t RawDataAlignment = tl::Max<alignof(Ts)...>::value;
+ static constexpr size_t RawDataSize = tl::Max<sizeof(Ts)...>::value;
+
+ // Raw storage for the contained variant value.
+ alignas(RawDataAlignment) unsigned char rawData[RawDataSize];
+
+ // Each type is given a unique tag value that lets us keep track of the
+ // contained variant value's type.
+ Tag tag;
+
+ // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+ // -Werror compile error) to reinterpret_cast<> |rawData| to |T*|, even
+ // through |void*|. Placing the latter cast in these separate functions
+ // breaks the chain such that affected GCC versions no longer warn/error.
+ void* ptr() { return rawData; }
+
+ const void* ptr() const { return rawData; }
+
+ public:
+ /** Perfect forwarding construction for some variant type T. */
+ template <typename RefT,
+ // RefT captures both const& as well as && (as intended, to support
+ // perfect forwarding), so we have to remove those qualifiers here
+ // when ensuring that T is a variant of this type, and getting T's
+ // tag, etc.
+ typename T = typename detail::SelectVariantType<RefT, Ts...>::Type>
+ explicit Variant(RefT&& aT) : tag(Impl::template tag<T>()) {
+ static_assert(
+ detail::SelectVariantType<RefT, Ts...>::count == 1,
+ "Variant can only be selected by type if that type is unique");
+ ::new (KnownNotNull, ptr()) T(std::forward<RefT>(aT));
+ }
+
+ /**
+ * Perfect forwarding construction for some variant type T, by
+ * explicitly giving the type.
+ * This is necessary to construct from any number of arguments,
+ * or to convert from a type that is not in the Variant's type list.
+ */
+ template <typename T, typename... Args>
+ MOZ_IMPLICIT Variant(const VariantType<T>&, Args&&... aTs)
+ : tag(Impl::template tag<T>()) {
+ ::new (KnownNotNull, ptr()) T(std::forward<Args>(aTs)...);
+ }
+
+ /**
+ * Perfect forwarding construction for some variant type T, by
+ * explicitly giving the type index.
+ * This is necessary to construct from any number of arguments,
+ * or to convert from a type that is not in the Variant's type list,
+ * or to construct a type that is present more than once in the Variant.
+ */
+ template <size_t N, typename... Args>
+ MOZ_IMPLICIT Variant(const VariantIndex<N>&, Args&&... aTs) : tag(N) {
+ using T = typename detail::Nth<N, Ts...>::Type;
+ ::new (KnownNotNull, ptr()) T(std::forward<Args>(aTs)...);
+ }
+
+ /**
+ * Constructs this Variant from an AsVariantTemporary<T> such that T can be
+ * stored in one of the types allowable in this Variant. This is used in the
+ * implementation of AsVariant().
+ */
+ template <typename RefT>
+ MOZ_IMPLICIT Variant(detail::AsVariantTemporary<RefT>&& aValue)
+ : tag(Impl::template tag<
+ typename detail::SelectVariantType<RefT, Ts...>::Type>()) {
+ using T = typename detail::SelectVariantType<RefT, Ts...>::Type;
+ static_assert(
+ detail::SelectVariantType<RefT, Ts...>::count == 1,
+ "Variant can only be selected by type if that type is unique");
+ ::new (KnownNotNull, ptr()) T(std::move(aValue.mValue));
+ }
+
+ /** Copy construction. */
+ Variant(const Variant& aRhs) : tag(aRhs.tag) {
+ Impl::copyConstruct(ptr(), aRhs);
+ }
+
+ /** Move construction. */
+ Variant(Variant&& aRhs) : tag(aRhs.tag) {
+ Impl::moveConstruct(ptr(), std::move(aRhs));
+ }
+
+ /** Copy assignment. */
+ Variant& operator=(const Variant& aRhs) {
+ MOZ_ASSERT(&aRhs != this, "self-assign disallowed");
+ this->~Variant();
+ ::new (KnownNotNull, this) Variant(aRhs);
+ return *this;
+ }
+
+ /** Move assignment. */
+ Variant& operator=(Variant&& aRhs) {
+ MOZ_ASSERT(&aRhs != this, "self-assign disallowed");
+ this->~Variant();
+ ::new (KnownNotNull, this) Variant(std::move(aRhs));
+ return *this;
+ }
+
+ /** Move assignment from AsVariant(). */
+ template <typename T>
+ Variant& operator=(detail::AsVariantTemporary<T>&& aValue) {
+ static_assert(
+ detail::SelectVariantType<T, Ts...>::count == 1,
+ "Variant can only be selected by type if that type is unique");
+ this->~Variant();
+ ::new (KnownNotNull, this) Variant(std::move(aValue));
+ return *this;
+ }
+
+ ~Variant() { Impl::destroy(*this); }
+
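+  /**
+   * Destroy the contained value, then construct a new value in place from
+   * |aTs...| (selecting the alternative by type T or by index N) and update
+   * the tag. Returns a reference to the newly constructed value.
+   */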
+ template <typename T, typename... Args>
+ T& emplace(Args&&... aTs) {
+ Impl::destroy(*this);
+ tag = Impl::template tag<T>();
+ ::new (KnownNotNull, ptr()) T(std::forward<Args>(aTs)...);
+ return as<T>();
+ }
+
+ template <size_t N, typename... Args>
+ typename detail::Nth<N, Ts...>::Type& emplace(Args&&... aTs) {
+ using T = typename detail::Nth<N, Ts...>::Type;
+ Impl::destroy(*this);
+ tag = N;
+ ::new (KnownNotNull, ptr()) T(std::forward<Args>(aTs)...);
+ return as<N>();
+ }
+
+ /** Check which variant type is currently contained. */
+ template <typename T>
+ bool is() const {
+ static_assert(
+ detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not uniquely found in this Variant's type list");
+ return Impl::template tag<T>() == tag;
+ }
+
+ template <size_t N>
+ bool is() const {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ return N == size_t(tag);
+ }
+
+ /**
+ * Operator == overload that defers to the variant type's operator==
+ * implementation if the rhs is tagged as the same type as this one.
+ */
+ bool operator==(const Variant& aRhs) const {
+ return tag == aRhs.tag && Impl::equal(*this, aRhs);
+ }
+
+ /**
+ * Operator != overload that defers to the negation of the variant type's
+ * operator== implementation if the rhs is tagged as the same type as this
+ * one.
+ */
+ bool operator!=(const Variant& aRhs) const { return !(*this == aRhs); }
+
+ // Accessors for working with the contained variant value.
+
+ /** Mutable lvalue-reference. */
+ template <typename T>
+ T& as() & {
+ static_assert(
+ detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not uniquely found in this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<T>());
+ return *static_cast<T*>(ptr());
+ }
+
+ template <size_t N>
+ typename detail::Nth<N, Ts...>::Type& as() & {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<N>());
+ return *static_cast<typename detail::Nth<N, Ts...>::Type*>(ptr());
+ }
+
+ /** Immutable const lvalue-reference. */
+ template <typename T>
+ const T& as() const& {
+ static_assert(detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not found in this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<T>());
+ return *static_cast<const T*>(ptr());
+ }
+
+ template <size_t N>
+ const typename detail::Nth<N, Ts...>::Type& as() const& {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<N>());
+ return *static_cast<const typename detail::Nth<N, Ts...>::Type*>(ptr());
+ }
+
+ /** Mutable rvalue-reference. */
+ template <typename T>
+ T&& as() && {
+ static_assert(
+ detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not uniquely found in this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<T>());
+ return std::move(*static_cast<T*>(ptr()));
+ }
+
+ template <size_t N>
+ typename detail::Nth<N, Ts...>::Type&& as() && {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<N>());
+ return std::move(
+ *static_cast<typename detail::Nth<N, Ts...>::Type*>(ptr()));
+ }
+
+ /** Immutable const rvalue-reference. */
+ template <typename T>
+ const T&& as() const&& {
+ static_assert(detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not found in this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<T>());
+ return std::move(*static_cast<const T*>(ptr()));
+ }
+
+ template <size_t N>
+ const typename detail::Nth<N, Ts...>::Type&& as() const&& {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<N>());
+ return std::move(
+ *static_cast<const typename detail::Nth<N, Ts...>::Type*>(ptr()));
+ }
+
+ /**
+ * Extract the contained variant value from this container into a temporary
+ * value. On completion, the value in the variant will be in a
+ * safely-destructible state, as determined by the behavior of T's move
+ * constructor when provided the variant's internal value.
+ */
+ template <typename T>
+ T extract() {
+ static_assert(
+ detail::SelectVariantType<T, Ts...>::count == 1,
+ "provided a type not uniquely found in this Variant's type list");
+ MOZ_ASSERT(is<T>());
+ return T(std::move(as<T>()));
+ }
+
+ template <size_t N>
+ typename detail::Nth<N, Ts...>::Type extract() {
+ static_assert(N < sizeof...(Ts),
+ "provided an index outside of this Variant's type list");
+ MOZ_RELEASE_ASSERT(is<N>());
+ return typename detail::Nth<N, Ts...>::Type(std::move(as<N>()));
+ }
+
+ // Exhaustive matching of all variant types on the contained value.
+
+ /** Match on an immutable const lvalue-reference. */
+ template <typename Matcher>
+ decltype(auto) match(Matcher&& aMatcher) const& {
+ return Impl::match(std::forward<Matcher>(aMatcher), *this);
+ }
+
+ template <typename M0, typename M1, typename... Ms>
+ decltype(auto) match(M0&& aM0, M1&& aM1, Ms&&... aMs) const& {
+ return matchN(*this, std::forward<M0>(aM0), std::forward<M1>(aM1),
+ std::forward<Ms>(aMs)...);
+ }
+
+ /** Match on a mutable non-const lvalue-reference. */
+ template <typename Matcher>
+ decltype(auto) match(Matcher&& aMatcher) & {
+ return Impl::match(std::forward<Matcher>(aMatcher), *this);
+ }
+
+ template <typename M0, typename M1, typename... Ms>
+ decltype(auto) match(M0&& aM0, M1&& aM1, Ms&&... aMs) & {
+ return matchN(*this, std::forward<M0>(aM0), std::forward<M1>(aM1),
+ std::forward<Ms>(aMs)...);
+ }
+
+ /** Match on an immutable const rvalue-reference. */
+ template <typename Matcher>
+ decltype(auto) match(Matcher&& aMatcher) const&& {
+ return Impl::match(std::forward<Matcher>(aMatcher), std::move(*this));
+ }
+
+ template <typename M0, typename M1, typename... Ms>
+ decltype(auto) match(M0&& aM0, M1&& aM1, Ms&&... aMs) const&& {
+ return matchN(std::move(*this), std::forward<M0>(aM0),
+ std::forward<M1>(aM1), std::forward<Ms>(aMs)...);
+ }
+
+ /** Match on a mutable non-const rvalue-reference. */
+ template <typename Matcher>
+ decltype(auto) match(Matcher&& aMatcher) && {
+ return Impl::match(std::forward<Matcher>(aMatcher), std::move(*this));
+ }
+
+ template <typename M0, typename M1, typename... Ms>
+ decltype(auto) match(M0&& aM0, M1&& aM1, Ms&&... aMs) && {
+ return matchN(std::move(*this), std::forward<M0>(aM0),
+ std::forward<M1>(aM1), std::forward<Ms>(aMs)...);
+ }
+
+ /**
+ * Incorporate the current variant's tag into hashValue.
+ * Note that this does not hash the actual contents; you must take
+ * care of that yourself, perhaps by using a match.
+ */
+ mozilla::HashNumber addTagToHash(mozilla::HashNumber hashValue) const {
+ return mozilla::AddToHash(hashValue, tag);
+ }
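+
+  // For example, a sketch of hashing both the tag and the contents, assuming
+  // every alternative type is hashable with AddToHash():
+  //
+  //   mozilla::HashNumber hash = v.addTagToHash(0);
+  //   hash = v.match(
+  //       [hash](const auto& aVal) { return mozilla::AddToHash(hash, aVal); });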
+
+ private:
+ template <typename ConcreteVariant, typename M0, typename M1, typename... Ms>
+ static decltype(auto) matchN(ConcreteVariant&& aVariant, M0&& aM0, M1&& aM1,
+ Ms&&... aMs) {
+ static_assert(
+ 2 + sizeof...(Ms) == sizeof...(Ts),
+ "Variant<T...>::match() takes either one callable argument that "
+ "accepts every type T; or one for each type T, in order");
+ return Impl::matchN(std::forward<ConcreteVariant>(aVariant),
+ std::forward<M0>(aM0), std::forward<M1>(aM1),
+ std::forward<Ms>(aMs)...);
+ }
+};
+
+/*
+ * AsVariant() is used to construct a Variant<T,...> value containing the
+ * provided T value using type inference. It can be used to construct Variant
+ * values in expressions or return them from functions without specifying the
+ * entire Variant type.
+ *
+ * Because AsVariant() must copy or move the value into a temporary and this
+ * cannot necessarily be elided by the compiler, it's mostly appropriate only
+ * for use with primitive or very small types.
+ *
+ * AsVariant() returns an AsVariantTemporary value which is implicitly
+ * convertible to any Variant that can hold a value of type T.
+ */
+template <typename T>
+detail::AsVariantTemporary<T> AsVariant(T&& aValue) {
+ return detail::AsVariantTemporary<T>(std::forward<T>(aValue));
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_Variant_h */
diff --git a/mfbt/Vector.h b/mfbt/Vector.h
new file mode 100644
index 0000000000..380e272548
--- /dev/null
+++ b/mfbt/Vector.h
@@ -0,0 +1,1653 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A type/length-parametrized vector class. */
+
+#ifndef mozilla_Vector_h
+#define mozilla_Vector_h
+
+#include <new> // for placement new
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Alignment.h"
+#include "mozilla/AllocPolicy.h"
+#include "mozilla/ArrayUtils.h" // for PointerRangeSize
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/Span.h"
+#include "mozilla/TemplateLib.h"
+
+namespace mozilla {
+
+template <typename T, size_t N, class AllocPolicy>
+class Vector;
+
+namespace detail {
+
+/*
+ * Check that the given capacity wastes the minimal amount of space if
+ * allocated on the heap. This means that aCapacity*EltSize is as close to a
+ * power-of-two as possible. growStorageBy() is responsible for ensuring this.
+ */
+template <size_t EltSize>
+static bool CapacityHasExcessSpace(size_t aCapacity) {
+ size_t size = aCapacity * EltSize;
+ return RoundUpPow2(size) - size >= EltSize;
+}
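+
+// For example, with EltSize == 4: a capacity of 6 occupies 24 bytes, and a
+// heap allocation would likely be rounded up to RoundUpPow2(24) == 32 bytes,
+// wasting room for two more elements, so this returns true. A capacity of 8
+// occupies exactly 32 bytes and has no excess space.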
+
+/*
+ * AllocPolicy can optionally provide a `computeGrowth<T>(size_t aOldElts,
+ * size_t aIncr)` method that returns the new number of elements to allocate
+ * when the current capacity is `aOldElts` and `aIncr` more are being
+ * requested. If the AllocPolicy does not have such a method, a fallback is
+ * used that rounds the requested capacity up to the next power of two, which
+ * mostly results in doubling the capacity.
+ *
+ * If the new size would overflow some limit, `computeGrowth` returns 0.
+ *
+ * A simpler way would be to make computeGrowth() part of the API for all
+ * AllocPolicy classes, but this turns out to be rather complex because
+ * mozalloc.h defines a very widely-used InfallibleAllocPolicy, and yet it
+ * can only be compiled in limited contexts, e.g. within `extern "C"` and with
+ * -std=c++11 rather than a later version. That makes the headers that are
+ * necessary for the computation unavailable (e.g. mfbt/MathAlgorithms.h).
+ */
+
+// Fallback version.
+template <size_t EltSize>
+inline size_t GrowEltsByDoubling(size_t aOldElts, size_t aIncr) {
+ /*
+   * When choosing a new capacity, its size in bytes should be as close to 2**N
+ * bytes as possible. 2**N-sized requests are best because they are unlikely
+ * to be rounded up by the allocator. Asking for a 2**N number of elements
+ * isn't as good, because if EltSize is not a power-of-two that would
+ * result in a non-2**N request size.
+ */
+
+ if (aIncr == 1) {
+ if (aOldElts == 0) {
+ return 1;
+ }
+
+ /* This case occurs in ~15--20% of the calls to Vector::growStorageBy. */
+
+ /*
+     * Will aOldElts * 4 * EltSize overflow? This condition limits a
+ * collection to 1GB of memory on a 32-bit system, which is a reasonable
+ * limit. It also ensures that
+ *
+ * static_cast<char*>(end()) - static_cast<char*>(begin())
+ *
+ * for a Vector doesn't overflow ptrdiff_t (see bug 510319).
+ */
+ if (MOZ_UNLIKELY(aOldElts &
+ mozilla::tl::MulOverflowMask<4 * EltSize>::value)) {
+ return 0;
+ }
+
+ /*
+ * If we reach here, the existing capacity will have a size that is already
+ * as close to 2^N as sizeof(T) will allow. Just double the capacity, and
+ * then there might be space for one more element.
+ */
+ size_t newElts = aOldElts * 2;
+ if (CapacityHasExcessSpace<EltSize>(newElts)) {
+ newElts += 1;
+ }
+ return newElts;
+ }
+
+ /* This case occurs in ~2% of the calls to Vector::growStorageBy. */
+ size_t newMinCap = aOldElts + aIncr;
+
+ /* Did aOldElts + aIncr overflow? Will newMinCap * EltSize rounded up to the
+ * next power of two overflow PTRDIFF_MAX? */
+ if (MOZ_UNLIKELY(newMinCap < aOldElts ||
+ newMinCap & tl::MulOverflowMask<4 * EltSize>::value)) {
+ return 0;
+ }
+
+ size_t newMinSize = newMinCap * EltSize;
+ size_t newSize = RoundUpPow2(newMinSize);
+ return newSize / EltSize;
+}
+
+// Fallback version.
+template <typename AP, size_t EltSize>
+static size_t ComputeGrowth(size_t aOldElts, size_t aIncr, int) {
+ return GrowEltsByDoubling<EltSize>(aOldElts, aIncr);
+}
+
+// If the AllocPolicy provides its own computeGrowth<EltSize> implementation,
+// use that.
+template <typename AP, size_t EltSize>
+static size_t ComputeGrowth(
+ size_t aOldElts, size_t aIncr,
+ decltype(std::declval<AP>().template computeGrowth<EltSize>(0, 0),
+ bool()) aOverloadSelector) {
+ size_t newElts = AP::template computeGrowth<EltSize>(aOldElts, aIncr);
+ MOZ_ASSERT(newElts <= PTRDIFF_MAX && newElts * EltSize <= PTRDIFF_MAX,
+ "invalid Vector size (see bug 510319)");
+ return newElts;
+}
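+
+// For illustration, a hypothetical AllocPolicy opting in to this hook might
+// look like the sketch below (|LinearGrowthPolicy| is not a real policy):
+//
+//   class LinearGrowthPolicy : public mozilla::MallocAllocPolicy {
+//    public:
+//     template <size_t EltSize>
+//     static size_t computeGrowth(size_t aOldElts, size_t aIncr) {
+//       size_t newElts = aOldElts + aIncr + 64;  // fixed slop, no doubling
+//       if (newElts < aOldElts || newElts > size_t(PTRDIFF_MAX) / EltSize) {
+//         return 0;  // signal overflow, per the contract above
+//       }
+//       return newElts;
+//     }
+//   };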
+
+/*
+ * This template class provides a default implementation for vector operations
+ * when the element type is not known to be a POD, as judged by IsPod.
+ */
+template <typename T, size_t N, class AP, bool IsPod>
+struct VectorImpl {
+ /*
+ * Constructs an object in the uninitialized memory at *aDst with aArgs.
+ */
+ template <typename... Args>
+ MOZ_NONNULL(1)
+ static inline void new_(T* aDst, Args&&... aArgs) {
+ new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
+ }
+
+ /* Destroys constructed objects in the range [aBegin, aEnd). */
+ static inline void destroy(T* aBegin, T* aEnd) {
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ p->~T();
+ }
+ }
+
+ /* Constructs objects in the uninitialized range [aBegin, aEnd). */
+ static inline void initialize(T* aBegin, T* aEnd) {
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ new_(p);
+ }
+ }
+
+ /*
+ * Copy-constructs objects in the uninitialized range
+ * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
+ */
+ template <typename U>
+ static inline void copyConstruct(T* aDst, const U* aSrcStart,
+ const U* aSrcEnd) {
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ new_(aDst, *p);
+ }
+ }
+
+ /*
+ * Move-constructs objects in the uninitialized range
+ * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
+ */
+ template <typename U>
+ static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ new_(aDst, std::move(*p));
+ }
+ }
+
+ /*
+ * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
+ * the same object aU.
+ */
+ template <typename U>
+ static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
+ for (T* end = aDst + aN; aDst < end; ++aDst) {
+ new_(aDst, aU);
+ }
+ }
+
+ /*
+ * Grows the given buffer to have capacity aNewCap, preserving the objects
+ * constructed in the range [begin, end) and updating aV. Assumes that (1)
+ * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
+ * not overflow.
+ */
+ [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
+ size_t aNewCap) {
+ MOZ_ASSERT(!aV.usingInlineStorage());
+ MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
+ T* newbuf = aV.template pod_malloc<T>(aNewCap);
+ if (MOZ_UNLIKELY(!newbuf)) {
+ return false;
+ }
+ T* dst = newbuf;
+ T* src = aV.beginNoCheck();
+ for (; src < aV.endNoCheck(); ++dst, ++src) {
+ new_(dst, std::move(*src));
+ }
+ VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
+ aV.free_(aV.mBegin, aV.mTail.mCapacity);
+ aV.mBegin = newbuf;
+ /* aV.mLength is unchanged. */
+ aV.mTail.mCapacity = aNewCap;
+ return true;
+ }
+};
+
+/*
+ * This partial template specialization provides a default implementation for
+ * vector operations when the element type is known to be a POD, as judged by
+ * IsPod.
+ */
+template <typename T, size_t N, class AP>
+struct VectorImpl<T, N, AP, true> {
+ template <typename... Args>
+ MOZ_NONNULL(1)
+ static inline void new_(T* aDst, Args&&... aArgs) {
+ // Explicitly construct a local object instead of using a temporary since
+ // T(args...) will be treated like a C-style cast in the unary case and
+ // allow unsafe conversions. Both forms should be equivalent to an
+ // optimizing compiler.
+ T temp(std::forward<Args>(aArgs)...);
+ *aDst = temp;
+ }
+
+ static inline void destroy(T*, T*) {}
+
+ static inline void initialize(T* aBegin, T* aEnd) {
+ /*
+ * You would think that memset would be a big win (or even break even)
+ * when we know T is a POD. But currently it's not. This is probably
+ * because |append| tends to be given small ranges and memset requires
+ * a function call that doesn't get inlined.
+ *
+ * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
+ */
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ new_(p);
+ }
+ }
+
+ template <typename U>
+ static inline void copyConstruct(T* aDst, const U* aSrcStart,
+ const U* aSrcEnd) {
+ /*
+ * See above memset comment. Also, notice that copyConstruct is
+ * currently templated (T != U), so memcpy won't work without
+ * requiring T == U.
+ *
+ * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
+ */
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ new_(aDst, *p);
+ }
+ }
+
+ template <typename U>
+ static inline void moveConstruct(T* aDst, const U* aSrcStart,
+ const U* aSrcEnd) {
+ copyConstruct(aDst, aSrcStart, aSrcEnd);
+ }
+
+ static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
+ for (T* end = aDst + aN; aDst < end; ++aDst) {
+ new_(aDst, aT);
+ }
+ }
+
+ [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
+ size_t aNewCap) {
+ MOZ_ASSERT(!aV.usingInlineStorage());
+ MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
+ T* newbuf =
+ aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
+ if (MOZ_UNLIKELY(!newbuf)) {
+ return false;
+ }
+ aV.mBegin = newbuf;
+ /* aV.mLength is unchanged. */
+ aV.mTail.mCapacity = aNewCap;
+ return true;
+ }
+};
+
+// A struct for TestVector.cpp to access private internal fields.
+// DO NOT DEFINE IN YOUR OWN CODE.
+struct VectorTesting;
+
+} // namespace detail
+
+/*
+ * STL-like container providing a short-lived, dynamic buffer. Vector calls the
+ * constructors/destructors of all elements stored in its internal buffer, so
+ * non-PODs may be safely used. Additionally, Vector will store the first N
+ * elements in-place before resorting to dynamic allocation.
+ *
+ * T requirements:
+ * - default and copy constructible, assignable, destructible
+ * - operations do not throw
+ * MinInlineCapacity requirements:
+ * - any value; however, MinInlineCapacity is clamped to min/max values
+ * AllocPolicy:
+ * - see "Allocation policies" in AllocPolicy.h (defaults to
+ * mozilla::MallocAllocPolicy)
+ *
+ * Vector is not reentrant: T member functions called during Vector member
+ * functions must not call back into the same object!
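+ *
+ * A minimal usage sketch (|DoSomething| is hypothetical; fallible appends
+ * must be checked):
+ *
+ *   mozilla::Vector<int, 8> v;  // the first 8 elements are stored inline
+ *   for (int i = 0; i < 100; i++) {
+ *     if (!v.append(i)) {
+ *       return false;  // allocation failure
+ *     }
+ *   }
+ *   for (int x : v) {
+ *     DoSomething(x);
+ *   }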
+ */
+template <typename T, size_t MinInlineCapacity = 0,
+ class AllocPolicy = MallocAllocPolicy>
+class MOZ_NON_PARAM Vector final : private AllocPolicy {
+ /* utilities */
+ static constexpr bool kElemIsPod =
+ std::is_trivial_v<T> && std::is_standard_layout_v<T>;
+ typedef detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>
+ Impl;
+ friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
+ kElemIsPod>;
+
+ friend struct detail::VectorTesting;
+
+ [[nodiscard]] bool growStorageBy(size_t aIncr);
+ [[nodiscard]] bool convertToHeapStorage(size_t aNewCap);
+ [[nodiscard]] bool maybeCheckSimulatedOOM(size_t aRequestedSize);
+
+ /* magic constants */
+
+ /**
+ * The maximum space allocated for inline element storage.
+ *
+ * We reduce space by what the AllocPolicy base class and prior Vector member
+ * fields likely consume to attempt to play well with binary size classes.
+ */
+ static constexpr size_t kMaxInlineBytes =
+ 1024 -
+ (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));
+
+ /**
+ * The number of T elements of inline capacity built into this Vector. This
+ * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
+ *
+ * We use a partially-specialized template (not explicit specialization, which
+ * is only allowed at namespace scope) to compute this value. The benefit is
+ * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
+ * defined at the time |Vector<T>| appears, if no inline storage is requested.
+ */
+ template <size_t MinimumInlineCapacity, size_t Dummy>
+ struct ComputeCapacity {
+ static constexpr size_t value =
+ tl::Min<MinimumInlineCapacity, kMaxInlineBytes / sizeof(T)>::value;
+ };
+
+ template <size_t Dummy>
+ struct ComputeCapacity<0, Dummy> {
+ static constexpr size_t value = 0;
+ };
+
+ /** The actual inline capacity in number of elements T. This may be zero! */
+ static constexpr size_t kInlineCapacity =
+ ComputeCapacity<MinInlineCapacity, 0>::value;
+
+ /* member data */
+
+ /*
+ * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
+ * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
+ * mLength, mBegin + mCapacity) holds uninitialized memory. The range
+ * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
+ * previously allocated by a call to reserve().
+ */
+ T* mBegin;
+
+ /* Number of elements in the vector. */
+ size_t mLength;
+
+ /*
+ * Memory used to store capacity, reserved element count (debug builds only),
+ * and inline storage. The simple "answer" is:
+ *
+ * size_t mCapacity;
+ * #ifdef DEBUG
+ * size_t mReserved;
+ * #endif
+ * alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
+ *
+ * but there are complications. First, C++ forbids zero-sized arrays that
+ * might result. Second, we don't want zero capacity to affect Vector's size
+ * (even empty classes take up a byte, unless they're base classes).
+ *
+ * Yet again, we eliminate the zero-sized array using partial specialization.
+   * And we eliminate the potential size hit by putting capacity/reserved in
+   * one struct, then putting the array (if any) in a derived struct. If no
+   * array is needed, the derived struct won't consume extra space.
+ */
+ struct CapacityAndReserved {
+ explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
+ : mCapacity(aCapacity)
+#ifdef DEBUG
+ ,
+ mReserved(aReserved)
+#endif
+ {
+ }
+ CapacityAndReserved() = default;
+
+ /* Max number of elements storable in the vector without resizing. */
+ size_t mCapacity;
+
+#ifdef DEBUG
+ /* Max elements of reserved or used space in this vector. */
+ size_t mReserved;
+#endif
+ };
+
+// Silence warnings about this struct possibly being padded due to the
+// alignas() in it -- there's nothing we can do to avoid it.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4324)
+#endif // _MSC_VER
+
+ template <size_t Capacity, size_t Dummy>
+ struct CRAndStorage : CapacityAndReserved {
+ explicit CRAndStorage(size_t aCapacity, size_t aReserved)
+ : CapacityAndReserved(aCapacity, aReserved) {}
+ CRAndStorage() = default;
+
+ alignas(T) unsigned char mBytes[Capacity * sizeof(T)];
+
+ // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
+ // T*. Indirecting through this function addresses the problem.
+ void* data() { return mBytes; }
+
+ T* storage() { return static_cast<T*>(data()); }
+ };
+
+ template <size_t Dummy>
+ struct CRAndStorage<0, Dummy> : CapacityAndReserved {
+ explicit CRAndStorage(size_t aCapacity, size_t aReserved)
+ : CapacityAndReserved(aCapacity, aReserved) {}
+ CRAndStorage() = default;
+
+ T* storage() {
+ // If this returns |nullptr|, functions like |Vector::begin()| would too,
+ // breaking callers that pass a vector's elements as pointer/length to
+ // code that bounds its operation by length but (even just as a sanity
+ // check) always wants a non-null pointer. Fake up an aligned, non-null
+ // pointer to support these callers.
+ return reinterpret_cast<T*>(sizeof(T));
+ }
+ };
+
+ CRAndStorage<kInlineCapacity, 0> mTail;
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif // _MSC_VER
+
+#ifdef DEBUG
+ friend class ReentrancyGuard;
+ bool mEntered;
+#endif
+
+ /* private accessors */
+
+ bool usingInlineStorage() const {
+ return mBegin == const_cast<Vector*>(this)->inlineStorage();
+ }
+
+ T* inlineStorage() { return mTail.storage(); }
+
+ T* beginNoCheck() const { return mBegin; }
+
+ T* endNoCheck() { return mBegin + mLength; }
+
+ const T* endNoCheck() const { return mBegin + mLength; }
+
+#ifdef DEBUG
+ /**
+ * The amount of explicitly allocated space in this vector that is immediately
+ * available to be filled by appending additional elements. This value is
+ * always greater than or equal to |length()| -- the vector's actual elements
+ * are implicitly reserved. This value is always less than or equal to
+ * |capacity()|. It may be explicitly increased using the |reserve()| method.
+ */
+ size_t reserved() const {
+ MOZ_ASSERT(mLength <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+ return mTail.mReserved;
+ }
+#endif
+
+ bool internalEnsureCapacity(size_t aNeeded);
+
+ /* Append operations guaranteed to succeed due to pre-reserved space. */
+ template <typename U>
+ void internalAppend(U&& aU);
+ template <typename U, size_t O, class BP>
+ void internalAppendAll(const Vector<U, O, BP>& aU);
+ void internalAppendN(const T& aT, size_t aN);
+ template <typename U>
+ void internalAppend(const U* aBegin, size_t aLength);
+ template <typename U>
+ void internalMoveAppend(U* aBegin, size_t aLength);
+
+ public:
+ static const size_t sMaxInlineStorage = MinInlineCapacity;
+
+ typedef T ElementType;
+
+ explicit Vector(AllocPolicy);
+ Vector() : Vector(AllocPolicy()) {}
+
+ Vector(Vector&&); /* Move constructor. */
+ Vector& operator=(Vector&&); /* Move assignment. */
+ ~Vector();
+
+ /* accessors */
+
+ const AllocPolicy& allocPolicy() const { return *this; }
+
+ AllocPolicy& allocPolicy() { return *this; }
+
+ enum { InlineLength = MinInlineCapacity };
+
+ size_t length() const { return mLength; }
+
+ bool empty() const { return mLength == 0; }
+
+ size_t capacity() const { return mTail.mCapacity; }
+
+ T* begin() {
+ MOZ_ASSERT(!mEntered);
+ return mBegin;
+ }
+
+ const T* begin() const {
+ MOZ_ASSERT(!mEntered);
+ return mBegin;
+ }
+
+ T* end() {
+ MOZ_ASSERT(!mEntered);
+ return mBegin + mLength;
+ }
+
+ const T* end() const {
+ MOZ_ASSERT(!mEntered);
+ return mBegin + mLength;
+ }
+
+ T& operator[](size_t aIndex) {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(aIndex < mLength);
+ return begin()[aIndex];
+ }
+
+ const T& operator[](size_t aIndex) const {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(aIndex < mLength);
+ return begin()[aIndex];
+ }
+
+ T& back() {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(!empty());
+ return *(end() - 1);
+ }
+
+ const T& back() const {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(!empty());
+ return *(end() - 1);
+ }
+
+ operator mozilla::Span<const T>() const {
+ // Explicitly specify template argument here to avoid instantiating Span<T>
+ // first and then implicitly converting to Span<const T>
+ return mozilla::Span<const T>{mBegin, mLength};
+ }
+
+ operator mozilla::Span<T>() { return mozilla::Span{mBegin, mLength}; }
+
+ class Range {
+ friend class Vector;
+ T* mCur;
+ T* mEnd;
+ Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
+ MOZ_ASSERT(aCur <= aEnd);
+ }
+
+ public:
+ bool empty() const { return mCur == mEnd; }
+ size_t remain() const { return PointerRangeSize(mCur, mEnd); }
+ T& front() const {
+ MOZ_ASSERT(!empty());
+ return *mCur;
+ }
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ ++mCur;
+ }
+ T popCopyFront() {
+ MOZ_ASSERT(!empty());
+ return *mCur++;
+ }
+ };
+
+ class ConstRange {
+ friend class Vector;
+ const T* mCur;
+ const T* mEnd;
+ ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
+ MOZ_ASSERT(aCur <= aEnd);
+ }
+
+ public:
+ bool empty() const { return mCur == mEnd; }
+ size_t remain() const { return PointerRangeSize(mCur, mEnd); }
+ const T& front() const {
+ MOZ_ASSERT(!empty());
+ return *mCur;
+ }
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ ++mCur;
+ }
+ T popCopyFront() {
+ MOZ_ASSERT(!empty());
+ return *mCur++;
+ }
+ };
+
+ Range all() { return Range(begin(), end()); }
+ ConstRange all() const { return ConstRange(begin(), end()); }
+
+ /* mutators */
+
+ /**
+ * Reverse the order of the elements in the vector in place.
+ */
+ void reverse();
+
+ /**
+ * Given that the vector is empty, grow the internal capacity to |aRequest|,
+ * keeping the length 0.
+ */
+ [[nodiscard]] bool initCapacity(size_t aRequest);
+
+ /**
+ * Given that the vector is empty, grow the internal capacity and length to
+ * |aRequest| leaving the elements' memory completely uninitialized (with all
+ * the associated hazards and caveats). This avoids the usual allocation-size
+   * rounding that happens in resize and the overhead of initializing elements
+   * that are about to be overwritten.
+ */
+ [[nodiscard]] bool initLengthUninitialized(size_t aRequest);
+
+ /**
+ * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
+ * |aRequest - length()| elements, in any sequence of append/appendAll calls,
+ * is guaranteed to succeed.
+ *
+ * A request to reserve an amount less than the current length does not affect
+ * reserved space.
+ */
+ [[nodiscard]] bool reserve(size_t aRequest);
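+
+  // For example (a sketch; |MakeElement| is hypothetical):
+  //
+  //   if (!v.reserve(v.length() + aCount)) {
+  //     return false;  // OOM
+  //   }
+  //   for (size_t i = 0; i < aCount; i++) {
+  //     v.infallibleAppend(MakeElement(i));  // guaranteed not to reallocate
+  //   }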
+
+ /**
+ * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
+ * or unreserve storage for those elements.
+ */
+ void shrinkBy(size_t aIncr);
+
+ /**
+ * Destroy elements in the range [aNewLength, end()). Does not deallocate
+ * or unreserve storage for those elements.
+ */
+ void shrinkTo(size_t aNewLength);
+
+ /** Grow the vector by aIncr elements. */
+ [[nodiscard]] bool growBy(size_t aIncr);
+
+  /** Call shrinkBy or growBy based on whether aNewLength > length(). */
+ [[nodiscard]] bool resize(size_t aNewLength);
+
+ /**
+ * Increase the length of the vector, but don't initialize the new elements
+ * -- leave them as uninitialized memory.
+ */
+ [[nodiscard]] bool growByUninitialized(size_t aIncr);
+ void infallibleGrowByUninitialized(size_t aIncr);
+ [[nodiscard]] bool resizeUninitialized(size_t aNewLength);
+
+ /** Shorthand for shrinkBy(length()). */
+ void clear();
+
+ /** Clears and releases any heap-allocated storage. */
+ void clearAndFree();
+
+ /**
+ * Shrinks the storage to drop excess capacity if possible.
+ *
+   * The return value indicates whether the operation succeeded; if it is
+   * false, an OOM occurred. The bool can be safely ignored unless you want to
+   * provide the guarantee that `length() == capacity()`.
+ *
+ * For PODs, it calls the AllocPolicy's pod_realloc. For non-PODs, it moves
+ * the elements into the new storage.
+ */
+ bool shrinkStorageToFit();
+
+ /**
+ * If true, appending |aNeeded| elements won't reallocate elements storage.
+ * This *doesn't* mean that infallibleAppend may be used! You still must
+ * reserve the extra space, even if this method indicates that appends won't
+ * need to reallocate elements storage.
+ */
+ bool canAppendWithoutRealloc(size_t aNeeded) const;
+
+ /** Potentially fallible append operations. */
+
+ /**
+ * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
+ * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
+ * not amused.")
+ */
+ template <typename U>
+ [[nodiscard]] bool append(U&& aU);
+
+ /**
+ * Construct a T in-place as a new entry at the end of this vector.
+ */
+ template <typename... Args>
+ [[nodiscard]] bool emplaceBack(Args&&... aArgs) {
+ if (!growByUninitialized(1)) return false;
+ Impl::new_(&back(), std::forward<Args>(aArgs)...);
+ return true;
+ }
+
+ template <typename U, size_t O, class BP>
+ [[nodiscard]] bool appendAll(const Vector<U, O, BP>& aU);
+ template <typename U, size_t O, class BP>
+ [[nodiscard]] bool appendAll(Vector<U, O, BP>&& aU);
+ [[nodiscard]] bool appendN(const T& aT, size_t aN);
+ template <typename U>
+ [[nodiscard]] bool append(const U* aBegin, const U* aEnd);
+ template <typename U>
+ [[nodiscard]] bool append(const U* aBegin, size_t aLength);
+ template <typename U>
+ [[nodiscard]] bool moveAppend(U* aBegin, U* aEnd);
+
+ /*
+ * Guaranteed-infallible append operations for use on vectors whose
+ * memory has been pre-reserved. Don't use these if you haven't reserved the
+ * memory!
+ */
+ template <typename U>
+ void infallibleAppend(U&& aU) {
+ internalAppend(std::forward<U>(aU));
+ }
+ void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
+ template <typename U>
+ void infallibleAppend(const U* aBegin, const U* aEnd) {
+ internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
+ }
+ template <typename U>
+ void infallibleAppend(const U* aBegin, size_t aLength) {
+ internalAppend(aBegin, aLength);
+ }
+ template <typename... Args>
+ void infallibleEmplaceBack(Args&&... aArgs) {
+ infallibleGrowByUninitialized(1);
+ Impl::new_(&back(), std::forward<Args>(aArgs)...);
+ }
+
+ void popBack();
+
+ T popCopy();
+
+ /**
+ * If elements are stored in-place, return nullptr and leave this vector
+ * unmodified.
+ *
+ * Otherwise return this vector's elements buffer, and clear this vector as if
+ * by clearAndFree(). The caller now owns the buffer and is responsible for
+ * deallocating it consistent with this vector's AllocPolicy.
+ *
+ * N.B. Although a T*, only the range [0, length()) is constructed.
+ */
+ [[nodiscard]] T* extractRawBuffer();
+
+ /**
+ * If elements are stored in-place, allocate a new buffer, move this vector's
+ * elements into it, and return that buffer.
+ *
+ * Otherwise return this vector's elements buffer. The caller now owns the
+ * buffer and is responsible for deallocating it consistent with this vector's
+ * AllocPolicy.
+ *
+ * This vector is cleared, as if by clearAndFree(), when this method
+ * succeeds. This method fails and returns nullptr only if new elements buffer
+ * allocation fails.
+ *
+ * N.B. Only the range [0, length()) of the returned buffer is constructed.
+ * If any of these elements are uninitialized (as growByUninitialized
+ * enables), behavior is undefined.
+ */
+ [[nodiscard]] T* extractOrCopyRawBuffer();
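+
+ /*
+ * Ownership sketch (illustrative; assumes the default MallocAllocPolicy,
+ * whose free_ forwards to free()):
+ *
+ *   Vector<char> v;
+ *   // ... fill v ...
+ *   size_t len = v.length();
+ *   char* buf = v.extractOrCopyRawBuffer();
+ *   if (buf) {
+ *     // ... use buf[0, len) ...
+ *     free(buf);  // deallocation matching MallocAllocPolicy
+ *   }
+ */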
+
+ /**
+ * Transfer ownership of an array of objects into the vector. The caller
+ * must have allocated the array in accordance with this vector's
+ * AllocPolicy.
+ *
+ * N.B. This call assumes that there are no uninitialized elements in the
+ * passed range [aP, aP + aLength). The range [aP + aLength, aP +
+ * aCapacity) must be allocated uninitialized memory.
+ */
+ void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);
+
+ /**
+ * Transfer ownership of an array of objects into the vector. The caller
+ * must have allocated the array in accordance with this vector's
+ * AllocPolicy.
+ *
+ * N.B. This call assumes that there are no uninitialized elements in the
+ * passed array.
+ */
+ void replaceRawBuffer(T* aP, size_t aLength);
+
+ /**
+ * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
+ * one position higher. On success, |aP| should not be reused because it'll
+ * be a dangling pointer if reallocation of the vector storage occurred; the
+ * return value should be used instead. On failure, nullptr is returned.
+ *
+ * Example usage:
+ *
+ * if (!(p = vec.insert(p, val))) {
+ * <handle failure>
+ * }
+ * <keep working with p>
+ *
+ * This is inherently a linear-time operation. Be careful!
+ */
+ template <typename U>
+ [[nodiscard]] T* insert(T* aP, U&& aVal);
+
+ /**
+ * Removes the element pointed to by |aT|, which must fall in the bounds
+ * [begin, end), shifting existing elements from |aT + 1| onward one position
+ * lower.
+ */
+ void erase(T* aT);
+
+ /**
+ * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
+ * [begin, end), shifting existing elements from |aEnd| onward to aBegin's old
+ * position.
+ */
+ void erase(T* aBegin, T* aEnd);
+
+ /**
+ * Removes all elements that satisfy the predicate, shifting existing elements
+ * lower to fill erased gaps.
+ */
+ template <typename Pred>
+ void eraseIf(Pred aPred);
+
+ /**
+ * Removes all elements that compare equal to |aU|, shifting existing elements
+ * lower to fill erased gaps.
+ */
+ template <typename U>
+ void eraseIfEqual(const U& aU);
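+
+ /*
+ * Illustrative sketch (hypothetical):
+ *
+ *   Vector<int> v;
+ *   // ... fill v ...
+ *   v.eraseIf([](int aX) { return aX < 0; });  // drop every negative value
+ *   v.eraseIfEqual(0);                         // drop every zero
+ */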
+
+ /**
+ * Measure the size of the vector's heap-allocated storage.
+ */
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
+
+ /**
+ * Like sizeOfExcludingThis, but also measures the size of the vector
+ * object (which must be heap-allocated) itself.
+ */
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
+
+ void swap(Vector& aOther);
+
+ private:
+ Vector(const Vector&) = delete;
+ void operator=(const Vector&) = delete;
+};
+
+/* This does the re-entrancy check plus several other sanity checks. */
+#define MOZ_REENTRANCY_GUARD_ET_AL \
+ ReentrancyGuard g(*this); \
+ MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
+ MOZ_ASSERT(reserved() <= mTail.mCapacity); \
+ MOZ_ASSERT(mLength <= reserved()); \
+ MOZ_ASSERT(mLength <= mTail.mCapacity)
+
+/* Vector Implementation */
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
+ : AP(std::move(aAP)),
+ mLength(0),
+ mTail(kInlineCapacity, 0)
+#ifdef DEBUG
+ ,
+ mEntered(false)
+#endif
+{
+ mBegin = inlineStorage();
+}
+
+/* Move constructor. */
+template <typename T, size_t N, class AllocPolicy>
+MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
+ : AllocPolicy(std::move(aRhs))
+#ifdef DEBUG
+ ,
+ mEntered(false)
+#endif
+{
+ mLength = aRhs.mLength;
+ mTail.mCapacity = aRhs.mTail.mCapacity;
+#ifdef DEBUG
+ mTail.mReserved = aRhs.mTail.mReserved;
+#endif
+
+ if (aRhs.usingInlineStorage()) {
+ /* We can't move the buffer over in this case, so copy elements. */
+ mBegin = inlineStorage();
+ Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
+ /*
+ * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
+ * The elements in its in-line storage still need to be destroyed.
+ */
+ } else {
+ /*
+ * Take src's buffer, and turn src into an empty vector using
+ * in-line storage.
+ */
+ mBegin = aRhs.mBegin;
+ aRhs.mBegin = aRhs.inlineStorage();
+ aRhs.mTail.mCapacity = kInlineCapacity;
+ aRhs.mLength = 0;
+#ifdef DEBUG
+ aRhs.mTail.mReserved = 0;
+#endif
+ }
+}
+
+/* Move assignment. */
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+ this->~Vector();
+ new (KnownNotNull, this) Vector(std::move(aRhs));
+ return *this;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ if (!usingInlineStorage()) {
+ this->free_(beginNoCheck(), mTail.mCapacity);
+ }
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ T* elems = mBegin;
+ size_t len = mLength;
+ size_t mid = len / 2;
+ for (size_t i = 0; i < mid; i++) {
+ std::swap(elems[i], elems[len - i - 1]);
+ }
+}
+
+/*
+ * This function creates a new heap buffer with capacity aNewCap, moves all
+ * elements from the inline buffer into it, and returns false on OOM.
+ */
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
+ MOZ_ASSERT(usingInlineStorage());
+
+ /* Allocate buffer. */
+ MOZ_ASSERT(!detail::CapacityHasExcessSpace<sizeof(T)>(aNewCap));
+ T* newBuf = this->template pod_malloc<T>(aNewCap);
+ if (MOZ_UNLIKELY(!newBuf)) {
+ return false;
+ }
+
+ /* Copy inline elements into heap buffer. */
+ Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
+ Impl::destroy(beginNoCheck(), endNoCheck());
+
+ /* Switch in heap buffer. */
+ mBegin = newBuf;
+ /* mLength is unchanged. */
+ mTail.mCapacity = aNewCap;
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
+ MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);
+
+ size_t newCap;
+
+ if (aIncr == 1 && usingInlineStorage()) {
+ /* This case occurs in ~70--80% of the calls to this function. */
+ constexpr size_t newSize =
+ tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
+ static_assert(newSize / sizeof(T) > 0,
+ "overflow when exceeding inline Vector storage");
+ newCap = newSize / sizeof(T);
+ } else {
+ newCap = detail::ComputeGrowth<AP, sizeof(T)>(mLength, aIncr, true);
+ if (MOZ_UNLIKELY(newCap == 0)) {
+ this->reportAllocOverflow();
+ return false;
+ }
+ }
+
+ if (usingInlineStorage()) {
+ return convertToHeapStorage(newCap);
+ }
+
+ return Impl::growTo(*this, newCap);
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
+ MOZ_ASSERT(empty());
+ MOZ_ASSERT(usingInlineStorage());
+ if (aRequest == 0) {
+ return true;
+ }
+ T* newbuf = this->template pod_malloc<T>(aRequest);
+ if (MOZ_UNLIKELY(!newbuf)) {
+ return false;
+ }
+ mBegin = newbuf;
+ mTail.mCapacity = aRequest;
+#ifdef DEBUG
+ mTail.mReserved = aRequest;
+#endif
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
+ if (!initCapacity(aRequest)) {
+ return false;
+ }
+ infallibleGrowByUninitialized(aRequest);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
+ if (aRequestedSize <= N) {
+ return true;
+ }
+
+#ifdef DEBUG
+ if (aRequestedSize <= mTail.mReserved) {
+ return true;
+ }
+#endif
+
+ return allocPolicy().checkSimulatedOOM();
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ if (aRequest > mTail.mCapacity) {
+ if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(aRequest)) {
+ return false;
+ }
+#ifdef DEBUG
+ if (aRequest > mTail.mReserved) {
+ mTail.mReserved = aRequest;
+ }
+ MOZ_ASSERT(mLength <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+#endif
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ MOZ_ASSERT(aIncr <= mLength);
+ Impl::destroy(endNoCheck() - aIncr, endNoCheck());
+ mLength -= aIncr;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
+ MOZ_ASSERT(aNewLength <= mLength);
+ shrinkBy(mLength - aNewLength);
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ if (aIncr > mTail.mCapacity - mLength) {
+ if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
+ return false;
+ }
+ MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
+ T* newend = endNoCheck() + aIncr;
+ Impl::initialize(endNoCheck(), newend);
+ mLength += aIncr;
+#ifdef DEBUG
+ if (mLength > mTail.mReserved) {
+ mTail.mReserved = mLength;
+ }
+#endif
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ if (aIncr > mTail.mCapacity - mLength) {
+ if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
+ return false;
+ }
+#ifdef DEBUG
+ if (mLength + aIncr > mTail.mReserved) {
+ mTail.mReserved = mLength + aIncr;
+ }
+#endif
+ infallibleGrowByUninitialized(aIncr);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
+ size_t aIncr) {
+ MOZ_ASSERT(mLength + aIncr <= reserved());
+ mLength += aIncr;
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
+ size_t curLength = mLength;
+ if (aNewLength > curLength) {
+ return growBy(aNewLength - curLength);
+ }
+ shrinkBy(curLength - aNewLength);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
+ size_t aNewLength) {
+ size_t curLength = mLength;
+ if (aNewLength > curLength) {
+ return growByUninitialized(aNewLength - curLength);
+ }
+ shrinkBy(curLength - aNewLength);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::clear() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ mLength = 0;
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::clearAndFree() {
+ clear();
+
+ if (usingInlineStorage()) {
+ return;
+ }
+ this->free_(beginNoCheck(), mTail.mCapacity);
+ mBegin = inlineStorage();
+ mTail.mCapacity = kInlineCapacity;
+#ifdef DEBUG
+ mTail.mReserved = 0;
+#endif
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::shrinkStorageToFit() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+
+ const auto length = this->length();
+ if (usingInlineStorage() || length == capacity()) {
+ return true;
+ }
+
+ if (!length) {
+ this->free_(beginNoCheck(), mTail.mCapacity);
+ mBegin = inlineStorage();
+ mTail.mCapacity = kInlineCapacity;
+#ifdef DEBUG
+ mTail.mReserved = 0;
+#endif
+ return true;
+ }
+
+ T* newBuf;
+ size_t newCap;
+ if (length <= kInlineCapacity) {
+ newBuf = inlineStorage();
+ newCap = kInlineCapacity;
+ } else {
+ if (kElemIsPod) {
+ newBuf = this->template pod_realloc<T>(beginNoCheck(), mTail.mCapacity,
+ length);
+ } else {
+ newBuf = this->template pod_malloc<T>(length);
+ }
+ if (MOZ_UNLIKELY(!newBuf)) {
+ return false;
+ }
+ newCap = length;
+ }
+ if (!kElemIsPod || newBuf == inlineStorage()) {
+ Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ }
+ if (!kElemIsPod) {
+ this->free_(beginNoCheck(), mTail.mCapacity);
+ }
+ mBegin = newBuf;
+ mTail.mCapacity = newCap;
+#ifdef DEBUG
+ mTail.mReserved = length;
+#endif
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
+ return mLength + aNeeded <= mTail.mCapacity;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U, size_t O, class BP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
+ const Vector<U, O, BP>& aOther) {
+ internalAppend(aOther.begin(), aOther.length());
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
+ MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+ Impl::new_(endNoCheck(), std::forward<U>(aU));
+ ++mLength;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ if (mLength + aNeeded > mTail.mCapacity) {
+ if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
+ return false;
+ }
+#ifdef DEBUG
+ if (mLength + aNeeded > mTail.mReserved) {
+ mTail.mReserved = mLength + aNeeded;
+ }
+#endif
+ internalAppendN(aT, aNeeded);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
+ size_t aNeeded) {
+ MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+ Impl::copyConstructN(endNoCheck(), aNeeded, aT);
+ mLength += aNeeded;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
+ MOZ_ASSERT(begin() <= aP);
+ MOZ_ASSERT(aP <= end());
+ size_t pos = aP - begin();
+ MOZ_ASSERT(pos <= mLength);
+ size_t oldLength = mLength;
+ if (pos == oldLength) {
+ if (!append(std::forward<U>(aVal))) {
+ return nullptr;
+ }
+ } else {
+ T oldBack = std::move(back());
+ if (!append(std::move(oldBack))) {
+ return nullptr;
+ }
+ for (size_t i = oldLength - 1; i > pos; --i) {
+ (*this)[i] = std::move((*this)[i - 1]);
+ }
+ (*this)[pos] = std::forward<U>(aVal);
+ }
+ return begin() + pos;
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::erase(T* aIt) {
+ MOZ_ASSERT(begin() <= aIt);
+ MOZ_ASSERT(aIt < end());
+ while (aIt + 1 < end()) {
+ *aIt = std::move(*(aIt + 1));
+ ++aIt;
+ }
+ popBack();
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
+ MOZ_ASSERT(begin() <= aBegin);
+ MOZ_ASSERT(aBegin <= aEnd);
+ MOZ_ASSERT(aEnd <= end());
+ while (aEnd < end()) {
+ *aBegin++ = std::move(*aEnd++);
+ }
+ shrinkBy(aEnd - aBegin);
+}
+
+template <typename T, size_t N, class AP>
+template <typename Pred>
+void Vector<T, N, AP>::eraseIf(Pred aPred) {
+ // remove_if finds the first element to be erased, and then efficiently move-
+ // assigns elements to effectively overwrite elements that satisfy the
+ // predicate. It returns the new end pointer, after which there are only
+ // moved-from elements ready to be destroyed, so we just need to shrink the
+ // vector accordingly.
+ T* newEnd = std::remove_if(begin(), end(),
+ [&aPred](const T& aT) { return aPred(aT); });
+ MOZ_ASSERT(newEnd <= end());
+ shrinkBy(end() - newEnd);
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
+ return eraseIf([&aU](const T& aT) { return aT == aU; });
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::internalEnsureCapacity(
+ size_t aNeeded) {
+ if (mLength + aNeeded > mTail.mCapacity) {
+ if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
+ return false;
+ }
+#ifdef DEBUG
+ if (mLength + aNeeded > mTail.mReserved) {
+ mTail.mReserved = mLength + aNeeded;
+ }
+#endif
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
+ const U* aInsEnd) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
+ if (!internalEnsureCapacity(needed)) {
+ return false;
+ }
+ internalAppend(aInsBegin, needed);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
+ size_t aInsLength) {
+ MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+ Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
+ mLength += aInsLength;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::moveAppend(U* aInsBegin, U* aInsEnd) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
+ if (!internalEnsureCapacity(needed)) {
+ return false;
+ }
+ internalMoveAppend(aInsBegin, needed);
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalMoveAppend(U* aInsBegin,
+ size_t aInsLength) {
+ MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
+ MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+ Impl::moveConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
+ mLength += aInsLength;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ if (mLength == mTail.mCapacity) {
+ if (MOZ_UNLIKELY(!growStorageBy(1))) {
+ return false;
+ }
+ } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
+ return false;
+ }
+#ifdef DEBUG
+ if (mLength + 1 > mTail.mReserved) {
+ mTail.mReserved = mLength + 1;
+ }
+#endif
+ internalAppend(std::forward<U>(aU));
+ return true;
+}
+
+template <typename T, size_t N, class AP>
+template <typename U, size_t O, class BP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
+ const Vector<U, O, BP>& aOther) {
+ return append(aOther.begin(), aOther.length());
+}
+
+template <typename T, size_t N, class AP>
+template <typename U, size_t O, class BP>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(Vector<U, O, BP>&& aOther) {
+ if (empty() && capacity() < aOther.length()) {
+ *this = std::move(aOther);
+ return true;
+ }
+
+ if (moveAppend(aOther.begin(), aOther.end())) {
+ aOther.clearAndFree();
+ return true;
+ }
+
+ return false;
+}
+
+template <typename T, size_t N, class AP>
+template <class U>
+MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
+ size_t aInsLength) {
+ return append(aInsBegin, aInsBegin + aInsLength);
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+ MOZ_ASSERT(!empty());
+ --mLength;
+ endNoCheck()->~T();
+}
+
+template <typename T, size_t N, class AP>
+MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
+ T ret = back();
+ popBack();
+ return ret;
+}
+
+template <typename T, size_t N, class AP>
+inline T* Vector<T, N, AP>::extractRawBuffer() {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+
+ if (usingInlineStorage()) {
+ return nullptr;
+ }
+
+ T* ret = mBegin;
+ mBegin = inlineStorage();
+ mLength = 0;
+ mTail.mCapacity = kInlineCapacity;
+#ifdef DEBUG
+ mTail.mReserved = 0;
+#endif
+ return ret;
+}
+
+template <typename T, size_t N, class AP>
+inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
+ if (T* ret = extractRawBuffer()) {
+ return ret;
+ }
+
+ MOZ_REENTRANCY_GUARD_ET_AL;
+
+ T* copy = this->template pod_malloc<T>(mLength);
+ if (!copy) {
+ return nullptr;
+ }
+
+ Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ mBegin = inlineStorage();
+ mLength = 0;
+ mTail.mCapacity = kInlineCapacity;
+#ifdef DEBUG
+ mTail.mReserved = 0;
+#endif
+ return copy;
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
+ size_t aCapacity) {
+ MOZ_REENTRANCY_GUARD_ET_AL;
+
+ /* Destroy what we have. */
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ if (!usingInlineStorage()) {
+ this->free_(beginNoCheck(), mTail.mCapacity);
+ }
+
+ /* Take in the new buffer. */
+ if (aCapacity <= kInlineCapacity) {
+ /*
+ * We convert to inline storage if possible, even though aP might
+ * otherwise be acceptable. Maybe this behaviour should be
+ * specifiable with an argument to this function.
+ */
+ mBegin = inlineStorage();
+ mLength = aLength;
+ mTail.mCapacity = kInlineCapacity;
+ Impl::moveConstruct(mBegin, aP, aP + aLength);
+ Impl::destroy(aP, aP + aLength);
+ this->free_(aP, aCapacity);
+ } else {
+ mBegin = aP;
+ mLength = aLength;
+ mTail.mCapacity = aCapacity;
+ }
+#ifdef DEBUG
+ mTail.mReserved = aCapacity;
+#endif
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
+ replaceRawBuffer(aP, aLength, aLength);
+}
+
+template <typename T, size_t N, class AP>
+inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
+ MallocSizeOf aMallocSizeOf) const {
+ return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
+}
+
+template <typename T, size_t N, class AP>
+inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
+ MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
+}
+
+template <typename T, size_t N, class AP>
+inline void Vector<T, N, AP>::swap(Vector& aOther) {
+ static_assert(N == 0, "still need to implement this for N != 0");
+
+ // This only works when inline storage is always empty.
+ if (!usingInlineStorage() && aOther.usingInlineStorage()) {
+ aOther.mBegin = mBegin;
+ mBegin = inlineStorage();
+ } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
+ mBegin = aOther.mBegin;
+ aOther.mBegin = aOther.inlineStorage();
+ } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
+ std::swap(mBegin, aOther.mBegin);
+ } else {
+ // This case is a no-op, since we'd set both to use their inline storage.
+ }
+
+ std::swap(mLength, aOther.mLength);
+ std::swap(mTail.mCapacity, aOther.mTail.mCapacity);
+#ifdef DEBUG
+ std::swap(mTail.mReserved, aOther.mTail.mReserved);
+#endif
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_Vector_h */
diff --git a/mfbt/WasiAtomic.h b/mfbt/WasiAtomic.h
new file mode 100644
index 0000000000..ba222e91c0
--- /dev/null
+++ b/mfbt/WasiAtomic.h
@@ -0,0 +1,200 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_WasiAtomic_h
+#define mozilla_WasiAtomic_h
+
+// Clang >= 14 supports <atomic> for wasm targets.
+#if _LIBCPP_VERSION >= 14000
+# include <atomic>
+#else
+
+# include <cstddef> // For ptrdiff_t
+# include <cstdint>
+
+// WASI doesn't support <atomic>, and we treat it as single-threaded for now.
+// This is a stub implementation of std atomics, used to build the WASI port
+// of SpiderMonkey.
+
+namespace std {
+enum memory_order {
+ relaxed,
+ consume, // load-consume
+ acquire, // load-acquire
+ release, // store-release
+ acq_rel, // store-release load-acquire
+ seq_cst // store-release load-acquire
+};
+
+inline constexpr auto memory_order_relaxed = memory_order::relaxed;
+inline constexpr auto memory_order_consume = memory_order::consume;
+inline constexpr auto memory_order_acquire = memory_order::acquire;
+inline constexpr auto memory_order_release = memory_order::release;
+inline constexpr auto memory_order_acq_rel = memory_order::acq_rel;
+inline constexpr auto memory_order_seq_cst = memory_order::seq_cst;
+
+template <class T>
+struct atomic {
+ using value_type = T;
+ value_type value_;
+
+ atomic() noexcept = default;
+ constexpr atomic(T desired) noexcept : value_{desired} {}
+
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+ ~atomic() noexcept = default;
+
+ T load(memory_order m = memory_order_seq_cst) const volatile noexcept {
+ return value_;
+ }
+
+ void store(T desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ value_ = desired;
+ }
+
+ T operator=(T desired) volatile noexcept { return value_ = desired; }
+
+ T exchange(T desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ T tmp = value_;
+ value_ = desired;
+ return tmp;
+ }
+
+ bool compare_exchange_weak(T& expected, T desired, memory_order,
+ memory_order) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ bool compare_exchange_weak(
+ T& expected, T desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ bool compare_exchange_strong(T& expected, T desired, memory_order,
+ memory_order) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ bool compare_exchange_strong(
+ T& expected, T desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ T fetch_add(T arg, memory_order m = memory_order_seq_cst) volatile noexcept {
+ T previous = value_;
+ value_ = value_ + arg;
+ return previous;
+ }
+
+ T fetch_sub(T arg, memory_order m = memory_order_seq_cst) volatile noexcept {
+ T previous = value_;
+ value_ = value_ - arg;
+ return previous;
+ }
+
+ T fetch_or(T arg, memory_order m = memory_order_seq_cst) volatile noexcept {
+ T previous = value_;
+ value_ = value_ | arg;
+ return previous;
+ }
+
+ T fetch_xor(T arg, memory_order m = memory_order_seq_cst) volatile noexcept {
+ T previous = value_;
+ value_ = value_ ^ arg;
+ return previous;
+ }
+
+ T fetch_and(T arg, memory_order m = memory_order_seq_cst) volatile noexcept {
+ T previous = value_;
+ value_ = value_ & arg;
+ return previous;
+ }
+};
+
+template <class T>
+struct atomic<T*> {
+ using value_type = T*;
+ using difference_type = ptrdiff_t;
+
+ value_type value_;
+
+ atomic() noexcept = default;
+ constexpr atomic(T* desired) noexcept : value_{desired} {}
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ T* load(memory_order m = memory_order_seq_cst) const volatile noexcept {
+ return value_;
+ }
+
+ void store(T* desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ value_ = desired;
+ }
+
+ T* operator=(T* other) volatile noexcept { return value_ = other; }
+
+ T* exchange(T* desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ T* previous = value_;
+ value_ = desired;
+ return previous;
+ }
+
+ bool compare_exchange_weak(T*& expected, T* desired, memory_order s,
+ memory_order f) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ bool compare_exchange_weak(
+ T*& expected, T* desired,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ bool compare_exchange_strong(T*& expected, T* desired, memory_order s,
+ memory_order f) volatile noexcept {
+ expected = desired;
+ return true;
+ }
+
+ T* fetch_add(ptrdiff_t arg,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ T* previous = value_;
+ value_ = value_ + arg;
+ return previous;
+ }
+
+ T* fetch_sub(ptrdiff_t arg,
+ memory_order m = memory_order_seq_cst) volatile noexcept {
+ T* previous = value_;
+ value_ = value_ - arg;
+ return previous;
+ }
+};
+
+using atomic_uint8_t = atomic<uint8_t>;
+using atomic_uint16_t = atomic<uint16_t>;
+using atomic_uint32_t = atomic<uint32_t>;
+using atomic_uint64_t = atomic<uint64_t>;
+
+} // namespace std
+
+#endif
+
+#endif // mozilla_WasiAtomic_h
diff --git a/mfbt/WeakPtr.h b/mfbt/WeakPtr.h
new file mode 100644
index 0000000000..cb8bdf28e2
--- /dev/null
+++ b/mfbt/WeakPtr.h
@@ -0,0 +1,358 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Weak pointer functionality, implemented as a mixin for use with any class. */
+
+/**
+ * SupportsWeakPtr lets you have a pointer to an object 'Foo' without affecting
+ * its lifetime. It works by creating a single shared reference counted object
+ * (WeakReference) that each WeakPtr will access 'Foo' through. This lets 'Foo'
+ * clear the pointer in the WeakReference without having to know about all of
+ * the WeakPtrs to it and allows the WeakReference to live beyond the lifetime
+ * of 'Foo'.
+ *
+ * PLEASE NOTE: This weak pointer implementation is not thread-safe.
+ *
+ * The overhead of WeakPtr is that each access to 'Foo' requires an
+ * additional dereference, plus a single heap-allocated, pointer-sized object
+ * shared between all of the WeakPtrs.
+ *
+ * Example of usage:
+ *
+ * // To have a class C support weak pointers, inherit from
+ * // SupportsWeakPtr
+ * class C : public SupportsWeakPtr
+ * {
+ * public:
+ * int mNum;
+ * void act();
+ * };
+ *
+ * C* ptr = new C();
+ *
+ * // Get weak pointers to ptr. The first time a weak pointer
+ * // is obtained, a reference counted WeakReference object is created that
+ * // can live beyond the lifetime of 'ptr'. The WeakReference
+ * // object will be notified of 'ptr's destruction.
+ * WeakPtr<C> weak = ptr;
+ * WeakPtr<C> other = ptr;
+ *
+ * // Test a weak pointer for validity before using it.
+ * if (weak) {
+ * weak->mNum = 17;
+ * weak->act();
+ * }
+ *
+ * // Destroying the underlying object clears weak pointers to it.
+ * delete ptr;
+ *
+ * MOZ_ASSERT(!weak, "Deleting |ptr| clears weak pointers to it.");
+ * MOZ_ASSERT(!other, "Deleting |ptr| clears all weak pointers to it.");
+ *
+ * WeakPtr is typesafe and may be used with any class. It is not required that
+ * the class be reference-counted or allocated in any particular way.
+ *
+ * The API was loosely inspired by Chromium's weak_ptr.h:
+ * http://src.chromium.org/svn/trunk/src/base/memory/weak_ptr.h
+ *
+ * Note that inheriting from SupportsWeakPtr through multiple base classes is
+ * not currently supported. We could support it if needed though.
+ *
+ * For Gecko-internal usage there is also MainThreadWeakPtr<T>, a version of
+ * WeakPtr that can be destroyed on any thread, but whose release gets proxied
+ * to the main thread. This is a similar API to nsMainThreadPtrHandle, but
+ * without keeping a strong reference to the main-thread object. Such a
+ * WeakPtr can't be accessed from any thread other than the main thread.
+ */
+
+#ifndef mozilla_WeakPtr_h
+#define mozilla_WeakPtr_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
+
+#include <string.h>
+#include <type_traits>
+
+#if defined(MOZILLA_INTERNAL_API)
+// For thread safety checking.
+# include "nsISupportsImpl.h"
+// For main thread destructor behavior.
+# include "nsProxyRelease.h"
+#endif
+
+#if defined(MOZILLA_INTERNAL_API) && \
+ defined(MOZ_THREAD_SAFETY_OWNERSHIP_CHECKS_SUPPORTED)
+
+// Weak referencing is not implemented as thread safe. When a WeakPtr
+// is created or dereferenced on thread A while the real object is being
+// Released() on thread B, there is a possibility of a race: the proxy
+// object (detail::WeakReference) may be notified of the real object's
+// destruction just after thread A has stored the object pointer locally
+// but before it adds a reference to it.
+//
+// Hence, a non-null weak proxy object is considered to have a single
+// "owning thread". It means that each query for a weak reference,
+// its dereference, and destruction of the real object must all happen
+// on a single thread. The following macros implement assertions for
+// checking these conditions.
+//
+// We re-use XPCOM's nsAutoOwningEventTarget checks when they are available.
+// This has the advantage that it works with cooperative thread pools.
+
+# define MOZ_WEAKPTR_DECLARE_THREAD_SAFETY_CHECK \
+ /* Will be none if mPtr = nullptr. */ \
+ Maybe<nsAutoOwningEventTarget> _owningThread;
+# define MOZ_WEAKPTR_INIT_THREAD_SAFETY_CHECK() \
+ do { \
+ if (p) { \
+ _owningThread.emplace(); \
+ } \
+ } while (false)
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY() \
+ do { \
+ MOZ_DIAGNOSTIC_ASSERT( \
+ !_owningThread || _owningThread->IsCurrentThread(), \
+ "WeakPtr accessed from multiple threads"); \
+ } while (false)
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED(that) \
+ (that)->AssertThreadSafety();
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED_IF(that) \
+ do { \
+ if (that) { \
+ (that)->AssertThreadSafety(); \
+ } \
+ } while (false)
+
+# define MOZ_WEAKPTR_THREAD_SAFETY_CHECKING 1
+
+#else
+
+# define MOZ_WEAKPTR_DECLARE_THREAD_SAFETY_CHECK
+# define MOZ_WEAKPTR_INIT_THREAD_SAFETY_CHECK() \
+ do { \
+ } while (false)
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY() \
+ do { \
+ } while (false)
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED(that) \
+ do { \
+ } while (false)
+# define MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED_IF(that) \
+ do { \
+ } while (false)
+
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+enum class WeakPtrDestructorBehavior {
+ Normal,
+#ifdef MOZILLA_INTERNAL_API
+ ProxyToMainThread,
+#endif
+};
+
+} // namespace detail
+
+template <typename T, detail::WeakPtrDestructorBehavior =
+ detail::WeakPtrDestructorBehavior::Normal>
+class WeakPtr;
+class SupportsWeakPtr;
+
+namespace detail {
+
+// This can live beyond the lifetime of the class derived from
+// SupportsWeakPtr.
+class WeakReference : public ::mozilla::RefCounted<WeakReference> {
+ public:
+ explicit WeakReference(const SupportsWeakPtr* p)
+ : mPtr(const_cast<SupportsWeakPtr*>(p)) {
+ MOZ_WEAKPTR_INIT_THREAD_SAFETY_CHECK();
+ }
+
+ SupportsWeakPtr* get() const {
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY();
+ return mPtr;
+ }
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+ const char* typeName() const { return "WeakReference"; }
+ size_t typeSize() const { return sizeof(*this); }
+#endif
+
+#ifdef MOZ_WEAKPTR_THREAD_SAFETY_CHECKING
+ void AssertThreadSafety() { MOZ_WEAKPTR_ASSERT_THREAD_SAFETY(); }
+#endif
+
+ private:
+ friend class mozilla::SupportsWeakPtr;
+
+ void detach() {
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY();
+ mPtr = nullptr;
+ }
+
+ SupportsWeakPtr* MOZ_NON_OWNING_REF mPtr;
+ MOZ_WEAKPTR_DECLARE_THREAD_SAFETY_CHECK
+};
+
+} // namespace detail
+
+class SupportsWeakPtr {
+ using WeakReference = detail::WeakReference;
+
+ protected:
+ ~SupportsWeakPtr() { DetachWeakPtr(); }
+
+ protected:
+ void DetachWeakPtr() {
+ if (mSelfReferencingWeakReference) {
+ mSelfReferencingWeakReference->detach();
+ }
+ }
+
+ private:
+ WeakReference* SelfReferencingWeakReference() const {
+ if (!mSelfReferencingWeakReference) {
+ mSelfReferencingWeakReference = new WeakReference(this);
+ } else {
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED(mSelfReferencingWeakReference);
+ }
+ return mSelfReferencingWeakReference.get();
+ }
+
+ template <typename U, detail::WeakPtrDestructorBehavior>
+ friend class WeakPtr;
+
+ mutable RefPtr<WeakReference> mSelfReferencingWeakReference;
+};
+
+template <typename T, detail::WeakPtrDestructorBehavior Destruct>
+class WeakPtr {
+ using WeakReference = detail::WeakReference;
+
+ public:
+ WeakPtr& operator=(const WeakPtr& aOther) {
+ // We must make sure the reference we have now is safe to be dereferenced
+ // before we throw it away... (this can be called from a ctor)
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED_IF(mRef);
+ // ...and make sure the new reference is used on a single thread as well.
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED(aOther.mRef);
+
+ mRef = aOther.mRef;
+ return *this;
+ }
+
+ WeakPtr(const WeakPtr& aOther) {
+ // The thread safety check is performed inside of the operator= method.
+ *this = aOther;
+ }
+
+ WeakPtr& operator=(decltype(nullptr)) {
+ // We must make sure the reference we have now is safe to be dereferenced
+ // before we throw it away.
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED_IF(mRef);
+ if (!mRef || mRef->get()) {
+ // Ensure that mRef is dereferenceable in the uninitialized state.
+ mRef = new WeakReference(nullptr);
+ }
+ return *this;
+ }
+
+ WeakPtr& operator=(const T* aOther) {
+ // We must make sure the reference we have now is safe to be dereferenced
+ // before we throw it away.
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED_IF(mRef);
+ if (aOther) {
+ mRef = aOther->SelfReferencingWeakReference();
+ } else if (!mRef || mRef->get()) {
+ // Ensure that mRef is dereferenceable in the uninitialized state.
+ mRef = new WeakReference(nullptr);
+ }
+ // The thread safety check happens inside SelfReferencingWeakReference()
+ // or is initialized in the WeakReference constructor.
+ return *this;
+ }
+
+ MOZ_IMPLICIT WeakPtr(T* aOther) {
+ *this = aOther;
+#ifdef MOZILLA_INTERNAL_API
+ if (Destruct == detail::WeakPtrDestructorBehavior::ProxyToMainThread) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "MainThreadWeakPtr makes no sense on non-main threads");
+ }
+#endif
+ }
+
+ explicit WeakPtr(const RefPtr<T>& aOther) : WeakPtr(aOther.get()) {}
+
+ // Ensure that mRef is dereferenceable in the uninitialized state.
+ WeakPtr() : mRef(new WeakReference(nullptr)) {}
+
+ explicit operator bool() const { return mRef->get(); }
+ T* get() const { return static_cast<T*>(mRef->get()); }
+ operator T*() const { return get(); }
+ T& operator*() const { return *get(); }
+ T* operator->() const MOZ_NO_ADDREF_RELEASE_ON_RETURN { return get(); }
+
+#ifdef MOZILLA_INTERNAL_API
+ ~WeakPtr() {
+ if (Destruct == detail::WeakPtrDestructorBehavior::ProxyToMainThread) {
+ NS_ReleaseOnMainThread("WeakPtr::mRef", mRef.forget());
+ } else {
+ MOZ_WEAKPTR_ASSERT_THREAD_SAFETY_DELEGATED(mRef);
+ }
+ }
+#endif
+
+ private:
+ friend class SupportsWeakPtr;
+
+ explicit WeakPtr(const RefPtr<WeakReference>& aOther) : mRef(aOther) {}
+
+ RefPtr<WeakReference> mRef;
+};
+
+#ifdef MOZILLA_INTERNAL_API
+
+template <typename T>
+using MainThreadWeakPtr =
+ WeakPtr<T, detail::WeakPtrDestructorBehavior::ProxyToMainThread>;
+
+#endif
+
+#define NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR tmp->DetachWeakPtr();
+
+#define NS_IMPL_CYCLE_COLLECTION_WEAK_PTR(class_, ...) \
+ NS_IMPL_CYCLE_COLLECTION_CLASS(class_) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(class_) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_END \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(class_) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+#define NS_IMPL_CYCLE_COLLECTION_WEAK_PTR_INHERITED(class_, super_, ...) \
+ NS_IMPL_CYCLE_COLLECTION_CLASS(class_) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(class_, super_) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_END \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(class_, super_) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+} // namespace mozilla
+
+#endif /* mozilla_WeakPtr_h */
diff --git a/mfbt/WindowsVersion.h b/mfbt/WindowsVersion.h
new file mode 100644
index 0000000000..c357a76d24
--- /dev/null
+++ b/mfbt/WindowsVersion.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_WindowsVersion_h
+#define mozilla_WindowsVersion_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include <stdint.h>
+#include <windows.h>
+
+namespace mozilla {
+
+inline bool IsWindows10BuildOrLater(uint32_t aBuild) {
+ static Atomic<uint32_t> minBuild(0);
+ static Atomic<uint32_t> maxBuild(UINT32_MAX);
+
+ if (minBuild >= aBuild) {
+ return true;
+ }
+
+ if (aBuild >= maxBuild) {
+ return false;
+ }
+
+ OSVERSIONINFOEXW info;
+ ZeroMemory(&info, sizeof(OSVERSIONINFOEXW));
+ info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEXW);
+ info.dwMajorVersion = 10;
+ info.dwBuildNumber = aBuild;
+
+ DWORDLONG conditionMask = 0;
+ VER_SET_CONDITION(conditionMask, VER_MAJORVERSION, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_MINORVERSION, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_BUILDNUMBER, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMINOR, VER_GREATER_EQUAL);
+
+ if (VerifyVersionInfoW(&info,
+ VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER |
+ VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
+ conditionMask)) {
+ minBuild = aBuild;
+ return true;
+ }
+
+ maxBuild = aBuild;
+ return false;
+}
+
+MOZ_ALWAYS_INLINE bool IsWin10AnniversaryUpdateOrLater() {
+ return IsWindows10BuildOrLater(14393);
+}
+
+MOZ_ALWAYS_INLINE bool IsWin10CreatorsUpdateOrLater() {
+ return IsWindows10BuildOrLater(15063);
+}
+
+MOZ_ALWAYS_INLINE bool IsWin10FallCreatorsUpdateOrLater() {
+ return IsWindows10BuildOrLater(16299);
+}
+
+MOZ_ALWAYS_INLINE bool IsWin10Sep2018UpdateOrLater() {
+ return IsWindows10BuildOrLater(17763);
+}
+
+MOZ_ALWAYS_INLINE bool IsWin11OrLater() {
+ return IsWindows10BuildOrLater(22000);
+}
+
+MOZ_ALWAYS_INLINE bool IsWin1122H2OrLater() {
+ return IsWindows10BuildOrLater(22621);
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_WindowsVersion_h */
diff --git a/mfbt/WrappingOperations.h b/mfbt/WrappingOperations.h
new file mode 100644
index 0000000000..bd67ac34f1
--- /dev/null
+++ b/mfbt/WrappingOperations.h
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Math operations that implement wraparound semantics on overflow or underflow.
+ *
+ * While in some cases (but not all of them!) plain old C++ operators and casts
+ * will behave just like these functions, there are three reasons you should use
+ * these functions:
+ *
+ * 1) These functions make *explicit* the desire for and dependence upon
+ * wraparound semantics, just as Rust's i32::wrapping_add and similar
+ * functions explicitly produce wraparound in Rust.
+ * 2) They implement this functionality *safely*, without invoking signed
+ * integer overflow that has undefined behavior in C++.
+ * 3) They play nice with compiler-based integer-overflow sanitizers (see
+ * build/autoconf/sanitize.m4), that in appropriately configured builds
+ * verify at runtime that integral arithmetic doesn't overflow.
+ */
+
+#ifndef mozilla_WrappingOperations_h
+#define mozilla_WrappingOperations_h
+
+#include "mozilla/Attributes.h"
+
+#include <limits.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename UnsignedType>
+struct WrapToSignedHelper {
+ static_assert(std::is_unsigned_v<UnsignedType>,
+ "WrapToSigned must be passed an unsigned type");
+
+ using SignedType = std::make_signed_t<UnsignedType>;
+
+ static constexpr SignedType MaxValue =
+ (UnsignedType(1) << (CHAR_BIT * sizeof(SignedType) - 1)) - 1;
+ static constexpr SignedType MinValue = -MaxValue - 1;
+
+ static constexpr UnsignedType MinValueUnsigned =
+ static_cast<UnsignedType>(MinValue);
+ static constexpr UnsignedType MaxValueUnsigned =
+ static_cast<UnsignedType>(MaxValue);
+
+ // Overflow-correctness was proven in bug 1432646 and is explained in the
+ // comment below. This function is very hot, both at compile time and
+ // runtime, so disable all overflow checking in it.
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ MOZ_NO_SANITIZE_SIGNED_OVERFLOW static constexpr SignedType compute(
+ UnsignedType aValue) {
+ // This algorithm was originally provided here:
+ // https://stackoverflow.com/questions/13150449/efficient-unsigned-to-signed-cast-avoiding-implementation-defined-behavior
+ //
+ // If the value is in the non-negative signed range, just cast.
+ //
+ // If the value will be negative, compute its delta from the first number
+ // past the max signed integer, then add that to the minimum signed value.
+ //
+ // At the low end: if |u| is the maximum signed value plus one, then it has
+ // the same mathematical value as |MinValue| cast to unsigned form. The
+ // delta is zero, so the signed form of |u| is |MinValue| -- exactly the
+ // result of adding zero delta to |MinValue|.
+ //
+ // At the high end: if |u| is the maximum *unsigned* value, then it has all
+ // bits set. |MinValue| cast to unsigned form is purely the high bit set.
+ // So the delta is all bits but the high bit set -- exactly |MaxValue|.
+ // And since |MinValue = -MaxValue - 1|, the sum |MaxValue + (-MaxValue - 1)|
+ // equals -1.
+ //
+ // Thus the delta below is in signed range, the corresponding cast is safe,
+ // and this computation produces values spanning [MinValue, 0): exactly the
+ // desired range of all negative signed integers.
+ return (aValue <= MaxValueUnsigned)
+ ? static_cast<SignedType>(aValue)
+ : static_cast<SignedType>(aValue - MinValueUnsigned) + MinValue;
+ }
+};
+
+} // namespace detail
+
+/**
+ * Convert an unsigned value to signed, if necessary wrapping around.
+ *
+ * This is the behavior normal C++ casting will perform in most implementations
+ * these days -- but this function makes explicit that such conversion is
+ * happening.
+ */
+template <typename UnsignedType>
+constexpr typename detail::WrapToSignedHelper<UnsignedType>::SignedType
+WrapToSigned(UnsignedType aValue) {
+ return detail::WrapToSignedHelper<UnsignedType>::compute(aValue);
+}
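+
+/*
+ * Worked examples (illustrative; the values follow from the algorithm above,
+ * assuming 32-bit int):
+ *
+ *   WrapToSigned(uint32_t(3)) is 3;
+ *   WrapToSigned(uint32_t(0x80000000)) is -2147483648 (INT32_MIN);
+ *   WrapToSigned(uint32_t(0xFFFFFFFF)) is -1.
+ */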
+
+namespace detail {
+
+template <typename T>
+constexpr T ToResult(std::make_unsigned_t<T> aUnsigned) {
+ // We could *always* return WrapToSigned and rely on unsigned conversion to
+ // undo the wrapping when |T| is unsigned, but this seems clearer.
+ return std::is_signed_v<T> ? WrapToSigned(aUnsigned) : aUnsigned;
+}
+
+template <typename T>
+struct WrappingAddHelper {
+ private:
+ using UnsignedT = std::make_unsigned_t<T>;
+
+ public:
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ static constexpr T compute(T aX, T aY) {
+ return ToResult<T>(static_cast<UnsignedT>(aX) + static_cast<UnsignedT>(aY));
+ }
+};
+
+} // namespace detail
+
+/**
+ * Add two integers of the same type and return the result converted to that
+ * type using wraparound semantics, without triggering overflow sanitizers.
+ *
+ * For N-bit unsigned integer types, this is equivalent to adding the two
+ * numbers, then taking the result mod 2**N:
+ *
+ * WrappingAdd(uint32_t(42), uint32_t(17)) is 59 (59 mod 2**32);
+ * WrappingAdd(uint8_t(240), uint8_t(20)) is 4 (260 mod 2**8).
+ *
+ * Unsigned WrappingAdd acts exactly like C++ unsigned addition.
+ *
+ * For N-bit signed integer types, this is equivalent to adding the two numbers
+ * wrapped to unsigned, then wrapping the sum mod 2**N to the signed range:
+ *
+ * WrappingAdd(int16_t(32767), int16_t(3)) is
+ * -32766 ((32770 mod 2**16) - 2**16);
+ * WrappingAdd(int8_t(-128), int8_t(-128)) is
+ * 0 (256 mod 2**8);
+ * WrappingAdd(int32_t(-42), int32_t(-17)) is
+ * -59 ((8589934533 mod 2**32) - 2**32).
+ *
+ * There's no equivalent to this operation in C++, as C++ signed addition that
+ * overflows has undefined behavior. But it's how such addition *tends* to
+ * behave with most compilers, unless an optimization or similar -- quite
+ * permissibly -- triggers different behavior.
+ */
+template <typename T>
+constexpr T WrappingAdd(T aX, T aY) {
+ return detail::WrappingAddHelper<T>::compute(aX, aY);
+}
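+
+/*
+ * Because WrappingAdd is constexpr, the documented examples can be verified
+ * at compile time (an illustrative sketch):
+ *
+ *   static_assert(WrappingAdd(uint8_t(240), uint8_t(20)) == uint8_t(4));
+ *   static_assert(WrappingAdd(int8_t(-128), int8_t(-128)) == int8_t(0));
+ */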
+
+namespace detail {
+
+template <typename T>
+struct WrappingSubtractHelper {
+ private:
+ using UnsignedT = std::make_unsigned_t<T>;
+
+ public:
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ static constexpr T compute(T aX, T aY) {
+ return ToResult<T>(static_cast<UnsignedT>(aX) - static_cast<UnsignedT>(aY));
+ }
+};
+
+} // namespace detail
+
+/**
+ * Subtract two integers of the same type and return the result converted to
+ * that type using wraparound semantics, without triggering overflow sanitizers.
+ *
+ * For N-bit unsigned integer types, this is equivalent to subtracting the two
+ * numbers, then taking the result mod 2**N:
+ *
+ * WrappingSubtract(uint32_t(42), uint32_t(17)) is 29 (29 mod 2**32);
+ * WrappingSubtract(uint8_t(5), uint8_t(20)) is 241 (-15 mod 2**8).
+ *
+ * Unsigned WrappingSubtract acts exactly like C++ unsigned subtraction.
+ *
+ * For N-bit signed integer types, this is equivalent to subtracting the two
+ * numbers wrapped to unsigned, then wrapping the difference mod 2**N to the
+ * signed range:
+ *
+ * WrappingSubtract(int16_t(32767), int16_t(-5)) is
+ * -32764 ((32772 mod 2**16) - 2**16);
+ * WrappingSubtract(int8_t(-128), int8_t(127)) is 1 (-255 mod 2**8);
+ * WrappingSubtract(int32_t(-17), int32_t(-42)) is 25 (25 mod 2**32).
+ *
+ * There's no equivalent to this operation in C++, as C++ signed subtraction
+ * that overflows has undefined behavior. But it's how such subtraction *tends*
+ * to behave with most compilers, unless an optimization or similar -- quite
+ * permissibly -- triggers different behavior.
+ */
+template <typename T>
+constexpr T WrappingSubtract(T aX, T aY) {
+ return detail::WrappingSubtractHelper<T>::compute(aX, aY);
+}
+
+namespace detail {
+
+template <typename T>
+struct WrappingMultiplyHelper {
+ private:
+ using UnsignedT = std::make_unsigned_t<T>;
+
+ public:
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ static constexpr T compute(T aX, T aY) {
+ // Begin with |1U| to ensure the overall operation chain is never promoted
+ // to signed integer operations that might have *signed* integer overflow.
+ return ToResult<T>(static_cast<UnsignedT>(1U * static_cast<UnsignedT>(aX) *
+ static_cast<UnsignedT>(aY)));
+ }
+};
+
+} // namespace detail
+
+/**
+ * Multiply two integers of the same type and return the result converted to
+ * that type using wraparound semantics, without triggering overflow sanitizers.
+ *
+ * For N-bit unsigned integer types, this is equivalent to multiplying the two
+ * numbers, then taking the result mod 2**N:
+ *
+ * WrappingMultiply(uint32_t(42), uint32_t(17)) is 714 (714 mod 2**32);
+ * WrappingMultiply(uint8_t(16), uint8_t(24)) is 128 (384 mod 2**8);
+ * WrappingMultiply(uint16_t(3), uint16_t(32768)) is 32768 (98304 mod 2**16).
+ *
+ * Unsigned WrappingMultiply is *not* identical to C++ multiplication: with most
+ * compilers, in rare cases uint16_t*uint16_t can invoke *signed* integer
+ * overflow having undefined behavior! http://kqueue.org/blog/2013/09/17/cltq/
+ * has the grody details. (Some compilers do this for uint32_t, not uint16_t.)
+ * So it's especially important to use WrappingMultiply for wraparound math with
+ * uint16_t. That quirk aside, this function acts like you *thought* C++
+ * unsigned multiplication always worked.
+ *
+ * For N-bit signed integer types, this is equivalent to multiplying the two
+ * numbers wrapped to unsigned, then wrapping the product mod 2**N to the signed
+ * range:
+ *
+ * WrappingMultiply(int16_t(-456), int16_t(123)) is
+ * 9448 ((-56088 mod 2**16) + 2**16);
+ * WrappingMultiply(int32_t(-7), int32_t(-9)) is 63 (63 mod 2**32);
+ * WrappingMultiply(int8_t(16), int8_t(24)) is -128 ((384 mod 2**8) - 2**8);
+ * WrappingMultiply(int8_t(16), int8_t(255)) is -16 ((4080 mod 2**8) - 2**8).
+ *
+ * There's no equivalent to this operation in C++, as C++ signed
+ * multiplication that overflows has undefined behavior. But it's how such
+ * multiplication *tends* to behave with most compilers, unless an optimization
+ * or similar -- quite permissibly -- triggers different behavior.
+ */
+template <typename T>
+constexpr T WrappingMultiply(T aX, T aY) {
+ return detail::WrappingMultiplyHelper<T>::compute(aX, aY);
+}
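+
+/*
+ * Sketch of the uint16_t hazard described above (illustrative): on a typical
+ * platform with 32-bit int, |a * b| promotes both operands to signed int, and
+ * 65535 * 65535 overflows int -- undefined behavior. WrappingMultiply does
+ * the arithmetic in unsigned form, so it is always well-defined:
+ *
+ *   uint16_t a = 65535;
+ *   uint16_t b = 65535;
+ *   uint16_t bad = a * b;                    // UB via signed int overflow!
+ *   uint16_t good = WrappingMultiply(a, b);  // well-defined: 1
+ */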
+
+} /* namespace mozilla */
+
+#endif /* mozilla_WrappingOperations_h */
diff --git a/mfbt/XorShift128PlusRNG.h b/mfbt/XorShift128PlusRNG.h
new file mode 100644
index 0000000000..1aee59d89f
--- /dev/null
+++ b/mfbt/XorShift128PlusRNG.h
@@ -0,0 +1,122 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* The xorshift128+ pseudo-random number generator. */
+
+#ifndef mozilla_XorShift128Plus_h
+#define mozilla_XorShift128Plus_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/FloatingPoint.h"
+
+#include <inttypes.h>
+
+namespace mozilla {
+namespace non_crypto {
+
+/*
+ * A stream of pseudo-random numbers generated using the xorshift+ technique
+ * described here:
+ *
+ * Vigna, Sebastiano (2014). "Further scramblings of Marsaglia's xorshift
+ * generators". arXiv:1404.0390 (http://arxiv.org/abs/1404.0390)
+ *
+ * That paper says:
+ *
+ * In particular, we propose a tightly coded xorshift128+ generator that
+ * does not fail systematically any test from the BigCrush suite of TestU01
+ * (even reversed) and generates 64 pseudorandom bits in 1.10 ns on an
+ * Intel(R) Core(TM) i7-4770 CPU @3.40GHz (Haswell). It is the fastest
+ * generator we are aware of with such empirical statistical properties.
+ *
+ * The stream of numbers produced by this method repeats every 2**128 - 1 calls
+ * (i.e. never, for all practical purposes). Zero appears 2**64 - 1 times in
+ * this period; all other numbers appear 2**64 times. Additionally, each *bit*
+ * in the produced numbers repeats every 2**128 - 1 calls.
+ *
+ * This generator is not suitable as a cryptographically secure random number
+ * generator.
+ */
+class XorShift128PlusRNG {
+ uint64_t mState[2];
+
+ public:
+ /*
+ * Construct a xorshift128+ pseudo-random number stream using |aInitial0| and
+ * |aInitial1| as the initial state. These MUST NOT both be zero.
+ *
+ * If the initial states contain many zeros, for a few iterations you'll see
+ * many zeroes in the generated numbers. It's suggested to seed a SplitMix64
+ * generator <http://xorshift.di.unimi.it/splitmix64.c> and use its first two
+ * outputs to seed xorshift128+.
+ */
+ XorShift128PlusRNG(uint64_t aInitial0, uint64_t aInitial1) {
+ setState(aInitial0, aInitial1);
+ }
+
+ /**
+ * Return a pseudo-random 64-bit number.
+ */
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ uint64_t next() {
+ /*
+     * The offsetOfState*() methods below are provided so that the exceedingly
+     * rare callers that want to observe or poke at RNG state in ways that
+     * bypass the C++ type system can do so. Don't change the next() or
+     * nextDouble() algorithms without also updating code that uses
+     * offsetOfState*()!
+ */
+ uint64_t s1 = mState[0];
+ const uint64_t s0 = mState[1];
+ mState[0] = s0;
+ s1 ^= s1 << 23;
+ mState[1] = s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26);
+ return mState[1] + s0;
+ }
+
+ /*
+ * Return a pseudo-random floating-point value in the range [0, 1). More
+ * precisely, choose an integer in the range [0, 2**53) and divide it by
+ * 2**53. Given the 2**128 - 1 period noted above, the produced doubles are
+ * all but uniformly distributed in this range.
+ */
+ double nextDouble() {
+ /*
+ * Because the IEEE 64-bit floating point format stores the leading '1' bit
+ * of the mantissa implicitly, it effectively represents a mantissa in the
+ * range [0, 2**53) in only 52 bits. FloatingPoint<double>::kExponentShift
+ * is the width of the bitfield in the in-memory format, so we must add one
+ * to get the mantissa's range.
+ */
+ static constexpr int kMantissaBits =
+ mozilla::FloatingPoint<double>::kExponentShift + 1;
+ uint64_t mantissa = next() & ((UINT64_C(1) << kMantissaBits) - 1);
+ return double(mantissa) / (UINT64_C(1) << kMantissaBits);
+ }
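+
+  /*
+   * Worked instance of the mapping above (editorial note): a mantissa of
+   * 2**52 yields 2**52 / 2**53 == 0.5, and the maximal mantissa 2**53 - 1
+   * yields the largest representable double below 1.
+   */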
+
+ /*
+ * Set the stream's current state to |aState0| and |aState1|. These must not
+ * both be zero; ideally, they should have an almost even mix of zero and one
+ * bits.
+ */
+ void setState(uint64_t aState0, uint64_t aState1) {
+ MOZ_ASSERT(aState0 || aState1);
+ mState[0] = aState0;
+ mState[1] = aState1;
+ }
+
+ static size_t offsetOfState0() {
+ return offsetof(XorShift128PlusRNG, mState[0]);
+ }
+ static size_t offsetOfState1() {
+ return offsetof(XorShift128PlusRNG, mState[1]);
+ }
+};
+
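+/*
+ * Editorial sketch, not part of the upstream header: one way to expand a
+ * single 64-bit seed into the two required nonzero state words, using the
+ * SplitMix64 step recommended in the constructor comment above. The helper
+ * name is illustrative only.
+ */
+inline XorShift128PlusRNG MakeXorShift128PlusRNG(uint64_t aSeed) {
+  auto splitMix64 = [&aSeed]() {
+    uint64_t z = (aSeed += UINT64_C(0x9E3779B97F4A7C15));
+    z = (z ^ (z >> 30)) * UINT64_C(0xBF58476D1CE4E5B9);
+    z = (z ^ (z >> 27)) * UINT64_C(0x94D049BB133111EB);
+    return z ^ (z >> 31);
+  };
+  uint64_t s0 = splitMix64();
+  uint64_t s1 = splitMix64();
+  // The state words must not both be zero; force a nonzero bit if they are.
+  if (s0 == 0 && s1 == 0) {
+    s1 = 1;
+  }
+  return XorShift128PlusRNG(s0, s1);
+}
+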
+} // namespace non_crypto
+} // namespace mozilla
+
+#endif // mozilla_XorShift128Plus_h
diff --git a/mfbt/double-conversion/LICENSE b/mfbt/double-conversion/LICENSE
new file mode 100644
index 0000000000..933718a9ef
--- /dev/null
+++ b/mfbt/double-conversion/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mfbt/double-conversion/add-mfbt-api-markers.patch b/mfbt/double-conversion/add-mfbt-api-markers.patch
new file mode 100644
index 0000000000..6fd0ae7091
--- /dev/null
+++ b/mfbt/double-conversion/add-mfbt-api-markers.patch
@@ -0,0 +1,207 @@
+diff --git a/double-conversion/double-to-string.h b/double-conversion/double-to-string.h
+--- a/double-conversion/double-to-string.h
++++ b/double-conversion/double-to-string.h
+@@ -23,16 +23,17 @@
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
+ #define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
+
++#include "mozilla/Types.h"
+ #include "utils.h"
+
+ namespace double_conversion {
+
+ class DoubleToStringConverter {
+ public:
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
+@@ -192,17 +193,17 @@ class DoubleToStringConverter {
+ //
+ // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN.
+ // Special values: "Infinity" and "NaN".
+ // Lower case 'e' for exponential values.
+ // decimal_in_shortest_low: -6
+ // decimal_in_shortest_high: 21
+ // max_leading_padding_zeroes_in_precision_mode: 6
+ // max_trailing_padding_zeroes_in_precision_mode: 0
+- static const DoubleToStringConverter& EcmaScriptConverter();
++ static MFBT_API const DoubleToStringConverter& EcmaScriptConverter();
+
+ // Computes the shortest string of digits that correctly represent the input
+ // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
+ // (see constructor) it then either returns a decimal representation, or an
+ // exponential representation.
+ // Example with decimal_in_shortest_low = -6,
+ // decimal_in_shortest_high = 21,
+ // EMIT_POSITIVE_EXPONENT_SIGN activated, and
+@@ -277,17 +278,17 @@ class DoubleToStringConverter {
+ // been provided to the constructor,
+ // - 'value' > 10^kMaxFixedDigitsBeforePoint, or
+ // - 'requested_digits' > kMaxFixedDigitsAfterPoint.
+ // The last two conditions imply that the result for non-special values never
+ // contains more than
+ // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
+ // (one additional character for the sign, and one for the decimal point).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+- bool ToFixed(double value,
++ MFBT_API bool ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+ // Computes a representation in exponential format with requested_digits
+ // after the decimal point. The last emitted digit is rounded.
+ // If requested_digits equals -1, then the shortest exponential representation
+ // is computed.
+ //
+@@ -311,17 +312,17 @@ class DoubleToStringConverter {
+ // been provided to the constructor,
+ // - 'requested_digits' > kMaxExponentialDigits.
+ //
+ // The last condition implies that the result never contains more than
+ // kMaxExponentialDigits + 8 characters (the sign, the digit before the
+ // decimal point, the decimal point, the exponent character, the
+ // exponent's sign, and at most 3 exponent digits).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+- bool ToExponential(double value,
++ MFBT_API bool ToExponential(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+
+ // Computes 'precision' leading digits of the given 'value' and returns them
+ // either in exponential or decimal format, depending on
+ // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
+ // constructor).
+@@ -352,17 +353,17 @@ class DoubleToStringConverter {
+ // been provided to the constructor,
+ // - precision < kMinPericisionDigits
+ // - precision > kMaxPrecisionDigits
+ //
+ // The last condition implies that the result never contains more than
+ // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
+ // exponent character, the exponent's sign, and at most 3 exponent digits).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+- bool ToPrecision(double value,
++ MFBT_API bool ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const;
+
+ enum DtoaMode {
+ // Produce the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate
+ // but correct) 0.3.
+ SHORTEST,
+@@ -414,44 +415,44 @@ class DoubleToStringConverter {
+ // DoubleToAscii expects the given buffer to be big enough to hold all
+ // digits and a terminating null-character. In SHORTEST-mode it expects a
+ // buffer of at least kBase10MaximalLength + 1. In all other modes the
+ // requested_digits parameter and the padding-zeroes limit the size of the
+ // output. Don't forget the decimal point, the exponent character and the
+ // terminating null-character when computing the maximal output size.
+ // The given length is only used in debug mode to ensure the buffer is big
+ // enough.
+- static void DoubleToAscii(double v,
++ static MFBT_API void DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point);
+
+ private:
+ // Implementation for ToShortest and ToShortestSingle.
+- bool ToShortestIeeeNumber(double value,
++ MFBT_API bool ToShortestIeeeNumber(double value,
+ StringBuilder* result_builder,
+ DtoaMode mode) const;
+
+ // If the value is a special value (NaN or Infinity) constructs the
+ // corresponding string using the configured infinity/nan-symbol.
+ // If either of them is NULL or the value is not special then the
+ // function returns false.
+- bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
++ MFBT_API bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
+ // Constructs an exponential representation (i.e. 1.234e56).
+ // The given exponent assumes a decimal point after the first decimal digit.
+- void CreateExponentialRepresentation(const char* decimal_digits,
++ MFBT_API void CreateExponentialRepresentation(const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const;
+ // Creates a decimal representation (i.e 1234.5678).
+- void CreateDecimalRepresentation(const char* decimal_digits,
++ MFBT_API void CreateDecimalRepresentation(const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const;
+
+ const int flags_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+diff --git a/double-conversion/string-to-double.h b/double-conversion/string-to-double.h
+--- a/double-conversion/string-to-double.h
++++ b/double-conversion/string-to-double.h
+@@ -23,16 +23,17 @@
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
+ #define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
+
++#include "mozilla/Types.h"
+ #include "utils.h"
+
+ namespace double_conversion {
+
+ class StringToDoubleConverter {
+ public:
+ // Enumeration for allowing octals and ignoring junk when converting
+ // strings to numbers.
+@@ -178,34 +179,34 @@ class StringToDoubleConverter {
+ separator_(separator) {
+ }
+
+ // Performs the conversion.
+ // The output parameter 'processed_characters_count' is set to the number
+ // of characters that have been processed to read the number.
+ // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
+ // in the 'processed_characters_count'. Trailing junk is never included.
+- double StringToDouble(const char* buffer,
++ MFBT_API double StringToDouble(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble above but for 16 bit characters.
+- double StringToDouble(const uc16* buffer,
++ MFBT_API double StringToDouble(const uc16* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble but reads a float.
+ // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
+ // due to potential double-rounding.
+- float StringToFloat(const char* buffer,
++ MFBT_API float StringToFloat(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToFloat above but for 16 bit characters.
+- float StringToFloat(const uc16* buffer,
++ MFBT_API float StringToFloat(const uc16* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble for T = double, and StringToFloat for T = float.
+ template <typename T>
+ T StringTo(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
diff --git a/mfbt/double-conversion/debug-only-functions.patch b/mfbt/double-conversion/debug-only-functions.patch
new file mode 100644
index 0000000000..b231e36949
--- /dev/null
+++ b/mfbt/double-conversion/debug-only-functions.patch
@@ -0,0 +1,39 @@
+diff --git a/double-conversion/strtod.cc b/double-conversion/strtod.cc
+--- a/double-conversion/strtod.cc
++++ b/double-conversion/strtod.cc
+@@ -436,16 +436,17 @@ static bool ComputeGuess(Vector<const ch
+ return true;
+ }
+ if (*guess == Double::Infinity()) {
+ return true;
+ }
+ return false;
+ }
+
++#ifdef DEBUG
+ static bool IsDigit(const char d) {
+ return ('0' <= d) && (d <= '9');
+ }
+
+ static bool IsNonZeroDigit(const char d) {
+ return ('1' <= d) && (d <= '9');
+ }
+
+@@ -457,16 +458,17 @@ static bool IsNonZeroDigit(const char d)
+ static bool AssertTrimmedDigits(const Vector<const char>& buffer) {
+ for(int i = 0; i < buffer.length(); ++i) {
+ if(!IsDigit(buffer[i])) {
+ return false;
+ }
+ }
+ return (buffer.length() == 0) || (IsNonZeroDigit(buffer[0]) && IsNonZeroDigit(buffer[buffer.length()-1]));
+ }
++#endif
+
+ double StrtodTrimmed(Vector<const char> trimmed, int exponent) {
+ DOUBLE_CONVERSION_ASSERT(trimmed.length() <= kMaxSignificantDecimalDigits);
+ DOUBLE_CONVERSION_ASSERT(AssertTrimmedDigits(trimmed));
+ double guess;
+ const bool is_correct = ComputeGuess(trimmed, exponent, &guess);
+ if (is_correct) {
+ return guess;
diff --git a/mfbt/double-conversion/double-conversion/README.md b/mfbt/double-conversion/double-conversion/README.md
new file mode 100644
index 0000000000..e5d9a4e682
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/README.md
@@ -0,0 +1,55 @@
+https://github.com/google/double-conversion
+
+This project (double-conversion) provides binary-decimal and decimal-binary
+routines for IEEE doubles.
+
+The library consists of efficient conversion routines that have been extracted
+from the V8 JavaScript engine. The code has been refactored and improved so that
+it can be used more easily in other projects.
+
+There is extensive documentation in `double-conversion/string-to-double.h` and
+`double-conversion/double-to-string.h`. Other examples can be found in
+`test/cctest/test-conversions.cc`.
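+
+As a quick illustration (an editorial sketch based on the documented API;
+see the headers above for the authoritative signatures), converting a double
+to its shortest string form looks like this:
+
+    #include <double-conversion/double-to-string.h>
+
+    using double_conversion::DoubleToStringConverter;
+    using double_conversion::StringBuilder;
+
+    char buf[128];
+    StringBuilder builder(buf, sizeof(buf));
+    DoubleToStringConverter::EcmaScriptConverter().ToShortest(0.1, &builder);
+    // builder.Finalize() now returns the C string "0.1".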
+
+
+Building
+========
+
+This library can be built with [scons][0] or [cmake][1].
+The checked-in Makefile simply forwards to scons, and provides a
+shortcut to run all tests:
+
+ make
+ make test
+
+SCons
+-----
+
+The easiest way to install this library is to use `scons`. It builds
+the static and shared library, and is set up to install those at the
+correct locations:
+
+ scons install
+
+Use the `DESTDIR` option to change the target directory:
+
+ scons DESTDIR=alternative_directory install
+
+CMake
+-----
+
+To use CMake, run `cmake .` in the root directory. This overwrites the
+existing Makefile.
+
+Use `-DBUILD_SHARED_LIBS=ON` to enable the compilation of shared libraries.
+Note that this disables static libraries. There is currently no way to
+build both libraries at the same time with cmake.
+
+Use `-DBUILD_TESTING=ON` to build the test executable.
+
+ cmake . -DBUILD_TESTING=ON
+ make
+ test/cctest/cctest
+
+[0]: http://www.scons.org/
+[1]: https://cmake.org/
diff --git a/mfbt/double-conversion/double-conversion/bignum-dtoa.cc b/mfbt/double-conversion/double-conversion/bignum-dtoa.cc
new file mode 100644
index 0000000000..15123e6a63
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/bignum-dtoa.cc
@@ -0,0 +1,641 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cmath>
+
+#include "bignum-dtoa.h"
+
+#include "bignum.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+static int NormalizedExponent(uint64_t significand, int exponent) {
+ DOUBLE_CONVERSION_ASSERT(significand != 0);
+ while ((significand & Double::kHiddenBit) == 0) {
+ significand = significand << 1;
+ exponent = exponent - 1;
+ }
+ return exponent;
+}
+
+
+// Forward declarations:
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
+static int EstimatePower(int exponent);
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator.
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus);
+// Multiplies numerator/denominator so that its value lies in the range 1-10.
+// Returns decimal_point s.t.
+// v = numerator'/denominator' * 10^(decimal_point-1)
+// where numerator' and denominator' are the values of numerator and
+// denominator after the call to this function.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus);
+// Generates digits from the left to the right and stops when the generated
+// digits yield the shortest decimal representation of v.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length);
+// Generates 'requested_digits' after the decimal point.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char> buffer, int* length);
+// Generates 'count' digits of numerator/denominator.
+// Once 'count' digits have been produced rounds the result depending on the
+// remainder (remainders of exactly .5 round upwards). Might update the
+// decimal_point when rounding up (for example for 0.9999).
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char> buffer, int* length);
+
+
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* decimal_point) {
+ DOUBLE_CONVERSION_ASSERT(v > 0);
+ DOUBLE_CONVERSION_ASSERT(!Double(v).IsSpecial());
+ uint64_t significand;
+ int exponent;
+ bool lower_boundary_is_closer;
+ if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) {
+ float f = static_cast<float>(v);
+ DOUBLE_CONVERSION_ASSERT(f == v);
+ significand = Single(f).Significand();
+ exponent = Single(f).Exponent();
+ lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser();
+ } else {
+ significand = Double(v).Significand();
+ exponent = Double(v).Exponent();
+ lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser();
+ }
+ bool need_boundary_deltas =
+ (mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE);
+
+ bool is_even = (significand & 1) == 0;
+ int normalized_exponent = NormalizedExponent(significand, exponent);
+ // estimated_power might be too low by 1.
+ int estimated_power = EstimatePower(normalized_exponent);
+
+ // Shortcut for Fixed.
+ // The requested digits correspond to the digits after the point. If the
+  // number is much too small, then there is no point in trying to get any
+  // digits.
+ if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
+ buffer[0] = '\0';
+ *length = 0;
+ // Set decimal-point to -requested_digits. This is what Gay does.
+    // Note that it should not have any effect anyway, since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ return;
+ }
+
+ Bignum numerator;
+ Bignum denominator;
+ Bignum delta_minus;
+ Bignum delta_plus;
+ // Make sure the bignum can grow large enough. The smallest double equals
+ // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
+ // The maximum double is 1.7976931348623157e308 which needs fewer than
+ // 308*4 binary digits.
+ DOUBLE_CONVERSION_ASSERT(Bignum::kMaxSignificantBits >= 324*4);
+ InitialScaledStartValues(significand, exponent, lower_boundary_is_closer,
+ estimated_power, need_boundary_deltas,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^estimated_power.
+ FixupMultiply10(estimated_power, is_even, decimal_point,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^(decimal_point-1), and
+ // 1 <= (numerator + delta_plus) / denominator < 10
+ switch (mode) {
+ case BIGNUM_DTOA_SHORTEST:
+ case BIGNUM_DTOA_SHORTEST_SINGLE:
+ GenerateShortestDigits(&numerator, &denominator,
+ &delta_minus, &delta_plus,
+ is_even, buffer, length);
+ break;
+ case BIGNUM_DTOA_FIXED:
+ BignumToFixed(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ case BIGNUM_DTOA_PRECISION:
+ GenerateCountedDigits(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ default:
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+ buffer[*length] = '\0';
+}
+
+
+// The procedure starts generating digits from the left to the right and stops
+// when the generated digits yield the shortest decimal representation of v. A
+// decimal representation of v is a number lying closer to v than to any other
+// double, so it converts to v when read.
+//
+// This is true if d, the decimal representation, is between m- and m+, the
+// upper and lower boundaries. d must be strictly between them if !is_even.
+// m- := (numerator - delta_minus) / denominator
+// m+ := (numerator + delta_plus) / denominator
+//
+// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
+// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
+// will be produced. This should be the standard precondition.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length) {
+ // Small optimization: if delta_minus and delta_plus are the same just reuse
+ // one of the two bignums.
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_plus = delta_minus;
+ }
+ *length = 0;
+ for (;;) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ DOUBLE_CONVERSION_ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[(*length)++] = static_cast<char>(digit + '0');
+
+ // Can we stop already?
+ // If the remainder of the division is less than the distance to the lower
+ // boundary we can stop. In this case we simply round down (discarding the
+ // remainder).
+ // Similarly we test if we can round up (using the upper boundary).
+ bool in_delta_room_minus;
+ bool in_delta_room_plus;
+ if (is_even) {
+ in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
+ } else {
+ in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
+ }
+ if (is_even) {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (!in_delta_room_minus && !in_delta_room_plus) {
+ // Prepare for next iteration.
+ numerator->Times10();
+ delta_minus->Times10();
+ // We optimized delta_plus to be equal to delta_minus (if they share the
+ // same value). So don't multiply delta_plus if they point to the same
+ // object.
+ if (delta_minus != delta_plus) {
+ delta_plus->Times10();
+ }
+ } else if (in_delta_room_minus && in_delta_room_plus) {
+ // Let's see if 2*numerator < denominator.
+ // If yes, then the next digit would be < 5 and we can round down.
+ int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
+ if (compare < 0) {
+ // Remaining digits are less than .5. -> Round down (== do nothing).
+ } else if (compare > 0) {
+ // Remaining digits are more than .5 of denominator. -> Round up.
+ // Note that the last digit could not be a '9' as otherwise the whole
+ // loop would have stopped earlier.
+ // We still have an assert here in case the preconditions were not
+ // satisfied.
+ DOUBLE_CONVERSION_ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ } else {
+ // Halfway case.
+ // TODO(floitsch): need a way to solve half-way cases.
+ // For now let's round towards even (since this is what Gay seems to
+ // do).
+
+ if ((buffer[(*length) - 1] - '0') % 2 == 0) {
+ // Round down => Do nothing.
+ } else {
+ DOUBLE_CONVERSION_ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ }
+ }
+ return;
+ } else if (in_delta_room_minus) {
+ // Round down (== do nothing).
+ return;
+ } else { // in_delta_room_plus
+ // Round up.
+ // Note again that the last digit could not be '9' since this would have
+ // stopped the loop earlier.
+      // We still have a DOUBLE_CONVERSION_ASSERT here, in case the preconditions were not
+ // satisfied.
+ DOUBLE_CONVERSION_ASSERT(buffer[(*length) -1] != '9');
+ buffer[(*length) - 1]++;
+ return;
+ }
+ }
+}
+
+
+// Let v = numerator / denominator < 10.
+// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
+// from left to right. Once 'count' digits have been produced we decide whether
+// to round up or down. Remainders of exactly .5 round upwards. Numbers such
+// as 9.999999 propagate a carry all the way, and change the
+// exponent (decimal_point), when rounding upwards.
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char> buffer, int* length) {
+ DOUBLE_CONVERSION_ASSERT(count >= 0);
+ for (int i = 0; i < count - 1; ++i) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ DOUBLE_CONVERSION_ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[i] = static_cast<char>(digit + '0');
+ // Prepare for next iteration.
+ numerator->Times10();
+ }
+ // Generate the last digit.
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ digit++;
+ }
+ DOUBLE_CONVERSION_ASSERT(digit <= 10);
+ buffer[count - 1] = static_cast<char>(digit + '0');
+ // Correct bad digits (in case we had a sequence of '9's). Propagate the
+  // carry until we hit a non-'9' or until we reach the first digit.
+ for (int i = count - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ if (buffer[0] == '0' + 10) {
+ // Propagate a carry past the top place.
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+ *length = count;
+}
+
+
+// Generates 'requested_digits' after the decimal point. It might omit
+// trailing '0's. If the input number is too small then no digits at all are
+// generated (ex.: 2 fixed digits for 0.00001).
+//
+// The input satisfies: 1 <= (numerator + delta) / denominator < 10.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char> buffer, int* length) {
+ // Note that we have to look at more than just the requested_digits, since
+ // a number could be rounded up. Example: v=0.5 with requested_digits=0.
+ // Even though the power of v equals 0 we can't just stop here.
+ if (-(*decimal_point) > requested_digits) {
+ // The number is definitively too small.
+ // Ex: 0.001 with requested_digits == 1.
+ // Set decimal-point to -requested_digits. This is what Gay does.
+ // Note that it should not have any effect anyways since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ *length = 0;
+ return;
+ } else if (-(*decimal_point) == requested_digits) {
+ // We only need to verify if the number rounds down or up.
+ // Ex: 0.04 and 0.06 with requested_digits == 1.
+ DOUBLE_CONVERSION_ASSERT(*decimal_point == -requested_digits);
+ // Initially the fraction lies in range (1, 10]. Multiply the denominator
+ // by 10 so that we can compare more easily.
+ denominator->Times10();
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ // If the fraction is >= 0.5 then we have to include the rounded
+ // digit.
+ buffer[0] = '1';
+ *length = 1;
+ (*decimal_point)++;
+ } else {
+      // Note that we caught most similar cases earlier.
+ *length = 0;
+ }
+ return;
+ } else {
+ // The requested digits correspond to the digits after the point.
+ // The variable 'needed_digits' includes the digits before the point.
+ int needed_digits = (*decimal_point) + requested_digits;
+ GenerateCountedDigits(needed_digits, decimal_point,
+ numerator, denominator,
+ buffer, length);
+ }
+}
+
+
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
+// v = f * 2^exponent and 2^52 <= f < 2^53.
+// v is hence a normalized double with the given exponent. The output is an
+// approximation for the exponent of the decimal approximation .digits * 10^k.
+//
+// The result might undershoot by 1 in which case 10^k <= v < 10^(k+1).
+// Note: this property holds for v's upper boundary m+ too.
+//    10^k <= m+ < 10^(k+1).
+// (see explanation below).
+//
+// Examples:
+// EstimatePower(0) => 16
+// EstimatePower(-52) => 0
+//
+// Note: e >= 0 => EstimatePower(e) > 0. No similar claim can be made for e < 0.
+static int EstimatePower(int exponent) {
+ // This function estimates log10 of v where v = f*2^e (with e == exponent).
+ // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
+ // Note that f is bounded by its container size. Let p = 53 (the double's
+ // significand size). Then 2^(p-1) <= f < 2^p.
+ //
+  // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
+  // to log2(v), the function simplifies to ((e+(len(f)-1))/log2(10)).
+ // The computed number undershoots by less than 0.631 (when we compute log3
+ // and not log10).
+ //
+ // Optimization: since we only need an approximated result this computation
+ // can be performed on 64 bit integers. On x86/x64 architecture the speedup is
+ // not really measurable, though.
+ //
+  // Since we want to avoid overshooting we decrement by 1e-10 so that
+ // floating-point imprecisions don't affect us.
+ //
+ // Explanation for v's boundary m+: the computation takes advantage of
+ // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
+ // (even for denormals where the delta can be much more important).
+
+ const double k1Log10 = 0.30102999566398114; // 1/lg(10)
+
+ // For doubles len(f) == 53 (don't forget the hidden bit).
+ const int kSignificandSize = Double::kSignificandSize;
+ double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
+ return static_cast<int>(estimate);
+}
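+
+// Worked instance of the estimate above (editorial note, not upstream code):
+// EstimatePower(0) computes ceil((0 + 53 - 1) * 0.30102999... - 1e-10) ==
+// ceil(15.65...) == 16, matching the example in the comment block above.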
+
+
+// See comments for InitialScaledStartValues.
+static void InitialScaledStartValuesPositiveExponent(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // A positive exponent implies a positive power.
+ DOUBLE_CONVERSION_ASSERT(estimated_power >= 0);
+ // Since the estimated_power is positive we simply multiply the denominator
+ // by 10^estimated_power.
+
+ // numerator = v.
+ numerator->AssignUInt64(significand);
+ numerator->ShiftLeft(exponent);
+ // denominator = 10^estimated_power.
+ denominator->AssignPowerUInt16(10, estimated_power);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ delta_plus->AssignUInt16(1);
+ delta_plus->ShiftLeft(exponent);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ delta_minus->ShiftLeft(exponent);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentPositivePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // v = f * 2^e with e < 0, and with estimated_power >= 0.
+ // This means that e is close to 0 (have a look at how estimated_power is
+ // computed).
+
+ // numerator = significand
+ // since v = significand * 2^exponent this is equivalent to
+  //  numerator = v * 2^-exponent
+ numerator->AssignUInt64(significand);
+ // denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
+ denominator->AssignPowerUInt16(10, estimated_power);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ // Given that the denominator already includes v's exponent the distance
+ // to the boundaries is simply 1.
+ delta_plus->AssignUInt16(1);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentNegativePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // Instead of multiplying the denominator with 10^estimated_power we
+ // multiply all values (numerator and deltas) by 10^-estimated_power.
+
+ // Use numerator as temporary container for power_ten.
+ Bignum* power_ten = numerator;
+ power_ten->AssignPowerUInt16(10, -estimated_power);
+
+ if (need_boundary_deltas) {
+ // Since power_ten == numerator we must make a copy of 10^estimated_power
+ // before we complete the computation of the numerator.
+ // delta_plus = delta_minus = 10^estimated_power
+ delta_plus->AssignBignum(*power_ten);
+ delta_minus->AssignBignum(*power_ten);
+ }
+
+ // numerator = significand * 2 * 10^-estimated_power
+ // since v = significand * 2^exponent this is equivalent to
+ // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
+ // Remember: numerator has been abused as power_ten. So no need to assign it
+ // to itself.
+ DOUBLE_CONVERSION_ASSERT(numerator == power_ten);
+ numerator->MultiplyByUInt64(significand);
+
+ // denominator = 2 * 2^-exponent with exponent < 0.
+ denominator->AssignUInt16(1);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ numerator->ShiftLeft(1);
+ denominator->ShiftLeft(1);
+ // With this shift the boundaries have their correct value, since
+ // delta_plus = 10^-estimated_power, and
+ // delta_minus = 10^-estimated_power.
+ // These assignments have been done earlier.
+ // The adjustments if f == 2^p-1 (lower boundary is closer) are done later.
+ }
+}
+
+
+// Let v = significand * 2^exponent.
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator. The functions GenerateShortestDigits and
+// GenerateCountedDigits will then convert this ratio to its decimal
+// representation d, with the required accuracy.
+// Then d * 10^estimated_power is the representation of v.
+// (Note: the fraction and the estimated_power might get adjusted before
+// generating the decimal representation.)
+//
+// The initial start values consist of:
+// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
+// - a scaled (common) denominator.
+// optionally (used by GenerateShortestDigits to decide if it has the shortest
+// decimal converting back to v):
+// - v - m-: the distance to the lower boundary.
+// - m+ - v: the distance to the upper boundary.
+//
+// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
+//
+// Let ep == estimated_power, then the returned values will satisfy:
+// v / 10^ep = numerator / denominator.
+// v's boundaries m- and m+:
+// m- / 10^ep == v / 10^ep - delta_minus / denominator
+// m+ / 10^ep == v / 10^ep + delta_plus / denominator
+// Or in other words:
+// m- == v - delta_minus * 10^ep / denominator;
+// m+ == v + delta_plus * 10^ep / denominator;
+//
+// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
+// or 10^k <= v < 10^(k+1)
+// we then have 0.1 <= numerator/denominator < 1
+// or 1 <= numerator/denominator < 10
+//
+// It is then easy to kickstart the digit-generation routine.
+//
+// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST
+// or BIGNUM_DTOA_SHORTEST_SINGLE.
+
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus) {
+ if (exponent >= 0) {
+ InitialScaledStartValuesPositiveExponent(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else if (estimated_power >= 0) {
+ InitialScaledStartValuesNegativeExponentPositivePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else {
+ InitialScaledStartValuesNegativeExponentNegativePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ }
+
+ if (need_boundary_deltas && lower_boundary_is_closer) {
+ // The lower boundary is closer at half the distance of "normal" numbers.
+ // Increase the common denominator and adapt all but the delta_minus.
+ denominator->ShiftLeft(1); // *2
+ numerator->ShiftLeft(1); // *2
+ delta_plus->ShiftLeft(1); // *2
+ }
+}
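+
+// Editorial worked example (not upstream code): for v == 2.0 we have
+// significand == 2^52 and exponent == -51 (v == 2^52 * 2^-51), and
+// EstimatePower yields 1. The negative-exponent/positive-power case then
+// gives numerator == 2^52 and denominator == 10^1 * 2^51, so
+// numerator / denominator == 0.2 == v / 10^estimated_power, as required.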
+
+
+// This routine multiplies numerator/denominator so that its value lies in the
+// range 1-10. That is, after a call to this function we have:
+//    1 <= (numerator + delta_plus) / denominator < 10.
+// Let numerator be the input before modification and numerator' the value
+// after modification; then the output parameter decimal_point is such that
+//  numerator / denominator * 10^estimated_power ==
+//    numerator' / denominator' * 10^(decimal_point - 1)
+// In some cases estimated_power was too low by one and the ratio is already in
+// range. We then simply bump the returned decimal_point so that
+//    10^(k-1) <= v < 10^k holds for k == decimal_point,
+// but do not touch the numerator or denominator.
+// Otherwise the routine multiplies the numerator and the deltas by 10.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ bool in_range;
+ if (is_even) {
+ // For IEEE doubles half-way cases (in decimal system numbers ending with 5)
+ // are rounded to the closest floating-point number with even significand.
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (in_range) {
+ // Since numerator + delta_plus >= denominator we already have
+ // 1 <= numerator/denominator < 10. Simply update the estimated_power.
+ *decimal_point = estimated_power + 1;
+ } else {
+ *decimal_point = estimated_power;
+ numerator->Times10();
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_minus->Times10();
+ delta_plus->AssignBignum(*delta_minus);
+ } else {
+ delta_minus->Times10();
+ delta_plus->Times10();
+ }
+ }
+}
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/bignum-dtoa.h b/mfbt/double-conversion/double-conversion/bignum-dtoa.h
new file mode 100644
index 0000000000..34b961992d
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/bignum-dtoa.h
@@ -0,0 +1,84 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+enum BignumDtoaMode {
+ // Return the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate but
+ // correct) 0.3.
+ BIGNUM_DTOA_SHORTEST,
+ // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
+ BIGNUM_DTOA_SHORTEST_SINGLE,
+ // Return a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ BIGNUM_DTOA_FIXED,
+ // Return a fixed number of digits, no matter what the exponent is.
+ BIGNUM_DTOA_PRECISION
+};
+
+// Converts the given double 'v' to ascii.
+// The result should be interpreted as buffer * 10^(point-length).
+// The buffer will be null-terminated.
+//
+// The input v must be > 0 and different from NaN, and Infinity.
+//
+// The output depends on the given mode:
+//  - SHORTEST: produce the fewest digits for which the internal
+//   identity requirement is still satisfied. If the digits are printed
+//   (together with the correct exponent) then reading this number will give
+//   'v' again. The buffer will choose the representation that is closest to
+//   'v'. If there are two at the same distance, then the number is rounded up.
+//   In this mode the 'requested_digits' parameter is ignored.
+// - FIXED: produces digits necessary to print a given number with
+// 'requested_digits' digits after the decimal point. The produced digits
+// might be too short in which case the caller has to fill the gaps with '0's.
+// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
+// buffer="2", point=0.
+// Note: the length of the returned buffer has no meaning wrt the significance
+// of its digits. That is, just because it contains '0's does not mean that
+// any other digit would not satisfy the internal identity requirement.
+// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+// Even though the length of produced digits usually equals
+// 'requested_digits', the function is allowed to return fewer digits, in
+// which case the caller has to fill the missing digits with '0's.
+// Halfway cases are again rounded up.
+// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
+// and a terminating null-character.
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* point);
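+
+// Illustrative call sketch (editorial, not upstream code); the buffer size of
+// 32 is an arbitrary but comfortable choice for SHORTEST mode:
+//
+//   char digits[32];
+//   int length, point;
+//   BignumDtoa(0.1, BIGNUM_DTOA_SHORTEST, 0,
+//              Vector<char>(digits, 32), &length, &point);
+//   // digits == "1", length == 1, point == 0, so the value reads as
+//   // 1 * 10^(point - length) == 0.1.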
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
diff --git a/mfbt/double-conversion/double-conversion/bignum.cc b/mfbt/double-conversion/double-conversion/bignum.cc
new file mode 100644
index 0000000000..5c74d70d3d
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/bignum.cc
@@ -0,0 +1,797 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <cstring>
+
+#include "bignum.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+Bignum::Chunk& Bignum::RawBigit(const int index) {
+ DOUBLE_CONVERSION_ASSERT(static_cast<unsigned>(index) < kBigitCapacity);
+ return bigits_buffer_[index];
+}
+
+
+const Bignum::Chunk& Bignum::RawBigit(const int index) const {
+ DOUBLE_CONVERSION_ASSERT(static_cast<unsigned>(index) < kBigitCapacity);
+ return bigits_buffer_[index];
+}
+
+
+template<typename S>
+static int BitSize(const S value) {
+ (void) value; // Mark variable as used.
+ return 8 * sizeof(value);
+}
+
+// Guaranteed to lie in one Bigit.
+void Bignum::AssignUInt16(const uint16_t value) {
+ DOUBLE_CONVERSION_ASSERT(kBigitSize >= BitSize(value));
+ Zero();
+ if (value > 0) {
+ RawBigit(0) = value;
+ used_bigits_ = 1;
+ }
+}
+
+
+void Bignum::AssignUInt64(uint64_t value) {
+ Zero();
+ for(int i = 0; value > 0; ++i) {
+ RawBigit(i) = value & kBigitMask;
+ value >>= kBigitSize;
+ ++used_bigits_;
+ }
+}
+
+
+void Bignum::AssignBignum(const Bignum& other) {
+ exponent_ = other.exponent_;
+ for (int i = 0; i < other.used_bigits_; ++i) {
+ RawBigit(i) = other.RawBigit(i);
+ }
+ used_bigits_ = other.used_bigits_;
+}
+
+
+static uint64_t ReadUInt64(const Vector<const char> buffer,
+ const int from,
+ const int digits_to_read) {
+ uint64_t result = 0;
+ for (int i = from; i < from + digits_to_read; ++i) {
+ const int digit = buffer[i] - '0';
+ DOUBLE_CONVERSION_ASSERT(0 <= digit && digit <= 9);
+ result = result * 10 + digit;
+ }
+ return result;
+}
+
+
+void Bignum::AssignDecimalString(const Vector<const char> value) {
+ // 2^64 = 18446744073709551616 > 10^19
+ static const int kMaxUint64DecimalDigits = 19;
+ Zero();
+ int length = value.length();
+ unsigned pos = 0;
+ // Let's just say that each digit needs 4 bits.
+ while (length >= kMaxUint64DecimalDigits) {
+ const uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
+ pos += kMaxUint64DecimalDigits;
+ length -= kMaxUint64DecimalDigits;
+ MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
+ AddUInt64(digits);
+ }
+ const uint64_t digits = ReadUInt64(value, pos, length);
+ MultiplyByPowerOfTen(length);
+ AddUInt64(digits);
+ Clamp();
+}
+
+
+static uint64_t HexCharValue(const int c) {
+ if ('0' <= c && c <= '9') {
+ return c - '0';
+ }
+ if ('a' <= c && c <= 'f') {
+ return 10 + c - 'a';
+ }
+ DOUBLE_CONVERSION_ASSERT('A' <= c && c <= 'F');
+ return 10 + c - 'A';
+}
+
+
+// Unlike AssignDecimalString(), this function is "only" used
+// for unit-tests and therefore not performance critical.
+void Bignum::AssignHexString(Vector<const char> value) {
+ Zero();
+ // Required capacity could be reduced by ignoring leading zeros.
+ EnsureCapacity(((value.length() * 4) + kBigitSize - 1) / kBigitSize);
+ DOUBLE_CONVERSION_ASSERT(sizeof(uint64_t) * 8 >= kBigitSize + 4); // TODO: static_assert
+ // Accumulates converted hex digits until at least kBigitSize bits.
+ // Works with non-factor-of-four kBigitSizes.
+ uint64_t tmp = 0;
+ for (int cnt = 0; !value.is_empty(); value.pop_back()) {
+ tmp |= (HexCharValue(value.last()) << cnt);
+ if ((cnt += 4) >= kBigitSize) {
+ RawBigit(used_bigits_++) = (tmp & kBigitMask);
+ cnt -= kBigitSize;
+ tmp >>= kBigitSize;
+ }
+ }
+ if (tmp > 0) {
+ DOUBLE_CONVERSION_ASSERT(tmp <= kBigitMask);
+ RawBigit(used_bigits_++) = static_cast<Bignum::Chunk>(tmp & kBigitMask);
+ }
+ Clamp();
+}
+
+
+void Bignum::AddUInt64(const uint64_t operand) {
+ if (operand == 0) {
+ return;
+ }
+ Bignum other;
+ other.AssignUInt64(operand);
+ AddBignum(other);
+}
+
+
+void Bignum::AddBignum(const Bignum& other) {
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+ DOUBLE_CONVERSION_ASSERT(other.IsClamped());
+
+ // If this has a greater exponent than other append zero-bigits to this.
+ // After this call exponent_ <= other.exponent_.
+ Align(other);
+
+ // There are two possibilities:
+ // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
+ // bbbbb 00000000
+ // ----------------
+ // ccccccccccc 0000
+ // or
+ // aaaaaaaaaa 0000
+ // bbbbbbbbb 0000000
+ // -----------------
+ // cccccccccccc 0000
+ // In both cases we might need a carry bigit.
+
+ EnsureCapacity(1 + (std::max)(BigitLength(), other.BigitLength()) - exponent_);
+ Chunk carry = 0;
+ int bigit_pos = other.exponent_ - exponent_;
+ DOUBLE_CONVERSION_ASSERT(bigit_pos >= 0);
+ for (int i = used_bigits_; i < bigit_pos; ++i) {
+ RawBigit(i) = 0;
+ }
+ for (int i = 0; i < other.used_bigits_; ++i) {
+ const Chunk my = (bigit_pos < used_bigits_) ? RawBigit(bigit_pos) : 0;
+ const Chunk sum = my + other.RawBigit(i) + carry;
+ RawBigit(bigit_pos) = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ ++bigit_pos;
+ }
+ while (carry != 0) {
+ const Chunk my = (bigit_pos < used_bigits_) ? RawBigit(bigit_pos) : 0;
+ const Chunk sum = my + carry;
+ RawBigit(bigit_pos) = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ ++bigit_pos;
+ }
+ used_bigits_ = static_cast<int16_t>(std::max(bigit_pos, static_cast<int>(used_bigits_)));
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+}
+
+
+void Bignum::SubtractBignum(const Bignum& other) {
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+ DOUBLE_CONVERSION_ASSERT(other.IsClamped());
+ // We require this to be bigger than other.
+ DOUBLE_CONVERSION_ASSERT(LessEqual(other, *this));
+
+ Align(other);
+
+ const int offset = other.exponent_ - exponent_;
+ Chunk borrow = 0;
+ int i;
+ for (i = 0; i < other.used_bigits_; ++i) {
+ DOUBLE_CONVERSION_ASSERT((borrow == 0) || (borrow == 1));
+ const Chunk difference = RawBigit(i + offset) - other.RawBigit(i) - borrow;
+ RawBigit(i + offset) = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ while (borrow != 0) {
+ const Chunk difference = RawBigit(i + offset) - borrow;
+ RawBigit(i + offset) = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ ++i;
+ }
+ Clamp();
+}
+
+
+void Bignum::ShiftLeft(const int shift_amount) {
+ if (used_bigits_ == 0) {
+ return;
+ }
+ exponent_ += static_cast<int16_t>(shift_amount / kBigitSize);
+ const int local_shift = shift_amount % kBigitSize;
+ EnsureCapacity(used_bigits_ + 1);
+ BigitsShiftLeft(local_shift);
+}
+
+
+void Bignum::MultiplyByUInt32(const uint32_t factor) {
+ if (factor == 1) {
+ return;
+ }
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ if (used_bigits_ == 0) {
+ return;
+ }
+ // The product of a bigit with the factor is of size kBigitSize + 32.
+ // Assert that this number + 1 (for the carry) fits into double chunk.
+ DOUBLE_CONVERSION_ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
+ DoubleChunk carry = 0;
+ for (int i = 0; i < used_bigits_; ++i) {
+ const DoubleChunk product = static_cast<DoubleChunk>(factor) * RawBigit(i) + carry;
+ RawBigit(i) = static_cast<Chunk>(product & kBigitMask);
+ carry = (product >> kBigitSize);
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_bigits_ + 1);
+ RawBigit(used_bigits_) = carry & kBigitMask;
+ used_bigits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByUInt64(const uint64_t factor) {
+ if (factor == 1) {
+ return;
+ }
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ if (used_bigits_ == 0) {
+ return;
+ }
+ DOUBLE_CONVERSION_ASSERT(kBigitSize < 32);
+ uint64_t carry = 0;
+ const uint64_t low = factor & 0xFFFFFFFF;
+ const uint64_t high = factor >> 32;
+ for (int i = 0; i < used_bigits_; ++i) {
+ const uint64_t product_low = low * RawBigit(i);
+ const uint64_t product_high = high * RawBigit(i);
+ const uint64_t tmp = (carry & kBigitMask) + product_low;
+ RawBigit(i) = tmp & kBigitMask;
+ carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
+ (product_high << (32 - kBigitSize));
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_bigits_ + 1);
+ RawBigit(used_bigits_) = carry & kBigitMask;
+ used_bigits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByPowerOfTen(const int exponent) {
+ static const uint64_t kFive27 = DOUBLE_CONVERSION_UINT64_2PART_C(0x6765c793, fa10079d);
+ static const uint16_t kFive1 = 5;
+ static const uint16_t kFive2 = kFive1 * 5;
+ static const uint16_t kFive3 = kFive2 * 5;
+ static const uint16_t kFive4 = kFive3 * 5;
+ static const uint16_t kFive5 = kFive4 * 5;
+ static const uint16_t kFive6 = kFive5 * 5;
+ static const uint32_t kFive7 = kFive6 * 5;
+ static const uint32_t kFive8 = kFive7 * 5;
+ static const uint32_t kFive9 = kFive8 * 5;
+ static const uint32_t kFive10 = kFive9 * 5;
+ static const uint32_t kFive11 = kFive10 * 5;
+ static const uint32_t kFive12 = kFive11 * 5;
+ static const uint32_t kFive13 = kFive12 * 5;
+ static const uint32_t kFive1_to_12[] =
+ { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
+ kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
+
+ DOUBLE_CONVERSION_ASSERT(exponent >= 0);
+
+ if (exponent == 0) {
+ return;
+ }
+ if (used_bigits_ == 0) {
+ return;
+ }
+ // Since 10^e == 5^e * 2^e, we multiply by powers of 5 here and shift left
+ // by exponent (i.e. multiply by 2^exponent) just before returning.
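+ // Worked example: exponent == 30 multiplies by kFive27 once, then by
+ // kFive1_to_12[2] (== 5^3), and finally shifts left by 30 bits.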
+ int remaining_exponent = exponent;
+ while (remaining_exponent >= 27) {
+ MultiplyByUInt64(kFive27);
+ remaining_exponent -= 27;
+ }
+ while (remaining_exponent >= 13) {
+ MultiplyByUInt32(kFive13);
+ remaining_exponent -= 13;
+ }
+ if (remaining_exponent > 0) {
+ MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
+ }
+ ShiftLeft(exponent);
+}
+
+
+void Bignum::Square() {
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+ const int product_length = 2 * used_bigits_;
+ EnsureCapacity(product_length);
+
+ // Comba multiplication: compute each column separately.
+ // Example: r = a2a1a0 * b2b1b0.
+ // r = 1 * a0b0 +
+ // 10 * (a1b0 + a0b1) +
+ // 100 * (a2b0 + a1b1 + a0b2) +
+ // 1000 * (a2b1 + a1b2) +
+ // 10000 * a2b2
+ //
+ // In the worst case we have to accumulate used_bigits_ products of
+ // bigit*bigit.
+ //
+ // Assert that the extra bits in a DoubleChunk are enough to sum up
+ // used_bigits_ products of Bigit*Bigit.
+ if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_bigits_) {
+ DOUBLE_CONVERSION_UNIMPLEMENTED();
+ }
+ DoubleChunk accumulator = 0;
+ // First shift the digits so we don't overwrite them.
+ const int copy_offset = used_bigits_;
+ for (int i = 0; i < used_bigits_; ++i) {
+ RawBigit(copy_offset + i) = RawBigit(i);
+ }
+ // We have two loops to avoid some 'if's in the loop.
+ for (int i = 0; i < used_bigits_; ++i) {
+ // Process temporary digit i with power i.
+ // The sum of the two indices must be equal to i.
+ int bigit_index1 = i;
+ int bigit_index2 = 0;
+ // Sum all of the sub-products.
+ while (bigit_index1 >= 0) {
+ const Chunk chunk1 = RawBigit(copy_offset + bigit_index1);
+ const Chunk chunk2 = RawBigit(copy_offset + bigit_index2);
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ RawBigit(i) = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ for (int i = used_bigits_; i < product_length; ++i) {
+ int bigit_index1 = used_bigits_ - 1;
+ int bigit_index2 = i - bigit_index1;
+ // Invariant: sum of both indices is again equal to i.
+ // Inner loop runs 0 times on last iteration, emptying accumulator.
+ while (bigit_index2 < used_bigits_) {
+ const Chunk chunk1 = RawBigit(copy_offset + bigit_index1);
+ const Chunk chunk2 = RawBigit(copy_offset + bigit_index2);
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ // The overwritten RawBigit(i) will never be read in further loop iterations,
+ // because bigit_index1 and bigit_index2 are always greater
+ // than i - used_bigits_.
+ RawBigit(i) = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ // Since the result was guaranteed to lie inside the number the
+ // accumulator must be 0 now.
+ DOUBLE_CONVERSION_ASSERT(accumulator == 0);
+
+ // Don't forget to update used_bigits_ and the exponent.
+ used_bigits_ = static_cast<int16_t>(product_length);
+ exponent_ *= 2;
+ Clamp();
+}
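+
+
+// For reference, a minimal stand-alone sketch of the Comba (column-wise)
+// scheme used by Square() above, squaring a little-endian base-10 digit
+// array. It is illustrative only and not part of the library.
+#if 0
+static void CombaSquareBase10(const int* digits, int n, int* product /* 2n */) {
+  long long accumulator = 0;
+  for (int column = 0; column < 2 * n; ++column) {
+    // Sum every digits[i] * digits[column - i] with both indices in range.
+    const int low = column < n ? 0 : column - n + 1;
+    const int high = column < n ? column : n - 1;
+    for (int i = low; i <= high; ++i) {
+      accumulator += static_cast<long long>(digits[i]) * digits[column - i];
+    }
+    product[column] = static_cast<int>(accumulator % 10);  // Keep one digit.
+    accumulator /= 10;                                      // Carry the rest.
+  }
+}
+#endif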
+
+
+void Bignum::AssignPowerUInt16(uint16_t base, const int power_exponent) {
+ DOUBLE_CONVERSION_ASSERT(base != 0);
+ DOUBLE_CONVERSION_ASSERT(power_exponent >= 0);
+ if (power_exponent == 0) {
+ AssignUInt16(1);
+ return;
+ }
+ Zero();
+ int shifts = 0;
+ // We expect base to be in the range 2-32, and most often to be 10.
+ // It does not make much sense to implement different algorithms for counting
+ // the bits.
+ while ((base & 1) == 0) {
+ base >>= 1;
+ shifts++;
+ }
+ int bit_size = 0;
+ int tmp_base = base;
+ while (tmp_base != 0) {
+ tmp_base >>= 1;
+ bit_size++;
+ }
+ const int final_size = bit_size * power_exponent;
+ // One extra bigit for the shifting, and one because final_size is rounded down.
+ EnsureCapacity(final_size / kBigitSize + 2);
+
+ // Left to Right exponentiation.
+ int mask = 1;
+ while (power_exponent >= mask) mask <<= 1;
+
+ // The mask now points to the bit above the most significant 1-bit of
+ // power_exponent. Skip that 1-bit (it is consumed by initializing
+ // this_value to base), hence the shift by 2 rather than 1.
+ mask >>= 2;
+ uint64_t this_value = base;
+
+ bool delayed_multiplication = false;
+ const uint64_t max_32bits = 0xFFFFFFFF;
+ while (mask != 0 && this_value <= max_32bits) {
+ this_value = this_value * this_value;
+ // Verify that there is enough head room in this_value to multiply by
+ // base: the topmost bit_size bits must be 0.
+ if ((power_exponent & mask) != 0) {
+ DOUBLE_CONVERSION_ASSERT(bit_size > 0);
+ const uint64_t base_bits_mask =
+ ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
+ const bool high_bits_zero = (this_value & base_bits_mask) == 0;
+ if (high_bits_zero) {
+ this_value *= base;
+ } else {
+ delayed_multiplication = true;
+ }
+ }
+ mask >>= 1;
+ }
+ AssignUInt64(this_value);
+ if (delayed_multiplication) {
+ MultiplyByUInt32(base);
+ }
+
+ // Now continue the exponentiation with full bignum arithmetic.
+ while (mask != 0) {
+ Square();
+ if ((power_exponent & mask) != 0) {
+ MultiplyByUInt32(base);
+ }
+ mask >>= 1;
+ }
+
+ // And finally add the saved shifts.
+ ShiftLeft(shifts * power_exponent);
+}
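+
+
+// For reference, the left-to-right (square-and-multiply) exponentiation used
+// above, sketched on a plain uint64_t without the overflow handling. This is
+// illustrative only and assumes the result fits into 64 bits.
+#if 0
+static uint64_t PowLeftToRight(uint64_t base, int exponent) {
+  if (exponent == 0) return 1;
+  int mask = 1;
+  while (exponent >= mask) mask <<= 1;
+  mask >>= 2;  // Skip the leading 1-bit; it is covered by the initial value.
+  uint64_t result = base;
+  while (mask != 0) {
+    result *= result;  // Square once per remaining exponent bit.
+    if ((exponent & mask) != 0) {
+      result *= base;  // Multiply on 1-bits.
+    }
+    mask >>= 1;
+  }
+  return result;
+}
+#endif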
+
+
+// Precondition: this / other fits into 16 bits.
+uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+ DOUBLE_CONVERSION_ASSERT(other.IsClamped());
+ DOUBLE_CONVERSION_ASSERT(other.used_bigits_ > 0);
+
+ // Easy case: if we have fewer digits than the divisor then the result is 0.
+ // Note: this handles the case where this == 0, too.
+ if (BigitLength() < other.BigitLength()) {
+ return 0;
+ }
+
+ Align(other);
+
+ uint16_t result = 0;
+
+ // Start by removing multiples of 'other' until both numbers have the same
+ // number of digits.
+ while (BigitLength() > other.BigitLength()) {
+ // This naive approach is extremely inefficient if `this` divided by other
+ // is big. This function is implemented for double-to-string conversion,
+ // where the result is expected to be small (less than 10).
+ DOUBLE_CONVERSION_ASSERT(other.RawBigit(other.used_bigits_ - 1) >= ((1 << kBigitSize) / 16));
+ DOUBLE_CONVERSION_ASSERT(RawBigit(used_bigits_ - 1) < 0x10000);
+ // Remove the multiples of the first digit.
+ // Example: this = 23 and other = 9 -> remove 2 multiples.
+ result += static_cast<uint16_t>(RawBigit(used_bigits_ - 1));
+ SubtractTimes(other, RawBigit(used_bigits_ - 1));
+ }
+
+ DOUBLE_CONVERSION_ASSERT(BigitLength() == other.BigitLength());
+
+ // Both bignums are at the same length now.
+ // Since other has more than 0 digits we know that the access to
+ // RawBigit(used_bigits_ - 1) is safe.
+ const Chunk this_bigit = RawBigit(used_bigits_ - 1);
+ const Chunk other_bigit = other.RawBigit(other.used_bigits_ - 1);
+
+ if (other.used_bigits_ == 1) {
+ // Shortcut for easy (and common) case.
+ int quotient = this_bigit / other_bigit;
+ RawBigit(used_bigits_ - 1) = this_bigit - other_bigit * quotient;
+ DOUBLE_CONVERSION_ASSERT(quotient < 0x10000);
+ result += static_cast<uint16_t>(quotient);
+ Clamp();
+ return result;
+ }
+
+ const int division_estimate = this_bigit / (other_bigit + 1);
+ DOUBLE_CONVERSION_ASSERT(division_estimate < 0x10000);
+ result += static_cast<uint16_t>(division_estimate);
+ SubtractTimes(other, division_estimate);
+
+ if (other_bigit * (division_estimate + 1) > this_bigit) {
+ // No need to even try to subtract. Even if other's remaining digits were 0
+ // another subtraction would be too much.
+ return result;
+ }
+
+ while (LessEqual(other, *this)) {
+ SubtractBignum(other);
+ result++;
+ }
+ return result;
+}
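+
+
+// In bignum-dtoa this routine peels off one decimal digit at a time, roughly:
+//   digit = numerator.DivideModuloIntBignum(denominator);
+//   numerator.Times10();
+// which is why the quotient is expected to stay small (typically below 10).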
+
+
+template<typename S>
+static int SizeInHexChars(S number) {
+ DOUBLE_CONVERSION_ASSERT(number > 0);
+ int result = 0;
+ while (number != 0) {
+ number >>= 4;
+ result++;
+ }
+ return result;
+}
+
+
+static char HexCharOfValue(const int value) {
+ DOUBLE_CONVERSION_ASSERT(0 <= value && value < 16);
+ if (value < 10) {
+ return static_cast<char>(value + '0');
+ }
+ return static_cast<char>(value - 10 + 'A');
+}
+
+
+bool Bignum::ToHexString(char* buffer, const int buffer_size) const {
+ DOUBLE_CONVERSION_ASSERT(IsClamped());
+ // Each bigit must be printable as a separate hex character.
+ DOUBLE_CONVERSION_ASSERT(kBigitSize % 4 == 0);
+ static const int kHexCharsPerBigit = kBigitSize / 4;
+
+ if (used_bigits_ == 0) {
+ if (buffer_size < 2) {
+ return false;
+ }
+ buffer[0] = '0';
+ buffer[1] = '\0';
+ return true;
+ }
+ // We add 1 for the terminating '\0' character.
+ const int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
+ SizeInHexChars(RawBigit(used_bigits_ - 1)) + 1;
+ if (needed_chars > buffer_size) {
+ return false;
+ }
+ int string_index = needed_chars - 1;
+ buffer[string_index--] = '\0';
+ for (int i = 0; i < exponent_; ++i) {
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = '0';
+ }
+ }
+ for (int i = 0; i < used_bigits_ - 1; ++i) {
+ Chunk current_bigit = RawBigit(i);
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
+ current_bigit >>= 4;
+ }
+ }
+ // And finally the last bigit.
+ Chunk most_significant_bigit = RawBigit(used_bigits_ - 1);
+ while (most_significant_bigit != 0) {
+ buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
+ most_significant_bigit >>= 4;
+ }
+ return true;
+}
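+
+
+// Usage sketch: after AssignUInt64(0x12345), calling
+// ToHexString(buffer, buffer_size) with buffer_size >= 6 writes "12345"
+// (most significant nibble first) and returns true.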
+
+
+Bignum::Chunk Bignum::BigitOrZero(const int index) const {
+ if (index >= BigitLength()) {
+ return 0;
+ }
+ if (index < exponent_) {
+ return 0;
+ }
+ return RawBigit(index - exponent_);
+}
+
+
+int Bignum::Compare(const Bignum& a, const Bignum& b) {
+ DOUBLE_CONVERSION_ASSERT(a.IsClamped());
+ DOUBLE_CONVERSION_ASSERT(b.IsClamped());
+ const int bigit_length_a = a.BigitLength();
+ const int bigit_length_b = b.BigitLength();
+ if (bigit_length_a < bigit_length_b) {
+ return -1;
+ }
+ if (bigit_length_a > bigit_length_b) {
+ return +1;
+ }
+ for (int i = bigit_length_a - 1; i >= (std::min)(a.exponent_, b.exponent_); --i) {
+ const Chunk bigit_a = a.BigitOrZero(i);
+ const Chunk bigit_b = b.BigitOrZero(i);
+ if (bigit_a < bigit_b) {
+ return -1;
+ }
+ if (bigit_a > bigit_b) {
+ return +1;
+ }
+ // Otherwise they are equal up to this digit. Try the next digit.
+ }
+ return 0;
+}
+
+
+int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
+ DOUBLE_CONVERSION_ASSERT(a.IsClamped());
+ DOUBLE_CONVERSION_ASSERT(b.IsClamped());
+ DOUBLE_CONVERSION_ASSERT(c.IsClamped());
+ if (a.BigitLength() < b.BigitLength()) {
+ return PlusCompare(b, a, c);
+ }
+ if (a.BigitLength() + 1 < c.BigitLength()) {
+ return -1;
+ }
+ if (a.BigitLength() > c.BigitLength()) {
+ return +1;
+ }
+ // The exponent encodes 0-bigits. So if there are more 0-bigits in 'a' than
+ // 'b' has bigits in total, then the bigit-length of 'a'+'b' must equal the
+ // bigit-length of 'a'.
+ if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
+ return -1;
+ }
+
+ Chunk borrow = 0;
+ // Below min_exponent all bigits of a, b and c are 0, so there is no need
+ // to compare them.
+ const int min_exponent = (std::min)((std::min)(a.exponent_, b.exponent_), c.exponent_);
+ for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
+ const Chunk chunk_a = a.BigitOrZero(i);
+ const Chunk chunk_b = b.BigitOrZero(i);
+ const Chunk chunk_c = c.BigitOrZero(i);
+ const Chunk sum = chunk_a + chunk_b;
+ if (sum > chunk_c + borrow) {
+ return +1;
+ } else {
+ borrow = chunk_c + borrow - sum;
+ if (borrow > 1) {
+ return -1;
+ }
+ borrow <<= kBigitSize;
+ }
+ }
+ if (borrow == 0) {
+ return 0;
+ }
+ return -1;
+}
+
+
+void Bignum::Clamp() {
+ while (used_bigits_ > 0 && RawBigit(used_bigits_ - 1) == 0) {
+ used_bigits_--;
+ }
+ if (used_bigits_ == 0) {
+ // Zero.
+ exponent_ = 0;
+ }
+}
+
+
+void Bignum::Align(const Bignum& other) {
+ if (exponent_ > other.exponent_) {
+ // If "X" represents a "hidden" bigit (by the exponent) then we are in the
+ // following case (a == this, b == other):
+ // a: aaaaaaXXXX or a: aaaaaXXX
+ // b: bbbbbbX b: bbbbbbbbXX
+ // We replace some of the hidden digits (X) of a with 0 digits.
+ // a: aaaaaa000X or a: aaaaa0XX
+ const int zero_bigits = exponent_ - other.exponent_;
+ EnsureCapacity(used_bigits_ + zero_bigits);
+ for (int i = used_bigits_ - 1; i >= 0; --i) {
+ RawBigit(i + zero_bigits) = RawBigit(i);
+ }
+ for (int i = 0; i < zero_bigits; ++i) {
+ RawBigit(i) = 0;
+ }
+ used_bigits_ += static_cast<int16_t>(zero_bigits);
+ exponent_ -= static_cast<int16_t>(zero_bigits);
+
+ DOUBLE_CONVERSION_ASSERT(used_bigits_ >= 0);
+ DOUBLE_CONVERSION_ASSERT(exponent_ >= 0);
+ }
+}
+
+
+void Bignum::BigitsShiftLeft(const int shift_amount) {
+ DOUBLE_CONVERSION_ASSERT(shift_amount < kBigitSize);
+ DOUBLE_CONVERSION_ASSERT(shift_amount >= 0);
+ Chunk carry = 0;
+ for (int i = 0; i < used_bigits_; ++i) {
+ const Chunk new_carry = RawBigit(i) >> (kBigitSize - shift_amount);
+ RawBigit(i) = ((RawBigit(i) << shift_amount) + carry) & kBigitMask;
+ carry = new_carry;
+ }
+ if (carry != 0) {
+ RawBigit(used_bigits_) = carry;
+ used_bigits_++;
+ }
+}
+
+
+void Bignum::SubtractTimes(const Bignum& other, const int factor) {
+ DOUBLE_CONVERSION_ASSERT(exponent_ <= other.exponent_);
+ if (factor < 3) {
+ for (int i = 0; i < factor; ++i) {
+ SubtractBignum(other);
+ }
+ return;
+ }
+ Chunk borrow = 0;
+ const int exponent_diff = other.exponent_ - exponent_;
+ for (int i = 0; i < other.used_bigits_; ++i) {
+ const DoubleChunk product = static_cast<DoubleChunk>(factor) * other.RawBigit(i);
+ const DoubleChunk remove = borrow + product;
+ const Chunk difference = RawBigit(i + exponent_diff) - (remove & kBigitMask);
+ RawBigit(i + exponent_diff) = difference & kBigitMask;
+ borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
+ (remove >> kBigitSize));
+ }
+ for (int i = other.used_bigits_ + exponent_diff; i < used_bigits_; ++i) {
+ if (borrow == 0) {
+ return;
+ }
+ const Chunk difference = RawBigit(i) - borrow;
+ RawBigit(i) = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ Clamp();
+}
+
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/bignum.h b/mfbt/double-conversion/double-conversion/bignum.h
new file mode 100644
index 0000000000..14d1ca86fc
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/bignum.h
@@ -0,0 +1,152 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_H_
+#define DOUBLE_CONVERSION_BIGNUM_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+class Bignum {
+ public:
+ // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
+ // This bignum can encode much bigger numbers, since it contains an
+ // exponent.
+ static const int kMaxSignificantBits = 3584;
+
+ Bignum() : used_bigits_(0), exponent_(0) {}
+
+ void AssignUInt16(const uint16_t value);
+ void AssignUInt64(uint64_t value);
+ void AssignBignum(const Bignum& other);
+
+ void AssignDecimalString(const Vector<const char> value);
+ void AssignHexString(const Vector<const char> value);
+
+ void AssignPowerUInt16(uint16_t base, const int exponent);
+
+ void AddUInt64(const uint64_t operand);
+ void AddBignum(const Bignum& other);
+ // Precondition: this >= other.
+ void SubtractBignum(const Bignum& other);
+
+ void Square();
+ void ShiftLeft(const int shift_amount);
+ void MultiplyByUInt32(const uint32_t factor);
+ void MultiplyByUInt64(const uint64_t factor);
+ void MultiplyByPowerOfTen(const int exponent);
+ void Times10() { MultiplyByUInt32(10); }
+ // Pseudocode:
+ // int result = this / other;
+ // this = this % other;
+ // In the worst case this function is in O(this/other).
+ uint16_t DivideModuloIntBignum(const Bignum& other);
+
+ bool ToHexString(char* buffer, const int buffer_size) const;
+
+ // Returns
+ // -1 if a < b,
+ // 0 if a == b, and
+ // +1 if a > b.
+ static int Compare(const Bignum& a, const Bignum& b);
+ static bool Equal(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) == 0;
+ }
+ static bool LessEqual(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) <= 0;
+ }
+ static bool Less(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) < 0;
+ }
+ // Returns Compare(a + b, c);
+ static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
+ // Returns a + b == c
+ static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) == 0;
+ }
+ // Returns a + b <= c
+ static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) <= 0;
+ }
+ // Returns a + b < c
+ static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) < 0;
+ }
+ private:
+ typedef uint32_t Chunk;
+ typedef uint64_t DoubleChunk;
+
+ static const int kChunkSize = sizeof(Chunk) * 8;
+ static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
+ // With a bigit size of 28 we lose some bits, but a double still fits easily
+ // into two chunks, and more importantly we can use the Comba multiplication.
+ static const int kBigitSize = 28;
+ static const Chunk kBigitMask = (1 << kBigitSize) - 1;
+ // Every instance allocates kBigitCapacity chunks on the stack. Bignums cannot
+ // grow. There are no checks if the stack-allocated space is sufficient.
+ static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
+
+ static void EnsureCapacity(const int size) {
+ if (size > kBigitCapacity) {
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+ }
+ void Align(const Bignum& other);
+ void Clamp();
+ bool IsClamped() const {
+ return used_bigits_ == 0 || RawBigit(used_bigits_ - 1) != 0;
+ }
+ void Zero() {
+ used_bigits_ = 0;
+ exponent_ = 0;
+ }
+ // Requires this to have enough capacity (no tests done).
+ // Updates used_bigits_ if necessary.
+ // shift_amount must be < kBigitSize.
+ void BigitsShiftLeft(const int shift_amount);
+ // BigitLength includes the "hidden" bigits encoded in the exponent.
+ int BigitLength() const { return used_bigits_ + exponent_; }
+ Chunk& RawBigit(const int index);
+ const Chunk& RawBigit(const int index) const;
+ Chunk BigitOrZero(const int index) const;
+ void SubtractTimes(const Bignum& other, const int factor);
+
+ // The Bignum's value is value(bigits_buffer_) * 2^(exponent_ * kBigitSize),
+ // where the value of the buffer consists of the lower kBigitSize bits of
+ // the first used_bigits_ Chunks in bigits_buffer_; the first chunk holds
+ // the least significant bits.
+ int16_t used_bigits_;
+ int16_t exponent_;
+ Chunk bigits_buffer_[kBigitCapacity];
+
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Bignum);
+};
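+
+// Typical usage sketch (illustrative only): two ways to build 10^20.
+//
+//   Bignum a;
+//   a.AssignPowerUInt16(10, 20);  // a == 10^20
+//   Bignum b;
+//   b.AssignUInt64(1);
+//   b.MultiplyByPowerOfTen(20);   // b == 10^20 as well
+//   DOUBLE_CONVERSION_ASSERT(Bignum::Equal(a, b));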
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_H_
diff --git a/mfbt/double-conversion/double-conversion/cached-powers.cc b/mfbt/double-conversion/double-conversion/cached-powers.cc
new file mode 100644
index 0000000000..56bdfc9d63
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/cached-powers.cc
@@ -0,0 +1,175 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <climits>
+#include <cmath>
+#include <cstdarg>
+
+#include "utils.h"
+
+#include "cached-powers.h"
+
+namespace double_conversion {
+
+namespace PowersOfTenCache {
+
+struct CachedPower {
+ uint64_t significand;
+ int16_t binary_exponent;
+ int16_t decimal_exponent;
+};
+
+static const CachedPower kCachedPowers[] = {
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xef340a98, 172aace5), -715, -196},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x84c8d4df, d2c63f3b), -661, -180},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xaa242499, 697392d3), -183, -36},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd1b71758, e219652c), -77, -4},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9c400000, 00000000), -50, 4},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x813f3978, f8940984), 30, 28},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x924d692c, a61be758), 269, 100},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xf209787b, b47d6b85), 348, 124},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa59bc234, db398c25), 508, 172},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd01fef10, a657842c), 800, 260},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332},
+ {DOUBLE_CONVERSION_UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+};
+
+static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
+static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / log2(10)
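+
+// Index arithmetic sketch: the decimal exponents above run from -348 upward
+// in steps of kDecimalExponentDistance (8), so entry i holds
+// decimal_exponent == -348 + 8 * i. For example, requested_exponent == 4
+// maps to index (4 + 348) / 8 == 44, whose decimal_exponent is 4.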
+
+void GetCachedPowerForBinaryExponentRange(
+ int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent) {
+ const int kQ = DiyFp::kSignificandSize;
+ const double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
+ const int index =
+ (kCachedPowersOffset + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
+ DOUBLE_CONVERSION_ASSERT(0 <= index && index < static_cast<int>(DOUBLE_CONVERSION_ARRAY_SIZE(kCachedPowers)));
+ CachedPower cached_power = kCachedPowers[index];
+ DOUBLE_CONVERSION_ASSERT(min_exponent <= cached_power.binary_exponent);
+ (void) max_exponent; // Only used by the assert below when asserts are enabled.
+ DOUBLE_CONVERSION_ASSERT(cached_power.binary_exponent <= max_exponent);
+ *decimal_exponent = cached_power.decimal_exponent;
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+}
+
+
+void GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent) {
+ DOUBLE_CONVERSION_ASSERT(kMinDecimalExponent <= requested_exponent);
+ DOUBLE_CONVERSION_ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
+ int index =
+ (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
+ CachedPower cached_power = kCachedPowers[index];
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+ *found_exponent = cached_power.decimal_exponent;
+ DOUBLE_CONVERSION_ASSERT(*found_exponent <= requested_exponent);
+ DOUBLE_CONVERSION_ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
+}
+
+} // namespace PowersOfTenCache
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/cached-powers.h b/mfbt/double-conversion/double-conversion/cached-powers.h
new file mode 100644
index 0000000000..f38c26d201
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/cached-powers.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
+#define DOUBLE_CONVERSION_CACHED_POWERS_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+namespace PowersOfTenCache {
+
+ // Not all powers of ten are cached. The decimal exponent of two neighboring
+ // cached numbers will differ by kDecimalExponentDistance.
+ static const int kDecimalExponentDistance = 8;
+
+ static const int kMinDecimalExponent = -348;
+ static const int kMaxDecimalExponent = 340;
+
+ // Returns a cached power-of-ten with a binary exponent in the range
+ // [min_exponent; max_exponent] (boundaries included).
+ void GetCachedPowerForBinaryExponentRange(int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent);
+
+ // Returns a cached power of ten x ~= 10^k such that
+ // k <= requested_exponent < k + kDecimalExponentDistance.
+ // The given decimal_exponent must satisfy
+ // kMinDecimalExponent <= requested_exponent, and
+ // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
+ void GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent);
+
+} // namespace PowersOfTenCache
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_
diff --git a/mfbt/double-conversion/double-conversion/diy-fp.h b/mfbt/double-conversion/double-conversion/diy-fp.h
new file mode 100644
index 0000000000..a2200c4ded
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/diy-fp.h
@@ -0,0 +1,137 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DIY_FP_H_
+#define DOUBLE_CONVERSION_DIY_FP_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// This "Do It Yourself Floating Point" class implements a floating-point number
+// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+// have the most significant bit of the significand set.
+// Multiplication and Subtraction do not normalize their results.
+// DiyFp stores only non-negative numbers and is not designed to contain
+// special doubles (NaN and Infinity).
+class DiyFp {
+ public:
+ static const int kSignificandSize = 64;
+
+ DiyFp() : f_(0), e_(0) {}
+ DiyFp(const uint64_t significand, const int32_t exponent) : f_(significand), e_(exponent) {}
+
+ // this -= other.
+ // The exponents of both numbers must be the same and the significand of this
+ // must be greater than or equal to the significand of other.
+ // The result will not be normalized.
+ void Subtract(const DiyFp& other) {
+ DOUBLE_CONVERSION_ASSERT(e_ == other.e_);
+ DOUBLE_CONVERSION_ASSERT(f_ >= other.f_);
+ f_ -= other.f_;
+ }
+
+ // Returns a - b.
+ // The exponents of both numbers must be the same and a must be greater
+ // than or equal to b. The result will not be normalized.
+ static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Subtract(b);
+ return result;
+ }
+
+ // this *= other.
+ void Multiply(const DiyFp& other) {
+ // Emulates a 128-bit multiplication, keeping only the most significant
+ // 64 bits of the result; the least significant 64 bits are used only to
+ // round the retained bits.
+ const uint64_t kM32 = 0xFFFFFFFFU;
+ const uint64_t a = f_ >> 32;
+ const uint64_t b = f_ & kM32;
+ const uint64_t c = other.f_ >> 32;
+ const uint64_t d = other.f_ & kM32;
+ const uint64_t ac = a * c;
+ const uint64_t bc = b * c;
+ const uint64_t ad = a * d;
+ const uint64_t bd = b * d;
+ // By adding 1U << 31 to tmp we round the final result.
+ // Halfway cases will be rounded up.
+ const uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31);
+ e_ += other.e_ + 64;
+ f_ = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ }
+
+ // Returns a * b.
+ static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Multiply(b);
+ return result;
+ }
+
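+ // Normalize shifts the significand until its most significant bit is set,
+ // adjusting the exponent to preserve the value. Worked example: DiyFp(3, 0)
+ // normalizes to f == 3 << 62 and e == -62.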
+ void Normalize() {
+ DOUBLE_CONVERSION_ASSERT(f_ != 0);
+ uint64_t significand = f_;
+ int32_t exponent = e_;
+
+ // This method is mainly called for normalizing boundaries. In general,
+ // boundaries need to be shifted by 10 bits, and we optimize for this case.
+ const uint64_t k10MSBits = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFC00000, 00000000);
+ while ((significand & k10MSBits) == 0) {
+ significand <<= 10;
+ exponent -= 10;
+ }
+ while ((significand & kUint64MSB) == 0) {
+ significand <<= 1;
+ exponent--;
+ }
+ f_ = significand;
+ e_ = exponent;
+ }
+
+ static DiyFp Normalize(const DiyFp& a) {
+ DiyFp result = a;
+ result.Normalize();
+ return result;
+ }
+
+ uint64_t f() const { return f_; }
+ int32_t e() const { return e_; }
+
+ void set_f(uint64_t new_value) { f_ = new_value; }
+ void set_e(int32_t new_value) { e_ = new_value; }
+
+ private:
+ static const uint64_t kUint64MSB = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000);
+
+ uint64_t f_;
+ int32_t e_;
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DIY_FP_H_
diff --git a/mfbt/double-conversion/double-conversion/double-conversion.h b/mfbt/double-conversion/double-conversion/double-conversion.h
new file mode 100644
index 0000000000..6e8884d84c
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/double-conversion.h
@@ -0,0 +1,34 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+
+#include "string-to-double.h"
+#include "double-to-string.h"
+
+#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
diff --git a/mfbt/double-conversion/double-conversion/double-to-string.cc b/mfbt/double-conversion/double-conversion/double-to-string.cc
new file mode 100644
index 0000000000..cf4550f391
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/double-to-string.cc
@@ -0,0 +1,443 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <climits>
+#include <cmath>
+
+#include "double-to-string.h"
+
+#include "bignum-dtoa.h"
+#include "fast-dtoa.h"
+#include "fixed-dtoa.h"
+#include "ieee.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
+ int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
+ static DoubleToStringConverter converter(flags,
+ "Infinity",
+ "NaN",
+ 'e',
+ -6, 21,
+ 6, 0);
+ return converter;
+}
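+
+
+// Usage sketch (illustrative only), with StringBuilder from utils.h and the
+// ToShortest entry point declared in double-to-string.h:
+//
+//   char buffer[DoubleToStringConverter::kMaxCharsEcmaScriptShortest + 1];
+//   StringBuilder builder(buffer, sizeof(buffer));
+//   DoubleToStringConverter::EcmaScriptConverter().ToShortest(3.5, &builder);
+//   // builder.Finalize() now yields "3.5".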
+
+
+bool DoubleToStringConverter::HandleSpecialValues(
+ double value,
+ StringBuilder* result_builder) const {
+ Double double_inspect(value);
+ if (double_inspect.IsInfinite()) {
+ if (infinity_symbol_ == DOUBLE_CONVERSION_NULLPTR) return false;
+ if (value < 0) {
+ result_builder->AddCharacter('-');
+ }
+ result_builder->AddString(infinity_symbol_);
+ return true;
+ }
+ if (double_inspect.IsNan()) {
+ if (nan_symbol_ == DOUBLE_CONVERSION_NULLPTR) return false;
+ result_builder->AddString(nan_symbol_);
+ return true;
+ }
+ return false;
+}
+
+
+void DoubleToStringConverter::CreateExponentialRepresentation(
+ const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const {
+ DOUBLE_CONVERSION_ASSERT(length != 0);
+ result_builder->AddCharacter(decimal_digits[0]);
+ if (length == 1) {
+ if ((flags_ & EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL) != 0) {
+ result_builder->AddCharacter('.');
+ if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL) != 0) {
+ result_builder->AddCharacter('0');
+ }
+ }
+ } else {
+ result_builder->AddCharacter('.');
+ result_builder->AddSubstring(&decimal_digits[1], length-1);
+ }
+ result_builder->AddCharacter(exponent_character_);
+ if (exponent < 0) {
+ result_builder->AddCharacter('-');
+ exponent = -exponent;
+ } else {
+ if ((flags_ & EMIT_POSITIVE_EXPONENT_SIGN) != 0) {
+ result_builder->AddCharacter('+');
+ }
+ }
+ DOUBLE_CONVERSION_ASSERT(exponent < 1e4);
+ // Changing this constant requires updating the comment of the
+ // DoubleToStringConverter constructor.
+ const int kMaxExponentLength = 5;
+ char buffer[kMaxExponentLength + 1];
+ buffer[kMaxExponentLength] = '\0';
+ int first_char_pos = kMaxExponentLength;
+ if (exponent == 0) {
+ buffer[--first_char_pos] = '0';
+ } else {
+ while (exponent > 0) {
+ buffer[--first_char_pos] = '0' + (exponent % 10);
+ exponent /= 10;
+ }
+ }
+ // Pad with leading '0's to make the exponent width at least
+ // min(min_exponent_width_, kMaxExponentLength).
+ // For example: 1e+9 is converted to 1e+09 if min_exponent_width_ is 2.
+ while (kMaxExponentLength - first_char_pos < std::min(min_exponent_width_, kMaxExponentLength)) {
+ buffer[--first_char_pos] = '0';
+ }
+ result_builder->AddSubstring(&buffer[first_char_pos],
+ kMaxExponentLength - first_char_pos);
+}
+
+
+void DoubleToStringConverter::CreateDecimalRepresentation(
+ const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const {
+ // Create a representation that is padded with zeros if needed.
+ if (decimal_point <= 0) {
+ // "0.00000decimal_rep" or "0.000decimal_rep00".
+ result_builder->AddCharacter('0');
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', -decimal_point);
+ DOUBLE_CONVERSION_ASSERT(length <= digits_after_point - (-decimal_point));
+ result_builder->AddSubstring(decimal_digits, length);
+ int remaining_digits = digits_after_point - (-decimal_point) - length;
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ } else if (decimal_point >= length) {
+ // "decimal_rep0000.00000" or "decimal_rep.0000".
+ result_builder->AddSubstring(decimal_digits, length);
+ result_builder->AddPadding('0', decimal_point - length);
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', digits_after_point);
+ }
+ } else {
+ // "decima.l_rep000".
+ DOUBLE_CONVERSION_ASSERT(digits_after_point > 0);
+ result_builder->AddSubstring(decimal_digits, decimal_point);
+ result_builder->AddCharacter('.');
+ DOUBLE_CONVERSION_ASSERT(length - decimal_point <= digits_after_point);
+ result_builder->AddSubstring(&decimal_digits[decimal_point],
+ length - decimal_point);
+ int remaining_digits = digits_after_point - (length - decimal_point);
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ if (digits_after_point == 0) {
+ if ((flags_ & EMIT_TRAILING_DECIMAL_POINT) != 0) {
+ result_builder->AddCharacter('.');
+ }
+ if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) {
+ result_builder->AddCharacter('0');
+ }
+ }
+}
+
+
+bool DoubleToStringConverter::ToShortestIeeeNumber(
+ double value,
+ StringBuilder* result_builder,
+ DoubleToStringConverter::DtoaMode mode) const {
+ DOUBLE_CONVERSION_ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE);
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ int decimal_point;
+ bool sign;
+ const int kDecimalRepCapacity = kBase10MaximalLength + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, mode, 0, decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = (flags_ & UNIQUE_ZERO) != 0;
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ if ((decimal_in_shortest_low_ <= exponent) &&
+ (exponent < decimal_in_shortest_high_)) {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length,
+ decimal_point,
+ (std::max)(0, decimal_rep_length - decimal_point),
+ result_builder);
+ } else {
+ CreateExponentialRepresentation(decimal_rep, decimal_rep_length, exponent,
+ result_builder);
+ }
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits > kMaxFixedDigitsAfterPoint) return false;
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add space for the '\0' byte.
+ const int kDecimalRepCapacity =
+ kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+ DoubleToAscii(value, FIXED, requested_digits,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ requested_digits, result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToExponential(
+ double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits < -1) return false;
+ if (requested_digits > kMaxExponentialDigits) return false;
+
+ int decimal_point;
+ bool sign;
+ // Add space for digit before the decimal point and the '\0' character.
+ const int kDecimalRepCapacity = kMaxExponentialDigits + 2;
+ DOUBLE_CONVERSION_ASSERT(kDecimalRepCapacity > kBase10MaximalLength);
+ char decimal_rep[kDecimalRepCapacity];
+#ifndef NDEBUG
+ // Problem: there is an assert in StringBuilder::AddSubstring() that
+ // will pass this buffer to strlen(), and this buffer is not generally
+ // null-terminated.
+ memset(decimal_rep, 0, sizeof(decimal_rep));
+#endif
+ int decimal_rep_length;
+
+ if (requested_digits == -1) {
+ DoubleToAscii(value, SHORTEST, 0,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ } else {
+ DoubleToAscii(value, PRECISION, requested_digits + 1,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ DOUBLE_CONVERSION_ASSERT(decimal_rep_length <= requested_digits + 1);
+
+ for (int i = decimal_rep_length; i < requested_digits + 1; ++i) {
+ decimal_rep[i] = '0';
+ }
+ decimal_rep_length = requested_digits + 1;
+ }
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ CreateExponentialRepresentation(decimal_rep,
+ decimal_rep_length,
+ exponent,
+ result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (precision < kMinPrecisionDigits || precision > kMaxPrecisionDigits) {
+ return false;
+ }
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add one for the terminating null character.
+ const int kDecimalRepCapacity = kMaxPrecisionDigits + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, PRECISION, precision,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ DOUBLE_CONVERSION_ASSERT(decimal_rep_length <= precision);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ // The exponent when the number is printed as x.xxeyyy, i.e. with the
+ // decimal point after the first digit.
+ int exponent = decimal_point - 1;
+
+ int extra_zero = ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) ? 1 : 0;
+ bool as_exponential =
+ (-decimal_point + 1 > max_leading_padding_zeroes_in_precision_mode_) ||
+ (decimal_point - precision + extra_zero >
+ max_trailing_padding_zeroes_in_precision_mode_);
+ if ((flags_ & NO_TRAILING_ZERO) != 0) {
+ // Truncate trailing zeros that occur after the decimal point (if exponential,
+ // that is everything after the first digit).
+ int stop = as_exponential ? 1 : std::max(1, decimal_point);
+ while (decimal_rep_length > stop && decimal_rep[decimal_rep_length - 1] == '0') {
+ --decimal_rep_length;
+ }
+ // Clamp precision to avoid the code below re-adding the zeros.
+ precision = std::min(precision, decimal_rep_length);
+ }
+ if (as_exponential) {
+ // Fill buffer to contain 'precision' digits.
+ // Usually the buffer is already at the correct length, but 'DoubleToAscii'
+ // is allowed to return fewer characters.
+ for (int i = decimal_rep_length; i < precision; ++i) {
+ decimal_rep[i] = '0';
+ }
+
+ CreateExponentialRepresentation(decimal_rep,
+ precision,
+ exponent,
+ result_builder);
+ } else {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ (std::max)(0, precision - decimal_point),
+ result_builder);
+ }
+ return true;
+}
+
+
+static BignumDtoaMode DtoaToBignumDtoaMode(
+ DoubleToStringConverter::DtoaMode dtoa_mode) {
+ switch (dtoa_mode) {
+ case DoubleToStringConverter::SHORTEST: return BIGNUM_DTOA_SHORTEST;
+ case DoubleToStringConverter::SHORTEST_SINGLE:
+ return BIGNUM_DTOA_SHORTEST_SINGLE;
+ case DoubleToStringConverter::FIXED: return BIGNUM_DTOA_FIXED;
+ case DoubleToStringConverter::PRECISION: return BIGNUM_DTOA_PRECISION;
+ default:
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+}
+
+
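+// Worked example: DoubleToAscii(3.5, SHORTEST, 0, buffer, buffer_length,
+// &sign, &length, &point) yields buffer == "35", length == 2, point == 1 and
+// sign == false; the caller re-inserts the decimal point using 'point'.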
+void DoubleToStringConverter::DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point) {
+ Vector<char> vector(buffer, buffer_length);
+ DOUBLE_CONVERSION_ASSERT(!Double(v).IsSpecial());
+ DOUBLE_CONVERSION_ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE || requested_digits >= 0);
+
+ if (Double(v).Sign() < 0) {
+ *sign = true;
+ v = -v;
+ } else {
+ *sign = false;
+ }
+
+ if (mode == PRECISION && requested_digits == 0) {
+ vector[0] = '\0';
+ *length = 0;
+ return;
+ }
+
+ if (v == 0) {
+ vector[0] = '0';
+ vector[1] = '\0';
+ *length = 1;
+ *point = 1;
+ return;
+ }
+
+ bool fast_worked;
+ switch (mode) {
+ case SHORTEST:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, vector, length, point);
+ break;
+ case SHORTEST_SINGLE:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST_SINGLE, 0,
+ vector, length, point);
+ break;
+ case FIXED:
+ fast_worked = FastFixedDtoa(v, requested_digits, vector, length, point);
+ break;
+ case PRECISION:
+ fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+ vector, length, point);
+ break;
+ default:
+ fast_worked = false;
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+ if (fast_worked) return;
+
+ // If the fast dtoa didn't succeed use the slower bignum version.
+ BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
+ BignumDtoa(v, bignum_mode, requested_digits, vector, length, point);
+ vector[*length] = '\0';
+}
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/double-to-string.h b/mfbt/double-conversion/double-conversion/double-to-string.h
new file mode 100644
index 0000000000..cf2e926d8f
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/double-to-string.h
@@ -0,0 +1,471 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
+#define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
+
+#include "mozilla/Types.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+class DoubleToStringConverter {
+ public:
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
+ // function returns false.
+ static const int kMaxFixedDigitsBeforePoint = 308;
+ static const int kMaxFixedDigitsAfterPoint = 100;
+
+ // When calling ToExponential with a requested_digits
+ // parameter > kMaxExponentialDigits then the function returns false.
+ static const int kMaxExponentialDigits = 120;
+
+ // When calling ToPrecision with a requested_digits
+ // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
+ // then the function returns false.
+ static const int kMinPrecisionDigits = 1;
+ static const int kMaxPrecisionDigits = 120;
+
+ // The maximal number of digits that are needed to emit a double in base 10.
+ // A higher precision can be achieved by using more digits, but the shortest
+ // accurate representation of any double will never use more digits than
+ // kBase10MaximalLength.
+ // Note that DoubleToAscii null-terminates its input. So the given buffer
+ // should be at least kBase10MaximalLength + 1 characters long.
+ static const int kBase10MaximalLength = 17;
+
+ // The maximal number of digits that are needed to emit a single in base 10.
+ // A higher precision can be achieved by using more digits, but the shortest
+ // accurate representation of any single will never use more digits than
+ // kBase10MaximalLengthSingle.
+ static const int kBase10MaximalLengthSingle = 9;
+
+ // The length of the longest string that 'ToShortest' can produce when the
+ // converter is instantiated with EcmaScript defaults (see
+  // 'EcmaScriptConverter').
+  // This value does not include the trailing '\0' character.
+  // This many characters are needed for negative values that hit the
+ // 'decimal_in_shortest_low' limit. For example: "-0.0000033333333333333333"
+ static const int kMaxCharsEcmaScriptShortest = 25;
+
+ enum Flags {
+ NO_FLAGS = 0,
+ EMIT_POSITIVE_EXPONENT_SIGN = 1,
+ EMIT_TRAILING_DECIMAL_POINT = 2,
+ EMIT_TRAILING_ZERO_AFTER_POINT = 4,
+ UNIQUE_ZERO = 8,
+ NO_TRAILING_ZERO = 16,
+ EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL = 32,
+ EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL = 64
+ };
+
+ // Flags should be a bit-or combination of the possible Flags-enum.
+ // - NO_FLAGS: no special flags.
+ // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent
+ // form, emits a '+' for positive exponents. Example: 1.2e+2.
+ // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is
+ // converted into decimal format then a trailing decimal point is appended.
+ // Example: 2345.0 is converted to "2345.".
+ // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point
+ // emits a trailing '0'-character. This flag requires the
+ // EMIT_TRAILING_DECIMAL_POINT flag.
+ // Example: 2345.0 is converted to "2345.0".
+ // - UNIQUE_ZERO: "-0.0" is converted to "0.0".
+ // - NO_TRAILING_ZERO: Trailing zeros are removed from the fractional portion
+ // of the result in precision mode. Matches printf's %g.
+ // When EMIT_TRAILING_ZERO_AFTER_POINT is also given, one trailing zero is
+ // preserved.
+ // - EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL: when the input number has
+ // exactly one significant digit and is converted into exponent form then a
+ // trailing decimal point is appended to the significand in shortest mode
+ // or in precision mode with one requested digit.
+ // - EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL: in addition to a trailing
+ // decimal point emits a trailing '0'-character. This flag requires the
+ // EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag.
+ //
+ // Infinity symbol and nan_symbol provide the string representation for these
+ // special values. If the string is NULL and the special value is encountered
+ // then the conversion functions return false.
+ //
+ // The exponent_character is used in exponential representations. It is
+ // usually 'e' or 'E'.
+ //
+ // When converting to the shortest representation the converter will
+ // represent input numbers in decimal format if they are in the interval
+ // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[
+ // (lower boundary included, greater boundary excluded).
+ // Example: with decimal_in_shortest_low = -6 and
+ // decimal_in_shortest_high = 21:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // When converting to precision mode the converter may add
+ // max_leading_padding_zeroes before returning the number in exponential
+ // format.
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+ // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ //
+ // When converting numbers with exactly one significant digit to exponent
+ // form in shortest mode or in precision mode with one requested digit, the
+ // EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags have
+ // no effect. Use the EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag to
+ // append a decimal point in this case and the
+ // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL flag to also append a
+ // '0'-character in this case.
+ // Example with decimal_in_shortest_low = 0:
+ // ToShortest(0.0009) -> "9e-4"
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL deactivated.
+ // ToShortest(0.0009) -> "9.e-4"
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated.
+ // ToShortest(0.0009) -> "9.0e-4"
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated and
+ // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL activated.
+ //
+ // The min_exponent_width is used for exponential representations.
+ // The converter adds leading '0's to the exponent until the exponent
+ // is at least min_exponent_width digits long.
+ // The min_exponent_width is clamped to 5.
+ // As such, the exponent may never have more than 5 digits in total.
+ DoubleToStringConverter(int flags,
+ const char* infinity_symbol,
+ const char* nan_symbol,
+ char exponent_character,
+ int decimal_in_shortest_low,
+ int decimal_in_shortest_high,
+ int max_leading_padding_zeroes_in_precision_mode,
+ int max_trailing_padding_zeroes_in_precision_mode,
+ int min_exponent_width = 0)
+ : flags_(flags),
+ infinity_symbol_(infinity_symbol),
+ nan_symbol_(nan_symbol),
+ exponent_character_(exponent_character),
+ decimal_in_shortest_low_(decimal_in_shortest_low),
+ decimal_in_shortest_high_(decimal_in_shortest_high),
+ max_leading_padding_zeroes_in_precision_mode_(
+ max_leading_padding_zeroes_in_precision_mode),
+ max_trailing_padding_zeroes_in_precision_mode_(
+ max_trailing_padding_zeroes_in_precision_mode),
+ min_exponent_width_(min_exponent_width) {
+ // When 'trailing zero after the point' is set, then 'trailing point'
+ // must be set too.
+ DOUBLE_CONVERSION_ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) ||
+ !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0));
+ }
+
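+  // A construction sketch (hedged; the argument values below are the
+  // EcmaScript defaults documented at EcmaScriptConverter() and are shown
+  // here purely for illustration):
+  //
+  //   DoubleToStringConverter conv(
+  //       DoubleToStringConverter::UNIQUE_ZERO |
+  //           DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN,
+  //       "Infinity", "NaN", 'e',
+  //       -6, 21,  // decimal_in_shortest_low / decimal_in_shortest_high
+  //       6, 0);   // max leading / trailing padding zeroes (precision mode)
+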
+ // Returns a converter following the EcmaScript specification.
+ //
+ // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN.
+ // Special values: "Infinity" and "NaN".
+ // Lower case 'e' for exponential values.
+ // decimal_in_shortest_low: -6
+ // decimal_in_shortest_high: 21
+ // max_leading_padding_zeroes_in_precision_mode: 6
+ // max_trailing_padding_zeroes_in_precision_mode: 0
+ static MFBT_API const DoubleToStringConverter& EcmaScriptConverter();
+
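+  // A minimal usage sketch (illustrative only; 'chars' and 'builder' are
+  // hypothetical local names, and StringBuilder is declared in utils.h):
+  //
+  //   const DoubleToStringConverter& conv =
+  //       DoubleToStringConverter::EcmaScriptConverter();
+  //   char chars[64];
+  //   StringBuilder builder(chars, sizeof(chars));
+  //   if (conv.ToShortest(3.14, &builder)) {
+  //     // builder.Finalize() returns the null-terminated string "3.14".
+  //   }
+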
+ // Computes the shortest string of digits that correctly represent the input
+ // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
+ // (see constructor) it then either returns a decimal representation, or an
+ // exponential representation.
+ // Example with decimal_in_shortest_low = -6,
+ // decimal_in_shortest_high = 21,
+ // EMIT_POSITIVE_EXPONENT_SIGN activated, and
+ // EMIT_TRAILING_DECIMAL_POINT deactivated:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // Note: the conversion may round the output if the returned string
+ // is accurate enough to uniquely identify the input-number.
+ // For example the most precise representation of the double 9e59 equals
+ // "899999999999999918767229449717619953810131273674690656206848", but
+ // the converter will return the shorter (but still correct) "9e59".
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except when the input value is special and no infinity_symbol or
+ // nan_symbol has been given to the constructor.
+ //
+ // The length of the longest result is the maximum of the length of the
+ // following string representations (each with possible examples):
+ // - NaN and negative infinity: "NaN", "-Infinity", "-inf".
+ // - -10^(decimal_in_shortest_high - 1):
+ // "-100000000000000000000", "-1000000000000000.0"
+ // - the longest string in range [0; -10^decimal_in_shortest_low]. Generally,
+ // this string is 3 + kBase10MaximalLength - decimal_in_shortest_low.
+ // (Sign, '0', decimal point, padding zeroes for decimal_in_shortest_low,
+ // and the significant digits).
+ // "-0.0000033333333333333333", "-0.0012345678901234567"
+ // - the longest exponential representation. (A negative number with
+ // kBase10MaximalLength significant digits).
+ // "-1.7976931348623157e+308", "-1.7976931348623157E308"
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+ bool ToShortest(double value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST);
+ }
+
+ // Same as ToShortest, but for single-precision floats.
+ bool ToShortestSingle(float value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE);
+ }
+
+
+ // Computes a decimal representation with a fixed number of digits after the
+ // decimal point. The last emitted digit is rounded.
+ //
+ // Examples:
+ // ToFixed(3.12, 1) -> "3.1"
+ // ToFixed(3.1415, 3) -> "3.142"
+ // ToFixed(1234.56789, 4) -> "1234.5679"
+ // ToFixed(1.23, 5) -> "1.23000"
+ // ToFixed(0.1, 4) -> "0.1000"
+ // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00"
+ // ToFixed(0.1, 30) -> "0.100000000000000005551115123126"
+ // ToFixed(0.1, 17) -> "0.10000000000000001"
+ //
+ // If requested_digits equals 0, then the tail of the result depends on
+  // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags.
+ // Examples, for requested_digits == 0,
+ // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be
+ // - false and false: then 123.45 -> 123
+ // 0.678 -> 1
+ // - true and false: then 123.45 -> 123.
+ // 0.678 -> 1.
+ // - true and true: then 123.45 -> 123.0
+ // 0.678 -> 1.0
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'value' > 10^kMaxFixedDigitsBeforePoint, or
+ // - 'requested_digits' > kMaxFixedDigitsAfterPoint.
+ // The last two conditions imply that the result for non-special values never
+ // contains more than
+ // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
+ // (one additional character for the sign, and one for the decimal point).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+ MFBT_API bool ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
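+  // A hedged call sketch (illustrative names; the expected output follows
+  // the ToFixed examples above):
+  //
+  //   char chars[128];
+  //   StringBuilder builder(chars, sizeof(chars));
+  //   if (DoubleToStringConverter::EcmaScriptConverter()
+  //           .ToFixed(3.1415, 3, &builder)) {
+  //     // builder.Finalize() returns "3.142".
+  //   }
+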
+ // Computes a representation in exponential format with requested_digits
+ // after the decimal point. The last emitted digit is rounded.
+ // If requested_digits equals -1, then the shortest exponential representation
+ // is computed.
+ //
+ // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and
+ // exponent_character set to 'e'.
+ // ToExponential(3.12, 1) -> "3.1e0"
+ // ToExponential(5.0, 3) -> "5.000e0"
+ // ToExponential(0.001, 2) -> "1.00e-3"
+ // ToExponential(3.1415, -1) -> "3.1415e0"
+ // ToExponential(3.1415, 4) -> "3.1415e0"
+ // ToExponential(3.1415, 3) -> "3.142e0"
+ // ToExponential(123456789000000, 3) -> "1.235e14"
+ // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30"
+ // ToExponential(1000000000000000019884624838656.0, 32) ->
+ // "1.00000000000000001988462483865600e30"
+ // ToExponential(1234, 0) -> "1e3"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'requested_digits' > kMaxExponentialDigits.
+ //
+ // The last condition implies that the result never contains more than
+ // kMaxExponentialDigits + 8 characters (the sign, the digit before the
+ // decimal point, the decimal point, the exponent character, the
+ // exponent's sign, and at most 3 exponent digits).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+ MFBT_API bool ToExponential(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
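+  // Sketch (illustrative names; output matches the examples above, noting
+  // that a negative exponent always carries its sign):
+  //
+  //   char chars[128];
+  //   StringBuilder builder(chars, sizeof(chars));
+  //   if (DoubleToStringConverter::EcmaScriptConverter()
+  //           .ToExponential(0.001, 2, &builder)) {
+  //     // builder.Finalize() returns "1.00e-3".
+  //   }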
+
+ // Computes 'precision' leading digits of the given 'value' and returns them
+ // either in exponential or decimal format, depending on
+ // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
+ // constructor).
+ // The last computed digit is rounded.
+ //
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+ // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no
+ // EMIT_TRAILING_ZERO_AFTER_POINT:
+ // ToPrecision(123450.0, 6) -> "123450"
+ // ToPrecision(123450.0, 5) -> "123450"
+ // ToPrecision(123450.0, 4) -> "123500"
+ // ToPrecision(123450.0, 3) -> "123000"
+ // ToPrecision(123450.0, 2) -> "1.2e5"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+  //   - precision < kMinPrecisionDigits
+ // - precision > kMaxPrecisionDigits
+ //
+ // The last condition implies that the result never contains more than
+ // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
+ // exponent character, the exponent's sign, and at most 3 exponent digits).
+ // In addition, the buffer must be able to hold the trailing '\0' character.
+ MFBT_API bool ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const;
+
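+  // Sketch (illustrative names; compare the padding examples above):
+  //
+  //   char chars[128];
+  //   StringBuilder builder(chars, sizeof(chars));
+  //   if (DoubleToStringConverter::EcmaScriptConverter()
+  //           .ToPrecision(0.00000012345, 2, &builder)) {
+  //     // With max_leading_padding_zeroes == 6, this yields "1.2e-7".
+  //   }
+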
+ enum DtoaMode {
+ // Produce the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate
+ // but correct) 0.3.
+ SHORTEST,
+ // Same as SHORTEST, but for single-precision floats.
+ SHORTEST_SINGLE,
+ // Produce a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ FIXED,
+ // Fixed number of digits (independent of the decimal point).
+ PRECISION
+ };
+
+ // Converts the given double 'v' to digit characters. 'v' must not be NaN,
+ // +Infinity, or -Infinity. In SHORTEST_SINGLE-mode this restriction also
+  // applies to 'v' after it has been cast to a single-precision float. That
+ // is, in this mode static_cast<float>(v) must not be NaN, +Infinity or
+ // -Infinity.
+ //
+ // The result should be interpreted as buffer * 10^(point-length).
+ //
+ // The digits are written to the buffer in the platform's charset, which is
+ // often UTF-8 (with ASCII-range digits) but may be another charset, such
+ // as EBCDIC.
+ //
+ // The output depends on the given mode:
+ // - SHORTEST: produce the least amount of digits for which the internal
+ // identity requirement is still satisfied. If the digits are printed
+ // (together with the correct exponent) then reading this number will give
+ // 'v' again. The buffer will choose the representation that is closest to
+  //    'v'. If there are two at the same distance, then the one farther away
+ // from 0 is chosen (halfway cases - ending with 5 - are rounded up).
+ // In this mode the 'requested_digits' parameter is ignored.
+ // - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
+ // - FIXED: produces digits necessary to print a given number with
+ // 'requested_digits' digits after the decimal point. The produced digits
+ // might be too short in which case the caller has to fill the remainder
+ // with '0's.
+ // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+ // Halfway cases are rounded towards +/-Infinity (away from 0). The call
+ // toFixed(0.15, 2) thus returns buffer="2", point=0.
+ // The returned buffer may contain digits that would be truncated from the
+ // shortest representation of the input.
+ // - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+ // Even though the length of produced digits usually equals
+ // 'requested_digits', the function is allowed to return fewer digits, in
+ // which case the caller has to fill the missing digits with '0's.
+ // Halfway cases are again rounded away from 0.
+ // DoubleToAscii expects the given buffer to be big enough to hold all
+ // digits and a terminating null-character. In SHORTEST-mode it expects a
+ // buffer of at least kBase10MaximalLength + 1. In all other modes the
+ // requested_digits parameter and the padding-zeroes limit the size of the
+ // output. Don't forget the decimal point, the exponent character and the
+ // terminating null-character when computing the maximal output size.
+ // The given length is only used in debug mode to ensure the buffer is big
+ // enough.
+ static MFBT_API void DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point);
+
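+  // Worked example, using the FIXED-mode illustration above: if
+  //
+  //   DoubleToAscii(0.001, FIXED, 5, buffer, buffer_length,
+  //                 &sign, &length, &point);
+  //
+  // returns buffer = "1", length = 1 and point = -2, the value reads back
+  // as buffer * 10^(point - length) = 1 * 10^(-3) = 0.001.
+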
+ private:
+ // Implementation for ToShortest and ToShortestSingle.
+ MFBT_API bool ToShortestIeeeNumber(double value,
+ StringBuilder* result_builder,
+ DtoaMode mode) const;
+
+ // If the value is a special value (NaN or Infinity) constructs the
+ // corresponding string using the configured infinity/nan-symbol.
+ // If either of them is NULL or the value is not special then the
+ // function returns false.
+ MFBT_API bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
+ // Constructs an exponential representation (i.e. 1.234e56).
+ // The given exponent assumes a decimal point after the first decimal digit.
+ MFBT_API void CreateExponentialRepresentation(const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const;
+  // Creates a decimal representation (i.e. 1234.5678).
+ MFBT_API void CreateDecimalRepresentation(const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const;
+
+ const int flags_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+ const char exponent_character_;
+ const int decimal_in_shortest_low_;
+ const int decimal_in_shortest_high_;
+ const int max_leading_padding_zeroes_in_precision_mode_;
+ const int max_trailing_padding_zeroes_in_precision_mode_;
+ const int min_exponent_width_;
+
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
diff --git a/mfbt/double-conversion/double-conversion/fast-dtoa.cc b/mfbt/double-conversion/double-conversion/fast-dtoa.cc
new file mode 100644
index 0000000000..d7a23984df
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/fast-dtoa.cc
@@ -0,0 +1,665 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "fast-dtoa.h"
+
+#include "cached-powers.h"
+#include "diy-fp.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// The minimal and maximal target exponent define the range of w's binary
+// exponent, where 'w' is the result of multiplying the input by a cached power
+// of ten.
+//
+// A different range might be chosen on a different platform, to optimize digit
+// generation, but a smaller range requires more powers of ten to be cached.
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
+
+
+// Adjusts the last digit of the generated number, and screens out generated
+// solutions that may be inaccurate. A solution may be inaccurate if it is
+// outside the safe interval, or if we cannot prove that it is closer to the
+// input than a neighboring representation of the same length.
+//
+// Input: * buffer containing the digits of too_high / 10^kappa
+// * the buffer's length
+// * distance_too_high_w == (too_high - w).f() * unit
+// * unsafe_interval == (too_high - too_low).f() * unit
+// * rest = (too_high - buffer * 10^kappa).f() * unit
+// * ten_kappa = 10^kappa * unit
+// * unit = the common multiplier
+// Output: returns true if the buffer is guaranteed to contain the closest
+// representable number to the input.
+// Modifies the generated digits in the buffer to approach (round towards) w.
+static bool RoundWeed(Vector<char> buffer,
+ int length,
+ uint64_t distance_too_high_w,
+ uint64_t unsafe_interval,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit) {
+ uint64_t small_distance = distance_too_high_w - unit;
+ uint64_t big_distance = distance_too_high_w + unit;
+ // Let w_low = too_high - big_distance, and
+ // w_high = too_high - small_distance.
+ // Note: w_low < w < w_high
+ //
+ // The real w (* unit) must lie somewhere inside the interval
+ // ]w_low; w_high[ (often written as "(w_low; w_high)")
+
+ // Basically the buffer currently contains a number in the unsafe interval
+ // ]too_low; too_high[ with too_low < w < too_high
+ //
+ // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ // ^v 1 unit ^ ^ ^ ^
+ // boundary_high --------------------- . . . .
+ // ^v 1 unit . . . .
+ // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
+ // . . ^ . .
+ // . big_distance . . .
+ // . . . . rest
+ // small_distance . . . .
+ // v . . . .
+ // w_high - - - - - - - - - - - - - - - - - - . . . .
+ // ^v 1 unit . . . .
+ // w ---------------------------------------- . . . .
+ // ^v 1 unit v . . .
+ // w_low - - - - - - - - - - - - - - - - - - - - - . . .
+ // . . v
+ // buffer --------------------------------------------------+-------+--------
+ // . .
+ // safe_interval .
+ // v .
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
+ // ^v 1 unit .
+ // boundary_low ------------------------- unsafe_interval
+ // ^v 1 unit v
+ // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ //
+ //
+ // Note that the value of buffer could lie anywhere inside the range too_low
+ // to too_high.
+ //
+ // boundary_low, boundary_high and w are approximations of the real boundaries
+ // and v (the input number). They are guaranteed to be precise up to one unit.
+ // In fact the error is guaranteed to be strictly less than one unit.
+ //
+ // Anything that lies outside the unsafe interval is guaranteed not to round
+ // to v when read again.
+ // Anything that lies inside the safe interval is guaranteed to round to v
+ // when read again.
+ // If the number inside the buffer lies inside the unsafe interval but not
+ // inside the safe interval then we simply do not know and bail out (returning
+ // false).
+ //
+ // Similarly we have to take into account the imprecision of 'w' when finding
+ // the closest representation of 'w'. If we have two potential
+ // representations, and one is closer to both w_low and w_high, then we know
+ // it is closer to the actual value v.
+ //
+ // By generating the digits of too_high we got the largest (closest to
+ // too_high) buffer that is still in the unsafe interval. In the case where
+ // w_high < buffer < too_high we try to decrement the buffer.
+ // This way the buffer approaches (rounds towards) w.
+ // There are 3 conditions that stop the decrementation process:
+ // 1) the buffer is already below w_high
+ // 2) decrementing the buffer would make it leave the unsafe interval
+ // 3) decrementing the buffer would yield a number below w_high and farther
+ // away than the current number. In other words:
+ // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
+ // Instead of using the buffer directly we use its distance to too_high.
+ // Conceptually rest ~= too_high - buffer
+ // We need to do the following tests in this order to avoid over- and
+ // underflows.
+ DOUBLE_CONVERSION_ASSERT(rest <= unsafe_interval);
+ while (rest < small_distance && // Negated condition 1
+ unsafe_interval - rest >= ten_kappa && // Negated condition 2
+ (rest + ten_kappa < small_distance || // buffer{-1} > w_high
+ small_distance - rest >= rest + ten_kappa - small_distance)) {
+ buffer[length - 1]--;
+ rest += ten_kappa;
+ }
+
+ // We have approached w+ as much as possible. We now test if approaching w-
+ // would require changing the buffer. If yes, then we have two possible
+ // representations close to w, but we cannot decide which one is closer.
+ if (rest < big_distance &&
+ unsafe_interval - rest >= ten_kappa &&
+ (rest + ten_kappa < big_distance ||
+ big_distance - rest > rest + ten_kappa - big_distance)) {
+ return false;
+ }
+
+ // Weeding test.
+ // The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
+ // Since too_low = too_high - unsafe_interval this is equivalent to
+ // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
+ // Conceptually we have: rest ~= too_high - buffer
+ return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
+}
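+
+// Worked numeric example (values chosen purely for illustration): take
+// unit = 1, distance_too_high_w = 10 (so small_distance = 9 and
+// big_distance = 11), unsafe_interval = 100, ten_kappa = 10 and rest = 2.
+// The loop decrements the last digit once (rest becomes 12, which is no
+// longer below small_distance), the two-representations check does not
+// trigger (12 >= big_distance), and the weeding test passes
+// (2 <= 12 <= 96), so RoundWeed returns true.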
+
+
+// Rounds the buffer upwards if the result is closer to v by possibly adding
+// 1 to the buffer. If the precision of the calculation is not sufficient to
+// round correctly, return false.
+// The rounding might shift the whole buffer in which case the kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+ int length,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit,
+ int* kappa) {
+ DOUBLE_CONVERSION_ASSERT(rest < ten_kappa);
+ // The following tests are done in a specific order to avoid overflows. They
+ // will work correctly with any uint64 values of rest < ten_kappa and unit.
+ //
+ // If the unit is too big, then we don't know which way to round. For example
+ // a unit of 50 means that the real number lies within rest +/- 50. If
+ // 10^kappa == 40 then there is no way to tell which way to round.
+ if (unit >= ten_kappa) return false;
+ // Even if unit is just half the size of 10^kappa we are already completely
+ // lost. (And after the previous test we know that the expression will not
+ // over/underflow.)
+ if (ten_kappa - unit <= unit) return false;
+ // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+ if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+ return true;
+ }
+ // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+ if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+    // Increment the last digit recursively until we find a non-'9' digit.
+ buffer[length - 1]++;
+ for (int i = length - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+    // If the first digit is now '0' + 10, we had a buffer with all '9's. With the
+ // exception of the first digit all digits are now '0'. Simply switch the
+ // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+ // the power (the kappa) is increased.
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*kappa) += 1;
+ }
+ return true;
+ }
+ return false;
+}
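+
+// Worked numeric example (illustrative): with ten_kappa = 10 and unit = 1,
+// rest = 3 satisfies the round-down test (10 - 3 > 3 and 10 - 6 >= 2), so
+// the buffer is left unchanged; rest = 8 satisfies the round-up test
+// (8 > 1 and 10 - 7 <= 7), so the last digit is incremented.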
+
+// Returns the biggest power of ten that is less than or equal to the given
+// number. We furthermore receive the maximum number of bits 'number' has.
+//
+// Returns power == 10^(exponent_plus_one-1) such that
+// power <= number < power * 10.
+// If number_bits == 0 (and hence number is 0 or 1), the call with number == 0
+// returns the degenerate result *power == 0 and *exponent_plus_one == 0.
+// The number of bits must be <= 32.
+// Precondition: number < (1 << (number_bits + 1)).
+
+// Inspired by the method for finding an integer log base 10 from here:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+static unsigned int const kSmallPowersOfTen[] =
+ {0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000,
+ 1000000000};
+
+static void BiggestPowerTen(uint32_t number,
+ int number_bits,
+ uint32_t* power,
+ int* exponent_plus_one) {
+ DOUBLE_CONVERSION_ASSERT(number < (1u << (number_bits + 1)));
+ // 1233/4096 is approximately 1/lg(10).
+ int exponent_plus_one_guess = ((number_bits + 1) * 1233 >> 12);
+ // We increment to skip over the first entry in the kPowersOf10 table.
+ // Note: kPowersOf10[i] == 10^(i-1).
+ exponent_plus_one_guess++;
+ // We don't have any guarantees that 2^number_bits <= number.
+ if (number < kSmallPowersOfTen[exponent_plus_one_guess]) {
+ exponent_plus_one_guess--;
+ }
+ *power = kSmallPowersOfTen[exponent_plus_one_guess];
+ *exponent_plus_one = exponent_plus_one_guess;
+}
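+
+// Worked example (illustrative): for number = 500 and number_bits = 9 the
+// guess is ((9 + 1) * 1233 >> 12) + 1 == 4, but 500 < kSmallPowersOfTen[4]
+// == 1000, so the guess is decremented and the function yields
+// *power == 100 and *exponent_plus_one == 3; indeed 100 <= 500 < 1000.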
+
+// Generates the digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * low, w and high are correct up to 1 ulp (unit in the last place). That
+// is, their error must be less than a unit of their last digits.
+// * low.e() == w.e() == high.e()
+// * low < w < high, and taking into account their error: low~ <= high~
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but len contains the number of digits.
+// * buffer contains the shortest possible decimal digit-sequence
+// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
+// correct values of low and high (without their error).
+// * if more than one decimal representation gives the minimal number of
+// decimal digits then the one closest to W (where W is the correct value
+// of w) is chosen.
+// Remark: this procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This happens rarely (~0.5%).
+//
+// Say, for the sake of example, that
+// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w's value can be computed by w.f() * 2^w.e()
+// We can obtain w's integral digits by simply shifting w.f() by -w.e().
+// -> w's integral part is 0x1234
+// w's fractional part is therefore 0x567890abcdef.
+// Printing w's integral part is easy (simply print 0x1234 in decimal).
+// In order to print its fraction we repeatedly multiply the fraction by 10 and
+// get each digit. Example the first digit after the point would be computed by
+// (0x567890abcdef * 10) >> 48. -> 3
+// The whole thing becomes slightly more complicated because we want to stop
+// once we have enough digits. That is, once the digits inside the buffer
+// represent 'w' we can stop. Everything inside the interval low - high
+// represents w. However we have to pay attention to low, high and w's
+// imprecision.
+static bool DigitGen(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
+ DOUBLE_CONVERSION_ASSERT(low.e() == w.e() && w.e() == high.e());
+ DOUBLE_CONVERSION_ASSERT(low.f() + 1 <= high.f() - 1);
+ DOUBLE_CONVERSION_ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ // low, w and high are imprecise, but by less than one ulp (unit in the last
+ // place).
+ // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
+ // the new numbers are outside of the interval we want the final
+ // representation to lie in.
+ // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
+ // numbers that are certain to lie in the interval. We will use this fact
+ // later on.
+ // We will now start by generating the digits within the uncertain
+ // interval. Later we will weed out representations that lie outside the safe
+ // interval and thus _might_ lie outside the correct interval.
+ uint64_t unit = 1;
+ DiyFp too_low = DiyFp(low.f() - unit, low.e());
+ DiyFp too_high = DiyFp(high.f() + unit, high.e());
+ // too_low and too_high are guaranteed to lie outside the interval we want the
+ // generated number in.
+ DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
+ // We now cut the input number into two parts: the integral digits and the
+ // fractionals. We will not write any decimal separator though, but adapt
+ // kappa instead.
+ // Reminder: we are currently computing the digits (stored inside the buffer)
+ // such that: too_low < buffer * 10^kappa < too_high
+ // We use too_high for the digit_generation and stop as soon as possible.
+ // If we stop early we effectively round down.
+ DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+ uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = too_high.f() & (one.f() - 1);
+ uint32_t divisor;
+ int divisor_exponent_plus_one;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+ &divisor, &divisor_exponent_plus_one);
+ *kappa = divisor_exponent_plus_one;
+ *length = 0;
+ // Loop invariant: buffer = too_high / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // that is smaller than integrals.
+ while (*kappa > 0) {
+ int digit = integrals / divisor;
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ integrals %= divisor;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divisor and that the
+ // invariant thus holds again.
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
+ // Reminder: unsafe_interval.e() == one.e()
+ if (rest < unsafe_interval.f()) {
+ // Rounding down (by not emitting the remaining digits) yields a number
+ // that lies within the unsafe interval.
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+ unsafe_interval.f(), rest,
+ static_cast<uint64_t>(divisor) << -one.e(), unit);
+ }
+ divisor /= 10;
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to pay attention to multiply associated
+ // data (like the interval or 'unit'), too.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ DOUBLE_CONVERSION_ASSERT(one.e() >= -60);
+ DOUBLE_CONVERSION_ASSERT(fractionals < one.f());
+ DOUBLE_CONVERSION_ASSERT(DOUBLE_CONVERSION_UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ for (;;) {
+ fractionals *= 10;
+ unit *= 10;
+ unsafe_interval.set_f(unsafe_interval.f() * 10);
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ if (fractionals < unsafe_interval.f()) {
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+ unsafe_interval.f(), fractionals, one.f(), unit);
+ }
+ }
+}
+
+
+
+// Generates (at most) requested_digits digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * w is correct up to 1 ulp (unit in the last place). That
+// is, its error must be strictly less than a unit of its last digit.
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but length contains the number of
+// digits.
+// * the representation in buffer is the most precise representation of
+// requested_digits digits.
+//   * buffer contains at most requested_digits digits of w. If there are fewer
+// than requested_digits digits then some trailing '0's have been removed.
+// * kappa is such that
+// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This happens rarely, but the failure rate
+// increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
+ DOUBLE_CONVERSION_ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ DOUBLE_CONVERSION_ASSERT(kMinimalTargetExponent >= -60);
+ DOUBLE_CONVERSION_ASSERT(kMaximalTargetExponent <= -32);
+ // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+ // also scale its error.
+ uint64_t w_error = 1;
+ // We cut the input number into two parts: the integral digits and the
+ // fractional digits. We don't emit any decimal separator, but adapt kappa
+ // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+ // increase kappa by 1.
+ DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+ uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = w.f() & (one.f() - 1);
+ uint32_t divisor;
+ int divisor_exponent_plus_one;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+ &divisor, &divisor_exponent_plus_one);
+ *kappa = divisor_exponent_plus_one;
+ *length = 0;
+
+ // Loop invariant: buffer = w / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // that is smaller than 'integrals'.
+ while (*kappa > 0) {
+ int digit = integrals / divisor;
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ requested_digits--;
+ integrals %= divisor;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divisor and that the
+ // invariant thus holds again.
+ if (requested_digits == 0) break;
+ divisor /= 10;
+ }
+
+ if (requested_digits == 0) {
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ return RoundWeedCounted(buffer, *length, rest,
+ static_cast<uint64_t>(divisor) << -one.e(), w_error,
+ kappa);
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to pay attention to multiply associated
+ // data (the 'unit'), too.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ DOUBLE_CONVERSION_ASSERT(one.e() >= -60);
+ DOUBLE_CONVERSION_ASSERT(fractionals < one.f());
+ DOUBLE_CONVERSION_ASSERT(DOUBLE_CONVERSION_UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ while (requested_digits > 0 && fractionals > w_error) {
+ fractionals *= 10;
+ w_error *= 10;
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ requested_digits--;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ }
+ if (requested_digits != 0) return false;
+ return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+ kappa);
+}
+
+
+// Provides a decimal representation of v.
+// Returns true if it succeeds; otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer (not null-terminated).
+// If the function returns true then
+// v == (double) (buffer * 10^decimal_exponent).
+// The digits in the buffer are the shortest representation possible: no
+// 0.09999999999999999 instead of 0.1. The shorter representation will be
+// chosen even if the longer one would be closer to v.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the closest will be
+// computed.
+static bool Grisu3(double v,
+ FastDtoaMode mode,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ // boundary_minus and boundary_plus are the boundaries between v and its
+ // closest floating-point neighbors. Any number strictly between
+  // boundary_minus and boundary_plus will round to v when converted to a double.
+ // Grisu3 will never output representations that lie exactly on a boundary.
+ DiyFp boundary_minus, boundary_plus;
+ if (mode == FAST_DTOA_SHORTEST) {
+ Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ } else {
+ DOUBLE_CONVERSION_ASSERT(mode == FAST_DTOA_SHORTEST_SINGLE);
+ float single_v = static_cast<float>(v);
+ Single(single_v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ }
+ DOUBLE_CONVERSION_ASSERT(boundary_plus.e() == w.e());
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ DOUBLE_CONVERSION_ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+  // 64-bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) is now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+ DOUBLE_CONVERSION_ASSERT(scaled_w.e() ==
+ boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
+ // In theory it would be possible to avoid some recomputations by computing
+ // the difference between w and boundary_minus/plus (a power of 2) and to
+ // compute scaled_boundary_minus/plus by subtracting/adding from
+ // scaled_w. However the code becomes much less readable and the speed
+ // enhancements are not terrific.
+ DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
+ DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
+
+ // DigitGen will generate the digits of scaled_w. Therefore we have
+ // v == (double) (scaled_w * 10^-mk).
+ // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
+  // integer then it will be updated. For instance, if scaled_w == 1.23 then
+ // the buffer will be filled with "123" and the decimal_exponent will be
+ // decreased by 2.
+ int kappa;
+ bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+// The "counted" version of grisu3 (see above) only generates requested_digits
+// number of digits. This version does not generate the shortest representation,
+// and with enough requested digits 0.1 will at some point print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ DOUBLE_CONVERSION_ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+  // 64-bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) is now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+ // We now have (double) (scaled_w * 10^-mk).
+ // DigitGen will generate the first requested_digits digits of scaled_w and
+ // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
+ // will not always be exactly the same since DigitGenCounted only produces a
+ // limited number of digits.)
+ int kappa;
+ bool result = DigitGenCounted(scaled_w, requested_digits,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+bool FastDtoa(double v,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ DOUBLE_CONVERSION_ASSERT(v > 0);
+ DOUBLE_CONVERSION_ASSERT(!Double(v).IsSpecial());
+
+ bool result = false;
+ int decimal_exponent = 0;
+ switch (mode) {
+ case FAST_DTOA_SHORTEST:
+ case FAST_DTOA_SHORTEST_SINGLE:
+ result = Grisu3(v, mode, buffer, length, &decimal_exponent);
+ break;
+ case FAST_DTOA_PRECISION:
+ result = Grisu3Counted(v, requested_digits,
+ buffer, length, &decimal_exponent);
+ break;
+ default:
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+ if (result) {
+ *decimal_point = *length + decimal_exponent;
+ buffer[*length] = '\0';
+ }
+ return result;
+}
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/fast-dtoa.h b/mfbt/double-conversion/double-conversion/fast-dtoa.h
new file mode 100644
index 0000000000..5f1e8eee5e
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/fast-dtoa.h
@@ -0,0 +1,88 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_
+#define DOUBLE_CONVERSION_FAST_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+enum FastDtoaMode {
+ // Computes the shortest representation of the given input. The returned
+ // result will be the most accurate number of this length. Longer
+ // representations might be more accurate.
+ FAST_DTOA_SHORTEST,
+ // Same as FAST_DTOA_SHORTEST but for single-precision floats.
+ FAST_DTOA_SHORTEST_SINGLE,
+ // Computes a representation where the precision (number of digits) is
+ // given as input. The precision is independent of the decimal point.
+ FAST_DTOA_PRECISION
+};
+
+// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
+// include the terminating '\0' character.
+static const int kFastDtoaMaximalLength = 17;
+// Same for single-precision numbers.
+static const int kFastDtoaMaximalSingleLength = 9;
+
+// Provides a decimal representation of v.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+// * v must be a strictly positive finite double.
+//
+// Returns true if it succeeds; otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true and mode equals
+// - FAST_DTOA_SHORTEST, then
+// the parameter requested_digits is ignored.
+// The result satisfies
+// v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible. E.g.
+// if 0.099999999999 and 0.1 represent the same double then "1" is returned
+// with point = 0.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain
+// the one closest to v.
+// - FAST_DTOA_PRECISION, then
+// the buffer contains requested_digits digits.
+// the difference v - (buffer * 10^(point-length)) is closest to zero for
+// all possible representations of requested_digits digits.
+// If there are two values that are equally close, then FastDtoa returns
+// false.
+// For both modes the buffer must be large enough to hold the result.
+bool FastDtoa(double d,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point);
+
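+// A minimal usage sketch (illustrative local names only; FastDtoa may
+// rarely return false, in which case a caller falls back to BignumDtoa):
+//
+//   char digits[kFastDtoaMaximalLength + 1];
+//   int length, point;
+//   if (FastDtoa(0.5, FAST_DTOA_SHORTEST, 0,
+//                Vector<char>(digits, sizeof(digits)), &length, &point)) {
+//     // digits == "5", length == 1, point == 0: 5 * 10^(0 - 1) == 0.5.
+//   }
+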
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FAST_DTOA_H_
diff --git a/mfbt/double-conversion/double-conversion/fixed-dtoa.cc b/mfbt/double-conversion/double-conversion/fixed-dtoa.cc
new file mode 100644
index 0000000000..e739b19804
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/fixed-dtoa.cc
@@ -0,0 +1,405 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cmath>
+
+#include "fixed-dtoa.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// Represents a 128-bit type. This class should be replaced by a native type on
+// platforms that support 128-bit integers.
+class UInt128 {
+ public:
+ UInt128() : high_bits_(0), low_bits_(0) { }
+ UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
+
+ void Multiply(uint32_t multiplicand) {
+ uint64_t accumulator;
+
+ accumulator = (low_bits_ & kMask32) * multiplicand;
+ uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
+ low_bits_ = (accumulator << 32) + part;
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
+ part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
+ high_bits_ = (accumulator << 32) + part;
+ DOUBLE_CONVERSION_ASSERT((accumulator >> 32) == 0);
+ }
+
+ void Shift(int shift_amount) {
+ DOUBLE_CONVERSION_ASSERT(-64 <= shift_amount && shift_amount <= 64);
+ if (shift_amount == 0) {
+ return;
+ } else if (shift_amount == -64) {
+ high_bits_ = low_bits_;
+ low_bits_ = 0;
+ } else if (shift_amount == 64) {
+ low_bits_ = high_bits_;
+ high_bits_ = 0;
+ } else if (shift_amount <= 0) {
+ high_bits_ <<= -shift_amount;
+ high_bits_ += low_bits_ >> (64 + shift_amount);
+ low_bits_ <<= -shift_amount;
+ } else {
+ low_bits_ >>= shift_amount;
+ low_bits_ += high_bits_ << (64 - shift_amount);
+ high_bits_ >>= shift_amount;
+ }
+ }
+
+ // Modifies *this to *this MOD (2^power).
+ // Returns *this DIV (2^power).
+ int DivModPowerOf2(int power) {
+ if (power >= 64) {
+ int result = static_cast<int>(high_bits_ >> (power - 64));
+ high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
+ return result;
+ } else {
+ uint64_t part_low = low_bits_ >> power;
+ uint64_t part_high = high_bits_ << (64 - power);
+ int result = static_cast<int>(part_low + part_high);
+ high_bits_ = 0;
+ low_bits_ -= part_low << power;
+ return result;
+ }
+ }
+
+ bool IsZero() const {
+ return high_bits_ == 0 && low_bits_ == 0;
+ }
+
+ int BitAt(int position) const {
+ if (position >= 64) {
+ return static_cast<int>(high_bits_ >> (position - 64)) & 1;
+ } else {
+ return static_cast<int>(low_bits_ >> position) & 1;
+ }
+ }
+
+ private:
+ static const uint64_t kMask32 = 0xFFFFFFFF;
+ // Value == (high_bits_ << 64) + low_bits_
+ uint64_t high_bits_;
+ uint64_t low_bits_;
+};
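+
+// Worked note (editorial): with the binary point at bit 'point', a UInt128
+// value x represents x / 2^point. Multiplying by 5 and moving the point one
+// bit to the right therefore multiplies the represented value by 10:
+//   (5 * x) / 2^(point - 1) == 10 * (x / 2^point)
+// and DivModPowerOf2(point) then extracts the integer digit in [0, 9].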
+
+
+static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
+
+
+static void FillDigits32FixedLength(uint32_t number, int requested_length,
+ Vector<char> buffer, int* length) {
+ for (int i = requested_length - 1; i >= 0; --i) {
+ buffer[(*length) + i] = '0' + number % 10;
+ number /= 10;
+ }
+ *length += requested_length;
+}
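+
+// Example (illustrative): FillDigits32FixedLength(42, 4, buffer, &length)
+// appends "0042" and advances *length by 4.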
+
+
+static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
+ int number_length = 0;
+ // We fill the digits in reverse order and exchange them afterwards.
+ while (number != 0) {
+ int digit = number % 10;
+ number /= 10;
+ buffer[(*length) + number_length] = static_cast<char>('0' + digit);
+ number_length++;
+ }
+ // Exchange the digits.
+ int i = *length;
+ int j = *length + number_length - 1;
+ while (i < j) {
+ char tmp = buffer[i];
+ buffer[i] = buffer[j];
+ buffer[j] = tmp;
+ i++;
+ j--;
+ }
+ *length += number_length;
+}
+
+
+static void FillDigits64FixedLength(uint64_t number,
+ Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ FillDigits32FixedLength(part0, 3, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+}
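+
+// Example (illustrative): for number == 12345678901234567 the three parts are
+// part0 == 123, part1 == 4567890 and part2 == 1234567, printed with fixed
+// widths 3, 7 and 7, which reassembles "12345678901234567".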
+
+
+static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ if (part0 != 0) {
+ FillDigits32(part0, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else if (part1 != 0) {
+ FillDigits32(part1, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else {
+ FillDigits32(part2, buffer, length);
+ }
+}
+
+
+static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
+ // An empty buffer represents 0.
+ if (*length == 0) {
+ buffer[0] = '1';
+ *decimal_point = 1;
+ *length = 1;
+ return;
+ }
+ // Increment the last digit and propagate the carry until we find a digit
+ // that is not '9', or until we reach the first digit.
+ buffer[(*length) - 1]++;
+ for (int i = (*length) - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) {
+ return;
+ }
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ // If the first digit is now '0' + 10, we would need to set it to '0' and add
+ // a '1' in front. However we reach the first digit only if all following
+ // digits had been '9' before rounding up. Now all trailing digits are '0' and
+ // we simply switch the first digit to '1' and update the decimal-point
+ // (indicating that the point is now one digit to the right).
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+}
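+
+// Example (illustrative): RoundUp on a buffer holding "999" propagates the
+// carry all the way to the front, leaving the buffer as "100" with
+// *decimal_point incremented by one.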
+
+
+// The given fractionals number represents a fixed-point number with binary
+// point at bit (-exponent).
+// Preconditions:
+// -128 <= exponent <= 0.
+// 0 <= fractionals * 2^exponent < 1
+// The buffer holds the result.
+// The function will round its result. During the rounding-process digits not
+// generated by this function might be updated, and the decimal-point variable
+// might be updated. If this function generates the digits 99 and the buffer
+// already contained "199" (thus yielding a buffer of "19999") then a
+// rounding-up will change the contents of the buffer to "20000".
+static void FillFractionals(uint64_t fractionals, int exponent,
+ int fractional_count, Vector<char> buffer,
+ int* length, int* decimal_point) {
+ DOUBLE_CONVERSION_ASSERT(-128 <= exponent && exponent <= 0);
+ // 'fractionals' is a fixed-point number, with binary point at bit
+ // (-exponent). Inside the function the non-converted remainder of fractionals
+ // is a fixed-point number, with binary point at bit 'point'.
+ if (-exponent <= 64) {
+ // One 64 bit number is sufficient.
+ DOUBLE_CONVERSION_ASSERT(fractionals >> 56 == 0);
+ int point = -exponent;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals == 0) break;
+ // Instead of multiplying by 10 we multiply by 5 and adjust the point
+ // location. This way the fractionals variable will not overflow.
+ // Invariant at the beginning of the loop: fractionals < 2^point.
+ // Initially we have: point <= 64 and fractionals < 2^56
+ // After each iteration the point is decremented by one.
+ // Note that 5^3 = 125 < 128 = 2^7.
+ // Therefore three iterations of this loop will not overflow fractionals
+ // (even without the subtraction at the end of the loop body). At this
+ // time point will satisfy point <= 61 and therefore fractionals < 2^point
+ // and any further multiplication of fractionals by 5 will not overflow.
+ fractionals *= 5;
+ point--;
+ int digit = static_cast<int>(fractionals >> point);
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ fractionals -= static_cast<uint64_t>(digit) << point;
+ }
+ // If the first bit after the point is set we have to round up.
+ DOUBLE_CONVERSION_ASSERT(fractionals == 0 || point - 1 >= 0);
+ if ((fractionals != 0) && ((fractionals >> (point - 1)) & 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ } else { // We need 128 bits.
+ DOUBLE_CONVERSION_ASSERT(64 < -exponent && -exponent <= 128);
+ UInt128 fractionals128 = UInt128(fractionals, 0);
+ fractionals128.Shift(-exponent - 64);
+ int point = 128;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals128.IsZero()) break;
+ // As before: instead of multiplying by 10 we multiply by 5 and adjust the
+ // point location.
+ // This multiplication will not overflow for the same reasons as before.
+ fractionals128.Multiply(5);
+ point--;
+ int digit = fractionals128.DivModPowerOf2(point);
+ DOUBLE_CONVERSION_ASSERT(digit <= 9);
+ buffer[*length] = static_cast<char>('0' + digit);
+ (*length)++;
+ }
+ if (fractionals128.BitAt(point - 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ }
+}
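+
+// Worked trace (editorial): for fractionals == 5 and exponent == -3 (the
+// value 5/8 == 0.625), the 64-bit loop produces digits as follows:
+//   5*5 == 25, point 2: digit 25>>2 == 6, remainder 1
+//   1*5 ==  5, point 1: digit  5>>1 == 2, remainder 1
+//   1*5 ==  5, point 0: digit  5>>0 == 5, remainder 0
+// yielding "625" as expected.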
+
+
+// Removes leading and trailing zeros.
+// If leading zeros are removed then the decimal point position is adjusted.
+static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
+ while (*length > 0 && buffer[(*length) - 1] == '0') {
+ (*length)--;
+ }
+ int first_non_zero = 0;
+ while (first_non_zero < *length && buffer[first_non_zero] == '0') {
+ first_non_zero++;
+ }
+ if (first_non_zero != 0) {
+ for (int i = first_non_zero; i < *length; ++i) {
+ buffer[i - first_non_zero] = buffer[i];
+ }
+ *length -= first_non_zero;
+ *decimal_point -= first_non_zero;
+ }
+}
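+
+// Example (illustrative): a buffer of "0012300" with *length == 7 and
+// *decimal_point == 5 becomes "123" with *length == 3 and *decimal_point == 3.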
+
+
+bool FastFixedDtoa(double v,
+ int fractional_count,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ const uint32_t kMaxUInt32 = 0xFFFFFFFF;
+ uint64_t significand = Double(v).Significand();
+ int exponent = Double(v).Exponent();
+ // v = significand * 2^exponent (with significand a 53-bit integer).
+ // If the exponent is larger than 20 (i.e. we may have a 73-bit number) then we
+ // don't know how to compute the representation. 2^73 ~= 9.5*10^21.
+ // If necessary this limit could probably be increased, but we don't need
+ // more.
+ if (exponent > 20) return false;
+ if (fractional_count > 20) return false;
+ *length = 0;
+ // At most kDoubleSignificandSize bits of the significand are non-zero.
+ // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
+ // bits: 0..11*..0xxx..53*..xx
+ if (exponent + kDoubleSignificandSize > 64) {
+ // The exponent must be > 11.
+ //
+ // We know that v = significand * 2^exponent.
+ // And the exponent > 11.
+ // We simplify the task by dividing v by 10^17.
+ // The quotient delivers the first digits, and the remainder fits into a 64
+ // bit number.
+ // Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
+ const uint64_t kFive17 = DOUBLE_CONVERSION_UINT64_2PART_C(0xB1, A2BC2EC5); // 5^17
+ uint64_t divisor = kFive17;
+ int divisor_power = 17;
+ uint64_t dividend = significand;
+ uint32_t quotient;
+ uint64_t remainder;
+ // Let v = f * 2^e with f == significand and e == exponent.
+ // Then need q (quotient) and r (remainder) as follows:
+ // v = q * 10^17 + r
+ // f * 2^e = q * 10^17 + r
+ // f * 2^e = q * 5^17 * 2^17 + r
+ // If e > 17 then
+ // f * 2^(e-17) = q * 5^17 + r/2^17
+ // else
+ // f = q * 5^17 * 2^(17-e) + r/2^e
+ if (exponent > divisor_power) {
+ // We only allow exponents of up to 20 and therefore (e - 17) <= 3
+ dividend <<= exponent - divisor_power;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << divisor_power;
+ } else {
+ divisor <<= divisor_power - exponent;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << exponent;
+ }
+ FillDigits32(quotient, buffer, length);
+ FillDigits64FixedLength(remainder, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent >= 0) {
+ // 0 <= exponent <= 11
+ significand <<= exponent;
+ FillDigits64(significand, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent > -kDoubleSignificandSize) {
+ // We have to cut the number.
+ uint64_t integrals = significand >> -exponent;
+ uint64_t fractionals = significand - (integrals << -exponent);
+ if (integrals > kMaxUInt32) {
+ FillDigits64(integrals, buffer, length);
+ } else {
+ FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
+ }
+ *decimal_point = *length;
+ FillFractionals(fractionals, exponent, fractional_count,
+ buffer, length, decimal_point);
+ } else if (exponent < -128) {
+ // This configuration (with at most 20 digits) means that all digits must be
+ // 0.
+ DOUBLE_CONVERSION_ASSERT(fractional_count <= 20);
+ buffer[0] = '\0';
+ *length = 0;
+ *decimal_point = -fractional_count;
+ } else {
+ *decimal_point = 0;
+ FillFractionals(significand, exponent, fractional_count,
+ buffer, length, decimal_point);
+ }
+ TrimZeros(buffer, length, decimal_point);
+ buffer[*length] = '\0';
+ if ((*length) == 0) {
+ // The string is empty and the decimal_point thus has no importance. Mimic
+ // Gay's dtoa and set it to -fractional_count.
+ *decimal_point = -fractional_count;
+ }
+ return true;
+}
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/fixed-dtoa.h b/mfbt/double-conversion/double-conversion/fixed-dtoa.h
new file mode 100644
index 0000000000..3bdd08e21f
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/fixed-dtoa.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_
+#define DOUBLE_CONVERSION_FIXED_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// Produces digits necessary to print a given number with
+// 'fractional_count' digits after the decimal point.
+// The buffer must be big enough to hold the result plus one terminating null
+// character.
+//
+// The produced digits might be too short in which case the caller has to fill
+// the gaps with '0's.
+// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
+// decimal_point = -2.
+// Halfway cases are rounded towards +/-Infinity (away from 0). The call
+// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
+// The returned buffer may contain digits that would be truncated from the
+// shortest representation of the input.
+//
+// This method only works for some parameters. If it can't handle the input it
+// returns false. The output is null-terminated when the function succeeds.
+bool FastFixedDtoa(double v, int fractional_count,
+ Vector<char> buffer, int* length, int* decimal_point);
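+
+// Illustrative usage sketch (editorial, not part of the library API); the
+// buffer size of 64 is a caller's choice and must cover the integral digits,
+// the requested fractional digits and the terminating null:
+//
+//   char scratch[64];
+//   int length, point;
+//   if (FastFixedDtoa(3.25, 2, Vector<char>(scratch, 64), &length, &point)) {
+//     // scratch holds "325", length == 3, point == 1, i.e. "3.25".
+//   }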
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_
diff --git a/mfbt/double-conversion/double-conversion/ieee.h b/mfbt/double-conversion/double-conversion/ieee.h
new file mode 100644
index 0000000000..9203f4d558
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/ieee.h
@@ -0,0 +1,447 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_H_
+#define DOUBLE_CONVERSION_DOUBLE_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
+static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+ static const uint64_t kSignMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000);
+ static const uint64_t kExponentMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000);
+ static const uint64_t kSignificandMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
+ static const uint64_t kHiddenBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00100000, 00000000);
+ static const uint64_t kQuietNanBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00080000, 00000000);
+ static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
+ static const int kSignificandSize = 53;
+ static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
+ static const int kMaxExponent = 0x7FF - kExponentBias;
+
+ Double() : d64_(0) {}
+ explicit Double(double d) : d64_(double_to_uint64(d)) {}
+ explicit Double(uint64_t d64) : d64_(d64) {}
+ explicit Double(DiyFp diy_fp)
+ : d64_(DiyFpToUint64(diy_fp)) {}
+
+ // The value encoded by this Double must be greater than or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+ DOUBLE_CONVERSION_ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // The value encoded by this Double must be strictly greater than 0.
+ DiyFp AsNormalizedDiyFp() const {
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
+ uint64_t f = Significand();
+ int e = Exponent();
+
+ // The current double could be a denormal.
+ while ((f & kHiddenBit) == 0) {
+ f <<= 1;
+ e--;
+ }
+ // Do the final shifts in one go.
+ f <<= DiyFp::kSignificandSize - kSignificandSize;
+ e -= DiyFp::kSignificandSize - kSignificandSize;
+ return DiyFp(f, e);
+ }
+
+ // Returns the double's bits as uint64.
+ uint64_t AsUint64() const {
+ return d64_;
+ }
+
+ // Returns the next greater double. Returns +infinity on input +infinity.
+ double NextDouble() const {
+ if (d64_ == kInfinity) return Double(kInfinity).value();
+ if (Sign() < 0 && Significand() == 0) {
+ // -0.0
+ return 0.0;
+ }
+ if (Sign() < 0) {
+ return Double(d64_ - 1).value();
+ } else {
+ return Double(d64_ + 1).value();
+ }
+ }
+
+ double PreviousDouble() const {
+ if (d64_ == (kInfinity | kSignMask)) return -Infinity();
+ if (Sign() < 0) {
+ return Double(d64_ + 1).value();
+ } else {
+ if (Significand() == 0) return -0.0;
+ return Double(d64_ - 1).value();
+ }
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint64_t d64 = AsUint64();
+ int biased_e =
+ static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint64_t Significand() const {
+ uint64_t d64 = AsUint64();
+ uint64_t significand = d64 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the double is a denormal.
+ bool IsDenormal() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) != 0);
+ }
+
+ bool IsQuietNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
+#else
+ return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
+#endif
+ }
+
+ bool IsSignalingNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
+#else
+ return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
+#endif
+ }
+
+
+ bool IsInfinite() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kSignMask) == 0? 1: -1;
+ }
+
+ // Precondition: the value encoded by this Double must be greater than or
+ // equal to +0.0.
+ DiyFp UpperBoundary() const {
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Double must be greater than 0.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // If the significand is of the form f == 2^p-1, then the lower boundary
+ // is closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ double value() const { return uint64_to_double(d64_); }
+
+ // Returns the significand size for a given order of magnitude.
+ // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude.
+ // This function returns the number of significant binary digits v will have
+ // once it's encoded into a double. In almost all cases this is equal to
+ // kSignificandSize. The only exceptions are denormals. They start with
+ // leading zeroes and their effective significand-size is hence smaller.
+ static int SignificandSizeForOrderOfMagnitude(int order) {
+ if (order >= (kDenormalExponent + kSignificandSize)) {
+ return kSignificandSize;
+ }
+ if (order <= kDenormalExponent) return 0;
+ return order - kDenormalExponent;
+ }
+
+ static double Infinity() {
+ return Double(kInfinity).value();
+ }
+
+ static double NaN() {
+ return Double(kNaN).value();
+ }
+
+ private:
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const uint64_t kInfinity = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000);
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF7FFFF, FFFFFFFF);
+#else
+ static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF80000, 00000000);
+#endif
+
+
+ const uint64_t d64_;
+
+ static uint64_t DiyFpToUint64(DiyFp diy_fp) {
+ uint64_t significand = diy_fp.f();
+ int exponent = diy_fp.e();
+ while (significand > kHiddenBit + kSignificandMask) {
+ significand >>= 1;
+ exponent++;
+ }
+ if (exponent >= kMaxExponent) {
+ return kInfinity;
+ }
+ if (exponent < kDenormalExponent) {
+ return 0;
+ }
+ while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
+ significand <<= 1;
+ exponent--;
+ }
+ uint64_t biased_exponent;
+ if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
+ biased_exponent = 0;
+ } else {
+ biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
+ }
+ return (significand & kSignificandMask) |
+ (biased_exponent << kPhysicalSignificandSize);
+ }
+
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Double);
+};
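+
+// Illustrative check (editorial): Double(0.5) decomposes into significand
+// 2^52 (just the hidden bit) and exponent -53; indeed 2^52 * 2^-53 == 0.5.
+// NextDouble() then returns the neighbouring value 0.5 + 2^-53.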
+
+class Single {
+ public:
+ static const uint32_t kSignMask = 0x80000000;
+ static const uint32_t kExponentMask = 0x7F800000;
+ static const uint32_t kSignificandMask = 0x007FFFFF;
+ static const uint32_t kHiddenBit = 0x00800000;
+ static const uint32_t kQuietNanBit = 0x00400000;
+ static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit.
+ static const int kSignificandSize = 24;
+
+ Single() : d32_(0) {}
+ explicit Single(float f) : d32_(float_to_uint32(f)) {}
+ explicit Single(uint32_t d32) : d32_(d32) {}
+
+ // The value encoded by this Single must be greater than or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+ DOUBLE_CONVERSION_ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // Returns the single's bits as uint32.
+ uint32_t AsUint32() const {
+ return d32_;
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint32_t d32 = AsUint32();
+ int biased_e =
+ static_cast<int>((d32 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint32_t Significand() const {
+ uint32_t d32 = AsUint32();
+ uint32_t significand = d32 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the single is a denormal.
+ bool IsDenormal() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) != 0);
+ }
+
+ bool IsQuietNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ return IsNan() && ((AsUint32() & kQuietNanBit) == 0);
+#else
+ return IsNan() && ((AsUint32() & kQuietNanBit) != 0);
+#endif
+ }
+
+ bool IsSignalingNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ return IsNan() && ((AsUint32() & kQuietNanBit) != 0);
+#else
+ return IsNan() && ((AsUint32() & kQuietNanBit) == 0);
+#endif
+ }
+
+
+ bool IsInfinite() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kSignMask) == 0? 1: -1;
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Single must be greater than 0.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ // Precondition: the value encoded by this Single must be greater than or
+ // equal to +0.0.
+ DiyFp UpperBoundary() const {
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // If the significand is of the form f == 2^p-1, then the lower boundary
+ // is closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ float value() const { return uint32_to_float(d32_); }
+
+ static float Infinity() {
+ return Single(kInfinity).value();
+ }
+
+ static float NaN() {
+ return Single(kNaN).value();
+ }
+
+ private:
+ static const int kExponentBias = 0x7F + kPhysicalSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const int kMaxExponent = 0xFF - kExponentBias;
+ static const uint32_t kInfinity = 0x7F800000;
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+ static const uint32_t kNaN = 0x7FBFFFFF;
+#else
+ static const uint32_t kNaN = 0x7FC00000;
+#endif
+
+ const uint32_t d32_;
+
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Single);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_H_
diff --git a/mfbt/double-conversion/double-conversion/string-to-double.cc b/mfbt/double-conversion/double-conversion/string-to-double.cc
new file mode 100644
index 0000000000..972956ca69
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/string-to-double.cc
@@ -0,0 +1,818 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <climits>
+#include <locale>
+#include <cmath>
+
+#include "string-to-double.h"
+
+#include "ieee.h"
+#include "strtod.h"
+#include "utils.h"
+
+#ifdef _MSC_VER
+# if _MSC_VER >= 1900
+// Fix MSVC >= 2015 (_MSC_VER == 1900) warning
+// C4244: 'argument': conversion from 'const uc16' to 'char', possible loss of data
+// against Advance and friends, when instantiated with **it as char, not uc16.
+ __pragma(warning(disable: 4244))
+# endif
+# if _MSC_VER <= 1700 // VS2012, see IsDecimalDigitForRadix warning fix, below
+# define VS2012_RADIXWARN
+# endif
+#endif
+
+namespace double_conversion {
+
+namespace {
+
+inline char ToLower(char ch) {
+ static const std::ctype<char>& cType =
+ std::use_facet<std::ctype<char> >(std::locale::classic());
+ return cType.tolower(ch);
+}
+
+inline char Pass(char ch) {
+ return ch;
+}
+
+template <class Iterator, class Converter>
+static inline bool ConsumeSubStringImpl(Iterator* current,
+ Iterator end,
+ const char* substring,
+ Converter converter) {
+ DOUBLE_CONVERSION_ASSERT(converter(**current) == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || converter(**current) != *substring) {
+ return false;
+ }
+ }
+ ++*current;
+ return true;
+}
+
+// Consumes the given substring from the iterator.
+// Returns false if the substring does not match.
+template <class Iterator>
+static bool ConsumeSubString(Iterator* current,
+ Iterator end,
+ const char* substring,
+ bool allow_case_insensitivity) {
+ if (allow_case_insensitivity) {
+ return ConsumeSubStringImpl(current, end, substring, ToLower);
+ } else {
+ return ConsumeSubStringImpl(current, end, substring, Pass);
+ }
+}
+
+// Returns true if ch is equal to the first character of str, ignoring case
+// when requested.
+inline bool ConsumeFirstCharacter(char ch,
+ const char* str,
+ bool case_insensitivity) {
+ return case_insensitivity ? ToLower(ch) == str[0] : ch == str[0];
+}
+} // namespace
+
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2^-1074, that is (2^53 - 1) * 5^1074 / 10^1074
+// (768 digits). If we parse a number whose first digits are equal to the
+// midpoint of two adjacent doubles (which can have up to 769 digits), the
+// result must be rounded to the bigger one unless the tail consists of
+// zeros, so we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
+
+
+static const char kWhitespaceTable7[] = { 32, 13, 10, 9, 11, 12 };
+static const int kWhitespaceTable7Length = DOUBLE_CONVERSION_ARRAY_SIZE(kWhitespaceTable7);
+
+
+static const uc16 kWhitespaceTable16[] = {
+ 160, 8232, 8233, 5760, 6158, 8192, 8193, 8194, 8195,
+ 8196, 8197, 8198, 8199, 8200, 8201, 8202, 8239, 8287, 12288, 65279
+};
+static const int kWhitespaceTable16Length = DOUBLE_CONVERSION_ARRAY_SIZE(kWhitespaceTable16);
+
+
+static bool isWhitespace(int x) {
+ if (x < 128) {
+ for (int i = 0; i < kWhitespaceTable7Length; i++) {
+ if (kWhitespaceTable7[i] == x) return true;
+ }
+ } else {
+ for (int i = 0; i < kWhitespaceTable16Length; i++) {
+ if (kWhitespaceTable16[i] == x) return true;
+ }
+ }
+ return false;
+}
+
+
+// Returns true if a non-space character was found and false if the end was
+// reached.
+template <class Iterator>
+static inline bool AdvanceToNonspace(Iterator* current, Iterator end) {
+ while (*current != end) {
+ if (!isWhitespace(**current)) return true;
+ ++*current;
+ }
+ return false;
+}
+
+
+static bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix)
+ || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+ || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool sign) {
+ return sign ? -0.0 : 0.0;
+}
+
+
+// Returns true if 'c' is a decimal digit that is valid for the given radix.
+//
+// The function is small and could be inlined, but VS2012 emitted a warning
+// because it constant-propagated the radix and concluded that the last
+// condition was always true. Moving it into a separate function and
+// suppressing optimisation keeps the compiler from warning.
+#ifdef VS2012_RADIXWARN
+#pragma optimize("",off)
+static bool IsDecimalDigitForRadix(int c, int radix) {
+ return '0' <= c && c <= '9' && (c - '0') < radix;
+}
+#pragma optimize("",on)
+#else
+static inline bool IsDecimalDigitForRadix(int c, int radix) {
+ return '0' <= c && c <= '9' && (c - '0') < radix;
+}
+#endif
+// Returns true if 'c' is a character digit that is valid for the given radix.
+// The 'a_character' should be 'a' or 'A'.
+//
+// The function is small and could be inlined, but VS2012 emitted a warning
+// because it constant-propagated the radix and concluded that the first
+// condition was always false. Moving it into a separate function keeps the
+// compiler from warning.
+static bool IsCharacterDigitForRadix(int c, int radix, char a_character) {
+ return radix > 10 && c >= a_character && c < a_character + radix - 10;
+}
+
+// Advances *it past the current character, also consuming a digit-group
+// separator when it appears between two digits. Returns true when the
+// iterator is equal to end.
+template<class Iterator>
+static bool Advance(Iterator* it, uc16 separator, int base, Iterator& end) {
+ if (separator == StringToDoubleConverter::kNoSeparator) {
+ ++(*it);
+ return *it == end;
+ }
+ if (!isDigit(**it, base)) {
+ ++(*it);
+ return *it == end;
+ }
+ ++(*it);
+ if (*it == end) return true;
+ if (*it + 1 == end) return false;
+ if (**it == separator && isDigit(*(*it + 1), base)) {
+ ++(*it);
+ }
+ return *it == end;
+}
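+
+// Example (illustrative): with separator == '\'' and base 10, Advance steps
+// over "1'000" as the digits 1, 0, 0, 0 because the separator is only skipped
+// when it sits between two digits; "1''000" or a trailing "1'" do not have
+// their separators consumed.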
+
+// Checks whether the string in the range start-end is a hex-float string.
+// This function assumes that the leading '0x'/'0X' is already consumed.
+//
+// Hex float strings are of one of the following forms:
+// - hex_digits+ 'p' ('+'|'-')? exponent_digits+
+// - hex_digits* '.' hex_digits+ 'p' ('+'|'-')? exponent_digits+
+// - hex_digits+ '.' 'p' ('+'|'-')? exponent_digits+
+template<class Iterator>
+static bool IsHexFloatString(Iterator start,
+ Iterator end,
+ uc16 separator,
+ bool allow_trailing_junk) {
+ DOUBLE_CONVERSION_ASSERT(start != end);
+
+ Iterator current = start;
+
+ bool saw_digit = false;
+ while (isDigit(*current, 16)) {
+ saw_digit = true;
+ if (Advance(&current, separator, 16, end)) return false;
+ }
+ if (*current == '.') {
+ if (Advance(&current, separator, 16, end)) return false;
+ while (isDigit(*current, 16)) {
+ saw_digit = true;
+ if (Advance(&current, separator, 16, end)) return false;
+ }
+ }
+ if (!saw_digit) return false;
+ if (*current != 'p' && *current != 'P') return false;
+ if (Advance(&current, separator, 16, end)) return false;
+ if (*current == '+' || *current == '-') {
+ if (Advance(&current, separator, 16, end)) return false;
+ }
+ if (!isDigit(*current, 10)) return false;
+ if (Advance(&current, separator, 16, end)) return true;
+ while (isDigit(*current, 10)) {
+ if (Advance(&current, separator, 16, end)) return true;
+ }
+ return allow_trailing_junk || !AdvanceToNonspace(&current, end);
+}
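+
+// Example (illustrative): "1.8p1" is a valid hex-float body (after the
+// already-consumed "0x" prefix) and denotes (1 + 8/16) * 2^1 == 3.0; "1.8"
+// alone is rejected because the binary exponent marker 'p' is mandatory.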
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+//
+// If parse_as_hex_float is true, then the string must be a valid
+// hex-float.
+template <int radix_log_2, class Iterator>
+static double RadixStringToIeee(Iterator* current,
+ Iterator end,
+ bool sign,
+ uc16 separator,
+ bool parse_as_hex_float,
+ bool allow_trailing_junk,
+ double junk_string_value,
+ bool read_as_double,
+ bool* result_is_junk) {
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ DOUBLE_CONVERSION_ASSERT(!parse_as_hex_float ||
+ IsHexFloatString(*current, end, separator, allow_trailing_junk));
+
+ const int kDoubleSize = Double::kSignificandSize;
+ const int kSingleSize = Single::kSignificandSize;
+ const int kSignificandSize = read_as_double? kDoubleSize: kSingleSize;
+
+ *result_is_junk = true;
+
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+ // Whether we have encountered a '.' and are parsing the decimal digits.
+ // Only relevant if parse_as_hex_float is true.
+ bool post_decimal = false;
+
+ // Skip leading 0s.
+ while (**current == '0') {
+ if (Advance(current, separator, radix, end)) {
+ *result_is_junk = false;
+ return SignedZero(sign);
+ }
+ }
+
+ while (true) {
+ int digit;
+ if (IsDecimalDigitForRadix(**current, radix)) {
+ digit = static_cast<char>(**current) - '0';
+ if (post_decimal) exponent -= radix_log_2;
+ } else if (IsCharacterDigitForRadix(**current, radix, 'a')) {
+ digit = static_cast<char>(**current) - 'a' + 10;
+ if (post_decimal) exponent -= radix_log_2;
+ } else if (IsCharacterDigitForRadix(**current, radix, 'A')) {
+ digit = static_cast<char>(**current) - 'A' + 10;
+ if (post_decimal) exponent -= radix_log_2;
+ } else if (parse_as_hex_float && **current == '.') {
+ post_decimal = true;
+ Advance(current, separator, radix, end);
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ continue;
+ } else if (parse_as_hex_float && (**current == 'p' || **current == 'P')) {
+ break;
+ } else {
+ if (allow_trailing_junk || !AdvanceToNonspace(current, end)) {
+ break;
+ } else {
+ return junk_string_value;
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> kSignificandSize);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent += overflow_bits_count;
+
+ bool zero_tail = true;
+ for (;;) {
+ if (Advance(current, separator, radix, end)) break;
+ if (parse_as_hex_float && **current == '.') {
+ // Just run over the '.'. We are just trying to see whether there is
+ // a non-zero digit somewhere.
+ Advance(current, separator, radix, end);
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ post_decimal = true;
+ }
+ if (!isDigit(**current, radix)) break;
+ zero_tail = zero_tail && **current == '0';
+ if (!post_decimal) exponent += radix_log_2;
+ }
+
+ if (!parse_as_hex_float &&
+ !allow_trailing_junk &&
+ AdvanceToNonspace(current, end)) {
+ return junk_string_value;
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+ // Round to even, for consistency with decimals: the half-way case
+ // rounds up if the significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & ((int64_t)1 << kSignificandSize)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
+ }
+ if (Advance(current, separator, radix, end)) break;
+ }
+
+ DOUBLE_CONVERSION_ASSERT(number < ((int64_t)1 << kSignificandSize));
+ DOUBLE_CONVERSION_ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ *result_is_junk = false;
+
+ if (parse_as_hex_float) {
+ DOUBLE_CONVERSION_ASSERT(**current == 'p' || **current == 'P');
+ Advance(current, separator, radix, end);
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ bool is_negative = false;
+ if (**current == '+') {
+ Advance(current, separator, radix, end);
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ } else if (**current == '-') {
+ is_negative = true;
+ Advance(current, separator, radix, end);
+ DOUBLE_CONVERSION_ASSERT(*current != end);
+ }
+ int written_exponent = 0;
+ while (IsDecimalDigitForRadix(**current, 10)) {
+ // No need to read exponents if they are too big. That could potentially overflow
+ // the `written_exponent` variable.
+ if (abs(written_exponent) <= 100 * Double::kMaxExponent) {
+ written_exponent = 10 * written_exponent + **current - '0';
+ }
+ if (Advance(current, separator, radix, end)) break;
+ }
+ if (is_negative) written_exponent = -written_exponent;
+ exponent += written_exponent;
+ }
+
+ if (exponent == 0 || number == 0) {
+ if (sign) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
+ }
+
+ DOUBLE_CONVERSION_ASSERT(number != 0);
+ double result = Double(DiyFp(number, exponent)).value();
+ return sign ? -result : result;
+}
+
+template <class Iterator>
+double StringToDoubleConverter::StringToIeee(
+ Iterator input,
+ int length,
+ bool read_as_double,
+ int* processed_characters_count) const {
+ Iterator current = input;
+ Iterator end = input + length;
+
+ *processed_characters_count = 0;
+
+ const bool allow_trailing_junk = (flags_ & ALLOW_TRAILING_JUNK) != 0;
+ const bool allow_leading_spaces = (flags_ & ALLOW_LEADING_SPACES) != 0;
+ const bool allow_trailing_spaces = (flags_ & ALLOW_TRAILING_SPACES) != 0;
+ const bool allow_spaces_after_sign = (flags_ & ALLOW_SPACES_AFTER_SIGN) != 0;
+ const bool allow_case_insensitivity = (flags_ & ALLOW_CASE_INSENSITIVITY) != 0;
+
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+ // 1. Each '++current' statement is followed by check for equality to 'end'.
+ // 2. If AdvanceToNonspace returned false then current == end.
+ // 3. If 'current' becomes equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (current == end) return empty_string_value_;
+
+ if (allow_leading_spaces || allow_trailing_spaces) {
+ if (!AdvanceToNonspace(&current, end)) {
+ *processed_characters_count = static_cast<int>(current - input);
+ return empty_string_value_;
+ }
+ if (!allow_leading_spaces && (input != current)) {
+ // No leading spaces allowed, but AdvanceToNonspace moved forward.
+ return junk_string_value_;
+ }
+ }
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ bool sign = false;
+
+ if (*current == '+' || *current == '-') {
+ sign = (*current == '-');
+ ++current;
+ Iterator next_non_space = current;
+ // Skip following spaces (if allowed).
+ if (!AdvanceToNonspace(&next_non_space, end)) return junk_string_value_;
+ if (!allow_spaces_after_sign && (current != next_non_space)) {
+ return junk_string_value_;
+ }
+ current = next_non_space;
+ }
+
+ if (infinity_symbol_ != DOUBLE_CONVERSION_NULLPTR) {
+ if (ConsumeFirstCharacter(*current, infinity_symbol_, allow_case_insensitivity)) {
+ if (!ConsumeSubString(&current, end, infinity_symbol_, allow_case_insensitivity)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ *processed_characters_count = static_cast<int>(current - input);
+ return sign ? -Double::Infinity() : Double::Infinity();
+ }
+ }
+
+ if (nan_symbol_ != DOUBLE_CONVERSION_NULLPTR) {
+ if (ConsumeFirstCharacter(*current, nan_symbol_, allow_case_insensitivity)) {
+ if (!ConsumeSubString(&current, end, nan_symbol_, allow_case_insensitivity)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ *processed_characters_count = static_cast<int>(current - input);
+ return sign ? -Double::NaN() : Double::NaN();
+ }
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ if (Advance(&current, separator_, 10, end)) {
+ *processed_characters_count = static_cast<int>(current - input);
+ return SignedZero(sign);
+ }
+
+ leading_zero = true;
+
+ // It could be a hexadecimal value.
+ if (((flags_ & ALLOW_HEX) || (flags_ & ALLOW_HEX_FLOATS)) &&
+ (*current == 'x' || *current == 'X')) {
+ ++current;
+
+ if (current == end) return junk_string_value_; // "0x"
+
+ bool parse_as_hex_float = (flags_ & ALLOW_HEX_FLOATS) &&
+ IsHexFloatString(current, end, separator_, allow_trailing_junk);
+
+ if (!parse_as_hex_float && !isDigit(*current, 16)) {
+ return junk_string_value_;
+ }
+
+ bool result_is_junk;
+ double result = RadixStringToIeee<4>(&current,
+ end,
+ sign,
+ separator_,
+ parse_as_hex_float,
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &result_is_junk);
+ if (!result_is_junk) {
+ if (allow_trailing_spaces) AdvanceToNonspace(&current, end);
+ *processed_characters_count = static_cast<int>(current - input);
+ }
+ return result;
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ if (Advance(&current, separator_, 10, end)) {
+ *processed_characters_count = static_cast<int>(current - input);
+ return SignedZero(sign);
+ }
+ }
+ }
+
+ bool octal = leading_zero && (flags_ & ALLOW_OCTALS) != 0;
+
+ // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ DOUBLE_CONVERSION_STACK_UNINITIALIZED char
+ buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ DOUBLE_CONVERSION_ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ // Will later check if it's an octal in the buffer.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ if (Advance(&current, separator_, 10, end)) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+
+ if (Advance(&current, separator_, 10, end)) {
+ if (significant_digits == 0 && !leading_zero) {
+ return junk_string_value_;
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ if (Advance(&current, separator_, 10, end)) {
+ *processed_characters_count = static_cast<int>(current - input);
+ return SignedZero(sign);
+ }
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is a fractional part.
+ // We don't emit a '.', but adjust the exponent instead.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ DOUBLE_CONVERSION_ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ exponent--;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ if (Advance(&current, separator_, 10, end)) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+ // If leading_zero is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return junk_string_value_;
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+ Iterator junk_begin = current;
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ current = junk_begin;
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+ char exponent_sign = '+';
+ if (*current == '+' || *current == '-') {
+ exponent_sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ current = junk_begin;
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ current = junk_begin;
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ DOUBLE_CONVERSION_ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+ // Check overflow.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10
+ && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (exponent_sign == '-' ? -num : num);
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+ if (allow_trailing_spaces) {
+ AdvanceToNonspace(&current, end);
+ }
+
+ parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ double result;
+ bool result_is_junk;
+ char* start = buffer;
+ result = RadixStringToIeee<3>(&start,
+ buffer + buffer_pos,
+ sign,
+ separator_,
+ false, // Don't parse as hex_float.
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &result_is_junk);
+ DOUBLE_CONVERSION_ASSERT(!result_is_junk);
+ *processed_characters_count = static_cast<int>(current - input);
+ return result;
+ }
+
+ if (nonzero_digit_dropped) {
+ buffer[buffer_pos++] = '1';
+ exponent--;
+ }
+
+ DOUBLE_CONVERSION_ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ // Code above ensures there are no leading zeros and the buffer has fewer than
+ // kMaxSignificantDigits characters. Trim trailing zeros.
+ Vector<const char> chars(buffer, buffer_pos);
+ chars = TrimTrailingZeros(chars);
+ exponent += buffer_pos - chars.length();
+
+ double converted;
+ if (read_as_double) {
+ converted = StrtodTrimmed(chars, exponent);
+ } else {
+ converted = StrtofTrimmed(chars, exponent);
+ }
+ *processed_characters_count = static_cast<int>(current - input);
+ return sign? -converted: converted;
+}
+
+
+double StringToDoubleConverter::StringToDouble(
+ const char* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToIeee(buffer, length, true, processed_characters_count);
+}
+
+
+double StringToDoubleConverter::StringToDouble(
+ const uc16* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToIeee(buffer, length, true, processed_characters_count);
+}
+
+
+float StringToDoubleConverter::StringToFloat(
+ const char* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return static_cast<float>(StringToIeee(buffer, length, false,
+ processed_characters_count));
+}
+
+
+float StringToDoubleConverter::StringToFloat(
+ const uc16* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return static_cast<float>(StringToIeee(buffer, length, false,
+ processed_characters_count));
+}
+
+
+template<>
+double StringToDoubleConverter::StringTo<double>(
+ const char* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToDouble(buffer, length, processed_characters_count);
+}
+
+
+template<>
+float StringToDoubleConverter::StringTo<float>(
+ const char* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToFloat(buffer, length, processed_characters_count);
+}
+
+
+template<>
+double StringToDoubleConverter::StringTo<double>(
+ const uc16* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToDouble(buffer, length, processed_characters_count);
+}
+
+
+template<>
+float StringToDoubleConverter::StringTo<float>(
+ const uc16* buffer,
+ int length,
+ int* processed_characters_count) const {
+ return StringToFloat(buffer, length, processed_characters_count);
+}
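+
+// Illustrative usage sketch (editorial, not part of this file): a converter
+// that accepts trailing junk. The empty-string and junk values (0.0 and 1.0
+// here) are caller choices passed to the constructor.
+//
+//   StringToDoubleConverter conv(StringToDoubleConverter::ALLOW_TRAILING_JUNK,
+//                                0.0, 1.0, "Infinity", "NaN");
+//   int used;
+//   double d = conv.StringToDouble("3.14 apples", 11, &used);
+//   // d == 3.14 and used == 4: only "3.14" was consumed.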
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/string-to-double.h b/mfbt/double-conversion/double-conversion/string-to-double.h
new file mode 100644
index 0000000000..24972da26d
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/string-to-double.h
@@ -0,0 +1,239 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
+#define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
+
+#include "mozilla/Types.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+class StringToDoubleConverter {
+ public:
+ // Enumeration for allowing octals and ignoring junk when converting
+ // strings to numbers.
+ enum Flags {
+ NO_FLAGS = 0,
+ ALLOW_HEX = 1,
+ ALLOW_OCTALS = 2,
+ ALLOW_TRAILING_JUNK = 4,
+ ALLOW_LEADING_SPACES = 8,
+ ALLOW_TRAILING_SPACES = 16,
+ ALLOW_SPACES_AFTER_SIGN = 32,
+ ALLOW_CASE_INSENSITIVITY = 64,
+ ALLOW_CASE_INSENSIBILITY = 64, // Deprecated
+ ALLOW_HEX_FLOATS = 128,
+ };
+
+ static const uc16 kNoSeparator = '\0';
+
+  // Flags should be a bitwise-or combination of the Flags enum values.
+ // - NO_FLAGS: no special flags.
+ // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
+ // Ex: StringToDouble("0x1234") -> 4660.0
+ // In StringToDouble("0x1234.56") the characters ".56" are trailing
+ // junk. The result of the call is hence dependent on
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
+ // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
+ // the string will not be parsed as "0" followed by junk.
+ //
+ // - ALLOW_OCTALS: recognizes the prefix "0" for octals:
+ // If a sequence of octal digits starts with '0', then the number is
+  //      read as an octal integer. Octal numbers may only be integers.
+ // Ex: StringToDouble("01234") -> 668.0
+ // StringToDouble("012349") -> 12349.0 // Not a sequence of octal
+ // // digits.
+ // In StringToDouble("01234.56") the characters ".56" are trailing
+ // junk. The result of the call is hence dependent on
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
+ // In StringToDouble("01234e56") the characters "e56" are trailing
+ // junk, too.
+ // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
+ // a double literal.
+ // - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces,
+ // new-lines, and tabs.
+ // - ALLOW_TRAILING_SPACES: ignore trailing whitespace.
+ // - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign.
+ // Ex: StringToDouble("- 123.2") -> -123.2.
+ // StringToDouble("+ 123.2") -> 123.2
+ // - ALLOW_CASE_INSENSITIVITY: ignore case of characters for special values:
+ // infinity and nan.
+ // - ALLOW_HEX_FLOATS: allows hexadecimal float literals.
+ // This *must* start with "0x" and separate the exponent with "p".
+ // Examples: 0x1.2p3 == 9.0
+ // 0x10.1p0 == 16.0625
+  //             ALLOW_HEX and ALLOW_HEX_FLOATS are independent.
+ //
+ // empty_string_value is returned when an empty string is given as input.
+ // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
+ // containing only spaces is converted to the 'empty_string_value', too.
+ //
+ // junk_string_value is returned when
+ // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
+ // part of a double-literal) is found.
+ // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
+ // double literal.
+ //
+ // infinity_symbol and nan_symbol are strings that are used to detect
+ // inputs that represent infinity and NaN. They can be null, in which case
+ // they are ignored.
+  // The conversion routine first reads any possible signs. Then it compares
+  // the following character of the input string with the first character of
+  // the infinity and nan symbols. If either matches, the function assumes
+  // that a match has been found, and expects the remaining input characters
+  // to match the remaining characters of the special-value symbol.
+  // This means that the following restrictions apply to special-value symbols:
+  // - they must not start with signs ('+' or '-'),
+  // - they must not have the same first character,
+  // - they must not start with digits.
+ //
+ // If the separator character is not kNoSeparator, then that specific
+  // character is ignored when between two valid digits of the significand.
+ // It is not allowed to appear in the exponent.
+ // It is not allowed to lead or trail the number.
+ // It is not allowed to appear twice next to each other.
+ //
+ // Examples:
+ // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
+ // empty_string_value = 0.0,
+ // junk_string_value = NaN,
+ // infinity_symbol = "infinity",
+ // nan_symbol = "nan":
+ // StringToDouble("0x1234") -> 4660.0.
+ // StringToDouble("0x1234K") -> 4660.0.
+ // StringToDouble("") -> 0.0 // empty_string_value.
+ // StringToDouble(" ") -> NaN // junk_string_value.
+ // StringToDouble(" 1") -> NaN // junk_string_value.
+ // StringToDouble("0x") -> NaN // junk_string_value.
+ // StringToDouble("-123.45") -> -123.45.
+ // StringToDouble("--123.45") -> NaN // junk_string_value.
+ // StringToDouble("123e45") -> 123e45.
+ // StringToDouble("123E45") -> 123e45.
+ // StringToDouble("123e+45") -> 123e45.
+ // StringToDouble("123E-45") -> 123e-45.
+ // StringToDouble("123e") -> 123.0 // trailing junk ignored.
+ // StringToDouble("123e-") -> 123.0 // trailing junk ignored.
+ // StringToDouble("+NaN") -> NaN // NaN string literal.
+ // StringToDouble("-infinity") -> -inf. // infinity literal.
+ // StringToDouble("Infinity") -> NaN // junk_string_value.
+ //
+  //  flags = ALLOW_OCTALS | ALLOW_LEADING_SPACES,
+ // empty_string_value = 0.0,
+ // junk_string_value = NaN,
+ // infinity_symbol = NULL,
+ // nan_symbol = NULL:
+ // StringToDouble("0x1234") -> NaN // junk_string_value.
+ // StringToDouble("01234") -> 668.0.
+ // StringToDouble("") -> 0.0 // empty_string_value.
+ // StringToDouble(" ") -> 0.0 // empty_string_value.
+ // StringToDouble(" 1") -> 1.0
+ // StringToDouble("0x") -> NaN // junk_string_value.
+ // StringToDouble("0123e45") -> NaN // junk_string_value.
+ // StringToDouble("01239E45") -> 1239e45.
+ // StringToDouble("-infinity") -> NaN // junk_string_value.
+ // StringToDouble("NaN") -> NaN // junk_string_value.
+ //
+ // flags = NO_FLAGS,
+ // separator = ' ':
+ // StringToDouble("1 2 3 4") -> 1234.0
+ // StringToDouble("1 2") -> NaN // junk_string_value
+ // StringToDouble("1 000 000.0") -> 1000000.0
+ // StringToDouble("1.000 000") -> 1.0
+ // StringToDouble("1.0e1 000") -> NaN // junk_string_value
+ StringToDoubleConverter(int flags,
+ double empty_string_value,
+ double junk_string_value,
+ const char* infinity_symbol,
+ const char* nan_symbol,
+ uc16 separator = kNoSeparator)
+ : flags_(flags),
+ empty_string_value_(empty_string_value),
+ junk_string_value_(junk_string_value),
+ infinity_symbol_(infinity_symbol),
+ nan_symbol_(nan_symbol),
+ separator_(separator) {
+ }
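+
+  // A minimal usage sketch (illustrative values, not part of the upstream
+  // documentation):
+  //
+  //   StringToDoubleConverter conv(
+  //       StringToDoubleConverter::ALLOW_TRAILING_JUNK,
+  //       0.0,   // empty_string_value
+  //       0.0,   // junk_string_value
+  //       "Infinity", "NaN");
+  //   int processed;
+  //   double d = conv.StringToDouble("3.14xyz", 7, &processed);
+  //   // d == 3.14, processed == 4: trailing junk is never counted.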
+
+ // Performs the conversion.
+ // The output parameter 'processed_characters_count' is set to the number
+ // of characters that have been processed to read the number.
+  // Spaces that are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
+ // in the 'processed_characters_count'. Trailing junk is never included.
+ MFBT_API double StringToDouble(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble above but for 16 bit characters.
+ MFBT_API double StringToDouble(const uc16* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble but reads a float.
+ // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
+ // due to potential double-rounding.
+ MFBT_API float StringToFloat(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToFloat above but for 16 bit characters.
+ MFBT_API float StringToFloat(const uc16* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringToDouble for T = double, and StringToFloat for T = float.
+ template <typename T>
+ T StringTo(const char* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ // Same as StringTo above but for 16 bit characters.
+ template <typename T>
+ T StringTo(const uc16* buffer,
+ int length,
+ int* processed_characters_count) const;
+
+ private:
+ const int flags_;
+ const double empty_string_value_;
+ const double junk_string_value_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+ const uc16 separator_;
+
+ template <class Iterator>
+ double StringToIeee(Iterator start_pointer,
+ int length,
+ bool read_as_double,
+ int* processed_characters_count) const;
+
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
diff --git a/mfbt/double-conversion/double-conversion/strtod.cc b/mfbt/double-conversion/double-conversion/strtod.cc
new file mode 100644
index 0000000000..b9d8443c24
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/strtod.cc
@@ -0,0 +1,610 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <climits>
+#include <cstdarg>
+
+#include "bignum.h"
+#include "cached-powers.h"
+#include "ieee.h"
+#include "strtod.h"
+
+namespace double_conversion {
+
+#if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+// 2^53 = 9007199254740992.
+// Any integer with at most 15 decimal digits will hence fit into a double
+// (which has a 53bit significand) without loss of precision.
+static const int kMaxExactDoubleIntegerDecimalDigits = 15;
+#endif // #if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+// 2^64 = 18446744073709551616 > 10^19
+static const int kMaxUint64DecimalDigits = 19;
+
+// Max double: 1.7976931348623157 x 10^308
+// Min non-zero double: 4.9406564584124654 x 10^-324
+// Any x >= 10^309 is interpreted as +infinity.
+// Any x <= 10^-324 is interpreted as 0.
+// Note that 2.5e-324 (despite being smaller than the min double) will be read
+// as non-zero (equal to the min non-zero double).
+static const int kMaxDecimalPower = 309;
+static const int kMinDecimalPower = -324;
+
+// 2^64 = 18446744073709551616
+static const uint64_t kMaxUint64 = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
+
+
+#if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+static const double exact_powers_of_ten[] = {
+ 1.0, // 10^0
+ 10.0,
+ 100.0,
+ 1000.0,
+ 10000.0,
+ 100000.0,
+ 1000000.0,
+ 10000000.0,
+ 100000000.0,
+ 1000000000.0,
+ 10000000000.0, // 10^10
+ 100000000000.0,
+ 1000000000000.0,
+ 10000000000000.0,
+ 100000000000000.0,
+ 1000000000000000.0,
+ 10000000000000000.0,
+ 100000000000000000.0,
+ 1000000000000000000.0,
+ 10000000000000000000.0,
+ 100000000000000000000.0, // 10^20
+ 1000000000000000000000.0,
+ // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+ 10000000000000000000000.0
+};
+static const int kExactPowersOfTenSize = DOUBLE_CONVERSION_ARRAY_SIZE(exact_powers_of_ten);
+#endif // #if defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+
+// Maximum number of significant digits in the decimal representation.
+// In fact the value is 772 (see string-to-double.cc), but to give us some margin
+// we round up to 780.
+static const int kMaxSignificantDecimalDigits = 780;
+
+static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
+ for (int i = 0; i < buffer.length(); i++) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(i, buffer.length());
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+static void CutToMaxSignificantDigits(Vector<const char> buffer,
+ int exponent,
+ char* significant_buffer,
+ int* significant_exponent) {
+ for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
+ significant_buffer[i] = buffer[i];
+ }
+ // The input buffer has been trimmed. Therefore the last digit must be
+ // different from '0'.
+ DOUBLE_CONVERSION_ASSERT(buffer[buffer.length() - 1] != '0');
+ // Set the last digit to be non-zero. This is sufficient to guarantee
+ // correct rounding.
+ significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
+ *significant_exponent =
+ exponent + (buffer.length() - kMaxSignificantDecimalDigits);
+}
+
+
+// Trims the buffer and cuts it to at most kMaxSignificantDecimalDigits.
+// If possible the input-buffer is reused, but if the buffer needs to be
+// modified (due to cutting), then the input needs to be copied into the
+// buffer_copy_space.
+static void TrimAndCut(Vector<const char> buffer, int exponent,
+ char* buffer_copy_space, int space_size,
+ Vector<const char>* trimmed, int* updated_exponent) {
+ Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+ Vector<const char> right_trimmed = TrimTrailingZeros(left_trimmed);
+ exponent += left_trimmed.length() - right_trimmed.length();
+ if (right_trimmed.length() > kMaxSignificantDecimalDigits) {
+ (void) space_size; // Mark variable as used.
+ DOUBLE_CONVERSION_ASSERT(space_size >= kMaxSignificantDecimalDigits);
+ CutToMaxSignificantDigits(right_trimmed, exponent,
+ buffer_copy_space, updated_exponent);
+ *trimmed = Vector<const char>(buffer_copy_space,
+ kMaxSignificantDecimalDigits);
+ } else {
+ *trimmed = right_trimmed;
+ *updated_exponent = exponent;
+ }
+}
+
+
+// Reads digits from the buffer and converts them to a uint64.
+// Reads in as many digits as fit into a uint64.
+// When the string starts with "1844674407370955161" no further digit is read.
+// Since 2^64 = 18446744073709551616 it would still be possible to read
+// another digit if it were less than or equal to 6, but this would
+// complicate the code.
+static uint64_t ReadUint64(Vector<const char> buffer,
+ int* number_of_read_digits) {
+ uint64_t result = 0;
+ int i = 0;
+ while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
+ int digit = buffer[i++] - '0';
+ DOUBLE_CONVERSION_ASSERT(0 <= digit && digit <= 9);
+ result = 10 * result + digit;
+ }
+ *number_of_read_digits = i;
+ return result;
+}
+
+
+// Reads a DiyFp from the buffer.
+// The returned DiyFp is not necessarily normalized.
+// If remaining_decimals is zero then the returned DiyFp is accurate.
+// Otherwise it has been rounded and has an error of at most 1/2 ulp.
+static void ReadDiyFp(Vector<const char> buffer,
+ DiyFp* result,
+ int* remaining_decimals) {
+ int read_digits;
+ uint64_t significand = ReadUint64(buffer, &read_digits);
+ if (buffer.length() == read_digits) {
+ *result = DiyFp(significand, 0);
+ *remaining_decimals = 0;
+ } else {
+ // Round the significand.
+ if (buffer[read_digits] >= '5') {
+ significand++;
+ }
+ // Compute the binary exponent.
+ int exponent = 0;
+ *result = DiyFp(significand, exponent);
+ *remaining_decimals = buffer.length() - read_digits;
+ }
+}
+
+
+static bool DoubleStrtod(Vector<const char> trimmed,
+ int exponent,
+ double* result) {
+#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+ // Avoid "unused parameter" warnings
+ (void) trimmed;
+ (void) exponent;
+ (void) result;
+ // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+ // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+ // result is not accurate.
+ // We know that Windows32 uses 64 bits and is therefore accurate.
+ return false;
+#else
+ if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
+ int read_digits;
+ // The trimmed input fits into a double.
+ // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
+ // can compute the result-double simply by multiplying (resp. dividing) the
+ // two numbers.
+ // This is possible because IEEE guarantees that floating-point operations
+ // return the best possible approximation.
+ if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
+ // 10^-exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ DOUBLE_CONVERSION_ASSERT(read_digits == trimmed.length());
+ *result /= exact_powers_of_ten[-exponent];
+ return true;
+ }
+ if (0 <= exponent && exponent < kExactPowersOfTenSize) {
+ // 10^exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ DOUBLE_CONVERSION_ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[exponent];
+ return true;
+ }
+ int remaining_digits =
+ kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
+ if ((0 <= exponent) &&
+ (exponent - remaining_digits < kExactPowersOfTenSize)) {
+ // The trimmed string was short and we can multiply it with
+ // 10^remaining_digits. As a result the remaining exponent now fits
+ // into a double too.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ DOUBLE_CONVERSION_ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[remaining_digits];
+ *result *= exact_powers_of_ten[exponent - remaining_digits];
+ return true;
+ }
+ }
+ return false;
+#endif
+}
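+
+// For example (assuming DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS is
+// defined): a trimmed buffer "123" with exponent 2 is computed above as
+// 123.0 * 100.0, which is exact since both factors are exactly
+// representable doubles.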
+
+
+// Returns 10^exponent as an exact DiyFp.
+// The given exponent must be in the range [1; kDecimalExponentDistance[.
+static DiyFp AdjustmentPowerOfTen(int exponent) {
+ DOUBLE_CONVERSION_ASSERT(0 < exponent);
+ DOUBLE_CONVERSION_ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
+ // Simply hardcode the remaining powers for the given decimal exponent
+ // distance.
+ DOUBLE_CONVERSION_ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
+ switch (exponent) {
+ case 1: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0xa0000000, 00000000), -60);
+ case 2: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0xc8000000, 00000000), -57);
+ case 3: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0xfa000000, 00000000), -54);
+ case 4: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0x9c400000, 00000000), -50);
+ case 5: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0xc3500000, 00000000), -47);
+ case 6: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0xf4240000, 00000000), -44);
+ case 7: return DiyFp(DOUBLE_CONVERSION_UINT64_2PART_C(0x98968000, 00000000), -40);
+ default:
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+}
+
+
+// If the function returns true then the result is the correct double.
+// Otherwise it is either the correct double or the double that is just below
+// the correct double.
+static bool DiyFpStrtod(Vector<const char> buffer,
+ int exponent,
+ double* result) {
+ DiyFp input;
+ int remaining_decimals;
+ ReadDiyFp(buffer, &input, &remaining_decimals);
+ // Since we may have dropped some digits the input is not accurate.
+  // If remaining_decimals is different from 0, then the error is at most
+  // 0.5 ulp (unit in the last place).
+ // We don't want to deal with fractions and therefore keep a common
+ // denominator.
+ const int kDenominatorLog = 3;
+ const int kDenominator = 1 << kDenominatorLog;
+ // Move the remaining decimals into the exponent.
+ exponent += remaining_decimals;
+ uint64_t error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
+
+ int old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ DOUBLE_CONVERSION_ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+ if (exponent < PowersOfTenCache::kMinDecimalExponent) {
+ *result = 0.0;
+ return true;
+ }
+ DiyFp cached_power;
+ int cached_decimal_exponent;
+ PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
+ &cached_power,
+ &cached_decimal_exponent);
+
+ if (cached_decimal_exponent != exponent) {
+ int adjustment_exponent = exponent - cached_decimal_exponent;
+ DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
+ input.Multiply(adjustment_power);
+ if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
+ // The product of input with the adjustment power fits into a 64 bit
+ // integer.
+ DOUBLE_CONVERSION_ASSERT(DiyFp::kSignificandSize == 64);
+ } else {
+ // The adjustment power is exact. There is hence only an error of 0.5.
+ error += kDenominator / 2;
+ }
+ }
+
+ input.Multiply(cached_power);
+ // The error introduced by a multiplication of a*b equals
+ // error_a + error_b + error_a*error_b/2^64 + 0.5
+ // Substituting a with 'input' and b with 'cached_power' we have
+ // error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ // error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
+ int error_b = kDenominator / 2;
+ int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
+ int fixed_error = kDenominator / 2;
+ error += error_b + error_ab + fixed_error;
+
+ old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ // See if the double's significand changes if we add/subtract the error.
+ int order_of_magnitude = DiyFp::kSignificandSize + input.e();
+ int effective_significand_size =
+ Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
+ int precision_digits_count =
+ DiyFp::kSignificandSize - effective_significand_size;
+ if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
+ // This can only happen for very small denormals. In this case the
+    // half-way multiplied by the denominator exceeds the range of a uint64.
+ // Simply shift everything to the right.
+ int shift_amount = (precision_digits_count + kDenominatorLog) -
+ DiyFp::kSignificandSize + 1;
+ input.set_f(input.f() >> shift_amount);
+ input.set_e(input.e() + shift_amount);
+ // We add 1 for the lost precision of error, and kDenominator for
+ // the lost precision of input.f().
+ error = (error >> shift_amount) + 1 + kDenominator;
+ precision_digits_count -= shift_amount;
+ }
+ // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
+ DOUBLE_CONVERSION_ASSERT(DiyFp::kSignificandSize == 64);
+ DOUBLE_CONVERSION_ASSERT(precision_digits_count < 64);
+ uint64_t one64 = 1;
+ uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
+ uint64_t precision_bits = input.f() & precision_bits_mask;
+ uint64_t half_way = one64 << (precision_digits_count - 1);
+ precision_bits *= kDenominator;
+ half_way *= kDenominator;
+ DiyFp rounded_input(input.f() >> precision_digits_count,
+ input.e() + precision_digits_count);
+ if (precision_bits >= half_way + error) {
+ rounded_input.set_f(rounded_input.f() + 1);
+ }
+  // If the last_bits are too close to the half-way case then we are too
+ // inaccurate and round down. In this case we return false so that we can
+ // fall back to a more precise algorithm.
+
+ *result = Double(rounded_input).value();
+ if (half_way - error < precision_bits && precision_bits < half_way + error) {
+ // Too imprecise. The caller will have to fall back to a slower version.
+ // However the returned number is guaranteed to be either the correct
+ // double, or the next-lower double.
+ return false;
+ } else {
+ return true;
+ }
+}
+
+
+// Returns
+// - -1 if buffer*10^exponent < diy_fp.
+// - 0 if buffer*10^exponent == diy_fp.
+// - +1 if buffer*10^exponent > diy_fp.
+// Preconditions:
+// buffer.length() + exponent <= kMaxDecimalPower + 1
+// buffer.length() + exponent > kMinDecimalPower
+//   buffer.length() <= kMaxSignificantDecimalDigits
+static int CompareBufferWithDiyFp(Vector<const char> buffer,
+ int exponent,
+ DiyFp diy_fp) {
+ DOUBLE_CONVERSION_ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
+ DOUBLE_CONVERSION_ASSERT(buffer.length() + exponent > kMinDecimalPower);
+ DOUBLE_CONVERSION_ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+ // Make sure that the Bignum will be able to hold all our numbers.
+ // Our Bignum implementation has a separate field for exponents. Shifts will
+ // consume at most one bigit (< 64 bits).
+  // log2(10) == 3.3219...
+ DOUBLE_CONVERSION_ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+ Bignum buffer_bignum;
+ Bignum diy_fp_bignum;
+ buffer_bignum.AssignDecimalString(buffer);
+ diy_fp_bignum.AssignUInt64(diy_fp.f());
+ if (exponent >= 0) {
+ buffer_bignum.MultiplyByPowerOfTen(exponent);
+ } else {
+ diy_fp_bignum.MultiplyByPowerOfTen(-exponent);
+ }
+ if (diy_fp.e() > 0) {
+ diy_fp_bignum.ShiftLeft(diy_fp.e());
+ } else {
+ buffer_bignum.ShiftLeft(-diy_fp.e());
+ }
+ return Bignum::Compare(buffer_bignum, diy_fp_bignum);
+}
+
+
+// Returns true if the guess is the correct double.
+// Returns false when the guess is either correct or the next-lower double.
+static bool ComputeGuess(Vector<const char> trimmed, int exponent,
+ double* guess) {
+ if (trimmed.length() == 0) {
+ *guess = 0.0;
+ return true;
+ }
+ if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) {
+ *guess = Double::Infinity();
+ return true;
+ }
+ if (exponent + trimmed.length() <= kMinDecimalPower) {
+ *guess = 0.0;
+ return true;
+ }
+
+ if (DoubleStrtod(trimmed, exponent, guess) ||
+ DiyFpStrtod(trimmed, exponent, guess)) {
+ return true;
+ }
+ if (*guess == Double::Infinity()) {
+ return true;
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static bool IsDigit(const char d) {
+ return ('0' <= d) && (d <= '9');
+}
+
+static bool IsNonZeroDigit(const char d) {
+ return ('1' <= d) && (d <= '9');
+}
+
+#ifdef __has_cpp_attribute
+#if __has_cpp_attribute(maybe_unused)
+[[maybe_unused]]
+#endif
+#endif
+static bool AssertTrimmedDigits(const Vector<const char>& buffer) {
+  for (int i = 0; i < buffer.length(); ++i) {
+    if (!IsDigit(buffer[i])) {
+ return false;
+ }
+ }
+ return (buffer.length() == 0) || (IsNonZeroDigit(buffer[0]) && IsNonZeroDigit(buffer[buffer.length()-1]));
+}
+#endif
+
+double StrtodTrimmed(Vector<const char> trimmed, int exponent) {
+ DOUBLE_CONVERSION_ASSERT(trimmed.length() <= kMaxSignificantDecimalDigits);
+ DOUBLE_CONVERSION_ASSERT(AssertTrimmedDigits(trimmed));
+ double guess;
+ const bool is_correct = ComputeGuess(trimmed, exponent, &guess);
+ if (is_correct) {
+ return guess;
+ }
+ DiyFp upper_boundary = Double(guess).UpperBoundary();
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return Double(guess).NextDouble();
+ } else if ((Double(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return Double(guess).NextDouble();
+ }
+}
+
+double Strtod(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ return StrtodTrimmed(trimmed, updated_exponent);
+}
+
+static float SanitizedDoubletof(double d) {
+ DOUBLE_CONVERSION_ASSERT(d >= 0.0);
+ // ASAN has a sanitize check that disallows casting doubles to floats if
+ // they are too big.
+ // https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks
+ // The behavior should be covered by IEEE 754, but some projects use this
+ // flag, so work around it.
+ float max_finite = 3.4028234663852885981170418348451692544e+38;
+ // The half-way point between the max-finite and infinity value.
+  // Since infinity has an even significand, everything equal to or greater
+  // than this value should become infinity.
+ double half_max_finite_infinity =
+ 3.40282356779733661637539395458142568448e+38;
+ if (d >= max_finite) {
+ if (d >= half_max_finite_infinity) {
+ return Single::Infinity();
+ } else {
+ return max_finite;
+ }
+ } else {
+ return static_cast<float>(d);
+ }
+}
+
+float Strtof(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ exponent = updated_exponent;
+ return StrtofTrimmed(trimmed, exponent);
+}
+
+float StrtofTrimmed(Vector<const char> trimmed, int exponent) {
+ DOUBLE_CONVERSION_ASSERT(trimmed.length() <= kMaxSignificantDecimalDigits);
+ DOUBLE_CONVERSION_ASSERT(AssertTrimmedDigits(trimmed));
+
+ double double_guess;
+ bool is_correct = ComputeGuess(trimmed, exponent, &double_guess);
+
+ float float_guess = SanitizedDoubletof(double_guess);
+ if (float_guess == double_guess) {
+ // This shortcut triggers for integer values.
+ return float_guess;
+ }
+
+  // We must catch double-rounding: say the double has been rounded up and is
+  // now a boundary of a float, where it rounds up again. This is why we have
+  // to look at the previous double too.
+ // Example (in decimal numbers):
+ // input: 12349
+ // high-precision (4 digits): 1235
+ // low-precision (3 digits):
+ // when read from input: 123
+ // when rounded from high precision: 124.
+ // To do this we simply look at the neighbors of the correct result and see
+ // if they would round to the same float. If the guess is not correct we have
+ // to look at four values (since two different doubles could be the correct
+ // double).
+
+ double double_next = Double(double_guess).NextDouble();
+ double double_previous = Double(double_guess).PreviousDouble();
+
+ float f1 = SanitizedDoubletof(double_previous);
+ float f2 = float_guess;
+ float f3 = SanitizedDoubletof(double_next);
+ float f4;
+ if (is_correct) {
+ f4 = f3;
+ } else {
+ double double_next2 = Double(double_next).NextDouble();
+ f4 = SanitizedDoubletof(double_next2);
+ }
+ (void) f2; // Mark variable as used.
+ DOUBLE_CONVERSION_ASSERT(f1 <= f2 && f2 <= f3 && f3 <= f4);
+
+ // If the guess doesn't lie near a single-precision boundary we can simply
+ // return its float-value.
+ if (f1 == f4) {
+ return float_guess;
+ }
+
+ DOUBLE_CONVERSION_ASSERT((f1 != f2 && f2 == f3 && f3 == f4) ||
+ (f1 == f2 && f2 != f3 && f3 == f4) ||
+ (f1 == f2 && f2 == f3 && f3 != f4));
+
+ // guess and next are the two possible candidates (in the same way that
+ // double_guess was the lower candidate for a double-precision guess).
+ float guess = f1;
+ float next = f4;
+ DiyFp upper_boundary;
+ if (guess == 0.0f) {
+ float min_float = 1e-45f;
+ upper_boundary = Double(static_cast<double>(min_float) / 2).AsDiyFp();
+ } else {
+ upper_boundary = Single(guess).UpperBoundary();
+ }
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return next;
+ } else if ((Single(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return next;
+ }
+}
+
+} // namespace double_conversion
diff --git a/mfbt/double-conversion/double-conversion/strtod.h b/mfbt/double-conversion/double-conversion/strtod.h
new file mode 100644
index 0000000000..77221fb9d5
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/strtod.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_STRTOD_H_
+#define DOUBLE_CONVERSION_STRTOD_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+double Strtod(Vector<const char> buffer, int exponent);
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+float Strtof(Vector<const char> buffer, int exponent);
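+
+// Example (illustrative): the digit buffer "12345" with exponent -2 denotes
+// 12345 * 10^-2, so Strtod(Vector<const char>("12345", 5), -2) returns the
+// double closest to 123.45.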
+
+// Same as Strtod, but assumes that 'trimmed' is already trimmed, as if run
+// through TrimAndCut. That is, 'trimmed' must have no leading or trailing
+// zeros, must not be a lone zero, and must not have 'too many' digits.
+double StrtodTrimmed(Vector<const char> trimmed, int exponent);
+
+// Same as Strtof, but assumes that 'trimmed' is already trimmed, as if run
+// through TrimAndCut. That is, 'trimmed' must have no leading or trailing
+// zeros, must not be a lone zero, and must not have 'too many' digits.
+float StrtofTrimmed(Vector<const char> trimmed, int exponent);
+
+inline Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
+ for (int i = buffer.length() - 1; i >= 0; --i) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(0, i + 1);
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_STRTOD_H_
diff --git a/mfbt/double-conversion/double-conversion/utils.h b/mfbt/double-conversion/double-conversion/utils.h
new file mode 100644
index 0000000000..629ac2b9a5
--- /dev/null
+++ b/mfbt/double-conversion/double-conversion/utils.h
@@ -0,0 +1,421 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_UTILS_H_
+#define DOUBLE_CONVERSION_UTILS_H_
+
+// Use DOUBLE_CONVERSION_NON_PREFIXED_MACROS to get unprefixed macros as was
+// the case in double-conversion releases prior to 3.1.6
+
+#include <cstdlib>
+#include <cstring>
+
+// For pre-C++11 compatibility
+#if __cplusplus >= 201103L
+#define DOUBLE_CONVERSION_NULLPTR nullptr
+#else
+#define DOUBLE_CONVERSION_NULLPTR NULL
+#endif
+
+#include "mozilla/Assertions.h"
+
+#ifndef DOUBLE_CONVERSION_ASSERT
+#define DOUBLE_CONVERSION_ASSERT(condition) \
+ MOZ_ASSERT(condition)
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ASSERT)
+#define ASSERT DOUBLE_CONVERSION_ASSERT
+#endif
+
+#ifndef DOUBLE_CONVERSION_UNIMPLEMENTED
+#define DOUBLE_CONVERSION_UNIMPLEMENTED() \
+ MOZ_CRASH("DOUBLE_CONVERSION_UNIMPLEMENTED")
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNIMPLEMENTED)
+#define UNIMPLEMENTED DOUBLE_CONVERSION_UNIMPLEMENTED
+#endif
+
+#ifndef DOUBLE_CONVERSION_NO_RETURN
+#ifdef _MSC_VER
+#define DOUBLE_CONVERSION_NO_RETURN __declspec(noreturn)
+#else
+#define DOUBLE_CONVERSION_NO_RETURN __attribute__((noreturn))
+#endif
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(NO_RETURN)
+#define NO_RETURN DOUBLE_CONVERSION_NO_RETURN
+#endif
+
+#ifndef DOUBLE_CONVERSION_UNREACHABLE
+#ifdef _MSC_VER
+void DOUBLE_CONVERSION_NO_RETURN abort_noreturn();
+inline void abort_noreturn() { MOZ_CRASH("abort_noreturn"); }
+#define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn())
+#else
+#define DOUBLE_CONVERSION_UNREACHABLE() \
+ MOZ_CRASH("DOUBLE_CONVERSION_UNREACHABLE")
+#endif
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNREACHABLE)
+#define UNREACHABLE DOUBLE_CONVERSION_UNREACHABLE
+#endif
+
+// Not all compilers support __has_attribute and combining a check for both
+// ifdef and __has_attribute on the same preprocessor line isn't portable.
+#ifdef __has_attribute
+# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) 0
+#endif
+
+#ifndef DOUBLE_CONVERSION_UNUSED
+#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(unused)
+#define DOUBLE_CONVERSION_UNUSED __attribute__((unused))
+#else
+#define DOUBLE_CONVERSION_UNUSED
+#endif
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNUSED)
+#define UNUSED DOUBLE_CONVERSION_UNUSED
+#endif
+
+#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(uninitialized)
+#define DOUBLE_CONVERSION_STACK_UNINITIALIZED __attribute__((uninitialized))
+#else
+#define DOUBLE_CONVERSION_STACK_UNINITIALIZED
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(STACK_UNINITIALIZED)
+#define STACK_UNINITIALIZED DOUBLE_CONVERSION_STACK_UNINITIALIZED
+#endif
+
+// Double operations detection based on target architecture.
+// Linux uses an 80-bit wide floating-point stack on x86. This induces double
+// rounding, which in turn leads to wrong results.
+// An easy way to test if the floating-point operations are correct is to
+// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then
+// the result is equal to 89255e-22.
+// The best way to test this is to create a division function and to compare
+// the output of the division with the expected result. (Inlining must be
+// disabled.)
+// On Linux/x86, 89255e-22 != Div_double(89255.0, 1e22)
+//
+// For example:
+/*
+// -- in div.c
+double Div_double(double x, double y) { return x / y; }
+
+// -- in main.c
+double Div_double(double x, double y); // Forward declaration.
+
+int main(int argc, char** argv) {
+ return Div_double(89255.0, 1e22) == 89255e-22;
+}
+*/
+// Run as follows: ./main || echo "correct"
+//
+// If it prints "correct" then the architecture should be here, in the "correct" section.
+#if defined(_M_X64) || defined(__x86_64__) || \
+ defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \
+ defined(__hppa__) || defined(__ia64__) || \
+ defined(__mips__) || \
+ defined(__loongarch__) || \
+ defined(__nios2__) || defined(__ghs) || \
+ defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
+ defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
+ defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
+ defined(__SH4__) || defined(__alpha__) || \
+ defined(_MIPS_ARCH_MIPS32R2) || defined(__ARMEB__) ||\
+ defined(__AARCH64EL__) || defined(__aarch64__) || defined(__AARCH64EB__) || \
+ defined(__riscv) || defined(__e2k__) || \
+ defined(__or1k__) || defined(__arc__) || defined(__ARC64__) || \
+ defined(__microblaze__) || defined(__XTENSA__) || \
+ defined(__EMSCRIPTEN__) || defined(__wasm32__)
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#elif defined(__mc68000__) || \
+ defined(__pnacl__) || defined(__native_client__)
+#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
+#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
+#if defined(_WIN32)
+// Windows uses a 64-bit wide floating-point stack.
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#else
+#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
+#endif // _WIN32
+#else
+#error Target architecture was not detected as supported by Double-Conversion.
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(CORRECT_DOUBLE_OPERATIONS)
+#define CORRECT_DOUBLE_OPERATIONS DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
+#endif
+
+#if defined(_WIN32) && !defined(__MINGW32__)
+
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t; // NOLINT
+typedef unsigned short uint16_t; // NOLINT
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+// intptr_t and friends are defined in crtdefs.h through stdio.h.
+
+#else
+
+#include <stdint.h>
+
+#endif
+
+typedef uint16_t uc16;
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write DOUBLE_CONVERSION_UINT64_2PART_C(0x12345678,90123456);
+#define DOUBLE_CONVERSION_UINT64_2PART_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UINT64_2PART_C)
+#define UINT64_2PART_C DOUBLE_CONVERSION_UINT64_2PART_C
+#endif
+
+// The expression DOUBLE_CONVERSION_ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use DOUBLE_CONVERSION_ARRAY_SIZE on statically allocated
+// arrays.
+#ifndef DOUBLE_CONVERSION_ARRAY_SIZE
+#define DOUBLE_CONVERSION_ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ARRAY_SIZE)
+#define ARRAY_SIZE DOUBLE_CONVERSION_ARRAY_SIZE
+#endif
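+
+// For example (kSmallPowers is a hypothetical array used for illustration):
+//   static const int kSmallPowers[] = {1, 10, 100};
+//   size_t n = DOUBLE_CONVERSION_ARRAY_SIZE(kSmallPowers);  // n == 3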
+
+// A macro to disallow the evil copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
+#ifndef DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN
+#define DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_COPY_AND_ASSIGN)
+#define DC_DISALLOW_COPY_AND_ASSIGN DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN
+#endif
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#ifndef DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS
+#define DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName)
+#endif
+#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_IMPLICIT_CONSTRUCTORS)
+#define DC_DISALLOW_IMPLICIT_CONSTRUCTORS DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS
+#endif
+
+namespace double_conversion {
+
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ DOUBLE_CONVERSION_ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+// This is a simplified version of V8's Vector class.
+template <typename T>
+class Vector {
+ public:
+ Vector() : start_(DOUBLE_CONVERSION_NULLPTR), length_(0) {}
+ Vector(T* data, int len) : start_(data), length_(len) {
+ DOUBLE_CONVERSION_ASSERT(len == 0 || (len > 0 && data != DOUBLE_CONVERSION_NULLPTR));
+ }
+
+ // Returns a vector using the same backing storage as this one,
+ // spanning from and including 'from', to but not including 'to'.
+ Vector<T> SubVector(int from, int to) {
+ DOUBLE_CONVERSION_ASSERT(to <= length_);
+ DOUBLE_CONVERSION_ASSERT(from < to);
+ DOUBLE_CONVERSION_ASSERT(0 <= from);
+ return Vector<T>(start() + from, to - from);
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns whether or not the vector is empty.
+ bool is_empty() const { return length_ == 0; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ DOUBLE_CONVERSION_ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ T& first() { return start_[0]; }
+
+ T& last() { return start_[length_ - 1]; }
+
+ void pop_back() {
+ DOUBLE_CONVERSION_ASSERT(!is_empty());
+ --length_;
+ }
+
+ private:
+ T* start_;
+ int length_;
+};
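+
+// A usage sketch (digits is a hypothetical local buffer): a Vector is a
+// non-owning view over existing storage.
+//
+//   char digits[] = {'1', '2', '3', '4'};
+//   Vector<char> v(digits, 4);
+//   Vector<char> tail = v.SubVector(1, 4);  // views '2', '3', '4'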
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that check the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+ StringBuilder(char* buffer, int buffer_size)
+ : buffer_(buffer, buffer_size), position_(0) { }
+
+ ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+ int size() const { return buffer_.length(); }
+
+ // Get the current position in the builder.
+ int position() const {
+ DOUBLE_CONVERSION_ASSERT(!is_finalized());
+ return position_;
+ }
+
+ // Reset the position.
+ void Reset() { position_ = 0; }
+
+ // Add a single character to the builder. It is not allowed to add
+ // 0-characters; use the Finalize() method to terminate the string
+ // instead.
+ void AddCharacter(char c) {
+ DOUBLE_CONVERSION_ASSERT(c != '\0');
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_++] = c;
+ }
+
+ // Add an entire string to the builder. Uses strlen() internally to
+ // compute the length of the input string.
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+
+ // Add the first 'n' characters of the given string 's' to the
+ // builder. The input string must have enough characters.
+ void AddSubstring(const char* s, int n) {
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ + n < buffer_.length());
+ DOUBLE_CONVERSION_ASSERT(static_cast<size_t>(n) <= strlen(s));
+ memmove(&buffer_[position_], s, static_cast<size_t>(n));
+ position_ += n;
+ }
+
+
+ // Add character padding to the builder. If count is non-positive,
+ // nothing is added to the builder.
+ void AddPadding(char c, int count) {
+ for (int i = 0; i < count; i++) {
+ AddCharacter(c);
+ }
+ }
+
+ // Finalize the string by 0-terminating it and returning the buffer.
+ char* Finalize() {
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_] = '\0';
+ // Make sure nobody managed to add a 0-character to the
+ // buffer while building the string.
+ DOUBLE_CONVERSION_ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
+ position_ = -1;
+ DOUBLE_CONVERSION_ASSERT(is_finalized());
+ return buffer_.start();
+ }
+
+ private:
+ Vector<char> buffer_;
+ int position_;
+
+ bool is_finalized() const { return position_ < 0; }
+
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
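+
+// A usage sketch (buf is a hypothetical caller-provided buffer): the builder
+// writes into existing storage and 0-terminates it on Finalize().
+//
+//   char buf[16];
+//   StringBuilder builder(buf, 16);
+//   builder.AddString("3.");
+//   builder.AddPadding('3', 4);
+//   const char* s = builder.Finalize();  // s == "3.3333"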
+
+// The type-based aliasing rule allows the compiler to assume that pointers of
+// different types (for some definition of different) never alias each other.
+// Thus the following code does not work:
+//
+// float f = foo();
+// int fbits = *(int*)(&f);
+//
+// The compiler 'knows' that the int pointer can't refer to f since the types
+// don't match, so the compiler may cache f in a register, leaving random data
+// in fbits. Using C++-style casts makes no difference; however, a pointer to
+// char data is assumed to alias any other pointer. This is the 'memcpy
+// exception'.
+//
+// BitCast uses the memcpy exception to move the bits from a variable of one
+// type to a variable of another type. Of course the end result is likely to
+// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
+// will completely optimize BitCast away.
+//
+// There is an additional use for BitCast.
+// Recent gccs will warn when they see casts that may result in breakage due to
+// the type-based aliasing rule. If you have checked that there is no breakage
+// you can use BitCast to cast one pointer type to another. This confuses gcc
+// enough that it can no longer see that you have cast one pointer type to
+// another thus avoiding the warning.
+template <class Dest, class Source>
+Dest BitCast(const Source& source) {
+ // Compile time assertion: sizeof(Dest) == sizeof(Source)
+ // A compile error here means your Dest and Source have different sizes.
+#if __cplusplus >= 201103L
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "source and destination size mismatch");
+#else
+ DOUBLE_CONVERSION_UNUSED
+ typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+#endif
+
+ Dest dest;
+ memmove(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+template <class Dest, class Source>
+Dest BitCast(Source* source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
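+
+// A usage sketch: reinterpret the bits of a double as a uint64_t without
+// violating strict aliasing.
+//
+//   double d = 1.0;
+//   uint64_t bits = BitCast<uint64_t>(d);  // bits == 0x3FF0000000000000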
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_UTILS_H_
diff --git a/mfbt/double-conversion/moz.yaml b/mfbt/double-conversion/moz.yaml
new file mode 100644
index 0000000000..598f9b88fd
--- /dev/null
+++ b/mfbt/double-conversion/moz.yaml
@@ -0,0 +1,48 @@
+schema: 1
+
+bugzilla:
+ product: Core
+ component: "MFBT"
+
+origin:
+ name: double-conversion
+ description: binary-decimal and decimal-binary routines for IEEE doubles
+
+ url: https://github.com/google/double-conversion
+
+ release: 4f7a25d8ced8c7cf6eee6fd09d6788eaa23c9afe (2023-05-18T12:20:37Z).
+ revision: 4f7a25d8ced8c7cf6eee6fd09d6788eaa23c9afe
+
+ license: BSD-3-Clause
+ license-file: LICENSE
+
+vendoring:
+ url: https://github.com/google/double-conversion
+ source-hosting: github
+ tracking: commit
+
+ exclude:
+ - "*"
+ - "double-conversion/*"
+ - cmake
+ - msvc
+ - test
+
+ include:
+ - LICENSE
+ - README.md
+ - "double-conversion/*.h"
+ - "double-conversion/*.cc"
+
+ patches:
+ - add-mfbt-api-markers.patch
+ - use-mozilla-assertions.patch
+ - debug-only-functions.patch
+ - to-fixed-dbl-max.patch
+
+ update-actions:
+ - action: move-file
+ from: '{vendor_dir}/README.md'
+ to: '{vendor_dir}/double-conversion/README.md'
+ - action: delete-path
+ path: '{vendor_dir}/double-conversion/.gitignore'
diff --git a/mfbt/double-conversion/to-fixed-dbl-max.patch b/mfbt/double-conversion/to-fixed-dbl-max.patch
new file mode 100644
index 0000000000..9d0a5d2f2e
--- /dev/null
+++ b/mfbt/double-conversion/to-fixed-dbl-max.patch
@@ -0,0 +1,51 @@
+diff --git a/double-conversion/double-to-string.cc b/double-conversion/double-to-string.cc
+--- a/double-conversion/double-to-string.cc
++++ b/double-conversion/double-to-string.cc
+@@ -207,25 +207,21 @@ bool DoubleToStringConverter::ToShortest
+ }
+ return true;
+ }
+
+
+ bool DoubleToStringConverter::ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+- DOUBLE_CONVERSION_ASSERT(kMaxFixedDigitsBeforePoint == 60);
+- const double kFirstNonFixed = 1e60;
+-
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits > kMaxFixedDigitsAfterPoint) return false;
+- if (value >= kFirstNonFixed || value <= -kFirstNonFixed) return false;
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add space for the '\0' byte.
+ const int kDecimalRepCapacity =
+ kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1;
+ char decimal_rep[kDecimalRepCapacity];
+diff --git a/double-conversion/double-to-string.h b/double-conversion/double-to-string.h
+--- a/double-conversion/double-to-string.h
++++ b/double-conversion/double-to-string.h
+@@ -33,17 +33,17 @@
+
+ namespace double_conversion {
+
+ class DoubleToStringConverter {
+ public:
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
+ // function returns false.
+- static const int kMaxFixedDigitsBeforePoint = 60;
++ static const int kMaxFixedDigitsBeforePoint = 308;
+ static const int kMaxFixedDigitsAfterPoint = 100;
+
+ // When calling ToExponential with a requested_digits
+ // parameter > kMaxExponentialDigits then the function returns false.
+ static const int kMaxExponentialDigits = 120;
+
+ // When calling ToPrecision with a requested_digits
+ // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
diff --git a/mfbt/double-conversion/use-mozilla-assertions.patch b/mfbt/double-conversion/use-mozilla-assertions.patch
new file mode 100644
index 0000000000..c6f8988d6b
--- /dev/null
+++ b/mfbt/double-conversion/use-mozilla-assertions.patch
@@ -0,0 +1,60 @@
+diff --git a/double-conversion/utils.h b/double-conversion/utils.h
+--- a/double-conversion/utils.h
++++ b/double-conversion/utils.h
+@@ -36,27 +36,29 @@
+
+ // For pre-C++11 compatibility
+ #if __cplusplus >= 201103L
+ #define DOUBLE_CONVERSION_NULLPTR nullptr
+ #else
+ #define DOUBLE_CONVERSION_NULLPTR NULL
+ #endif
+
+-#include <cassert>
++#include "mozilla/Assertions.h"
++
+ #ifndef DOUBLE_CONVERSION_ASSERT
+ #define DOUBLE_CONVERSION_ASSERT(condition) \
+- assert(condition)
++ MOZ_ASSERT(condition)
+ #endif
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ASSERT)
+ #define ASSERT DOUBLE_CONVERSION_ASSERT
+ #endif
+
+ #ifndef DOUBLE_CONVERSION_UNIMPLEMENTED
+-#define DOUBLE_CONVERSION_UNIMPLEMENTED() (abort())
++#define DOUBLE_CONVERSION_UNIMPLEMENTED() \
++ MOZ_CRASH("DOUBLE_CONVERSION_UNIMPLEMENTED")
+ #endif
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNIMPLEMENTED)
+ #define UNIMPLEMENTED DOUBLE_CONVERSION_UNIMPLEMENTED
+ #endif
+
+ #ifndef DOUBLE_CONVERSION_NO_RETURN
+ #ifdef _MSC_VER
+ #define DOUBLE_CONVERSION_NO_RETURN __declspec(noreturn)
+@@ -66,20 +68,21 @@
+ #endif
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(NO_RETURN)
+ #define NO_RETURN DOUBLE_CONVERSION_NO_RETURN
+ #endif
+
+ #ifndef DOUBLE_CONVERSION_UNREACHABLE
+ #ifdef _MSC_VER
+ void DOUBLE_CONVERSION_NO_RETURN abort_noreturn();
+-inline void abort_noreturn() { abort(); }
++inline void abort_noreturn() { MOZ_CRASH("abort_noreturn"); }
+ #define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn())
+ #else
+-#define DOUBLE_CONVERSION_UNREACHABLE() (abort())
++#define DOUBLE_CONVERSION_UNREACHABLE() \
++ MOZ_CRASH("DOUBLE_CONVERSION_UNREACHABLE")
+ #endif
+ #endif
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNREACHABLE)
+ #define UNREACHABLE DOUBLE_CONVERSION_UNREACHABLE
+ #endif
+
+ // Not all compilers support __has_attribute and combining a check for both
+ // ifdef and __has_attribute on the same preprocessor line isn't portable.
diff --git a/mfbt/fallible.h b/mfbt/fallible.h
new file mode 100644
index 0000000000..fabb54e82a
--- /dev/null
+++ b/mfbt/fallible.h
@@ -0,0 +1,64 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_fallible_h
+#define mozilla_fallible_h
+
+#if defined(__cplusplus)
+
+/* Explicit fallible allocation
+ *
+ * Memory allocation normally defaults to aborting on failure. That is,
+ * it never returns NULL, and crashes instead.
+ *
+ * Code can explicitly request fallible memory allocation via the
+ * declarations below.
+ *
+ * The typical use of the mozilla::fallible constant is with placement new,
+ * like the following:
+ *
+ * foo = new (mozilla::fallible) Foo();
+ *
+ * The following forms, or derivatives, are also possible but deprecated:
+ *
+ * foo = new ((mozilla::fallible_t())) Foo();
+ *
+ * const mozilla::fallible_t f = mozilla::fallible_t();
+ * bar = new (f) Bar();
+ *
+ * It is also possible to declare method overloads with fallible allocation
+ * alternatives, like so:
+ *
+ * class Foo {
+ * public:
+ * void Method(void *);
+ * void Method(void *, const mozilla::fallible_t&);
+ * };
+ *
+ * Foo foo;
+ * foo.Method(nullptr, mozilla::fallible);
+ *
+ * If that last method call is in a method that itself takes a const
+ * fallible_t& argument, it is recommended to propagate that argument
+ * instead of using mozilla::fallible:
+ *
+ * void Func(Foo &foo, const mozilla::fallible_t& aFallible) {
+ * foo.Method(nullptr, aFallible);
+ * }
+ *
+ */
+
+# include <new>
+
+namespace mozilla {
+
+using fallible_t = std::nothrow_t;
+
+static const fallible_t& fallible = std::nothrow;
+
+} // namespace mozilla
+
+#endif
+
+#endif // mozilla_fallible_h
diff --git a/mfbt/lz4/LICENSE b/mfbt/lz4/LICENSE
new file mode 100644
index 0000000000..488491695a
--- /dev/null
+++ b/mfbt/lz4/LICENSE
@@ -0,0 +1,24 @@
+LZ4 Library
+Copyright (c) 2011-2020, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mfbt/lz4/README.md b/mfbt/lz4/README.md
new file mode 100644
index 0000000000..08d1cef2bf
--- /dev/null
+++ b/mfbt/lz4/README.md
@@ -0,0 +1,169 @@
+LZ4 - Library Files
+================================
+
+The `/lib` directory contains many files, but depending on a project's objectives,
+not all of them are required.
+Limited systems may want to reduce the number of source files included,
+as a way to reduce binary size and dependencies.
+
+Capabilities are added at the "level" granularity, detailed below.
+
+#### Level 1 : Minimal LZ4 build
+
+The minimum required is **`lz4.c`** and **`lz4.h`**,
+which provides the fast compression and decompression algorithms.
+They generate and decode data using the [LZ4 block format].
+
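+As an illustration (this sketch is not part of the upstream documentation),
+a minimal round trip through this level's API looks like the following:
+```
+#include <stdio.h>
+#include <string.h>
+#include "lz4.h"
+
+int main(void)
+{
+    const char* const src = "repetitive data, repetitive data, repetitive data";
+    const int srcSize = (int)(strlen(src) + 1);   /* keep the '\0' */
+    char compressed[128];   /* >= LZ4_compressBound(srcSize) for this input */
+    char restored[128];
+
+    const int cSize = LZ4_compress_default(src, compressed, srcSize,
+                                           (int)sizeof(compressed));
+    if (cSize <= 0) return 1;   /* dst too small, or other failure */
+
+    const int dSize = LZ4_decompress_safe(compressed, restored, cSize,
+                                          (int)sizeof(restored));
+    if (dSize < 0) return 1;    /* malformed compressed input */
+
+    printf("%d -> %d -> %d bytes\n", srcSize, cSize, dSize);
+    return 0;
+}
+```
+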
+
+#### Level 2 : High Compression variant
+
+For a better compression ratio at the cost of compression speed,
+the High Compression variant called **lz4hc** is available.
+Add files **`lz4hc.c`** and **`lz4hc.h`**.
+This variant also compresses data using the [LZ4 block format],
+and depends on regular `lib/lz4.*` source files.
+
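+For instance (a sketch rather than upstream documentation), the HC entry
+point is used much like the default one, with an extra compression-level
+parameter; its output is decoded with the regular `LZ4_decompress_safe()`:
+```
+#include "lz4hc.h"
+
+/* Returns the compressed size, or 0 on failure (e.g. dst too small). */
+int compress_hc(const char* src, int srcSize, char* dst, int dstCapacity)
+{
+    /* LZ4HC_CLEVEL_DEFAULT trades speed for ratio; higher levels go further. */
+    return LZ4_compress_HC(src, dst, srcSize, dstCapacity, LZ4HC_CLEVEL_DEFAULT);
+}
+```
+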
+
+#### Level 3 : Frame support, for interoperability
+
+In order to produce compressed data compatible with the `lz4` command line utility,
+it's necessary to use the [official interoperable frame format].
+This format is generated and decoded automatically by the **lz4frame** library.
+Its public API is described in `lib/lz4frame.h`.
+In order to work properly, lz4frame needs all other modules present in `/lib`,
+including lz4, lz4hc, and **xxhash**.
+So it's necessary to also include `xxhash.c` and `xxhash.h`.
+
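+As a sketch (not part of the upstream documentation), one-shot frame
+compression with this API, producing data the `lz4` utility can read,
+may look like the following:
+```
+#include <stdlib.h>
+#include "lz4frame.h"
+
+/* On success, *dstPtr owns a malloc'ed frame and its size is returned;
+ * returns 0 on failure. NULL preferences select the defaults. */
+size_t compress_to_frame(const void* src, size_t srcSize, void** dstPtr)
+{
+    const size_t bound = LZ4F_compressFrameBound(srcSize, NULL);
+    void* const dst = malloc(bound);
+    if (dst == NULL) return 0;
+
+    const size_t frameSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
+    if (LZ4F_isError(frameSize)) { free(dst); return 0; }
+
+    *dstPtr = dst;
+    return frameSize;
+}
+```
+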
+
+#### Level 4 : File compression operations
+
+As a helper around file operations,
+the library has recently been extended with `lz4file.c` and `lz4file.h`
+(still considered experimental at the time of this writing).
+These helpers allow opening, reading, writing, and closing files
+using transparent LZ4 compression / decompression.
+As a consequence, using `lz4file` adds a dependency on `<stdio.h>`.
+
+`lz4file` relies on `lz4frame` in order to produce compressed data
+conformant to the [LZ4 Frame format] specification.
+Consequently, to enable this capability,
+it's necessary to include all `*.c` and `*.h` files from the `lib/` directory.
+
+
+#### Advanced / Experimental API
+
+Definitions which are not guaranteed to remain stable in future versions
+are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`.
+As the name suggests, these definitions should be invoked
+only in the context of static linking.
+Otherwise, a dependent application may break on a future API or ABI change.
+The associated symbols are also not exposed by the dynamic library by default.
+Should they nonetheless be needed, it's possible to force their publication
+by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS`
+and `LZ4F_PUBLISH_STATIC_FUNCTIONS`.
+
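+For example, a translation unit opting into these definitions sets the macro
+before the include (as `lz4.c` itself does internally):
+```
+/* Must appear before the include, in every file using static-only symbols. */
+#define LZ4_STATIC_LINKING_ONLY
+#include "lz4.h"
+```
+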
+
+#### Build macros
+
+The following build macros can be selected to adjust source code behavior at compilation time:
+
+- `LZ4_FAST_DEC_LOOP` : this triggers a speed-optimized decompression loop, more effective on modern CPUs.
+ This loop works great on `x86`, `x64` and `aarch64` CPUs, and is automatically enabled for them.
+ It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
+ For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`,
+ and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`.
+
+- `LZ4_DISTANCE_MAX` : controls the maximum offset that the compressor will allow.
+ Set to 65535 by default, which is the maximum value supported by the lz4 format.
+ Reducing the maximum distance will reduce opportunities for LZ4 to find matches,
+ and hence will produce a worse compression ratio.
+ Setting a smaller max distance could allow compatibility with specific decoders with limited memory budget.
+ This build macro only influences the compressed output of the compressor.
+
+- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning.
+ This is meant to invite users to update their source code.
+ Should this be a problem, it's generally possible to make the compiler ignore these warnings,
+ for example with `-Wno-deprecated-declarations` on `gcc`,
+ or `_CRT_SECURE_NO_WARNINGS` for Visual Studio.
+ This build macro offers another project-specific method
+ by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files.
+
+- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths
+ by using bitcount instructions, generally implemented as fast single instructions on many CPUs.
+ In case the target CPU doesn't support them, the compiler intrinsic doesn't work, or they perform badly,
+ it's possible to use an optimized software path instead.
+ This is achieved by setting this build macro.
+ In most cases, it's not expected to be necessary,
+ but it can be legitimately considered for less common platforms.
+
+- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area
+ passed as argument to become a compression state is suitably aligned.
+ This test can be disabled if it proves flaky, by setting this value to 0.
+
+- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to `<stdlib.h>`'s `malloc()`, `calloc()` and `free()`
+ by user-defined functions, which must be named `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
+ User functions must be available at link time; a sketch of such definitions follows this list.
+
+- `LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION` :
+ Remove support of dynamic memory allocation.
+ For more details, see description of this macro in `lib/lz4.c`.
+
+- `LZ4_FREESTANDING` : by setting this build macro to 1,
+ LZ4/HC removes dependencies on the C standard library,
+ including allocation functions and `memmove()`, `memcpy()`, and `memset()`.
+ This build macro is designed to help use LZ4/HC in restricted environments
+ (embedded, bootloader, etc).
+ For more details, see description of this macro in `lib/lz4.h`.
+
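+As referenced in the `LZ4_USER_MEMORY_FUNCTIONS` entry above, user-provided
+definitions could look like the sketch below; the `arena_*` helpers are
+hypothetical stand-ins for a project's own allocator:
+```
+#include <stddef.h>
+
+/* Hypothetical project allocator. */
+extern void* arena_alloc(size_t size);
+extern void* arena_calloc(size_t count, size_t size);
+extern void  arena_free(void* ptr);
+
+/* Linked in when the library is compiled with -DLZ4_USER_MEMORY_FUNCTIONS. */
+void* LZ4_malloc(size_t s)           { return arena_alloc(s); }
+void* LZ4_calloc(size_t n, size_t s) { return arena_calloc(n, s); }
+void  LZ4_free(void* p)              { arena_free(p); }
+```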
+
+
+#### Amalgamation
+
+lz4 source code can be amalgamated into a single file.
+One can combine all source code into `lz4_all.c` by using the following command:
+```
+cat lz4.c lz4hc.c lz4frame.c > lz4_all.c
+```
+(the `cat` file order is important), then compile `lz4_all.c`.
+All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`.
+
+
+#### Windows : using MinGW+MSYS to create DLL
+
+A DLL can be created using MinGW+MSYS with the `make liblz4` command.
+This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`.
+To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross-compilation on Linux with 64-bit mingw-w64:
+```
+make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT
+```
+The import library is only required with Visual C++.
+The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library
+`dll\liblz4.dll` are required to compile a project using gcc/MinGW.
+The dynamic library has to be added to linking options.
+This means that if a project using LZ4 consists of a single `test-dll.c`
+file, it should be linked with `dll\liblz4.dll`. For example:
+```
+ $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
+```
+The compiled executable will require the LZ4 DLL, which is available at `dll\liblz4.dll`.
+
+
+#### Miscellaneous
+
+Other files present in the directory are not source code. They are:
+
+ - `LICENSE` : contains the BSD license text
+ - `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
+ - `liblz4.pc.in` : for `pkg-config` (used in `make install`)
+ - `README.md` : this file
+
+[official interoperable frame format]: ../doc/lz4_Frame_format.md
+[LZ4 Frame format]: ../doc/lz4_Frame_format.md
+[LZ4 block format]: ../doc/lz4_Block_format.md
+
+
+#### License
+
+All source material within the __lib__ directory is BSD 2-Clause licensed.
+See [LICENSE](LICENSE) for details.
+The license is also restated at the top of each source file.
diff --git a/mfbt/lz4/README.mozilla b/mfbt/lz4/README.mozilla
new file mode 100644
index 0000000000..3974a20090
--- /dev/null
+++ b/mfbt/lz4/README.mozilla
@@ -0,0 +1,18 @@
+This directory contains the LZ4 source from the upstream repo:
+https://github.com/lz4/lz4/
+
+Current version: 1.9.4 [5ff839680134437dbf4678f3d0c7b371d84f4964]
+
+Our in-tree copy of LZ4 does not depend on any generated files from the
+upstream build system, only the lz4*.{c,h} files found in the lib
+sub-directory. Therefore, it should be sufficient to simply overwrite
+the in-tree files with the updated ones from upstream.
+
+If the collection of source files changes, manual updates to moz.build may be
+needed as we don't use the upstream makefiles.
+
+Note that we do NOT use the copy of xxhash.{c,h} from the LZ4 repo. We
+instead use the newer release from that project's upstream repo:
+https://github.com/Cyan4973/xxHash
+
+Current version: 0.8.1 [35b0373c697b5f160d3db26b1cbb45a0d5ba788c]
diff --git a/mfbt/lz4/lz4.c b/mfbt/lz4/lz4.c
new file mode 100644
index 0000000000..654bfdf32f
--- /dev/null
+++ b/mfbt/lz4/lz4.c
@@ -0,0 +1,2722 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * on the stack (0: default, fastest) or on the heap (1: requires malloc()).
+ */
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
+#endif
+
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ * It can generate buggy code on targets whose assembly generation depends on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+# if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define LZ4_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+# define LZ4_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+
+/*-************************************
+* Dependency
+**************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+# define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#include "lz4.h"
+/* see also "memory routines" below */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+# include <intrin.h> /* only present in VS2005+ */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */
+#endif /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+# ifdef _MSC_VER /* Visual Studio */
+# define LZ4_FORCE_INLINE static __forceinline
+# else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define LZ4_FORCE_INLINE static inline
+# endif
+# else
+# define LZ4_FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+# endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+# undef LZ4_FORCE_INLINE
+# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
+#else
+# define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#ifndef likely
+#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
+#define unlikely(expr) expect((expr) != 0, 0)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+# define LZ4_ALIGN_TEST 1
+#endif
+
+
+/*-************************************
+* Memory routines
+**************************************/
+
+/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
+ * Disable relatively high-level LZ4/HC functions that use dynamic memory
+ * allocation functions (malloc(), calloc(), free()).
+ *
+ * Note that this is a compile-time switch. And since it disables
+ * public/stable LZ4 v1 API functions, we don't recommend using this
+ * symbol to generate a library for distribution.
+ *
+ * The following public functions are removed when this symbol is defined.
+ * - lz4 : LZ4_createStream, LZ4_freeStream,
+ * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
+ * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
+ * LZ4_createHC (deprecated), LZ4_freeHC (deprecated)
+ * - lz4frame, lz4file : All LZ4F_* functions
+ */
+#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+# define ALLOC(s) lz4_error_memory_allocation_is_disabled
+# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
+# define FREEMEM(p) lz4_error_memory_allocation_is_disabled
+#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
+/* Memory management functions can be customized by the user project.
+ * The functions below must exist somewhere in the project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+# define ALLOC(s) LZ4_malloc(s)
+# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+# define FREEMEM(p) LZ4_free(p)
+#else
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,s)
+# define FREEMEM(p) free(p)
+#endif
+
+#if ! LZ4_FREESTANDING
+# include <string.h> /* memset, memcpy */
+#endif
+#if !defined(LZ4_memset)
+# define LZ4_memset(p,v,s) memset((p),(v),(s))
+#endif
+#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s))
+
+
+/*-************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+# include <stdio.h>
+ static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment -1)) == 0;
+}
+
+
+/*-************************************
+* Types
+**************************************/
+#include <limits.h>
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef uintptr_t uptrval;
+#else
+# if UINT_MAX != 4294967295UL
+# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+# endif
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+ typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+ typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+
+
+/*-************************************
+* Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if !defined(LZ4_memcpy)
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+# else
+# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+# endif
+#endif
+
+#if !defined(LZ4_memmove)
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4_memmove __builtin_memmove
+# else
+# define LZ4_memmove memmove
+# endif
+#endif
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;
+
+static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }
+
+static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }
+
+#else /* safe and portable access using memcpy() */
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr)
+{
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+}
+
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+
+
+#ifndef LZ4_FAST_DEC_LOOP
+# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+# define LZ4_FAST_DEC_LOOP 1
+# elif defined(__aarch64__) && defined(__APPLE__)
+# define LZ4_FAST_DEC_LOOP 1
+# elif defined(__aarch64__) && !defined(__clang__)
+ /* On non-Apple aarch64, we disable this optimization for clang because
+ * on certain mobile chipsets, performance is reduced with clang. For
+ * more information refer to https://github.com/lz4/lz4/pull/707 */
+# define LZ4_FAST_DEC_LOOP 1
+# else
+# define LZ4_FAST_DEC_LOOP 0
+# endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
+
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd.
+ * This version copies 16 bytes twice (instead of 32 bytes once)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch(offset) {
+ case 1:
+ MEM_INIT(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+# pragma warning(push)
+# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
+#endif
+ LZ4_memcpy(&v[4], v, 4);
+#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+# pragma warning(pop)
+#endif
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
+#endif
+
+
+/*-************************************
+* Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes (reg_t val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+ if (sizeof(val) == 8) {
+# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+/*-*************************************************************************************************
+* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
+* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
+* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
+****************************************************************************************************/
+# if defined(__clang__) && (__clang_major__ < 10)
+ /* Avoid undefined clang-cl intrinsics issue.
+ * See https://github.com/lz4/lz4/pull/1017 for details. */
+ return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
+# else
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+# endif
+# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
+# else
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+# endif
+ } else /* 32 bits */ {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
+# else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+# endif
+ }
+ } else /* Big Endian CPU */ {
+ if (sizeof(val)==8) {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
+# else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128-byte lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume extra memory, unlike the previous one,
+ * but it contains several branches
+ * that may end up slowing execution */
+ static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
+ unsigned r;
+ if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+#endif
+# endif
+ } else /* 32 bits */ {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz((U32)val) >> 3;
+# else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
+# endif
+ }
+ }
+}
+
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn+=STEPSIZE; pMatch+=STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ } }
+
+ while (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+* Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression runs slower on incompressible data */
+
+
+/*-************************************
+* Local Structures and types
+**************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously in memory, preceding the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Everything concerning the preceding content is
+ * in a separate context, pointed to by ctx->dictCtx.
+ * ctx->dictionary, ctx->dictSize, and table entries
+ * in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+
+/*-************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
+
+
+/*-****************************************
+* Internal Definitions, used only in Tests
+*******************************************/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
+int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
+ int compressedSize, int targetOutputSize, int dstCapacity,
+ const void* dictStart, size_t dictSize);
+#if defined (__cplusplus)
+}
+#endif
+
+/*-******************************
+* Compression functions
+********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
+ if (LZ4_isLittleEndian()) {
+ const U64 prime5bytes = 889523592379ULL;
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+ } else {
+ const U64 prime8bytes = 11400714785074694791ULL;
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+ }
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
+{
+ if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */ assert(0); return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+ void* tableBase, tableType_t const tableType,
+ const BYTE* srcBase)
+{
+ switch (tableType)
+ {
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32* const hashTable = (const U32*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16* const hashTable = (const U16*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+ return hashTable[h];
+ }
+ assert(0); return 0; /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+ { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+}
+
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if ((tableType_t)cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if ((tableType_t)cctx->tableType != tableType
+ || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+ || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+ || tableType == byPtr
+ || inputSize >= 4 KB)
+ {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = (U32)clearedTable;
+ } else {
+ DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
+ * is faster than compressing without a gap.
+ * However, compressing with currentOffset == 0 is faster still,
+ * so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal* const cctx,
+ const char* const source,
+ char* const dest,
+ const int inputSize,
+ int* inputConsumed, /* only written when outputDirective == fillOutput */
+ const int maxOutputSize,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ int result;
+ const BYTE* ip = (const BYTE*) source;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE* base = (const BYTE*) source - startIndex;
+ const BYTE* lowLimit;
+
+ const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+ const BYTE* const dictionary =
+ dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+ const U32 dictSize =
+ dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+ const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE* dictBase = (dictionary == NULL) ? NULL :
+ (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const olimit = op + maxOutputSize;
+
+ U32 offset = 0;
+ U32 forwardH;
+
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+ assert(ip != NULL);
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
+ if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
+ }
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U32)tableType;
+
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE* match;
+ BYTE* token;
+ const BYTE* filledIp;
+
+ /* Find a match */
+ if (tableType == byPtr) {
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+ } while ( (match+LZ4_DISTANCE_MAX < ip)
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+ } else { /* byU32, byU16 */
+
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const current = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex <= current);
+ assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ matchIndex += dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else if (dictDirective == usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+ assert(startIndex - matchIndex >= MINMATCH);
+ assert(dictBase);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+ DEBUGLOG(7, "candidate at pos=%u (offset=%u) \n", matchIndex, current - matchIndex);
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
+ assert(matchIndex < current);
+ if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+ && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+ continue;
+ } /* too far */
+ assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem) offset = current - matchIndex;
+ break; /* match found */
+ }
+
+ } while(1);
+ }
+
+ /* Catch up */
+ filledIp = ip;
+ while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+
+ /* Encode Literals */
+ { unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ int len = (int)(litLength - RUN_MASK);
+ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op+litLength);
+ op+=litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
+ }
+
+_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
+ /* Encode Offset */
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+ assert(ip-match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
+
+ /* Encode MatchLength */
+ { unsigned matchCode;
+
+ if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+ && (lowLimit==dictionary) /* match within extDict */ ) {
+ const BYTE* limit = ip + (dictEnd-match);
+ assert(dictEnd > match);
+ if (limit > matchlimit) limit = matchlimit;
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+ ip += (size_t)matchCode + MINMATCH;
+ if (ip==limit) {
+ unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+ matchCode += more;
+ ip += more;
+ }
+ DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
+ } else {
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
+ }
+
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE* ptr;
+ DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp; ++ptr) {
+ U32 const h = LZ4_hashPosition(ptr, tableType);
+ LZ4_clearHash(h, cctx->hashTable, tableType);
+ }
+ }
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+ while (matchCode >= 4*255) {
+ op+=4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4*255;
+ }
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip >= mflimitPlusOne) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+
+ /* Test next position */
+ if (tableType == byPtr) {
+
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ( (match+LZ4_DISTANCE_MAX >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ assert(dictBase);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+ && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+ && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+ token=op++;
+ *token=0;
+ if (maybe_extMem) offset = current - matchIndex;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+ goto _next_match;
+ }
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRun = (size_t)(iend - anchor);
+ if ( (outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit-op) - 1/*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
+ op += lastRun;
+ }
+
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
+ result = (int)(((char*)op) - dest);
+ assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+ return result;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of the src == NULL, srcSize == 0 case,
+ * and forwards the rest to LZ4_compress_generic_validated() */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal* const cctx,
+ const char* const src,
+ char* const dst,
+ const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+ srcSize, dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert (inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective,
+ tableType, dictDirective, dictIssue, acceleration);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal* const ctx = &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
+ assert(ctx != NULL);
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+{
+ LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
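+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * Typical use of the fast-reset variant above: pay the full initialization
+ * once, then reuse the same state across many blocks. All names below
+ * (state, srcBlock, dstBlock, nbBlocks, ...) are hypothetical.
+ *
+ *   LZ4_stream_t state;
+ *   LZ4_initStream(&state, sizeof(state));            // full init, once
+ *   for (size_t i = 0; i < nbBlocks; i++) {
+ *       int const written = LZ4_compress_fast_extState_fastReset(
+ *               &state, srcBlock[i], dstBlock[i], srcSize[i], dstCapacity, 1);
+ *       if (written == 0) break;                      // dst too small
+ *   }
+ */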
+
+
+int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ int result;
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc/calloc are always properly aligned */
+ if (ctxPtr == NULL) return 0;
+#else
+ LZ4_stream_t ctx;
+ LZ4_stream_t* const ctxPtr = &ctx;
+#endif
+ result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+
+#if (LZ4_HEAPMODE)
+ FREEMEM(ctxPtr);
+#endif
+ return result;
+}
+
+
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+{
+ return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
+}
+
+
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
+static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+ void* const s = LZ4_initStream(state, sizeof (*state));
+ assert(s != NULL); (void)s;
+
+ if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ } else {
+ tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+ } }
+}
+
+
+int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc/calloc are always properly aligned */
+ if (ctx == NULL) return 0;
+#else
+ LZ4_stream_t ctxBody;
+ LZ4_stream_t* ctx = &ctxBody;
+#endif
+
+ int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+
+#if (LZ4_HEAPMODE)
+ FREEMEM(ctx);
+#endif
+ return result;
+}
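+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * LZ4_compress_destSize() reverses the usual contract : the destination
+ * capacity is fixed, and *srcSizePtr is updated to report how much input
+ * was actually consumed. The names below are hypothetical.
+ *
+ *   char dst[4096];
+ *   int srcSize = (int)inputLen;                      // in : available input
+ *   int const cSize = LZ4_compress_destSize(input, dst, &srcSize, (int)sizeof(dst));
+ *   // out : cSize = compressed bytes written, srcSize = input bytes consumed
+ */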
+
+
+
+/*-******************************
+* Streaming functions
+********************************/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_stream_t* LZ4_createStream(void)
+{
+ LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
+ LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL) return NULL;
+ LZ4_initStream(lz4s, sizeof(*lz4s));
+ return lz4s;
+}
+#endif
+
+static size_t LZ4_stream_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) { return NULL; }
+ if (size < sizeof(LZ4_stream_t)) { return NULL; }
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+ return (LZ4_stream_t*)buffer;
+}
+
+/* resetStream is now deprecated,
+ * prefer initStream() which is more general */
+void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
+{
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+}
+
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
+{
+ if (!LZ4_stream) return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
+ FREEMEM(LZ4_stream);
+ return (0);
+}
+#endif
+
+
+#define HASH_UNIT sizeof(reg_t)
+int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
+ const BYTE* p = (const BYTE*)dictionary;
+ const BYTE* const dictEnd = p + dictSize;
+ const BYTE* base;
+
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
+
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable(),
+ * to avoid any risk of generating an overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
+ dict->currentOffset += 64 KB;
+
+ if (dictSize < (int)HASH_UNIT) {
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
+ base = dictEnd - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->tableType = (U32)tableType;
+
+ while (p <= dictEnd-HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, tableType, base);
+ p+=3;
+ }
+
+ return (int)dict->dictSize;
+}
+
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
+{
+ const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
+ &(dictionaryStream->internal_donotuse);
+
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
+ workingStream, dictionaryStream,
+ dictCtx != NULL ? dictCtx->dictSize : 0);
+
+ if (dictCtx != NULL) {
+ /* If the current offset is zero, we will never look in the
+ * external dictionary context, since there is no value a table
+ * entry can take that indicates a miss. In that case, we need
+ * to bump the offset to something non-zero.
+ */
+ if (workingStream->internal_donotuse.currentOffset == 0) {
+ workingStream->internal_donotuse.currentOffset = 64 KB;
+ }
+
+ /* Don't actually attach an empty dictionary.
+ */
+ if (dictCtx->dictSize == 0) {
+ dictCtx = NULL;
+ }
+ }
+ workingStream->internal_donotuse.dictCtx = dictCtx;
+}
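+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * Typical dictionary-attachment flow : index the dictionary once into its
+ * own stream with LZ4_loadDict(), then attach it cheaply to each working
+ * stream. The names below are hypothetical.
+ *
+ *   LZ4_stream_t dictStream, workStream;
+ *   LZ4_initStream(&dictStream, sizeof(dictStream));
+ *   LZ4_loadDict(&dictStream, dictBuf, (int)dictLen); // index once, reuse often
+ *   LZ4_initStream(&workStream, sizeof(workStream));
+ *   LZ4_attach_dictionary(&workStream, &dictStream);
+ *   int const n = LZ4_compress_fast_continue(&workStream, src, dst,
+ *                                            (int)srcLen, (int)dstCap, 1);
+ */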
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
+{
+ assert(nextSize >= 0);
+ if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 KB;
+ const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+ DEBUGLOG(4, "LZ4_renormDictT");
+ for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+ else LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 KB;
+ if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+ const char* source, char* dest,
+ int inputSize, int maxOutputSize,
+ int acceleration)
+{
+ const tableType_t tableType = byU32;
+ LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
+ const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
+
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
+
+ LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+ /* invalidate tiny dictionaries */
+ if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */
+ && (dictEnd != source) /* prefix mode */
+ && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
+ && (streamPtr->dictCtx == NULL) /* not usingDictCtx */
+ ) {
+ DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+ /* remove dictionary existence from history, to employ faster prefix mode */
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE*)source;
+ dictEnd = source;
+ }
+
+ /* Check overlapping input/dictionary space */
+ { const char* const sourceEnd = source + inputSize;
+ if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+ if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == source) {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
+ else
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
+ }
+
+ /* external dictionary mode */
+ { int result;
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+ }
+ } else { /* small data <= 4 KB */
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ }
+ }
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ return result;
+ }
+}
+
+
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
+{
+ LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+ int result;
+
+ LZ4_renormDictT(streamPtr, srcSize);
+
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ }
+
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)srcSize;
+
+ return result;
+}
+
+
+/*! LZ4_saveDict() :
+ * If the previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : there is no need to call LZ4_loadDict() afterwards; the dictionary is immediately usable,
+ * so one can call LZ4_compress_fast_continue() right after.
+ * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 on error.
+ */
+int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+
+ DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
+
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
+
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0) {
+ const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+ assert(dict->dictionary);
+ LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
+ }
+
+ dict->dictionary = (const BYTE*)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
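+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * When the memory holding the previous block is about to be reused,
+ * preserve the history window with LZ4_saveDict() and keep streaming.
+ * The names below are hypothetical.
+ *
+ *   char dictBuf[64 * 1024];
+ *   int const n = LZ4_compress_fast_continue(&stream, chunk, out,
+ *                                            chunkLen, outCap, 1);
+ *   int const saved = LZ4_saveDict(&stream, dictBuf, (int)sizeof(dictBuf));
+ *   // `chunk` may now be overwritten; history lives in dictBuf[0..saved)
+ */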
+
+
+
+/*-*******************************
+ * Decompression functions
+ ********************************/
+
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+
+
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+static size_t read_long_length_no_check(const BYTE** pp)
+{
+ size_t b, l = 0;
+ do { b = **pp; (*pp)++; l += b; } while (b==255);
+ DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
+ return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presumes the input buffer is "large enough")
+ * - Decompresses a full block (only)
+ * @return : nb of bytes read from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ * The goal is to remove support for the decompress_fast*() variants by v2.0.
+**/
+LZ4_FORCE_INLINE int
+LZ4_decompress_unsafe_generic(
+ const BYTE* const istart,
+ BYTE* const ostart,
+ int decompressedSize,
+
+ size_t prefixSize,
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note: =0 if dictStart==NULL */
+ )
+{
+ const BYTE* ip = istart;
+ BYTE* op = (BYTE*)ostart;
+ BYTE* const oend = ostart + decompressedSize;
+ const BYTE* const prefixStart = ostart - prefixSize;
+
+ DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+ if (dictStart == NULL) assert(dictSize == 0);
+
+ while (1) {
+ /* start new sequence */
+ unsigned token = *ip++;
+
+ /* literals */
+ { size_t ll = token >> ML_BITS;
+ if (ll==15) {
+ /* long literal length */
+ ll += read_long_length_no_check(&ip);
+ }
+ if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
+ LZ4_memmove(op, ip, ll); /* support in-place decompression */
+ op += ll;
+ ip += ll;
+ if ((size_t)(oend-op) < MFLIMIT) {
+ if (op==oend) break; /* end of block */
+ DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
+ /* incorrect end of block :
+ * last match must start at least MFLIMIT==12 bytes before end of output block */
+ return -1;
+ } }
+
+ /* match */
+ { size_t ml = token & 15;
+ size_t const offset = LZ4_readLE16(ip);
+ ip+=2;
+
+ if (ml==15) {
+ /* long match length */
+ ml += read_long_length_no_check(&ip);
+ }
+ ml += MINMATCH;
+
+ if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
+
+ { const BYTE* match = op - offset;
+
+ /* out of range */
+ if (offset > (size_t)(op - prefixStart) + dictSize) {
+ DEBUGLOG(6, "offset out of range");
+ return -1;
+ }
+
+ /* check special case : extDict */
+ if (offset > (size_t)(op - prefixStart)) {
+ /* extDict scenario */
+ const BYTE* const dictEnd = dictStart + dictSize;
+ const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
+ size_t const extml = (size_t)(dictEnd - extMatch);
+ if (extml > ml) {
+ /* match entirely within extDict */
+ LZ4_memmove(op, extMatch, ml);
+ op += ml;
+ ml = 0;
+ } else {
+ /* match split between extDict & prefix */
+ LZ4_memmove(op, extMatch, extml);
+ op += extml;
+ ml -= extml;
+ }
+ match = prefixStart;
+ }
+
+ /* match copy - slow variant, supporting overlap copy */
+ { size_t u;
+ for (u=0; u<ml; u++) {
+ op[u] = match[u];
+ } } }
+ op += ml;
+ if ((size_t)(oend-op) < LASTLITERALS) {
+ DEBUGLOG(5, "invalid: match ends at distance %zi from end of block", oend-op);
+ /* incorrect end of block :
+ * last match must stop at least LASTLITERALS==5 bytes before end of output block */
+ return -1;
+ }
+ } /* match */
+ } /* main loop */
+ return (int)(ip - istart);
+}
+
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer, advanced past the length bytes consumed
+ * @ilimit : position beyond which, if the length is still not fully decoded, the input is necessarily corrupted
+ * @initial_check : when non-zero, check *ip against ilimit before entering the loop
+ * @return : the accumulated length, or rvl_error if the read limit is reached or the accumulator overflows
+**/
+typedef size_t Rvl_t;
+static const Rvl_t rvl_error = (Rvl_t)(-1);
+LZ4_FORCE_INLINE Rvl_t
+read_variable_length(const BYTE** ip, const BYTE* ilimit,
+ int initial_check)
+{
+ Rvl_t s, length = 0;
+ assert(ip != NULL);
+ assert(*ip != NULL);
+ assert(ilimit != NULL);
+ if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (unlikely((*ip) > ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ /* accumulator overflow detection (32-bit mode only) */
+ if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+ return rvl_error;
+ }
+ } while (s==255);
+
+ return length;
+}
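+
+/* Editor's note — worked example of the encoding read above, not upstream
+ * documentation. A length field saturates its token nibble at 15 (RUN_MASK /
+ * ML_MASK), then extra bytes are summed until a byte != 255. For literal
+ * lengths :
+ *
+ *   token nibble 15, extra bytes {255, 0}  ->  15 + 255 + 0  = 270 literals
+ *   token nibble 15, extra bytes {255, 37} ->  15 + 255 + 37 = 307 literals
+ *
+ * read_variable_length() returns only the sum of the extra bytes; the caller
+ * adds the token nibble (and MINMATCH, for match lengths).
+ */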
+
+/*! LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
+ const char* const src,
+ char* const dst,
+ int srcSize,
+ int outputSize, /* this value is `dstCapacity` */
+
+ earlyEnd_directive partialDecoding, /* full, partial */
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+{
+ if ((src == NULL) || (outputSize < 0)) { return -1; }
+
+ { const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ BYTE* op = (BYTE*) dst;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
+
+ const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
+
+ const int checkOffset = (dictSize < (int)(64 KB));
+
+
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
+ const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
+
+ const BYTE* match;
+ size_t offset;
+ unsigned token;
+ size_t length;
+
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if (unlikely(outputSize==0)) {
+ /* Empty output buffer */
+ if (partialDecoding) return 0;
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+ }
+ if (unlikely(srcSize==0)) { return -1; }
+
+ /* LZ4_FAST_DEC_LOOP:
+ * designed for modern out-of-order performance CPUs,
+ * where reliably copying 32 bytes is preferable to an unpredictable branch.
+ * note : the fast loop may show a regression on some client ARM chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "skip fast decode loop");
+ goto safe_decode;
+ }
+
+ /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
+ if (addl == rvl_error) { goto _output_error; }
+ length += addl;
+ if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+
+ /* copy literals */
+ cpy = op+length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ LZ4_wildCopy32(op, ip, cpy);
+ ip += length; op = cpy;
+ } else {
+ cpy = op+length;
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+ /* We don't need to check oend, since we check it once for each loop below */
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ /* Literals can only be <= 14 here, but copying a full 16 bytes lets compilers generate better register-sized copies */
+ LZ4_memcpy(op, ip, 16);
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+ assert(match <= op); /* overflow check */
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) { goto _output_error; }
+ length += addl;
+ length += MINMATCH;
+ if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: skip LZ4_wildCopy32 when true */
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
+ op += length;
+ continue;
+ } } }
+
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) {
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(length, (size_t)(oend-op));
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ } }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend-op >= 32));
+ if (unlikely(offset<16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
+ }
+ safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Furthermore, if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (length != RUN_MASK)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((ip < shortiend) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, 16);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
+ if (addl == rvl_error) { goto _output_error; }
+ length += addl;
+ if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ }
+
+ /* copy literals */
+ cpy = op+length;
+#if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+#endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are doing partial decoding, we may be in this block because of the output parsing
+ * restriction, which is not an error, since the output buffer is allowed to be undersized.
+ */
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
+ } else {
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((ip+length != iend) || (cpy > oend)) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
+ }
+ LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */
+ ip += length;
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
+ break;
+ }
+ } else {
+ LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ _copy_match:
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) { goto _output_error; }
+ length += addl;
+ if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+#endif
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op));
+ else goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op<=oend);
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = MIN(length, (size_t)(oend-op));
+ const BYTE* const matchEnd = match + mlen;
+ BYTE* const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) { break; }
+ continue;
+ }
+
+ if (unlikely(offset<8)) {
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op+4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
+ }
+ op += 8;
+
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ }
+ op = cpy; /* wildcopy correction */
+ }
+
+ /* end of decoding */
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
+
+ /* Overflow error detected */
+ _output_error:
+ return (int) (-(((const char*)ip)-src))-1;
+ }
+}
+
+
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ decode_full_block, noDict,
+ (BYTE*)dest, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ partial_decode,
+ noDict, (BYTE*)dst, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
+{
+ DEBUGLOG(5, "LZ4_decompress_fast");
+ return LZ4_decompress_unsafe_generic(
+ (const BYTE*)source, (BYTE*)dest, originalSize,
+ 0, NULL, 0);
+}
+
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ return LZ4_decompress_unsafe_generic(
+ (const BYTE*)source, (BYTE*)dest, originalSize,
+ 64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ decode_full_block, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,
+ size_t prefixSize)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
+ int compressedSize, int targetOutputSize, int dstCapacity,
+ const void* dictStart, size_t dictSize)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_unsafe_generic(
+ (const BYTE*)source, (BYTE*)dest, originalSize,
+ 0, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_streamDecode_t* LZ4_createStreamDecode(void)
+{
+ LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));
+ return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+}
+
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
+{
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+#endif
+
+/*! LZ4_setStreamDecode() :
+ * Use this function to indicate where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ lz4sd->prefixSize = (size_t)dictSize;
+ if (dictSize) {
+ assert(dictionary != NULL);
+ lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
+ } else {
+ lz4sd->prefixEnd = (const BYTE*) dictionary;
+ }
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
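+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * Point the decoder at an external dictionary (or at previously decoded
+ * data that has moved), then decode the next block. Names are hypothetical.
+ *
+ *   LZ4_streamDecode_t sd;
+ *   LZ4_setStreamDecode(&sd, dictBuf, (int)dictLen);
+ *   int const n = LZ4_decompress_safe_continue(&sd, srcBlock, dst,
+ *                                              cSize, dstCapacity);
+ *   // n < 0 on malformed input; otherwise n bytes were decoded into dst
+ */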
+
+/*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for the next block (remainingSize < maxBlockSize),
+ * decoding resumes from the beginning of the ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
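+
+/* Editor's note — worked example, not upstream documentation. With the
+ * definition LZ4_DECODER_RING_BUFFER_SIZE(mbs) == 65536 + 14 + (mbs) from
+ * lz4.h, a stream whose blocks never exceed 4 KB needs a ring buffer of
+ * 65536 + 14 + 4096 = 69646 bytes :
+ *
+ *   int const rbSize = LZ4_decoderRingBufferSize(4096);   // 69646
+ */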
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If that's not possible, save the relevant part of the decoded data into a safe buffer,
+ and indicate its position using LZ4_setStreamDecode().
+*/
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)result;
+ lz4sd->prefixEnd += result;
+ } else {
+ /* The buffer wraps around, or they're switching to another buffer. */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ }
+
+ return result;
+}
+
+LZ4_FORCE_O2 int
+LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,
+ const char* source, char* dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal* const lz4sd =
+ (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);
+ int result;
+
+ DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
+ assert(originalSize >= 0);
+
+ if (lz4sd->prefixSize == 0) {
+ DEBUGLOG(5, "first invocation : no prefix nor extDict");
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ DEBUGLOG(5, "continue using existing prefix");
+ result = LZ4_decompress_unsafe_generic(
+ (const BYTE*)source, (BYTE*)dest, originalSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ DEBUGLOG(5, "prefix becomes extDict");
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ }
+
+ return result;
+}
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as the "_continue" ones,
+ except that the dictionary must be explicitly provided as a parameter.
+*/
+
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0)
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
+}
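+
+/* Editor's note — illustrative usage sketch, not part of upstream LZ4.
+ * One-shot decompression of a block that was compressed against an
+ * external dictionary. The names below are hypothetical.
+ *
+ *   int const n = LZ4_decompress_safe_usingDict(srcBlock, dst, cSize,
+ *                                               dstCapacity,
+ *                                               dictBuf, (int)dictLen);
+ *   // n < 0 on malformed input; otherwise n bytes were decoded
+ */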
+
+int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
+{
+ if (dictSize==0)
+ return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_unsafe_generic(
+ (const BYTE*)source, (BYTE*)dest, originalSize,
+ (size_t)dictSize, NULL, 0);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
+}
+
+
+/*=*************************************************
+* Obsolete Functions
+***************************************************/
+/* obsolete compression functions */
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* src, char* dest, int srcSize)
+{
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+}
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+}
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+}
+
+/*
+These decompression functions are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+int LZ4_uncompress (const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }
+
+int LZ4_resetStreamState(void* state, char* inputBuffer)
+{
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
+ return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void* LZ4_create (char* inputBuffer)
+{
+ (void)inputBuffer;
+ return LZ4_createStream();
+}
+#endif
+
+char* LZ4_slideInputBuffer (void* state)
+{
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
diff --git a/mfbt/lz4/lz4.h b/mfbt/lz4/lz4.h
new file mode 100644
index 0000000000..491c6087c4
--- /dev/null
+++ b/mfbt/lz4/lz4.h
@@ -0,0 +1,842 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Header File
+ * Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ Introduction
+
+  LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
+  scalable with multi-core CPUs. It features an extremely fast decoder, with speed in
+  multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
+  It gives full buffer control to the user.
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+  The exact metadata depends on the decompression function used.
+  For the typical case of LZ4_decompress_safe(),
+  the metadata includes the block's compressed size, and an upper bound of the decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+  lz4.h only handles blocks; it cannot generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
+*/
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
+/*
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
+*/
+#ifndef LZ4LIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4LIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+#  define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*! LZ4_FREESTANDING :
+ * When this macro is set to 1, it enables "freestanding mode", which is
+ * suitable for a typical freestanding environment that doesn't support
+ * the standard C library.
+ *
+ * - LZ4_FREESTANDING is a compile-time switch.
+ * - It requires the following macros to be defined:
+ * LZ4_memcpy, LZ4_memmove, LZ4_memset.
+ * - It only enables LZ4/HC functions which don't use the heap.
+ *   None of the LZ4F_* functions are supported.
+ * - See tests/freestanding.c to check its basic setup.
+ */
+#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
+# define LZ4_HEAPMODE 0
+# define LZ4HC_HEAPMODE 0
+# define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
+# if !defined(LZ4_memcpy)
+# error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
+# endif
+# if !defined(LZ4_memset)
+# error "LZ4_FREESTANDING requires macro 'LZ4_memset'."
+# endif
+# if !defined(LZ4_memmove)
+# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'."
+# endif
+#elif ! defined(LZ4_FREESTANDING)
+# define LZ4_FREESTANDING 0
+#endif
+
+
+/*------ Version ------*/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */
+
+LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */
+LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */
+
+
+/*-************************************
+* Tuning parameter
+**************************************/
+#define LZ4_MEMORY_USAGE_MIN 10
+#define LZ4_MEMORY_USAGE_DEFAULT 14
+#define LZ4_MEMORY_USAGE_MAX 20
+
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
+ * Increasing memory usage improves compression ratio, at the cost of speed.
+ * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
+ * Default value is 14, for 16KB, which fits nicely into an Intel x86 L1 cache.
+ */
+#ifndef LZ4_MEMORY_USAGE
+# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
+#endif
+
+#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
+# error "LZ4_MEMORY_USAGE is too small !"
+#endif
+
+#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX)
+# error "LZ4_MEMORY_USAGE is too large !"
+#endif
+
+/*-************************************
+* Simple Functions
+**************************************/
+/*! LZ4_compress_default() :
+ * Compresses 'srcSize' bytes from buffer 'src'
+ *  into an already allocated 'dst' buffer of size 'dstCapacity'.
+ *  Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ *  It also runs faster, so it's a recommended setting.
+ *  If the function cannot compress 'src' into a more limited 'dst' budget,
+ *  compression stops *immediately*, and the function result is zero.
+ *  In that case, the 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ *  Note : This function is protected against buffer overflow scenarios (it never writes outside the 'dst' buffer, nor reads outside the 'src' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
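+
+/*  Example (illustrative sketch, not part of the API) :
+ *  a minimal compression call, sizing 'dst' with LZ4_compressBound()
+ *  (declared below) so that compression cannot fail.
+ *  Variable names are hypothetical.
+ *
+ *      const char src[] = "some data to compress";
+ *      int const srcSize = (int)sizeof(src);
+ *      int const dstCapacity = LZ4_compressBound(srcSize);
+ *      char* const dst = (char*)malloc((size_t)dstCapacity);
+ *      int const cSize = LZ4_compress_default(src, dst, srcSize, dstCapacity);
+ *      if (cSize <= 0) { abort(); }   // 0 means compression failed
+ *      // 'dst' now holds 'cSize' bytes of compressed data
+ */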
+
+/*! LZ4_decompress_safe() :
+ * compressedSize : is the exact complete size of the compressed block.
+ *  dstCapacity : is the size of destination buffer (which must be already allocated), presumed to be an upper bound of the decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ *         it never writes outside the 'dst' buffer, nor reads outside the 'source' buffer,
+ *         even if the compressed block is maliciously modified to direct the decoder to do these actions.
+ *         In such a case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
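+
+/*  Example (illustrative sketch) :
+ *  decoding the block produced in the sketch above; the application must
+ *  transport 'cSize' and an upper bound of the decompressed size itself.
+ *  'dst', 'cSize' and 'srcSize' are carried over from that sketch.
+ *
+ *      char* const regen = (char*)malloc((size_t)srcSize);
+ *      int const dSize = LZ4_decompress_safe(dst, regen, cSize, srcSize);
+ *      if (dSize < 0) { abort(); }   // negative result means malformed input
+ *      // round-trip : 'regen' holds 'dSize' == 'srcSize' bytes identical to 'src'
+ */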
+
+
+/*-************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+
+/*! LZ4_compressBound() :
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
+ or 0, if input size is incorrect (too large or negative)
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
+
+/*! LZ4_compress_fast() :
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
+  The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio.
+  It's a trade-off, and it can be fine-tuned, with each successive value providing roughly +3% speed.
+  An acceleration value of "1" is the same as regular LZ4_compress_default().
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
+*/
+LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_fast_extState() :
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
+ * Use LZ4_sizeofState() to know how much memory must be allocated,
+ *  and allocate it on an 8-byte boundary (typically using `malloc()`).
+ * Then, provide this buffer as `void* state` to compression function.
+ */
+LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
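+
+/*  Example (illustrative sketch) :
+ *  compressing with an externally allocated state. The same state can be
+ *  re-used for subsequent compressions. Names are hypothetical.
+ *
+ *      void* const state = malloc((size_t)LZ4_sizeofState());   // malloc() satisfies the 8-byte alignment requirement
+ *      int const cSize = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
+ *      // ... compress more independent buffers with the same 'state' ...
+ *      free(state);
+ */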
+
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fills the 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
+ * New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ * or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ *        the produced compressed content could, in specific circumstances,
+ *        require a destination buffer larger by at least 1 byte
+ *        than the content to decompress.
+ *        If an application uses `LZ4_compress_destSize()`,
+ *        it's highly recommended to update liblz4 to v1.9.2 or newer.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
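+
+/*  Example (illustrative sketch) :
+ *  filling a fixed-size packet with as much of 'src' as fits.
+ *  'src', 'hugeSrcSize' and the 4 KB packet size are hypothetical.
+ *
+ *      char packet[4096];
+ *      int srcSize = hugeSrcSize;   // updated by the call to the nb of bytes consumed
+ *      int const cSize = LZ4_compress_destSize(src, packet, &srcSize, (int)sizeof(packet));
+ *      // on success : 'packet' holds 'cSize' bytes, representing the first 'srcSize' bytes of 'src'
+ */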
+
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ * If source stream is detected malformed, function returns a negative result.
+ *
+ * Note 1 : @return can be < targetOutputSize, if the compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ *           making dstCapacity somewhat redundant.
+ *           This is because, in older versions of this function,
+ *           the decoding operation would still write complete sequences.
+ *           Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize;
+ *           it could write more bytes, though only up to dstCapacity.
+ * Some "margin" used to be required for this operation to work properly.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
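+
+/*  Example (illustrative sketch) :
+ *  recovering only the first 256 bytes of a block, e.g. to peek at a
+ *  header stored at the beginning of the decompressed payload.
+ *  'src' and 'compressedSize' are hypothetical.
+ *
+ *      char peek[256];
+ *      int const dSize = LZ4_decompress_safe_partial(src, peek, compressedSize,
+ *                                                    (int)sizeof(peek), (int)sizeof(peek));
+ *      if (dSize < 0) { abort(); }   // malformed input
+ *      // 'peek' holds 'dSize' (<= 256) bytes
+ */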
+
+
+/*-*********************************************
+* Streaming Compression Functions
+***********************************************/
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+
+/**
+ Note about RC_INVOKED
+
+  - RC_INVOKED is a predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
+    https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros
+
+  - Since rc.exe is a legacy compiler, it truncates long symbols (> 30 chars)
+    and reports warning "RC4011: identifier truncated".
+
+  - To eliminate the warning, we surround long preprocessor symbols with
+    an "#if !defined(RC_INVOKED) ... #endif" block, which means
+ "skip this block when rc.exe is trying to read it".
+*/
+#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on the stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * The same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ * in the context of streaming compression.
+ * The *extState* functions perform their own resets.
+ *  Invoking LZ4_resetStream_fast() beforehand is redundant, and even counterproductive.
+ */
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
+
+/*! LZ4_loadDict() :
+ *  Use this function to make an LZ4_stream_t reference a static dictionary.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on decompression side for successful decoding.
+ *  Dictionaries are useful for better compression of small data (KB range).
+ *  While LZ4 accepts any input as a dictionary,
+ *  results are generally better when using Zstandard's Dictionary Builder.
+ *  Loading a size of 0 is allowed, and is the same as a reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
+ */
+LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
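+
+/*  Example (illustrative sketch) :
+ *  compressing a small message against a static dictionary, then decoding
+ *  it with LZ4_decompress_safe_usingDict() (declared further below).
+ *  'dict', 'dictSize', 'msg' and the other names are hypothetical.
+ *
+ *      LZ4_stream_t ctx;
+ *      LZ4_initStream(&ctx, sizeof(ctx));   // declared below, v1.9.0+
+ *      LZ4_loadDict(&ctx, dict, dictSize);
+ *      int const cSize = LZ4_compress_fast_continue(&ctx, msg, dst, msgSize, dstCapacity, 1);
+ *      // decoding side, using the same dictionary :
+ *      int const dSize = LZ4_decompress_safe_usingDict(dst, regen, cSize, regenCapacity, dict, dictSize);
+ */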
+
+/*! LZ4_compress_fast_continue() :
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ * or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
+ * Each block has precise boundaries.
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at the same address in memory!
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ *          Make sure that the buffers are separated by at least one byte.
+ *          This construction ensures that each block only depends on the previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
+ */
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
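+
+/*  Example (illustrative sketch) :
+ *  double-buffer streaming compression. Two input buffers are filled in
+ *  alternation, so the previous 64 KB always remains in memory, as
+ *  required by Note 2 and Note 3 above. 'fin', 'dst' and 'dstCapacity'
+ *  are hypothetical.
+ *
+ *      LZ4_stream_t ctx;
+ *      char inpBuf[2][64 * 1024];
+ *      int idx = 0;
+ *      LZ4_initStream(&ctx, sizeof(ctx));
+ *      for (;;) {
+ *          int const inpSize = (int)fread(inpBuf[idx], 1, sizeof(inpBuf[idx]), fin);
+ *          if (inpSize == 0) break;
+ *          int const cSize = LZ4_compress_fast_continue(&ctx, inpBuf[idx], dst, inpSize, dstCapacity, 1);
+ *          // ... transmit the block ('dst', 'cSize') together with its size ...
+ *          idx ^= 1;   // switch buffers; the previous one stays untouched
+ *      }
+ */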
+
+/*! LZ4_saveDict() :
+ *  If the last 64KB of data cannot be guaranteed to remain available at its current memory location,
+ * save it into a safer place (char* safeBuffer).
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
+ */
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
+
+
+/*-**********************************************
+* Streaming Decompression Functions
+* Bufferless synchronous API
+************************************************/
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
+
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
+#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_setStreamDecode() :
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ *  The dictionary is presumed stable : it must remain accessible and unmodified during the next decompression.
+ * @return : 1 if OK, 0 if error
+ */
+LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
+
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ *  up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),
+ *  at which stage decoding resumes from the beginning of the ring buffer.
+ *  When setting up such a ring buffer for streaming decompression,
+ *  this function provides the minimum size of the ring buffer
+ *  required to be compatible with any source respecting the maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_*_continue() :
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
+ *  A block is an unsplittable entity; it must be presented in its entirety to a decompression function.
+ *  Decompression functions only accept one block at a time.
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized.
+ * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ * Decompression buffer size is _exactly_ the same as compression buffer size,
+ * and follows exactly same update rule (block boundaries at same positions),
+ *    and the decoding function is provided with the exact decompressed size of each block (except for the last block of the stream),
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized,
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *
+ *  Whenever these conditions cannot be met,
+ *  save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
+ *  then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing the next block.
+*/
+LZ4LIB_API int
+LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode,
+ const char* src, char* dst,
+ int srcSize, int dstCapacity);
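+
+/*  Example (illustrative sketch) :
+ *  decoding a stream of blocks produced by the double-buffer compression
+ *  sketch above, mirroring its buffer discipline. 'readBlock()' is a
+ *  hypothetical transport callback returning each block's compressed size.
+ *
+ *      LZ4_streamDecode_t dctx;
+ *      char decBuf[2][64 * 1024];
+ *      int idx = 0;
+ *      LZ4_setStreamDecode(&dctx, NULL, 0);   // fresh stream, no dictionary
+ *      for (;;) {
+ *          int const cSize = readBlock(src);   // fills 'src', returns 0 at end of stream
+ *          if (cSize == 0) break;
+ *          int const dSize = LZ4_decompress_safe_continue(&dctx, src, decBuf[idx],
+ *                                                         cSize, (int)sizeof(decBuf[idx]));
+ *          if (dSize < 0) { abort(); }   // malformed block
+ *          // ... consume 'dSize' bytes from decBuf[idx] ...
+ *          idx ^= 1;   // previously decoded data stays available, as required
+ *      }
+ */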
+
+
+/*! LZ4_decompress_*_usingDict() :
+ * These decoding functions work the same as
+ *  a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue().
+ *  They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+ *  The dictionary is presumed stable : it must remain accessible and unmodified during decompression.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int
+LZ4_decompress_safe_usingDict(const char* src, char* dst,
+ int srcSize, int dstCapacity,
+ const char* dictStart, int dictSize);
+
+LZ4LIB_API int
+LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
+ int compressedSize,
+ int targetOutputSize, int maxOutputSize,
+ const char* dictStart, int dictSize);
+
+#endif /* LZ4_H_2983827168210 */
+
+
+/*^*************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ * This is an experimental API that allows
+ * efficient use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDict() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void
+LZ4_attach_dictionary(LZ4_stream_t* workingStream,
+ const LZ4_stream_t* dictionaryStream);
+
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires the input to lie at the end of the buffer,
+ * and decompression to start at the beginning of the buffer.
+ * The buffer size must include some margin, and hence be larger than the final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since the decompressed size is typically larger,
+ * and the margin is small.
+ *
+ * In-place decompression will work inside any buffer
+ * whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * whose size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
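+
+/*  Example (illustrative sketch) :
+ *  in-place decompression of a block whose decompressed size 'dSize' and
+ *  compressed size 'cSize' are known. The input is first copied to the
+ *  end of the buffer; output starts at the beginning. Names are hypothetical.
+ *
+ *      int const bufSize = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(dSize);
+ *      char* const buf = (char*)malloc((size_t)bufSize);
+ *      memcpy(buf + bufSize - cSize, src, (size_t)cSize);   // input lies at the end of the buffer
+ *      int const r = LZ4_decompress_safe(buf + bufSize - cSize, buf, cSize, dSize);
+ */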
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ * Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose user code to API and/or ABI breakage in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef int8_t LZ4_i8;
+ typedef uint8_t LZ4_byte;
+ typedef uint16_t LZ4_u16;
+ typedef uint32_t LZ4_u32;
+#else
+ typedef signed char LZ4_i8;
+ typedef unsigned char LZ4_byte;
+ typedef unsigned short LZ4_u16;
+ typedef unsigned int LZ4_u32;
+#endif
+
+/*! LZ4_stream_t :
+ *  Never ever use the internal definitions below directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_stream_t object.
+**/
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+ const LZ4_byte* dictionary;
+ const LZ4_stream_t_internal* dictCtx;
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ LZ4_u32 dictSize;
+ /* Implicit padding to ensure structure is aligned */
+};
+
+#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */
+union LZ4_stream_u {
+ char minStateSize[LZ4_STREAM_MINSIZE];
+ LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ *  but it's not when the structure is simply declared on the stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ *         In that case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+**/
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
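+
+/*  Example (illustrative sketch) :
+ *  initializing a stack-allocated LZ4_stream_t before first use.
+ *
+ *      LZ4_stream_t ctx;
+ *      LZ4_stream_t* const s = LZ4_initStream(&ctx, sizeof(ctx));
+ *      if (s == NULL) { abort(); }   // size or alignment conditions not respected
+ *      // 's' (== &ctx) is now ready for LZ4_compress_fast_continue(), etc.
+ */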
+
+
+/*! LZ4_streamDecode_t :
+ *  Never ever use the internal definitions below directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_streamDecode_t object.
+**/
+typedef struct {
+ const LZ4_byte* externalDict;
+ const LZ4_byte* prefixEnd;
+ size_t extDictSize;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+#define LZ4_STREAMDECODE_MINSIZE 32
+union LZ4_streamDecode_u {
+ char minStateSize[LZ4_STREAMDECODE_MINSIZE];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} ; /* previously typedef'd to LZ4_streamDecode_t */
+
+
+
+/*-************************************
+* Obsolete Functions
+**************************************/
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ *  or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
+ */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define LZ4_DEPRECATED(message) [[deprecated(message)]]
+# elif defined(_MSC_VER)
+# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+# define LZ4_DEPRECATED(message) /* disabled */
+# endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/*! Obsolete decompression functions (since v1.8.0) */
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+ * These functions used to be faster than LZ4_decompress_safe(),
+ * but this is no longer the case. They are now slower.
+ * This is because LZ4_decompress_fast() doesn't know the input size,
+ *  and therefore must progress more cautiously into the input buffer to not read beyond the end of the block.
+ * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
+ *
+ * The last remaining LZ4_decompress_fast() specificity is that
+ * it can decompress a block without knowing its compressed size.
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
+ *
+ * Parameters:
+ * originalSize : is the uncompressed size to regenerate.
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ * The function expects to finish at block's end exactly.
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too.
+ * These issues never happen if input (compressed) data is correct.
+ * But they may happen if input data is invalid (error or intentional tampering).
+ * As a consequence, use these functions in trusted environments with trusted data **only**.
+ */
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
+LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
+
+/*! LZ4_resetStream() :
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is done with LZ4_initStream(), or LZ4_resetStream().
+ *  Consider switching to LZ4_initStream();
+ *  invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
+
+#endif /* LZ4_H_98237428734687 */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/mfbt/lz4/lz4file.c b/mfbt/lz4/lz4file.c
new file mode 100644
index 0000000000..eaf9b1704d
--- /dev/null
+++ b/mfbt/lz4/lz4file.c
@@ -0,0 +1,311 @@
+/*
+ * LZ4 file library
+ * Copyright (C) 2022, Xiaomi Inc.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
+#include <stdlib.h>
+#include <string.h>
+#include "lz4.h"
+#include "lz4file.h"
+
+struct LZ4_readFile_s {
+ LZ4F_dctx* dctxPtr;
+ FILE* fp;
+ LZ4_byte* srcBuf;
+ size_t srcBufNext;
+ size_t srcBufSize;
+ size_t srcBufMaxSize;
+};
+
+struct LZ4_writeFile_s {
+ LZ4F_cctx* cctxPtr;
+ FILE* fp;
+ LZ4_byte* dstBuf;
+ size_t maxWriteSize;
+ size_t dstBufMaxSize;
+ LZ4F_errorCode_t errCode;
+};
+
+LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp)
+{
+ char buf[LZ4F_HEADER_SIZE_MAX];
+ size_t consumedSize;
+ LZ4F_errorCode_t ret;
+ LZ4F_frameInfo_t info;
+
+ if (fp == NULL || lz4fRead == NULL) {
+ return -LZ4F_ERROR_GENERIC;
+ }
+
+ *lz4fRead = (LZ4_readFile_t*)calloc(1, sizeof(LZ4_readFile_t));
+ if (*lz4fRead == NULL) {
+ return -LZ4F_ERROR_allocation_failed;
+ }
+
+ ret = LZ4F_createDecompressionContext(&(*lz4fRead)->dctxPtr, LZ4F_getVersion());
+ if (LZ4F_isError(ret)) {
+ free(*lz4fRead);
+ return ret;
+ }
+
+ (*lz4fRead)->fp = fp;
+ consumedSize = fread(buf, 1, sizeof(buf), (*lz4fRead)->fp);
+  if (consumedSize != sizeof(buf)) {
+    LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr); /* avoid leaking the context on a short header read */
+    free(*lz4fRead);
+    return -LZ4F_ERROR_GENERIC;
+  }
+
+ ret = LZ4F_getFrameInfo((*lz4fRead)->dctxPtr, &info, buf, &consumedSize);
+ if (LZ4F_isError(ret)) {
+ LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
+ free(*lz4fRead);
+ return ret;
+ }
+
+ switch (info.blockSizeID) {
+ case LZ4F_default :
+ case LZ4F_max64KB :
+ (*lz4fRead)->srcBufMaxSize = 64 * 1024;
+ break;
+ case LZ4F_max256KB:
+ (*lz4fRead)->srcBufMaxSize = 256 * 1024;
+ break;
+ case LZ4F_max1MB:
+ (*lz4fRead)->srcBufMaxSize = 1 * 1024 * 1024;
+ break;
+ case LZ4F_max4MB:
+ (*lz4fRead)->srcBufMaxSize = 4 * 1024 * 1024;
+ break;
+ default:
+ LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
+ free(*lz4fRead);
+ return -LZ4F_ERROR_maxBlockSize_invalid;
+ }
+
+ (*lz4fRead)->srcBuf = (LZ4_byte*)malloc((*lz4fRead)->srcBufMaxSize);
+ if ((*lz4fRead)->srcBuf == NULL) {
+ LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
+    free(*lz4fRead); /* free the handle object itself, not the caller's pointer */
+ return -LZ4F_ERROR_allocation_failed;
+ }
+
+ (*lz4fRead)->srcBufSize = sizeof(buf) - consumedSize;
+ memcpy((*lz4fRead)->srcBuf, buf + consumedSize, (*lz4fRead)->srcBufSize);
+
+ return ret;
+}
+
+size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
+{
+ LZ4_byte* p = (LZ4_byte*)buf;
+ size_t next = 0;
+
+ if (lz4fRead == NULL || buf == NULL)
+ return -LZ4F_ERROR_GENERIC;
+
+ while (next < size) {
+ size_t srcsize = lz4fRead->srcBufSize - lz4fRead->srcBufNext;
+ size_t dstsize = size - next;
+ size_t ret;
+
+ if (srcsize == 0) {
+ ret = fread(lz4fRead->srcBuf, 1, lz4fRead->srcBufMaxSize, lz4fRead->fp);
+ if (ret > 0) {
+ lz4fRead->srcBufSize = ret;
+ srcsize = lz4fRead->srcBufSize;
+ lz4fRead->srcBufNext = 0;
+ }
+ else if (ret == 0) {
+ break;
+ }
+ else {
+ return -LZ4F_ERROR_GENERIC;
+ }
+ }
+
+ ret = LZ4F_decompress(lz4fRead->dctxPtr,
+ p, &dstsize,
+ lz4fRead->srcBuf + lz4fRead->srcBufNext,
+ &srcsize,
+ NULL);
+ if (LZ4F_isError(ret)) {
+ return ret;
+ }
+
+ lz4fRead->srcBufNext += srcsize;
+ next += dstsize;
+ p += dstsize;
+ }
+
+ return next;
+}
+
+LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead)
+{
+ if (lz4fRead == NULL)
+ return -LZ4F_ERROR_GENERIC;
+ LZ4F_freeDecompressionContext(lz4fRead->dctxPtr);
+ free(lz4fRead->srcBuf);
+ free(lz4fRead);
+ return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr)
+{
+ LZ4_byte buf[LZ4F_HEADER_SIZE_MAX];
+ size_t ret;
+
+ if (fp == NULL || lz4fWrite == NULL)
+ return -LZ4F_ERROR_GENERIC;
+
+ *lz4fWrite = (LZ4_writeFile_t*)malloc(sizeof(LZ4_writeFile_t));
+ if (*lz4fWrite == NULL) {
+ return -LZ4F_ERROR_allocation_failed;
+ }
+ if (prefsPtr != NULL) {
+ switch (prefsPtr->frameInfo.blockSizeID) {
+ case LZ4F_default :
+ case LZ4F_max64KB :
+ (*lz4fWrite)->maxWriteSize = 64 * 1024;
+ break;
+ case LZ4F_max256KB:
+ (*lz4fWrite)->maxWriteSize = 256 * 1024;
+ break;
+ case LZ4F_max1MB:
+ (*lz4fWrite)->maxWriteSize = 1 * 1024 * 1024;
+ break;
+ case LZ4F_max4MB:
+ (*lz4fWrite)->maxWriteSize = 4 * 1024 * 1024;
+ break;
+ default:
+        free(*lz4fWrite); /* free the handle object itself, not the caller's pointer */
+ return -LZ4F_ERROR_maxBlockSize_invalid;
+ }
+ } else {
+ (*lz4fWrite)->maxWriteSize = 64 * 1024;
+ }
+
+ (*lz4fWrite)->dstBufMaxSize = LZ4F_compressBound((*lz4fWrite)->maxWriteSize, prefsPtr);
+ (*lz4fWrite)->dstBuf = (LZ4_byte*)malloc((*lz4fWrite)->dstBufMaxSize);
+ if ((*lz4fWrite)->dstBuf == NULL) {
+ free(*lz4fWrite);
+ return -LZ4F_ERROR_allocation_failed;
+ }
+
+ ret = LZ4F_createCompressionContext(&(*lz4fWrite)->cctxPtr, LZ4F_getVersion());
+ if (LZ4F_isError(ret)) {
+ free((*lz4fWrite)->dstBuf);
+ free(*lz4fWrite);
+ return ret;
+ }
+
+ ret = LZ4F_compressBegin((*lz4fWrite)->cctxPtr, buf, LZ4F_HEADER_SIZE_MAX, prefsPtr);
+ if (LZ4F_isError(ret)) {
+ LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
+ free((*lz4fWrite)->dstBuf);
+ free(*lz4fWrite);
+ return ret;
+ }
+
+ if (ret != fwrite(buf, 1, ret, fp)) {
+ LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
+ free((*lz4fWrite)->dstBuf);
+ free(*lz4fWrite);
+ return -LZ4F_ERROR_GENERIC;
+ }
+
+ (*lz4fWrite)->fp = fp;
+ (*lz4fWrite)->errCode = LZ4F_OK_NoError;
+ return LZ4F_OK_NoError;
+}
+
+size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size)
+{
+ LZ4_byte* p = (LZ4_byte*)buf;
+ size_t remain = size;
+ size_t chunk;
+ size_t ret;
+
+ if (lz4fWrite == NULL || buf == NULL)
+ return -LZ4F_ERROR_GENERIC;
+ while (remain) {
+ if (remain > lz4fWrite->maxWriteSize)
+ chunk = lz4fWrite->maxWriteSize;
+ else
+ chunk = remain;
+
+ ret = LZ4F_compressUpdate(lz4fWrite->cctxPtr,
+ lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
+ p, chunk,
+ NULL);
+ if (LZ4F_isError(ret)) {
+ lz4fWrite->errCode = ret;
+ return ret;
+ }
+
+    if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
+ lz4fWrite->errCode = -LZ4F_ERROR_GENERIC;
+ return -LZ4F_ERROR_GENERIC;
+ }
+
+ p += chunk;
+ remain -= chunk;
+ }
+
+ return size;
+}
+
+LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite)
+{
+ LZ4F_errorCode_t ret = LZ4F_OK_NoError;
+
+ if (lz4fWrite == NULL)
+ return -LZ4F_ERROR_GENERIC;
+
+ if (lz4fWrite->errCode == LZ4F_OK_NoError) {
+ ret = LZ4F_compressEnd(lz4fWrite->cctxPtr,
+ lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
+ NULL);
+ if (LZ4F_isError(ret)) {
+ goto out;
+ }
+
+ if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
+ ret = -LZ4F_ERROR_GENERIC;
+ }
+ }
+
+out:
+ LZ4F_freeCompressionContext(lz4fWrite->cctxPtr);
+ free(lz4fWrite->dstBuf);
+ free(lz4fWrite);
+ return ret;
+}
diff --git a/mfbt/lz4/lz4file.h b/mfbt/lz4/lz4file.h
new file mode 100644
index 0000000000..5527130720
--- /dev/null
+++ b/mfbt/lz4/lz4file.h
@@ -0,0 +1,93 @@
+/*
+ LZ4 file library
+ Header File
+ Copyright (C) 2022, Xiaomi Inc.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4FILE_H
+#define LZ4FILE_H
+
+#include <stdio.h>
+#include "lz4frame_static.h"
+
+typedef struct LZ4_readFile_s LZ4_readFile_t;
+typedef struct LZ4_writeFile_s LZ4_writeFile_t;
+
+/*! LZ4F_readOpen() :
+ * Open an lz4file handle for reading.
+ * `lz4fRead` receives the newly allocated handle.
+ * `fp` must be the FILE* of an lz4 file opened with fopen.
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp);
+
+/*! LZ4F_read() :
+ * Read lz4file content into a buffer.
+ * `lz4fRead` must first be set up with LZ4F_readOpen().
+ * `buf`  : destination data buffer.
+ * `size` : destination data buffer size.
+ */
+LZ4FLIB_STATIC_API size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size);
+
+/*! LZ4F_readClose() :
+ * Close an lz4file read handle.
+ * `lz4fRead` must first be set up with LZ4F_readOpen().
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead);
+
+/*! LZ4F_writeOpen() :
+ * Open an lz4file handle for writing.
+ * `lz4fWrite` receives the newly allocated handle.
+ * `fp` must be the FILE* of an lz4 file opened with fopen.
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_write() :
+ * Write a buffer to an lz4file.
+ * `lz4fWrite` must first be set up with LZ4F_writeOpen().
+ * `buf`  : source data buffer.
+ * `size` : source data buffer size.
+ */
+LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size);
+
+/*! LZ4F_writeClose() :
+ * Close an lz4file write handle.
+ * `lz4fWrite` must first be set up with LZ4F_writeOpen().
+ */
+LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite);
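+
+/*  Example (illustrative sketch) :
+ *  reading a .lz4 file end-to-end with this API. Error handling is
+ *  abbreviated; 'consume()' is a hypothetical application callback.
+ *
+ *      FILE* const fp = fopen("data.lz4", "rb");
+ *      LZ4_readFile_t* lz4f;
+ *      if (LZ4F_isError(LZ4F_readOpen(&lz4f, fp))) { abort(); }
+ *      char buf[4096];
+ *      for (;;) {
+ *          size_t const r = LZ4F_read(lz4f, buf, sizeof(buf));
+ *          if (LZ4F_isError(r)) { abort(); }
+ *          if (r == 0) break;   // end of frame
+ *          consume(buf, r);
+ *      }
+ *      LZ4F_readClose(lz4f);
+ *      fclose(fp);
+ */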
+
+#endif /* LZ4FILE_H */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/mfbt/lz4/lz4frame.c b/mfbt/lz4/lz4frame.c
new file mode 100644
index 0000000000..174f9ae4f2
--- /dev/null
+++ b/mfbt/lz4/lz4frame.c
@@ -0,0 +1,2078 @@
+/*
+ * LZ4 auto-framing library
+ * Copyright (C) 2011-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
+
+/* LZ4F is a stand-alone API to create LZ4-compressed Frames
+ * in full conformance with specification v1.6.1.
+ * This library relies upon memory management capabilities (malloc, free)
+ * provided either by <stdlib.h>,
+ * or redirected towards another library of the user's choice
+ * (see Memory Routines below).
+ */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4F_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4F_HEAPMODE
+# define LZ4F_HEAPMODE 0
+#endif
+
+
+/*-************************************
+* Library declarations
+**************************************/
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+#define LZ4_STATIC_LINKING_ONLY
+#include "lz4.h"
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/*-************************************
+* Memory routines
+**************************************/
+/*
+ * Users may redirect invocations of
+ * malloc(), calloc() and free()
+ * towards another library or solution of their choice
+ * by modifying the section below.
+**/
+
+#include <string.h> /* memset, memcpy, memmove */
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# define MEM_INIT(p,v,s) memset((p),(v),(s))
+#endif
+
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,(s))
+# define FREEMEM(p) free(p)
+#endif
+
+static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
+{
+ /* custom calloc defined : use it */
+ if (cmem.customCalloc != NULL) {
+ return cmem.customCalloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s calloc() */
+ if (cmem.customAlloc == NULL) {
+ return ALLOC_AND_ZERO(s);
+ }
+ /* only custom alloc defined : use it, and combine it with memset() */
+ { void* const p = cmem.customAlloc(cmem.opaqueState, s);
+ if (p != NULL) MEM_INIT(p, 0, s);
+ return p;
+} }
+
+static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
+{
+ /* custom malloc defined : use it */
+ if (cmem.customAlloc != NULL) {
+ return cmem.customAlloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s malloc() */
+ return ALLOC(s);
+}
+
+static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
+{
+ /* custom free defined : use it */
+ if (cmem.customFree != NULL) {
+ cmem.customFree(cmem.opaqueState, p);
+ return;
+ }
+ /* nothing defined : use default <stdlib.h>'s free() */
+ FREEMEM(p);
+}
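+
+/* Illustrative sketch (not compiled, hence the #if 0 guard) : redirecting all
+ * LZ4F allocations to user-supplied routines through LZ4F_CustomMem, as
+ * consumed by the *_advanced() context creators further below. The `my*`
+ * names are hypothetical; designated initializers avoid relying on field order. */
+#if 0
+static void* myAlloc (void* opaqueState, size_t s) { (void)opaqueState; return malloc(s); }
+static void* myCalloc(void* opaqueState, size_t s) { (void)opaqueState; return calloc(1, s); }
+static void  myFree  (void* opaqueState, void* p)  { (void)opaqueState; free(p); }
+
+static const LZ4F_CustomMem myCMem = {
+    .customAlloc  = myAlloc,
+    .customCalloc = myCalloc,  /* optional : LZ4F_calloc() falls back to alloc+memset */
+    .customFree   = myFree,
+    .opaqueState  = NULL
+};
+/* usage : LZ4F_cctx* cctx = LZ4F_createCompressionContext_advanced(myCMem, LZ4F_VERSION); */
+#endif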
+
+
+/*-************************************
+* Debug
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
+# include <stdio.h>
+static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
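+
+/* Example : building this translation unit with `-DLZ4_DEBUG=5` keeps assert()
+ * active and sends DEBUGLOG() messages of level <= 5 to stderr. */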
+
+
+/*-************************************
+* Basic Types
+**************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+
+/* unoptimized version; solves endianness & alignment issues */
+static U32 LZ4F_readLE32 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U32 value32 = srcPtr[0];
+ value32 += ((U32)srcPtr[1])<< 8;
+ value32 += ((U32)srcPtr[2])<<16;
+ value32 += ((U32)srcPtr[3])<<24;
+ return value32;
+}
+
+static void LZ4F_writeLE32 (void* dst, U32 value32)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value32;
+ dstPtr[1] = (BYTE)(value32 >> 8);
+ dstPtr[2] = (BYTE)(value32 >> 16);
+ dstPtr[3] = (BYTE)(value32 >> 24);
+}
+
+static U64 LZ4F_readLE64 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U64 value64 = srcPtr[0];
+ value64 += ((U64)srcPtr[1]<<8);
+ value64 += ((U64)srcPtr[2]<<16);
+ value64 += ((U64)srcPtr[3]<<24);
+ value64 += ((U64)srcPtr[4]<<32);
+ value64 += ((U64)srcPtr[5]<<40);
+ value64 += ((U64)srcPtr[6]<<48);
+ value64 += ((U64)srcPtr[7]<<56);
+ return value64;
+}
+
+static void LZ4F_writeLE64 (void* dst, U64 value64)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value64;
+ dstPtr[1] = (BYTE)(value64 >> 8);
+ dstPtr[2] = (BYTE)(value64 >> 16);
+ dstPtr[3] = (BYTE)(value64 >> 24);
+ dstPtr[4] = (BYTE)(value64 >> 32);
+ dstPtr[5] = (BYTE)(value64 >> 40);
+ dstPtr[6] = (BYTE)(value64 >> 48);
+ dstPtr[7] = (BYTE)(value64 >> 56);
+}
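+
+/* Illustrative round-trip (not compiled) : because the helpers above work
+ * byte by byte, read and write are exact inverses on every platform,
+ * independent of native endianness and alignment rules. */
+#if 0
+    BYTE b[8];
+    LZ4F_writeLE32(b, 0x01020304U);
+    assert(LZ4F_readLE32(b) == 0x01020304U);
+    LZ4F_writeLE64(b, 0x0102030405060708ULL);
+    assert(LZ4F_readLE64(b) == 0x0102030405060708ULL);
+#endif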
+
+
+/*-************************************
+* Constants
+**************************************/
+#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
+# define KB *(1<<10)
+# define MB *(1<<20)
+# define GB *(1<<30)
+#endif
+
+#define _1BIT 0x01
+#define _2BITS 0x03
+#define _3BITS 0x07
+#define _4BITS 0x0F
+#define _8BITS 0xFF
+
+#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
+#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
+
+static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
+static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
+static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
+static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
+
+
+/*-************************************
+* Structures and local types
+**************************************/
+
+typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t;
+
+typedef struct LZ4F_cctx_s
+{
+ LZ4F_CustomMem cmem;
+ LZ4F_preferences_t prefs;
+ U32 version;
+ U32 cStage;
+ const LZ4F_CDict* cdict;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpBuff; /* internal buffer, for streaming */
+ BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */
+ size_t tmpInSize; /* amount of data to compress after tmpIn */
+ U64 totalInSize;
+ XXH32_state_t xxh;
+ void* lz4CtxPtr;
+ U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ LZ4F_blockCompression_t blockCompression;
+} LZ4F_cctx_t;
+
+
+/*-************************************
+* Error management
+**************************************/
+#define LZ4F_GENERATE_STRING(STRING) #STRING,
+static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
+
+
+unsigned LZ4F_isError(LZ4F_errorCode_t code)
+{
+ return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
+}
+
+const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
+{
+ static const char* codeError = "Unspecified error code";
+ if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
+ return codeError;
+}
+
+LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
+{
+ if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
+ return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
+}
+
+static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
+{
+ /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
+ LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
+ return (LZ4F_errorCode_t)-(ptrdiff_t)code;
+}
+
+#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
+
+#define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e)
+
+#define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r)
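+
+/* Illustrative sketch (not compiled) : the same convention seen from the
+ * caller's side. Any size_t result of the public API can be tested with
+ * LZ4F_isError() and described with LZ4F_getErrorName(). Buffer names are
+ * hypothetical, and <stdio.h> is assumed. */
+#if 0
+    size_t const r = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
+    if (LZ4F_isError(r))
+        fprintf(stderr, "lz4 frame compression failed: %s\n", LZ4F_getErrorName(r));
+#endif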
+
+unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
+
+int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
+
+size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
+{
+ static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
+
+ if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
+ RETURN_ERROR(maxBlockSize_invalid);
+ { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
+ return blockSizes[blockSizeIdx];
+} }
+
+/*-************************************
+* Private functions
+**************************************/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+
+static BYTE LZ4F_headerChecksum (const void* header, size_t length)
+{
+ U32 const xxh = XXH32(header, length, 0);
+ return (BYTE)(xxh >> 8);
+}
+
+
+/*-************************************
+* Simple-pass compression functions
+**************************************/
+static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
+ const size_t srcSize)
+{
+ LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
+ size_t maxBlockSize = 64 KB;
+ while (requestedBSID > proposedBSID) {
+ if (srcSize <= maxBlockSize)
+ return proposedBSID;
+ proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
+ maxBlockSize <<= 2;
+ }
+ return requestedBSID;
+}
+
+/*! LZ4F_compressBound_internal() :
+ * Provides the minimum dstCapacity for a given srcSize to guarantee operation success in worst-case situations.
+ * prefsPtr is optional : if NULL is provided, preferences will be set to cover the worst case.
+ * @return is always the same for a given srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
+ * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
+ */
+static size_t LZ4F_compressBound_internal(size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr,
+ size_t alreadyBuffered)
+{
+ LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
+ prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
+ prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
+ { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
+ U32 const flush = prefsPtr->autoFlush | (srcSize==0);
+ LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
+ size_t const blockSize = LZ4F_getBlockSize(blockID);
+ size_t const maxBuffered = blockSize - 1;
+ size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
+ size_t const maxSrcSize = srcSize + bufferedSize;
+ unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
+ size_t const partialBlockSize = maxSrcSize & (blockSize-1);
+ size_t const lastBlockSize = flush ? partialBlockSize : 0;
+ unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
+
+ size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
+ size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
+
+ return ((BHSize + blockCRCSize) * nbBlocks) +
+ (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
+ }
+}
+
+size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefs;
+ size_t const headerSize = maxFHSize; /* max header size, including optional fields */
+
+ if (preferencesPtr!=NULL) prefs = *preferencesPtr;
+ else MEM_INIT(&prefs, 0, sizeof(prefs));
+ prefs.autoFlush = 1;
+
+ return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
+}
+
+
+/*! LZ4F_compressFrame_usingCDict() :
+ * Compress srcBuffer using a dictionary, in a single step.
+ * cdict can be NULL, in which case, no dictionary is used.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you may provide NULL as argument;
+ * however, it is the only way to provide a dictID, so omitting it is not recommended when one is needed.
+ * @return : number of bytes written into dstBuffer,
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefs;
+ LZ4F_compressOptions_t options;
+ BYTE* const dstStart = (BYTE*) dstBuffer;
+ BYTE* dstPtr = dstStart;
+ BYTE* const dstEnd = dstStart + dstCapacity;
+
+ if (preferencesPtr!=NULL)
+ prefs = *preferencesPtr;
+ else
+ MEM_INIT(&prefs, 0, sizeof(prefs));
+ if (prefs.frameInfo.contentSize != 0)
+ prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
+
+ prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
+ prefs.autoFlush = 1;
+ if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
+ prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
+
+ MEM_INIT(&options, 0, sizeof(options));
+ options.stableSrc = 1;
+
+ RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);
+
+ { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
+ FORWARD_IF_ERROR(headerSize);
+ dstPtr += headerSize; /* header size */ }
+
+ assert(dstEnd >= dstPtr);
+ { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
+ FORWARD_IF_ERROR(cSize);
+ dstPtr += cSize; }
+
+ assert(dstEnd >= dstPtr);
+ { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
+ FORWARD_IF_ERROR(tailSize);
+ dstPtr += tailSize; }
+
+ assert(dstEnd >= dstStart);
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressFrame() :
+ * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ size_t result;
+#if (LZ4F_HEAPMODE)
+ LZ4F_cctx_t* cctxPtr;
+ result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
+ FORWARD_IF_ERROR(result);
+#else
+ LZ4F_cctx_t cctx;
+ LZ4_stream_t lz4ctx;
+ LZ4F_cctx_t* const cctxPtr = &cctx;
+
+ MEM_INIT(&cctx, 0, sizeof(cctx));
+ cctx.version = LZ4F_VERSION;
+ cctx.maxBufferSize = 5 MB; /* overstate the real buffer size to prevent dynamic allocation; safe only because autoflush==1 & stableSrc==1 */
+ if ( preferencesPtr == NULL
+ || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
+ LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
+ cctxPtr->lz4CtxPtr = &lz4ctx;
+ cctxPtr->lz4CtxAlloc = 1;
+ cctxPtr->lz4CtxState = 1;
+ }
+#endif
+ DEBUGLOG(4, "LZ4F_compressFrame");
+
+ result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ NULL, preferencesPtr);
+
+#if (LZ4F_HEAPMODE)
+ LZ4F_freeCompressionContext(cctxPtr);
+#else
+ if ( preferencesPtr != NULL
+ && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
+ LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
+ }
+#endif
+ return result;
+}
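+
+/* Illustrative sketch (not compiled) : one-shot frame compression, sizing the
+ * destination with LZ4F_compressFrameBound(). The function name is
+ * hypothetical; error handling is trimmed for brevity. */
+#if 0
+#include <stdlib.h>
+size_t compressToFrame(const void* src, size_t srcSize)
+{
+    size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);
+    void* const dst = malloc(dstCapacity);
+    size_t cSize;
+    if (dst == NULL) return 0;
+    cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
+    if (LZ4F_isError(cSize)) { free(dst); return 0; }
+    /* `dst` now holds a complete, self-describing LZ4 frame of cSize bytes */
+    free(dst);
+    return cSize;
+}
+#endif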
+
+
+/*-***************************************************
+* Dictionary compression
+*****************************************************/
+
+struct LZ4F_CDict_s {
+ LZ4F_CustomMem cmem;
+ void* dictContent;
+ LZ4_stream_t* fastCtx;
+ LZ4_streamHC_t* HCCtx;
+}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
+
+LZ4F_CDict*
+LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
+{
+ const char* dictStart = (const char*)dictBuffer;
+ LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
+ DEBUGLOG(4, "LZ4F_createCDict_advanced");
+ if (!cdict) return NULL;
+ cdict->cmem = cmem;
+ if (dictSize > 64 KB) {
+ dictStart += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ cdict->dictContent = LZ4F_malloc(dictSize, cmem);
+ cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
+ if (cdict->fastCtx)
+ LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
+ cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
+ if (cdict->HCCtx)
+ LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t));
+ if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
+ LZ4F_freeCDict(cdict);
+ return NULL;
+ }
+ memcpy(cdict->dictContent, dictStart, dictSize);
+ LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
+ LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
+ return cdict;
+}
+
+/*! LZ4F_createCDict() :
+ * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
+ * @return : digested dictionary for compression, or NULL if failed */
+LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
+{
+ DEBUGLOG(4, "LZ4F_createCDict");
+ return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
+}
+
+void LZ4F_freeCDict(LZ4F_CDict* cdict)
+{
+ if (cdict==NULL) return; /* support free on NULL */
+ LZ4F_free(cdict->dictContent, cdict->cmem);
+ LZ4F_free(cdict->fastCtx, cdict->cmem);
+ LZ4F_free(cdict->HCCtx, cdict->cmem);
+ LZ4F_free(cdict, cdict->cmem);
+}
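+
+/* Illustrative sketch (not compiled) : one digested dictionary reused across
+ * frames. `dictBuf`/`dictLen` and the dst/src buffers are hypothetical. */
+#if 0
+    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictLen);
+    LZ4F_cctx* cctx = NULL;
+    if (cdict != NULL
+        && !LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
+        size_t const r = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
+                                                       src, srcSize, cdict, NULL);
+        /* the cdict is read-only during compression, so it may be shared
+         * concurrently by several contexts/threads */
+        (void)r;
+    }
+    LZ4F_freeCompressionContext(cctx);  /* both free functions accept NULL */
+    LZ4F_freeCDict(cdict);
+#endif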
+
+
+/*-*********************************
+* Advanced compression functions
+***********************************/
+
+LZ4F_cctx*
+LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+{
+ LZ4F_cctx* const cctxPtr =
+ (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
+ if (cctxPtr==NULL) return NULL;
+
+ cctxPtr->cmem = customMem;
+ cctxPtr->version = version;
+ cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */
+
+ return cctxPtr;
+}
+
+/*! LZ4F_createCompressionContext() :
+ * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
+ * This is achieved using LZ4F_createCompressionContext(), which takes a version number as argument.
+ * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
+ * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
+ * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
+ * The object can later release its memory using LZ4F_freeCompressionContext().
+**/
+LZ4F_errorCode_t
+LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
+{
+ assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
+ /* in case it nonetheless happens in production */
+ RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);
+
+ *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
+ RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
+ return LZ4F_OK_NoError;
+}
+
+
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
+{
+ if (cctxPtr != NULL) { /* support free on NULL */
+ LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+ LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
+ LZ4F_free(cctxPtr, cctxPtr->cmem);
+ }
+ return LZ4F_OK_NoError;
+}
+
+
+/**
+ * This function prepares the internal LZ4(HC) stream for a new compression,
+ * resetting the context and attaching the dictionary, if there is one.
+ *
+ * It needs to be called at the beginning of each independent compression
+ * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
+ * beginning of each block in blockIndependent mode).
+ */
+static void LZ4F_initStream(void* ctx,
+ const LZ4F_CDict* cdict,
+ int level,
+ LZ4F_blockMode_t blockMode) {
+ if (level < LZ4HC_CLEVEL_MIN) {
+ if (cdict != NULL || blockMode == LZ4F_blockLinked) {
+ /* In these cases, we will call LZ4_compress_fast_continue(),
+ * which needs an already reset context. Otherwise, we'll call a
+ * one-shot API. The non-continued APIs internally perform their own
+ * resets at the beginning of their calls, where they know what
+ * tableType they need the context to be in. So in that case this
+ * would be misguided / wasted work. */
+ LZ4_resetStream_fast((LZ4_stream_t*)ctx);
+ }
+ LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
+ } else {
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
+ LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
+ }
+}
+
+static int ctxTypeID_to_size(int ctxTypeID) {
+ switch(ctxTypeID) {
+ case 1:
+ return LZ4_sizeofState();
+ case 2:
+ return LZ4_sizeofStateHC();
+ default:
+ return 0;
+ }
+}
+
+/*! LZ4F_compressBegin_usingCDict() :
+ * init streaming compression AND writes frame header into @dstBuffer.
+ * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * @return : number of bytes written into @dstBuffer for the header
+ * or an error code (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+
+ RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
+ if (preferencesPtr == NULL) preferencesPtr = &prefNull;
+ cctxPtr->prefs = *preferencesPtr;
+
+ /* cctx Management */
+ { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
+ int requiredSize = ctxTypeID_to_size(ctxTypeID);
+ int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc);
+ if (allocatedSize < requiredSize) {
+ /* not enough space allocated */
+ LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ /* must take ownership of memory allocation,
+ * in order to respect custom allocator contract */
+ cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem);
+ if (cctxPtr->lz4CtxPtr)
+ LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
+ } else {
+ cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem);
+ if (cctxPtr->lz4CtxPtr)
+ LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ }
+ RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed);
+ cctxPtr->lz4CtxAlloc = ctxTypeID;
+ cctxPtr->lz4CtxState = ctxTypeID;
+ } else if (cctxPtr->lz4CtxState != ctxTypeID) {
+ /* otherwise, a sufficient buffer is already allocated,
+ * but we need to reset it to the correct context type */
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
+ } else {
+ LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
+ }
+ cctxPtr->lz4CtxState = ctxTypeID;
+ } }
+
+ /* Buffer Management */
+ if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
+ cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
+
+ { size_t const requiredBuffSize = preferencesPtr->autoFlush ?
+ ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
+ cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
+
+ if (cctxPtr->maxBufferSize < requiredBuffSize) {
+ cctxPtr->maxBufferSize = 0;
+ LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
+ cctxPtr->tmpBuff = (BYTE*)LZ4F_calloc(requiredBuffSize, cctxPtr->cmem);
+ RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed);
+ cctxPtr->maxBufferSize = requiredBuffSize;
+ } }
+ cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ cctxPtr->tmpInSize = 0;
+ (void)XXH32_reset(&(cctxPtr->xxh), 0);
+
+ /* context init */
+ cctxPtr->cdict = cdict;
+ if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
+ /* frame init only for blockLinked : blockIndependent will be init at each block */
+ LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
+ }
+ if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
+ LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+ }
+
+ /* Magic Number */
+ LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
+ dstPtr += 4;
+ { BYTE* const headerStart = dstPtr;
+
+ /* FLG Byte */
+ *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
+ + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
+ + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+ + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
+ + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+ + (cctxPtr->prefs.frameInfo.dictID > 0) );
+ /* BD Byte */
+ *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
+ /* Optional Frame content size field */
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
+ dstPtr += 8;
+ cctxPtr->totalInSize = 0;
+ }
+ /* Optional dictionary ID field */
+ if (cctxPtr->prefs.frameInfo.dictID) {
+ LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
+ dstPtr += 4;
+ }
+ /* Header CRC Byte */
+ *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
+ dstPtr++;
+ }
+
+ cctxPtr->cStage = 1; /* header written, now request input data block */
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressBegin() :
+ * init streaming compression AND writes frame header into @dstBuffer.
+ * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * @preferencesPtr can be NULL, in which case default parameters are selected.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
+ NULL, preferencesPtr);
+}
+
+
+/* LZ4F_compressBound() :
+ * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
+ * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
+ * This function cannot fail.
+ */
+size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ if (preferencesPtr && preferencesPtr->autoFlush) {
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+ }
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
+}
+
+
+typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
+
+
+/*! LZ4F_makeBlock():
+ * compress a single block, add header and optional checksum.
+ * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
+ */
+static size_t LZ4F_makeBlock(void* dst,
+ const void* src, size_t srcSize,
+ compressFunc_t compress, void* lz4ctx, int level,
+ const LZ4F_CDict* cdict,
+ LZ4F_blockChecksum_t crcFlag)
+{
+ BYTE* const cSizePtr = (BYTE*)dst;
+ U32 cSize;
+ assert(compress != NULL);
+ cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
+ (int)(srcSize), (int)(srcSize-1),
+ level, cdict);
+
+ if (cSize == 0 || cSize >= srcSize) {
+ cSize = (U32)srcSize;
+ LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
+ memcpy(cSizePtr+BHSize, src, srcSize);
+ } else {
+ LZ4F_writeLE32(cSizePtr, cSize);
+ }
+ if (crcFlag) {
+ U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */
+ LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
+ }
+ return BHSize + cSize + ((U32)crcFlag)*BFSize;
+}
+
+
+static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ int const acceleration = (level < 0) ? -level + 1 : 1;
+ DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize);
+ LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
+ if (cdict) {
+ return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
+ } else {
+ return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
+ }
+}
+
+static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ int const acceleration = (level < 0) ? -level + 1 : 1;
+ (void)cdict; /* init once at beginning of frame */
+ DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize);
+ return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
+}
+
+static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
+ if (cdict) {
+ return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
+ }
+ return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
+}
+
+static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ (void)level; (void)cdict; /* init once at beginning of frame */
+ return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
+}
+
+static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
+ return 0;
+}
+
+static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode)
+{
+ if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock;
+ if (level < LZ4HC_CLEVEL_MIN) {
+ if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
+ return LZ4F_compressBlock_continue;
+ }
+ if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
+ return LZ4F_compressBlockHC_continue;
+}
+
+/* Save history (up to 64KB) into @tmpBuff */
+static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
+{
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
+ return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+ return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+}
+
+typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
+
+static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
+
+
+ /*! LZ4F_compressUpdateImpl() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or compressed into @dstBuffer.
+ * If the requested block compression mode does not match that of the previous block, buffered data is flushed first,
+ * and operations continue with the new compression mode.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left undefined, and must be re-initialized.
+ */
+static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr,
+ LZ4F_blockCompression_t blockCompression)
+ {
+ size_t const blockSize = cctxPtr->maxBlockSize;
+ const BYTE* srcPtr = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcPtr + srcSize;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ LZ4F_lastBlockStatus lastBlockCompressed = notDone;
+ compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
+ size_t bytesWritten;
+ DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
+
+ RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */
+ if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
+ RETURN_ERROR(dstMaxSize_tooSmall);
+
+ if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
+ RETURN_ERROR(dstMaxSize_tooSmall);
+
+ /* flush currently written block, to continue with new block compression */
+ if (cctxPtr->blockCompression != blockCompression) {
+ bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+ dstPtr += bytesWritten;
+ cctxPtr->blockCompression = blockCompression;
+ }
+
+ if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;
+
+ /* complete tmp buffer */
+ if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
+ size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
+ assert(blockSize > cctxPtr->tmpInSize);
+ if (sizeToCopy > srcSize) {
+ /* add src to tmpIn buffer */
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
+ srcPtr = srcEnd;
+ cctxPtr->tmpInSize += srcSize;
+ /* content checksum is still updated at the end of this function */
+ } else {
+ /* complete tmpIn block and then compress it */
+ lastBlockCompressed = fromTmpBuffer;
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
+ srcPtr += sizeToCopy;
+
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ cctxPtr->tmpIn, blockSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
+ cctxPtr->tmpInSize = 0;
+ } }
+
+ while ((size_t)(srcEnd - srcPtr) >= blockSize) {
+ /* compress full blocks */
+ lastBlockCompressed = fromSrcBuffer;
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ srcPtr, blockSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ srcPtr += blockSize;
+ }
+
+ if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
+ /* autoFlush : remaining input (< blockSize) is compressed */
+ lastBlockCompressed = fromSrcBuffer;
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ srcPtr, (size_t)(srcEnd - srcPtr),
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ srcPtr = srcEnd;
+ }
+
+ /* preserve dictionary within @tmpBuff whenever necessary */
+ if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
+ /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
+ assert(blockCompression == LZ4B_COMPRESSED);
+ if (compressOptionsPtr->stableSrc) {
+ cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */
+ } else {
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ assert(0 <= realDictSize && realDictSize <= 64 KB);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+ }
+
+ /* keep tmpIn within limits */
+ if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */
+ && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */
+ {
+ /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
+ * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
+ }
+
+ /* some input data left, necessarily < blockSize */
+ if (srcPtr < srcEnd) {
+ /* fill tmp buffer */
+ size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
+ memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
+ cctxPtr->tmpInSize = sizeToCopy;
+ }
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
+ (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
+
+ cctxPtr->totalInSize += srcSize;
+ return (size_t)(dstPtr - dstStart);
+}
+
+/*! LZ4F_compressUpdate() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or compressed into @dstBuffer.
+ * If an uncompressed block was previously written, buffered data is flushed
+ * before compressed data is appended.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left undefined, and must be re-initialized.
+ */
+size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_COMPRESSED);
+}
+
+/*! LZ4F_uncompressedUpdate() :
+ * LZ4F_uncompressedUpdate() can be called repetitively to add as much data as necessary.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or copied verbatim into @dstBuffer.
+ * If a compressed block was previously written, buffered data is flushed
+ * before uncompressed data is appended.
+ * This is only supported when LZ4F_blockIndependent is used.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left undefined, and must be re-initialized.
+ */
+size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr) {
+ RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_UNCOMPRESSED);
+}
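+
+/* Illustrative sketch (not compiled) : interleaving verbatim storage with
+ * regular compression inside one frame, which requires LZ4F_blockIndependent.
+ * `chunk*`/`size*` and the running `dstPtr` are hypothetical; each call's
+ * capacity should be sized with LZ4F_compressBound(). */
+#if 0
+    size_t r;
+    r = LZ4F_compressUpdate(cctx, dstPtr, dstCap, chunk1, size1, NULL);
+    if (LZ4F_isError(r)) goto error;   dstPtr += r;
+    r = LZ4F_uncompressedUpdate(cctx, dstPtr, dstCap, chunk2, size2, NULL); /* stored as-is */
+    if (LZ4F_isError(r)) goto error;   dstPtr += r;
+    r = LZ4F_compressUpdate(cctx, dstPtr, dstCap, chunk3, size3, NULL);
+    if (LZ4F_isError(r)) goto error;   dstPtr += r;
+#endif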
+
+
+/*! LZ4F_flush() :
+ * When compressed data must be sent immediately, without waiting for a block to be filled,
+ * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
+ * The result of the function is the number of bytes written into dstBuffer.
+ * It can be zero, which means there was no data left within LZ4F_cctx.
+ * The function outputs an error code if it fails (can be tested using LZ4F_isError())
+ * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
+ */
+size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ compressFunc_t compress;
+
+ if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
+ RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
+ RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
+ (void)compressOptionsPtr; /* not useful (yet) */
+
+ /* select compression function */
+ compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression);
+
+ /* compress tmp buffer */
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ cctxPtr->tmpIn, cctxPtr->tmpInSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));
+
+ if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
+ cctxPtr->tmpIn += cctxPtr->tmpInSize;
+ cctxPtr->tmpInSize = 0;
+
+ /* keep tmpIn within limits */
+ if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressEnd() :
+ * When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
+ * It will flush whatever data remained within compressionContext (like LZ4_flush())
+ * but also properly finalize the frame, with an endMark and an (optional) checksum.
+ * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
+ * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
+ */
+size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+
+ size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+ DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
+ FORWARD_IF_ERROR(flushSize);
+ dstPtr += flushSize;
+
+ assert(flushSize <= dstCapacity);
+ dstCapacity -= flushSize;
+
+ RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
+ LZ4F_writeLE32(dstPtr, 0);
+ dstPtr += 4; /* endMark */
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
+ U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
+ RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
+ DEBUGLOG(5,"Writing 32-bit content checksum");
+ LZ4F_writeLE32(dstPtr, xxh);
+ dstPtr+=4; /* content Checksum */
+ }
+
+ cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
+ cctxPtr->maxBufferSize = 0; /* reuse HC context */
+
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
+ RETURN_ERROR(frameSize_wrong);
+ }
+
+ return (size_t)(dstPtr - dstStart);
+}
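+
+/* Illustrative sketch (not compiled) : a complete streaming session using the
+ * begin/update/end calls above. `readInput()` and `writeOutput()` are
+ * hypothetical I/O helpers; per-call capacity is sized once with
+ * LZ4F_compressBound(), which also covers the frame header and footer. */
+#if 0
+#define CHUNK (64 KB)
+static int streamCompress(void)
+{
+    LZ4F_cctx* cctx = NULL;
+    size_t const outCap = LZ4F_compressBound(CHUNK, NULL); /* worst case per call */
+    char* const out = (char*)malloc(outCap);
+    char in[CHUNK];
+    size_t n, r;
+    int ok = 0;
+
+    if (out == NULL) return 0;
+    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) goto done;
+
+    r = LZ4F_compressBegin(cctx, out, outCap, NULL);    /* writes frame header */
+    if (LZ4F_isError(r)) goto done;
+    writeOutput(out, r);
+
+    while ((n = readInput(in, CHUNK)) > 0) {
+        r = LZ4F_compressUpdate(cctx, out, outCap, in, n, NULL);
+        if (LZ4F_isError(r)) goto done;
+        writeOutput(out, r);    /* r may be 0 : input was merely buffered */
+    }
+
+    r = LZ4F_compressEnd(cctx, out, outCap, NULL);      /* endMark + optional checksum */
+    if (LZ4F_isError(r)) goto done;
+    writeOutput(out, r);
+    ok = 1;
+done:
+    LZ4F_freeCompressionContext(cctx);
+    free(out);
+    return ok;
+}
+#endif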
+
+
+/*-***************************************************
+* Frame Decompression
+*****************************************************/
+
+typedef enum {
+ dstage_getFrameHeader=0, dstage_storeFrameHeader,
+ dstage_init,
+ dstage_getBlockHeader, dstage_storeBlockHeader,
+ dstage_copyDirect, dstage_getBlockChecksum,
+ dstage_getCBlock, dstage_storeCBlock,
+ dstage_flushOut,
+ dstage_getSuffix, dstage_storeSuffix,
+ dstage_getSFrameSize, dstage_storeSFrameSize,
+ dstage_skipSkippable
+} dStage_t;
+
+struct LZ4F_dctx_s {
+ LZ4F_CustomMem cmem;
+ LZ4F_frameInfo_t frameInfo;
+ U32 version;
+ dStage_t dStage;
+ U64 frameRemainingSize;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpIn;
+ size_t tmpInSize;
+ size_t tmpInTarget;
+ BYTE* tmpOutBuffer;
+ const BYTE* dict;
+ size_t dictSize;
+ BYTE* tmpOut;
+ size_t tmpOutSize;
+ size_t tmpOutStart;
+ XXH32_state_t xxh;
+ XXH32_state_t blockChecksum;
+ int skipChecksum;
+ BYTE header[LZ4F_HEADER_SIZE_MAX];
+}; /* typedef'd to LZ4F_dctx in lz4frame.h */
+
+
+LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+{
+ LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
+ if (dctx == NULL) return NULL;
+
+ dctx->cmem = customMem;
+ dctx->version = version;
+ return dctx;
+}
+
+/*! LZ4F_createDecompressionContext() :
+ * Create a decompressionContext object, which will track all decompression operations.
+ * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
+ * Object can later be released using LZ4F_freeDecompressionContext().
+ * @return : if != 0, there was an error during context creation.
+ */
+LZ4F_errorCode_t
+LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
+{
+ assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */
+ RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */
+
+ *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
+ if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */
+ RETURN_ERROR(allocation_failed);
+ }
+ return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
+{
+ LZ4F_errorCode_t result = LZ4F_OK_NoError;
+ if (dctx != NULL) { /* can accept NULL input, like free() */
+ result = (LZ4F_errorCode_t)dctx->dStage;
+ LZ4F_free(dctx->tmpIn, dctx->cmem);
+ LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+ LZ4F_free(dctx, dctx->cmem);
+ }
+ return result;
+}
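+
+/* Illustrative sketch (not compiled) : streaming decompression. Each call to
+ * LZ4F_decompress() reports how much input it consumed and returns 0 once the
+ * frame is fully decoded. `readInput()`/`writeOutput()` are hypothetical. */
+#if 0
+static int streamDecompress(void)
+{
+    LZ4F_dctx* dctx = NULL;
+    char in[64 KB], out[64 KB];
+    size_t ret = 1;
+
+    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 0;
+    while (ret != 0) {
+        size_t const srcSize = readInput(in, sizeof(in));
+        const char* srcPtr = in;
+        const char* const srcEnd = in + srcSize;
+        if (srcSize == 0) break;                  /* truncated input */
+        while (srcPtr < srcEnd && ret != 0) {     /* consume this buffer fully */
+            size_t dstSize = sizeof(out);
+            size_t inSize = (size_t)(srcEnd - srcPtr);
+            ret = LZ4F_decompress(dctx, out, &dstSize, srcPtr, &inSize, NULL);
+            if (LZ4F_isError(ret)) { LZ4F_freeDecompressionContext(dctx); return 0; }
+            writeOutput(out, dstSize);
+            srcPtr += inSize;
+        }
+    }
+    LZ4F_freeDecompressionContext(dctx);
+    return (ret == 0);                            /* 0 == frame fully decoded */
+}
+#endif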
+
+
+/*==--- Streaming Decompression operations ---==*/
+
+void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
+{
+ dctx->dStage = dstage_getFrameHeader;
+ dctx->dict = NULL;
+ dctx->dictSize = 0;
+ dctx->skipChecksum = 0;
+}
+
+
+/*! LZ4F_decodeHeader() :
+ * input : `src` points at the **beginning of the frame**
+ * output : set internal values of dctx, such as
+ * dctx->frameInfo and dctx->dStage.
+ * Also allocates internal buffers.
+ * @return : nb Bytes read from src (necessarily <= srcSize)
+ * or an error code (testable with LZ4F_isError())
+ */
+static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
+{
+ unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
+ size_t frameHeaderSize;
+ const BYTE* srcPtr = (const BYTE*)src;
+
+ DEBUGLOG(5, "LZ4F_decodeHeader");
+ /* need to decode header to get frameInfo */
+ RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */
+ MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
+
+ /* special case : skippable frames */
+ if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
+ dctx->frameInfo.frameType = LZ4F_skippableFrame;
+ if (src == (void*)(dctx->header)) {
+ dctx->tmpInSize = srcSize;
+ dctx->tmpInTarget = 8;
+ dctx->dStage = dstage_storeSFrameSize;
+ return srcSize;
+ } else {
+ dctx->dStage = dstage_getSFrameSize;
+ return 4;
+ } }
+
+ /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
+ DEBUGLOG(4, "frame header error : unknown magic number");
+ RETURN_ERROR(frameType_unknown);
+ }
+#endif
+ dctx->frameInfo.frameType = LZ4F_frame;
+
+ /* Flags */
+ { U32 const FLG = srcPtr[4];
+ U32 const version = (FLG>>6) & _2BITS;
+ blockChecksumFlag = (FLG>>4) & _1BIT;
+ blockMode = (FLG>>5) & _1BIT;
+ contentSizeFlag = (FLG>>3) & _1BIT;
+ contentChecksumFlag = (FLG>>2) & _1BIT;
+ dictIDFlag = FLG & _1BIT;
+ /* validate */
+ if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
+ if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */
+ }
+
+ /* Frame Header Size */
+ frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+
+ if (srcSize < frameHeaderSize) {
+ /* not enough input to fully decode frame header */
+ if (srcPtr != dctx->header)
+ memcpy(dctx->header, srcPtr, srcSize);
+ dctx->tmpInSize = srcSize;
+ dctx->tmpInTarget = frameHeaderSize;
+ dctx->dStage = dstage_storeFrameHeader;
+ return srcSize;
+ }
+
+ { U32 const BD = srcPtr[5];
+ blockSizeID = (BD>>4) & _3BITS;
+ /* validate */
+ if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
+ if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */
+ if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */
+ }
+
+ /* check header */
+ assert(frameHeaderSize > 5);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
+ RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
+ }
+#endif
+
+ /* save */
+ dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
+ dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
+ dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
+ dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
+ dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
+ if (contentSizeFlag)
+ dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
+ if (dictIDFlag)
+ dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
+
+ dctx->dStage = dstage_init;
+
+ return frameHeaderSize;
+}
+
+
+/*! LZ4F_headerSize() :
+ * @return : size of frame header
+ * or an error code, which can be tested using LZ4F_isError()
+ */
+size_t LZ4F_headerSize(const void* src, size_t srcSize)
+{
+ RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
+
+ /* minimal srcSize to determine header size */
+ if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
+ RETURN_ERROR(frameHeader_incomplete);
+
+ /* special case : skippable frames */
+ if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
+ return 8;
+
+ /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
+ RETURN_ERROR(frameType_unknown);
+#endif
+
+ /* Frame Header Size */
+ { BYTE const FLG = ((const BYTE*)src)[4];
+ U32 const contentSizeFlag = (FLG>>3) & _1BIT;
+ U32 const dictIDFlag = FLG & _1BIT;
+ return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+ }
+}
+
+/*! LZ4F_getFrameInfo() :
+ * This function extracts frame parameters (max blockSize, frame checksum, etc.).
+ * Usage is optional. Objective is to provide relevant information for allocation purposes.
+ * This function works in 2 situations :
+ * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
+ * Amount of input data provided must be large enough to successfully decode the frame header.
+ * Header size is variable, but guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
+ * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
+ * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
+ * Decompression must resume from (srcBuffer + *srcSizePtr).
+ * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for the next call,
+ * or an error code which can be tested using LZ4F_isError()
+ * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr)
+{
+ LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
+ if (dctx->dStage > dstage_storeFrameHeader) {
+ /* frameInfo already decoded */
+ size_t o=0, i=0;
+ *srcSizePtr = 0;
+ *frameInfoPtr = dctx->frameInfo;
+ /* returns : recommended nb of bytes for LZ4F_decompress() */
+ return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
+ } else {
+ if (dctx->dStage == dstage_storeFrameHeader) {
+ /* frame decoding already started, in the middle of header => automatic fail */
+ *srcSizePtr = 0;
+ RETURN_ERROR(frameDecoding_alreadyStarted);
+ } else {
+ size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
+ if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
+ if (*srcSizePtr < hSize) {
+ *srcSizePtr=0;
+ RETURN_ERROR(frameHeader_incomplete);
+ }
+
+ { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
+ if (LZ4F_isError(decodeResult)) {
+ *srcSizePtr = 0;
+ } else {
+ *srcSizePtr = decodeResult;
+ decodeResult = BHSize; /* block header size */
+ }
+ *frameInfoPtr = dctx->frameInfo;
+ return decodeResult;
+ } } }
+}
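+
+/* Illustrative sketch (not compiled) : peeking at frame parameters before
+ * allocating output. contentSize is only meaningful when the producer chose
+ * to store it in the header (it is 0 otherwise); dctx/src/srcSize are
+ * hypothetical caller-side names. */
+#if 0
+    LZ4F_frameInfo_t info;
+    size_t consumed = srcSize;    /* in : bytes available ; out : bytes read */
+    size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &consumed);
+    if (!LZ4F_isError(hint) && info.contentSize > 0) {
+        void* const dst = malloc((size_t)info.contentSize);
+        /* ... decompression then resumes from src + consumed ... */
+    }
+#endif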
+
+
+/* LZ4F_updateDict() :
+ * only used for LZ4F_blockLinked mode
+ * Condition : @dstPtr != NULL
+ */
+static void LZ4F_updateDict(LZ4F_dctx* dctx,
+ const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
+ unsigned withinTmp)
+{
+ assert(dstPtr != NULL);
+ if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */
+ assert(dctx->dict != NULL);
+
+ if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ assert(dstPtr >= dstBufferStart);
+ if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */
+ dctx->dict = (const BYTE*)dstBufferStart;
+ dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
+ return;
+ }
+
+ assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
+
+ /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+ assert(dctx->tmpOutBuffer != NULL);
+
+ if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
+ /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
+ assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
+ size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
+ size_t copySize = 64 KB - dctx->tmpOutSize;
+ const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
+ if (dctx->tmpOutSize > 64 KB) copySize = 0;
+ if (copySize > preserveSize) copySize = preserveSize;
+
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
+ return;
+ }
+
+ if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */
+ if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */
+ size_t const preserveSize = 64 KB - dstSize;
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+ dctx->dictSize = preserveSize;
+ }
+ memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ /* join dict & dest into tmp */
+ { size_t preserveSize = 64 KB - dstSize;
+ if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+ memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dstSize;
+ }
+}
+
+
+/*! LZ4F_decompress() :
+ * Call this function repetitively to regenerate the data compressed within srcBuffer.
+ * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
+ * into dstBuffer of capacity *dstSizePtr.
+ *
+ * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
+ *
+ * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
+ * If number of bytes read is < number of bytes provided, then decompression operation is not complete.
+ * Remaining data will have to be presented again in a subsequent invocation.
+ *
+ * The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
+ * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
+ * Note that this is just a hint, and it's always possible to provide any srcSize value.
+ * When a frame is fully decoded, @return will be 0.
+ * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
+ */
+size_t LZ4F_decompress(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+ LZ4F_decompressOptions_t optionsNull;
+ const BYTE* const srcStart = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcStart + *srcSizePtr;
+ const BYTE* srcPtr = srcStart;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
+ BYTE* dstPtr = dstStart;
+ const BYTE* selectedIn = NULL;
+ unsigned doAnotherStage = 1;
+ size_t nextSrcSizeHint = 1;
+
+
+ DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+ srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
+ if (dstBuffer == NULL) assert(*dstSizePtr == 0);
+ MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
+ if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
+ *srcSizePtr = 0;
+ *dstSizePtr = 0;
+ assert(dctx != NULL);
+ dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */
+
+ /* behaves as a state machine */
+
+ while (doAnotherStage) {
+
+ switch(dctx->dStage)
+ {
+
+ case dstage_getFrameHeader:
+ DEBUGLOG(6, "dstage_getFrameHeader");
+ if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
+ size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
+ FORWARD_IF_ERROR(hSize);
+ srcPtr += hSize;
+ break;
+ }
+ dctx->tmpInSize = 0;
+ if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */
+ dctx->tmpInTarget = minFHSize; /* minimum size to decode header */
+ dctx->dStage = dstage_storeFrameHeader;
+ /* fall-through */
+
+ case dstage_storeFrameHeader:
+ DEBUGLOG(6, "dstage_storeFrameHeader");
+ { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ }
+ if (dctx->tmpInSize < dctx->tmpInTarget) {
+ nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */
+ doAnotherStage = 0; /* not enough src data, ask for some more */
+ break;
+ }
+ FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */
+ break;
+
+ case dstage_init:
+ DEBUGLOG(6, "dstage_init");
+ if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
+ /* internal buffers allocation */
+ { size_t const bufferNeeded = dctx->maxBlockSize
+ + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
+ if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */
+ dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/
+ LZ4F_free(dctx->tmpIn, dctx->cmem);
+ dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
+ RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
+ LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+ dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
+ RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed);
+ dctx->maxBufferSize = bufferNeeded;
+ } }
+ dctx->tmpInSize = 0;
+ dctx->tmpInTarget = 0;
+ dctx->tmpOut = dctx->tmpOutBuffer;
+ dctx->tmpOutStart = 0;
+ dctx->tmpOutSize = 0;
+
+ dctx->dStage = dstage_getBlockHeader;
+ /* fall-through */
+
+ case dstage_getBlockHeader:
+ if ((size_t)(srcEnd - srcPtr) >= BHSize) {
+ selectedIn = srcPtr;
+ srcPtr += BHSize;
+ } else {
+ /* not enough input to read cBlockSize field */
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeBlockHeader;
+ }
+
+ if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */
+ case dstage_storeBlockHeader:
+ { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
+ size_t const wantedData = BHSize - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(wantedData, remainingInput);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+
+ if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */
+ nextSrcSizeHint = BHSize - dctx->tmpInSize;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ } /* if (dctx->dStage == dstage_storeBlockHeader) */
+
+ /* decode block header */
+ { U32 const blockHeader = LZ4F_readLE32(selectedIn);
+ size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
+ size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
+ if (blockHeader==0) { /* frameEnd signal, no more block */
+ DEBUGLOG(5, "end of frame");
+ dctx->dStage = dstage_getSuffix;
+ break;
+ }
+ if (nextCBlockSize > dctx->maxBlockSize) {
+ RETURN_ERROR(maxBlockSize_invalid);
+ }
+ if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
+ /* next block is uncompressed */
+ dctx->tmpInTarget = nextCBlockSize;
+ DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_reset(&dctx->blockChecksum, 0);
+ }
+ dctx->dStage = dstage_copyDirect;
+ break;
+ }
+ /* next block is a compressed block */
+ dctx->tmpInTarget = nextCBlockSize + crcSize;
+ dctx->dStage = dstage_getCBlock;
+ if (dstPtr==dstEnd || srcPtr==srcEnd) {
+ nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
+ doAnotherStage = 0;
+ }
+ break;
+ }
+
+ case dstage_copyDirect: /* uncompressed block */
+ DEBUGLOG(6, "dstage_copyDirect");
+ { size_t sizeToCopy;
+ if (dstPtr == NULL) {
+ sizeToCopy = 0;
+ } else {
+ size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
+ sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
+ memcpy(dstPtr, srcPtr, sizeToCopy);
+ if (!dctx->skipChecksum) {
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentChecksumFlag)
+ (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= sizeToCopy;
+
+ /* history management (linked blocks only)*/
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+ } }
+
+ srcPtr += sizeToCopy;
+ dstPtr += sizeToCopy;
+ if (sizeToCopy == dctx->tmpInTarget) { /* all done */
+ if (dctx->frameInfo.blockChecksumFlag) {
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_getBlockChecksum;
+ } else
+ dctx->dStage = dstage_getBlockHeader; /* new block */
+ break;
+ }
+ dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
+ }
+ nextSrcSizeHint = dctx->tmpInTarget
+ + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
+
+ /* check block checksum for recently transferred uncompressed block */
+ case dstage_getBlockChecksum:
+ DEBUGLOG(6, "dstage_getBlockChecksum");
+ { const void* crcSrc;
+ if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
+ crcSrc = srcPtr;
+ srcPtr += 4;
+ } else {
+ size_t const stillToCopy = 4 - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ if (dctx->tmpInSize < 4) { /* all input consumed */
+ doAnotherStage = 0;
+ break;
+ }
+ crcSrc = dctx->header;
+ }
+ if (!dctx->skipChecksum) {
+ U32 const readCRC = LZ4F_readLE32(crcSrc);
+ U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ DEBUGLOG(6, "compare block checksum");
+ if (readCRC != calcCRC) {
+ DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
+ readCRC, calcCRC);
+ RETURN_ERROR(blockChecksum_invalid);
+ }
+#else
+ (void)readCRC;
+ (void)calcCRC;
+#endif
+ } }
+ dctx->dStage = dstage_getBlockHeader; /* new block */
+ break;
+
+ case dstage_getCBlock:
+ DEBUGLOG(6, "dstage_getCBlock");
+ if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeCBlock;
+ break;
+ }
+ /* input large enough to read full block directly */
+ selectedIn = srcPtr;
+ srcPtr += dctx->tmpInTarget;
+
+ if (0) /* always jump over next block */
+ case dstage_storeCBlock:
+ { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
+ size_t const inputLeft = (size_t)(srcEnd-srcPtr);
+ size_t const sizeToCopy = MIN(wantedData, inputLeft);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
+ nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
+ + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ }
+
+ /* At this stage, input is large enough to decode a block */
+
+ /* First, decode and control block checksum if it exists */
+ if (dctx->frameInfo.blockChecksumFlag) {
+ assert(dctx->tmpInTarget >= 4);
+ dctx->tmpInTarget -= 4;
+ assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
+ { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
+ U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
+#else
+ (void)readBlockCrc;
+ (void)calcBlockCrc;
+#endif
+ } }
+
+ /* decode directly into destination buffer if there is enough room */
+ if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
+ /* unless the dictionary is stored in tmpOut:
+ * in which case it's faster to decode within tmpOut
+ * to benefit from prefix speedup */
+ && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
+ {
+ const char* dict = (const char*)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ assert(dstPtr != NULL);
+ if (dict && dictSize > 1 GB) {
+ /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
+ dict += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ decodedSize = LZ4_decompress_safe_usingDict(
+ (const char*)selectedIn, (char*)dstPtr,
+ (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
+ dict, (int)dictSize);
+ RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+ if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
+ XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= (size_t)decodedSize;
+
+ /* dictionary management */
+ if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
+ }
+
+ dstPtr += decodedSize;
+ dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */
+ break;
+ }
+
+ /* not enough room in dst : decode into tmpOut */
+
+ /* manage dictionary */
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ if (dctx->dict == dctx->tmpOutBuffer) {
+ /* truncate dictionary to 64 KB if too big */
+ if (dctx->dictSize > 128 KB) {
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
+ dctx->dictSize = 64 KB;
+ }
+ dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
+ } else { /* dict not within tmpOut */
+ size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
+ dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
+ } }
+
+ /* Decode block into tmpOut */
+ { const char* dict = (const char*)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ if (dict && dictSize > 1 GB) {
+ /* the dictSize param is an int, avoid truncation / sign issues */
+ dict += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ decodedSize = LZ4_decompress_safe_usingDict(
+ (const char*)selectedIn, (char*)dctx->tmpOut,
+ (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
+ dict, (int)dictSize);
+ RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+ if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
+ XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= (size_t)decodedSize;
+ dctx->tmpOutSize = (size_t)decodedSize;
+ dctx->tmpOutStart = 0;
+ dctx->dStage = dstage_flushOut;
+ }
+ /* fall-through */
+
+ case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
+ DEBUGLOG(6, "dstage_flushOut");
+ if (dstPtr != NULL) {
+ size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
+ memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
+
+ /* dictionary management */
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
+
+ dctx->tmpOutStart += sizeToCopy;
+ dstPtr += sizeToCopy;
+ }
+ if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
+ dctx->dStage = dstage_getBlockHeader; /* get next block */
+ break;
+ }
+ /* could not flush everything : stop there, just request a block header */
+ doAnotherStage = 0;
+ nextSrcSizeHint = BHSize;
+ break;
+
+ case dstage_getSuffix:
+ RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */
+ if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
+ nextSrcSizeHint = 0;
+ LZ4F_resetDecompressionContext(dctx);
+ doAnotherStage = 0;
+ break;
+ }
+ if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeSuffix;
+ } else {
+ selectedIn = srcPtr;
+ srcPtr += 4;
+ }
+
+ if (dctx->dStage == dstage_storeSuffix) /* can be skipped */
+ case dstage_storeSuffix:
+ { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
+ size_t const wantedData = 4 - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(wantedData, remainingInput);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+ if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
+ nextSrcSizeHint = 4 - dctx->tmpInSize;
+ doAnotherStage=0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ } /* if (dctx->dStage == dstage_storeSuffix) */
+
+ /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
+ if (!dctx->skipChecksum) {
+ U32 const readCRC = LZ4F_readLE32(selectedIn);
+ U32 const resultCRC = XXH32_digest(&(dctx->xxh));
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
+#else
+ (void)readCRC;
+ (void)resultCRC;
+#endif
+ }
+ nextSrcSizeHint = 0;
+ LZ4F_resetDecompressionContext(dctx);
+ doAnotherStage = 0;
+ break;
+
+ case dstage_getSFrameSize:
+ if ((srcEnd - srcPtr) >= 4) {
+ selectedIn = srcPtr;
+ srcPtr += 4;
+ } else {
+ /* not enough input to read the sFrameSize field */
+ dctx->tmpInSize = 4;
+ dctx->tmpInTarget = 8;
+ dctx->dStage = dstage_storeSFrameSize;
+ }
+
+ if (dctx->dStage == dstage_storeSFrameSize)
+ case dstage_storeSFrameSize:
+ { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
+ (size_t)(srcEnd - srcPtr) );
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+ if (dctx->tmpInSize < dctx->tmpInTarget) {
+ /* not enough input to get full sFrameSize; wait for more */
+ nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->header + 4;
+ } /* if (dctx->dStage == dstage_storeSFrameSize) */
+
+ /* case dstage_decodeSFrameSize: */ /* no direct entry */
+ { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
+ dctx->frameInfo.contentSize = SFrameSize;
+ dctx->tmpInTarget = SFrameSize;
+ dctx->dStage = dstage_skipSkippable;
+ break;
+ }
+
+ case dstage_skipSkippable:
+ { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
+ srcPtr += skipSize;
+ dctx->tmpInTarget -= skipSize;
+ doAnotherStage = 0;
+ nextSrcSizeHint = dctx->tmpInTarget;
+ if (nextSrcSizeHint) break; /* still more to skip */
+ /* frame fully skipped : prepare context for a new frame */
+ LZ4F_resetDecompressionContext(dctx);
+ break;
+ }
+ } /* switch (dctx->dStage) */
+ } /* while (doAnotherStage) */
+
+ /* preserve history within tmpOut whenever necessary */
+ LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
+ if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
+ && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
+ && (dctx->dict != NULL) /* dictionary exists */
+ && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
+ && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
+ {
+ if (dctx->dStage == dstage_flushOut) {
+ size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
+ size_t copySize = 64 KB - dctx->tmpOutSize;
+ const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
+ if (dctx->tmpOutSize > 64 KB) copySize = 0;
+ if (copySize > preserveSize) copySize = preserveSize;
+ assert(dctx->tmpOutBuffer != NULL);
+
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dctx->tmpOutStart;
+ } else {
+ const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
+ size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
+
+ memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = newDictSize;
+ dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
+ }
+ }
+
+ *srcSizePtr = (size_t)(srcPtr - srcStart);
+ *dstSizePtr = (size_t)(dstPtr - dstStart);
+ return nextSrcSizeHint;
+}
+
+/*! LZ4F_decompress_usingDict() :
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+ * It must remain accessible throughout the entire frame decoding.
+ */
+size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+ if (dctx->dStage <= dstage_init) {
+ dctx->dict = (const BYTE*)dict;
+ dctx->dictSize = dictSize;
+ }
+ return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
+ srcBuffer, srcSizePtr,
+ decompressOptionsPtr);
+}
diff --git a/mfbt/lz4/lz4frame.h b/mfbt/lz4/lz4frame.h
new file mode 100644
index 0000000000..1bdf6c4fcb
--- /dev/null
+++ b/mfbt/lz4/lz4frame.h
@@ -0,0 +1,692 @@
+/*
+ LZ4F - LZ4-Frame library
+ Header File
+ Copyright (C) 2011-2020, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* LZ4F is a stand-alone API able to create and decode LZ4 frames
+ * conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
+ * Generated frames are compatible with `lz4` CLI.
+ *
+ * LZ4F also offers streaming capabilities.
+ *
+ * lz4.h is not required when using lz4frame.h,
+ * except to extract common constants such as LZ4_VERSION_NUMBER.
+ * */
+
+#ifndef LZ4F_H_09782039843
+#define LZ4F_H_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ * Introduction
+ *
+ * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md .
+ * LZ4 Frames are compatible with `lz4` CLI,
+ * and designed to be interoperable with any system.
+**/
+
+/*-***************************************************************
+ * Compiler specifics
+ *****************************************************************/
+/* LZ4_DLL_EXPORT :
+ * Enable exporting of functions when building a Windows DLL
+ * LZ4FLIB_VISIBILITY :
+ * Control library symbols visibility.
+ */
+#ifndef LZ4FLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4FLIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
+#else
+# define LZ4FLIB_API LZ4FLIB_VISIBILITY
+#endif
+
+#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
+# define LZ4F_DEPRECATE(x) x
+#else
+# if defined(_MSC_VER)
+# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
+# define LZ4F_DEPRECATE(x) x __attribute__((deprecated))
+# else
+# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */
+# endif
+#endif
+
+
+/*-************************************
+ * Error management
+ **************************************/
+typedef size_t LZ4F_errorCode_t;
+
+LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */
+LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */
+
+
+/*-************************************
+ * Frame compression types
+ ************************************* */
+/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
+#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
+# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
+#else
+# define LZ4F_OBSOLETE_ENUM(x)
+#endif
+
+/* The larger the block size, the (slightly) better the compression ratio,
+ * though there are diminishing returns.
+ * Larger blocks also increase memory usage on both compression and decompression sides.
+ */
+typedef enum {
+ LZ4F_default=0,
+ LZ4F_max64KB=4,
+ LZ4F_max256KB=5,
+ LZ4F_max1MB=6,
+ LZ4F_max4MB=7
+ LZ4F_OBSOLETE_ENUM(max64KB)
+ LZ4F_OBSOLETE_ENUM(max256KB)
+ LZ4F_OBSOLETE_ENUM(max1MB)
+ LZ4F_OBSOLETE_ENUM(max4MB)
+} LZ4F_blockSizeID_t;
+
+/* Linked blocks sharply reduce inefficiencies when using small blocks;
+ * they compress better.
+ * However, some LZ4 decoders are only compatible with independent blocks */
+typedef enum {
+ LZ4F_blockLinked=0,
+ LZ4F_blockIndependent
+ LZ4F_OBSOLETE_ENUM(blockLinked)
+ LZ4F_OBSOLETE_ENUM(blockIndependent)
+} LZ4F_blockMode_t;
+
+typedef enum {
+ LZ4F_noContentChecksum=0,
+ LZ4F_contentChecksumEnabled
+ LZ4F_OBSOLETE_ENUM(noContentChecksum)
+ LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
+} LZ4F_contentChecksum_t;
+
+typedef enum {
+ LZ4F_noBlockChecksum=0,
+ LZ4F_blockChecksumEnabled
+} LZ4F_blockChecksum_t;
+
+typedef enum {
+ LZ4F_frame=0,
+ LZ4F_skippableFrame
+ LZ4F_OBSOLETE_ENUM(skippableFrame)
+} LZ4F_frameType_t;
+
+#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
+typedef LZ4F_blockSizeID_t blockSizeID_t;
+typedef LZ4F_blockMode_t blockMode_t;
+typedef LZ4F_frameType_t frameType_t;
+typedef LZ4F_contentChecksum_t contentChecksum_t;
+#endif
+
+/*! LZ4F_frameInfo_t :
+ * makes it possible to set or read frame parameters.
+ * The structure must first be initialized to 0, using memset() or LZ4F_INIT_FRAMEINFO,
+ * which sets all parameters to default.
+ * It's then possible to update selected parameters */
+typedef struct {
+ LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
+ LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
+ LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
+ LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
+ unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
+ unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
+ LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
+} LZ4F_frameInfo_t;
+
+#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
+
+/*! LZ4F_preferences_t :
+ * makes it possible to supply advanced compression instructions to streaming interface.
+ * The structure must first be initialized to 0, using memset() or LZ4F_INIT_PREFERENCES,
+ * which sets all parameters to default.
+ * All reserved fields must be set to zero. */
+typedef struct {
+ LZ4F_frameInfo_t frameInfo;
+ int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
+ unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */
+ unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */
+ unsigned reserved[3]; /* must be zero for forward compatibility */
+} LZ4F_preferences_t;
+
+#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */
+
+
+/*-*********************************
+* Simple compression function
+***********************************/
+
+LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
+
+/*! LZ4F_compressFrameBound() :
+ * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
+ * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
+ * Note : this result is only usable with LZ4F_compressFrame().
+ * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
+ */
+LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
+
+/*! LZ4F_compressFrame() :
+ * Compress an entire srcBuffer into a valid LZ4 frame.
+ * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr);
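+
+/* Illustrative usage sketch (not part of the upstream API) :
+ * one-shot compression of an in-memory buffer with default preferences.
+ * The helper name and the malloc()-based buffer strategy are assumptions
+ * for the example only; <stdlib.h> is required.
+ *
+ *   size_t exampleCompressOnce(const void* src, size_t srcSize, void** dstOut)
+ *   {
+ *       size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
+ *       void* const dst = malloc(bound);
+ *       size_t cSize;
+ *       if (dst == NULL) return 0;
+ *       cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
+ *       if (LZ4F_isError(cSize)) { free(dst); return 0; }
+ *       *dstOut = dst;
+ *       return cSize;
+ *   }
+ */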
+
+
+/*-***********************************
+* Advanced compression functions
+*************************************/
+typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
+typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */
+
+typedef struct {
+ unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
+ unsigned reserved[3];
+} LZ4F_compressOptions_t;
+
+/*--- Resource Management ---*/
+
+#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */
+LZ4FLIB_API unsigned LZ4F_getVersion(void);
+
+/*! LZ4F_createCompressionContext() :
+ * The first thing to do is to create a compressionContext object,
+ * which will keep track of operation state during streaming compression.
+ * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version,
+ * and a pointer to LZ4F_cctx*, to write the resulting pointer into.
+ * @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using a DLL.
+ * The function provides a pointer to a fully allocated LZ4F_cctx object.
+ * @cctxPtr MUST be != NULL.
+ * If @return != zero, context creation failed.
+ * A created compression context can be employed multiple times for consecutive streaming operations.
+ * Once all streaming compression jobs are completed,
+ * the state object can be released using LZ4F_freeCompressionContext().
+ * Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored.
+ * Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing).
+**/
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
+
+
+/*---- Compression ----*/
+
+#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */
+#define LZ4F_HEADER_SIZE_MAX 19
+
+/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
+#define LZ4F_BLOCK_HEADER_SIZE 4
+
+/* Size in bytes of a block checksum footer in little-endian format. */
+#define LZ4F_BLOCK_CHECKSUM_SIZE 4
+
+/* Size in bytes of the content checksum. */
+#define LZ4F_CONTENT_CHECKSUM_SIZE 4
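+
+/* Illustrative sketch of how a block header is interpreted (this mirrors the
+ * decoder logic in lz4frame.c; LZ4F_readLE32() and the 0x80000000U flag are
+ * internal details, shown here only for explanation) :
+ *
+ *   U32 const blockHeader = LZ4F_readLE32(src);       read 4 bytes, little-endian
+ *   size_t const cSize = blockHeader & 0x7FFFFFFFU;   low 31 bits = block size
+ *   int const isUncompressed = blockHeader >> 31;     high bit set => stored raw
+ *   blockHeader == 0 marks the end of the frame (EndMark)
+ */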
+
+/*! LZ4F_compressBegin() :
+ * will write the frame header into dstBuffer.
+ * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * `prefsPtr` is optional : you can provide NULL as argument; all preferences will then be set to default.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (which can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_compressBound() :
+ * Provides minimum dstCapacity required to guarantee success of
+ * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario.
+ * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead.
+ * Note that the result is only valid for a single invocation of LZ4F_compressUpdate().
+ * When invoking LZ4F_compressUpdate() multiple times,
+ * if the output buffer is gradually filled up instead of emptied and re-used from its start,
+ * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound().
+ * @return is always the same for a srcSize and prefsPtr.
+ * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
+ * tech details :
+ * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
+ * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
+ */
+LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_compressUpdate() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
+ * This value is provided by LZ4F_compressBound().
+ * If this condition is not respected, LZ4F_compressUpdate() will fail (result is an errorCode).
+ * After an error, the state is left undefined, and must be re-initialized or freed.
+ * If an uncompressed block was written previously, buffered data is flushed
+ * before compressed data is appended.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
+ * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+/*! LZ4F_flush() :
+ * When data must be generated and sent immediately, without waiting for a block to be completely filled,
+ * it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx.
+ * `dstCapacity` must be large enough to ensure the operation will be successful.
+ * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
+ * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx)
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
+ */
+LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+/*! LZ4F_compressEnd() :
+ * To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
+ * It will flush whatever data remained within `cctx` (like LZ4F_flush())
+ * and properly finalize the frame, with an endMark and a checksum.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
+ * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark),
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
+ * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
+ */
+LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* cOptPtr);
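+
+/* Illustrative streaming sketch (not part of the upstream API) :
+ * compress a FILE* into a FILE* using the begin/update/end sequence.
+ * The helper name, chunk size and stdio-based I/O are assumptions for the
+ * example only; <stdio.h> and <stdlib.h> are required. outCap is derived
+ * from LZ4F_compressBound(chunkSize), which also covers the header
+ * (<= LZ4F_HEADER_SIZE_MAX) and the footer (LZ4F_compressBound(0)).
+ *
+ *   static int exampleStreamCompress(FILE* in, FILE* out)
+ *   {
+ *       LZ4F_cctx* cctx = NULL;
+ *       char inBuf[64 * 1024];
+ *       size_t const outCap = LZ4F_compressBound(sizeof(inBuf), NULL);
+ *       char* const outBuf = (char*)malloc(outCap);
+ *       size_t n;
+ *       if (outBuf == NULL) return -1;
+ *       if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
+ *           free(outBuf); return -1;
+ *       }
+ *       n = LZ4F_compressBegin(cctx, outBuf, outCap, NULL);
+ *       if (LZ4F_isError(n)) goto _error;
+ *       fwrite(outBuf, 1, n, out);
+ *       for (;;) {
+ *           size_t const readSize = fread(inBuf, 1, sizeof(inBuf), in);
+ *           if (readSize == 0) break;
+ *           n = LZ4F_compressUpdate(cctx, outBuf, outCap, inBuf, readSize, NULL);
+ *           if (LZ4F_isError(n)) goto _error;
+ *           fwrite(outBuf, 1, n, out);
+ *       }
+ *       n = LZ4F_compressEnd(cctx, outBuf, outCap, NULL);
+ *       if (LZ4F_isError(n)) goto _error;
+ *       fwrite(outBuf, 1, n, out);
+ *       free(outBuf); LZ4F_freeCompressionContext(cctx); return 0;
+ *   _error:
+ *       free(outBuf); LZ4F_freeCompressionContext(cctx); return -1;
+ *   }
+ */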
+
+
+/*-*********************************
+* Decompression functions
+***********************************/
+typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
+typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
+
+typedef struct {
+ unsigned stableDst; /* pledges that the last 64KB of decompressed data will remain available unmodified between invocations.
+ * This optimization skips storage operations in tmp buffers. */
+ unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
+ * Setting this option to 1 once disables all checksums for the rest of the frame. */
+ unsigned reserved1; /* must be set to zero for forward compatibility */
+ unsigned reserved0; /* idem */
+} LZ4F_decompressOptions_t;
+
+
+/* Resource management */
+
+/*! LZ4F_createDecompressionContext() :
+ * Create an LZ4F_dctx object, to track all decompression operations.
+ * @version provided MUST be LZ4F_VERSION.
+ * @dctxPtr MUST be valid.
+ * The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object.
+ * The @return is an errorCode, which can be tested using LZ4F_isError().
+ * dctx memory can be released using LZ4F_freeDecompressionContext();
+ * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
+ * That is, it should be == 0 if decompression has been completed fully and correctly.
+ */
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
+
+
+/*-***********************************
+* Streaming decompression functions
+*************************************/
+
+#define LZ4F_MAGICNUMBER 0x184D2204U
+#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
+#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5
+
+/*! LZ4F_headerSize() : v1.9.0+
+ * Provide the header size of a frame starting at `src`.
+ * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH,
+ * which is enough to decode the header length.
+ * @return : size of frame header
+ * or an error code, which can be tested using LZ4F_isError()
+ * note : Frame header size is variable, but is guaranteed to be
+ * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
+ */
+LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
+
+/*! LZ4F_getFrameInfo() :
+ * This function extracts frame parameters (max blockSize, dictID, etc.).
+ * Its usage is optional: user can also invoke LZ4F_decompress() directly.
+ *
+ * Extracted information will fill an existing LZ4F_frameInfo_t structure.
+ * This can be useful for allocation and dictionary identification purposes.
+ *
+ * LZ4F_getFrameInfo() can work in the following situations :
+ *
+ * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress().
+ * It will decode header from `srcBuffer`,
+ * consuming the header and starting the decoding process.
+ *
+ * Input size must be large enough to contain the full frame header.
+ * Frame header size can be known beforehand by LZ4F_headerSize().
+ * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes,
+ * and <= LZ4F_HEADER_SIZE_MAX bytes.
+ * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work.
+ * It's allowed to provide more input data than the header size,
+ * LZ4F_getFrameInfo() will only consume the header.
+ *
+ * If input size is not large enough,
+ * i.e. if it's smaller than the header size,
+ * the function will fail and return an error code.
+ *
+ * 2) After decoding has been started,
+ * it's possible to invoke LZ4F_getFrameInfo() anytime
+ * to extract already decoded frame parameters stored within dctx.
+ *
+ * Note that, if decoding has barely started,
+ * and not yet read enough information to decode the header,
+ * LZ4F_getFrameInfo() will fail.
+ *
+ * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value).
+ * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started,
+ * and when decoding the header has been successful.
+ * Decompression must then resume from (srcBuffer + *srcSizePtr).
+ *
+ * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
+ * or an error code which can be tested using LZ4F_isError().
+ * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4FLIB_API size_t
+LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr);
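+
+/* Illustrative sketch (assumptions: `dctx` was created with
+ * LZ4F_createDecompressionContext(), and `src` holds at least
+ * LZ4F_HEADER_SIZE_MAX bytes of the frame) :
+ *
+ *   LZ4F_frameInfo_t info;
+ *   size_t consumed = srcSize;           in: available bytes, out: bytes read
+ *   size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &consumed);
+ *   if (LZ4F_isError(hint)) { handle error }
+ *   dst capacity can now be sized from info.blockSizeID / info.contentSize;
+ *   decoding resumes at (const char*)src + consumed
+ */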
+
+/*! LZ4F_decompress() :
+ * Call this function repetitively to regenerate data compressed in `srcBuffer`.
+ *
+ * The function requires a valid dctx state.
+ * It will read up to *srcSizePtr bytes from srcBuffer,
+ * and decompress data into dstBuffer, of capacity *dstSizePtr.
+ *
+ * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
+ * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
+ *
+ * The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
+ * Unconsumed source data must be presented again in subsequent invocations.
+ *
+ * `dstBuffer` can freely change between each consecutive function invocation.
+ * `dstBuffer` content will be overwritten.
+ *
+ * @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for the next call.
+ *           Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ *           Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
+ *           This is just a hint though; it's always possible to provide any srcSize.
+ *
+ * When a frame is fully decoded, @return will be 0 (no more data expected).
+ * When provided with more bytes than necessary to decode a frame,
+ * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0.
+ *
+ * If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
+ * After a decompression error, the `dctx` context is not resumable.
+ * Use LZ4F_resetDecompressionContext() to return to clean state.
+ *
+ * After a frame is fully decoded, dctx can be used again to decompress another frame.
+ */
+LZ4FLIB_API size_t
+LZ4F_decompress(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* dOptPtr);
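+
+/* Illustrative streaming sketch (not part of the upstream API) :
+ * decompress a FILE* into a FILE*, re-presenting unconsumed input as the
+ * documentation above requires. The helper name, buffer sizes and stdio I/O
+ * are assumptions for the example only; <stdio.h> is required.
+ *
+ *   static int exampleStreamDecompress(FILE* in, FILE* out)
+ *   {
+ *       LZ4F_dctx* dctx = NULL;
+ *       char srcBuf[4 * 1024];
+ *       char dstBuf[64 * 1024];
+ *       size_t ret = 1;    non-zero => frame not finished yet
+ *       if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
+ *           return -1;
+ *       while (ret != 0) {
+ *           size_t const readSize = fread(srcBuf, 1, sizeof(srcBuf), in);
+ *           const char* srcPtr = srcBuf;
+ *           const char* const srcEnd = srcBuf + readSize;
+ *           if (readSize == 0) break;    input exhausted: truncated frame if ret != 0
+ *           while (srcPtr < srcEnd && ret != 0) {
+ *               size_t dstSize = sizeof(dstBuf);
+ *               size_t srcSize = (size_t)(srcEnd - srcPtr);
+ *               ret = LZ4F_decompress(dctx, dstBuf, &dstSize, srcPtr, &srcSize, NULL);
+ *               if (LZ4F_isError(ret)) { LZ4F_freeDecompressionContext(dctx); return -1; }
+ *               fwrite(dstBuf, 1, dstSize, out);
+ *               srcPtr += srcSize;    advance past the bytes actually consumed
+ *           }
+ *       }
+ *       LZ4F_freeDecompressionContext(dctx);
+ *       return (ret == 0) ? 0 : -1;
+ *   }
+ */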
+
+
+/*! LZ4F_resetDecompressionContext() : added in v1.8.0
+ * In case of an error, the context is left in "undefined" state.
+ * In which case, it's necessary to reset it, before re-using it.
+ * This method can also be used to abruptly stop any unfinished decompression,
+ * and start a new one using same context resources. */
+LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4F_H_09782039843 */
+
+#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
+#define LZ4F_H_STATIC_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* These declarations are not stable and may change in the future.
+ * They are therefore only safe to depend on
+ * when the caller is statically linked against the library.
+ * To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
+ *
+ * By default, these symbols aren't published into shared/dynamic libraries.
+ * You can override this behavior and force them to be published
+ * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
+ * Use at your own risk.
+ */
+#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
+#else
+# define LZ4FLIB_STATIC_API
+#endif
+
+
+/* --- Error List --- */
+#define LZ4F_LIST_ERRORS(ITEM) \
+ ITEM(OK_NoError) \
+ ITEM(ERROR_GENERIC) \
+ ITEM(ERROR_maxBlockSize_invalid) \
+ ITEM(ERROR_blockMode_invalid) \
+ ITEM(ERROR_contentChecksumFlag_invalid) \
+ ITEM(ERROR_compressionLevel_invalid) \
+ ITEM(ERROR_headerVersion_wrong) \
+ ITEM(ERROR_blockChecksum_invalid) \
+ ITEM(ERROR_reservedFlag_set) \
+ ITEM(ERROR_allocation_failed) \
+ ITEM(ERROR_srcSize_tooLarge) \
+ ITEM(ERROR_dstMaxSize_tooSmall) \
+ ITEM(ERROR_frameHeader_incomplete) \
+ ITEM(ERROR_frameType_unknown) \
+ ITEM(ERROR_frameSize_wrong) \
+ ITEM(ERROR_srcPtr_wrong) \
+ ITEM(ERROR_decompressionFailed) \
+ ITEM(ERROR_headerChecksum_invalid) \
+ ITEM(ERROR_contentChecksum_invalid) \
+ ITEM(ERROR_frameDecoding_alreadyStarted) \
+ ITEM(ERROR_compressionState_uninitialized) \
+ ITEM(ERROR_parameter_null) \
+ ITEM(ERROR_maxCode)
+
+#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
+
+/* enum list is exposed, to handle specific errors */
+typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
+ _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes;
+
+LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
+
+
+/*! LZ4F_getBlockSize() :
+ * Return, in scalar format (size_t),
+ * the maximum block size associated with blockSizeID.
+**/
+LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID);
+
+/*! LZ4F_uncompressedUpdate() :
+ * LZ4F_uncompressedUpdate() can be called repetitively to add as much uncompressed data as necessary.
+ * Important rule: dstCapacity MUST be large enough to store the entire source buffer,
+ * as no compression is done for this operation.
+ * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode).
+ * After an error, the state is left undefined, and must be re-initialized or freed.
+ * If a compressed block was written previously, buffered data is flushed
+ * before uncompressed data is appended.
+ * This is only supported when LZ4F_blockIndependent is used.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
+ * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ */
+LZ4FLIB_STATIC_API size_t
+LZ4F_uncompressedUpdate(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+/**********************************
+ * Bulk processing dictionary API
+ *********************************/
+
+/* A Dictionary is useful for the compression of small messages (KB range).
+ * It dramatically improves compression efficiency.
+ *
+ * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
+ * Best results are generally achieved by using Zstandard's Dictionary Builder
+ * to generate a high-quality dictionary from a set of samples.
+ *
+ * Loading a dictionary has a cost, since it involves construction of tables.
+ * The Bulk processing dictionary API makes it possible to share this cost
+ * over an arbitrary number of compression jobs, even concurrently,
+ * markedly improving compression latency for these cases.
+ *
+ * The same dictionary will have to be used on the decompression side
+ * for decoding to be successful.
+ * To help identify the correct dictionary at decoding stage,
+ * the frame header allows optional embedding of a dictID field.
+ */
+typedef struct LZ4F_CDict_s LZ4F_CDict;
+
+/*! LZ4F_createCDict() :
+ *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
+ *  LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ *  An LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ *  `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within the CDict */
+LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
+LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
+
+
+/*! LZ4F_compressFrame_usingCDict() :
+ *  Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
+ *  cctx must point to a context created by LZ4F_createCompressionContext().
+ *  If cdict==NULL, compress without a dictionary.
+ *  dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ *  If this condition is not respected, the function will fail (@return an errorCode).
+ *  The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ *  but this is not recommended, since the preferences structure is the only way to provide a dictID for the frame header.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t
+LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr);
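+
+/* Illustrative sketch (assumptions: `cctx` already created, `dictBuf`/`dictSize`
+ * hold the shared dictionary, and the dictID value is arbitrary) :
+ *
+ *   LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize);
+ *   LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
+ *   prefs.frameInfo.dictID = 1234;    echoed in the frame header for the decoder
+ *   { size_t const cSize = LZ4F_compressFrame_usingCDict(
+ *             cctx, dst, dstCapacity, src, srcSize, cdict, &prefs);
+ *     if (LZ4F_isError(cSize)) { handle error }
+ *   }
+ *   LZ4F_freeCDict(cdict);    once no more compression jobs use it
+ */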
+
+
+/*! LZ4F_compressBegin_usingCDict() :
+ * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
+ * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ *  `prefsPtr` is optional : you may provide NULL as argument,
+ *  however, the preferences structure is the only way to provide a dictID for the frame header.
+ * @return : number of bytes written into dstBuffer for the header,
+ * or an error code (which can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t
+LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* prefsPtr);
+
+
+/*! LZ4F_decompress_usingDict() :
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+** It must remain accessible throughout the entire frame decoding. */
+LZ4FLIB_STATIC_API size_t
+LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr);
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass custom allocation/free functions.
+ * LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size);
+typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size);
+typedef void (*LZ4F_FreeFunction) (void* opaqueState, void* address);
+typedef struct {
+ LZ4F_AllocFunction customAlloc;
+ LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */
+ LZ4F_FreeFunction customFree;
+ void* opaqueState;
+} LZ4F_CustomMem;
+static
+#ifdef __GNUC__
+__attribute__((__unused__))
+#endif
+LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+
+LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version);
+LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version);
+LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize);
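+
+/* Illustrative sketch (not part of the upstream API) : routing all LZ4F
+ * allocations through custom functions. The callback names are assumptions;
+ * <stdlib.h> is required. customCalloc is left NULL, so the library falls
+ * back to customAlloc + memset.
+ *
+ *   static void* exampleAlloc(void* state, size_t size) { (void)state; return malloc(size); }
+ *   static void  exampleFree (void* state, void* ptr)   { (void)state; free(ptr); }
+ *
+ *   LZ4F_CustomMem const cmem = { exampleAlloc, NULL, exampleFree, NULL };
+ *   LZ4F_cctx* const cctx = LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
+ *   if (cctx == NULL) { handle allocation failure }
+ */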
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */
diff --git a/mfbt/lz4/lz4frame_static.h b/mfbt/lz4/lz4frame_static.h
new file mode 100644
index 0000000000..2b44a63155
--- /dev/null
+++ b/mfbt/lz4/lz4frame_static.h
@@ -0,0 +1,47 @@
+/*
+ LZ4 auto-framing library
+ Header File for static linking only
+ Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#ifndef LZ4FRAME_STATIC_H_0398209384
+#define LZ4FRAME_STATIC_H_0398209384
+
+/* The declarations that formerly were made here have been merged into
+ * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward,
+ * it is recommended to simply include that header directly.
+ */
+
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+
+#endif /* LZ4FRAME_STATIC_H_0398209384 */
diff --git a/mfbt/lz4/lz4hc.c b/mfbt/lz4/lz4hc.c
new file mode 100644
index 0000000000..b21ad6bb59
--- /dev/null
+++ b/mfbt/lz4/lz4hc.c
@@ -0,0 +1,1631 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+
+
+/* *************************************
+* Tuning Parameter
+***************************************/
+
+/*! HEAPMODE :
+ * Select how the default compression function will allocate its workspace memory,
+ * on the stack (0:fastest), or on the heap (1:requires malloc()).
+ * Since the workspace is rather large, heap mode is recommended.
+**/
+#ifndef LZ4HC_HEAPMODE
+# define LZ4HC_HEAPMODE 1
+#endif
+
+
+/*=== Dependency ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+
+
+/*=== Common definitions ===*/
+#if defined(__GNUC__)
+# pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#if defined (__clang__)
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
+#define LZ4_COMMONDEFS_ONLY
+#ifndef LZ4_SRC_INCLUDED
+#include "lz4.c" /* LZ4_count, constants, mem */
+#endif
+
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
+/*=== Constants ===*/
+#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
+#define LZ4_OPT_NUM (1<<12)
+
+
+/*=== Macros ===*/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
+#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
+#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
+#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
+/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
+#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
+
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+
+
+/**************************************
+* HC Compression
+**************************************/
+static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
+{
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+}
+
+static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
+{
+ size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
+ size_t newStartingOffset = bufferSize + hc4->dictLimit;
+ assert(newStartingOffset >= bufferSize); /* check overflow */
+ if (newStartingOffset > 1 GB) {
+ LZ4HC_clearTables(hc4);
+ newStartingOffset = 0;
+ }
+ newStartingOffset += 64 KB;
+ hc4->nextToUpdate = (U32)newStartingOffset;
+ hc4->prefixStart = start;
+ hc4->end = start;
+ hc4->dictStart = start;
+ hc4->dictLimit = (U32)newStartingOffset;
+ hc4->lowLimit = (U32)newStartingOffset;
+}
+
+
+/* Update chains up to ip (excluded) */
+LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const hashTable = hc4->hashTable;
+ const BYTE* const prefixPtr = hc4->prefixStart;
+ U32 const prefixIdx = hc4->dictLimit;
+ U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
+ U32 idx = hc4->nextToUpdate;
+ assert(ip >= prefixPtr);
+ assert(target >= prefixIdx);
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
+ size_t delta = idx - hashTable[h];
+ if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
+ DELTANEXTU16(chainTable, idx) = (U16)delta;
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ hc4->nextToUpdate = target;
+}
+
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
+ const BYTE* const iMin, const BYTE* const mMin)
+{
+ int back = 0;
+ int const min = (int)MAX(iMin - ip, mMin - match);
+ assert(min <= 0);
+ assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
+ assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
+ while ( (back > min)
+ && (ip[back-1] == match[back-1]) )
+ back--;
+ return back;
+}
+
+#if defined(_MSC_VER)
+# define LZ4HC_rotl32(x,r) _rotl(x,r)
+#else
+# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
+
+
+static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
+{
+ size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
+ if (bitsToRotate == 0) return pattern;
+ return LZ4HC_rotl32(pattern, (int)bitsToRotate);
+}
+
+/* LZ4HC_countPattern() :
+ * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
+static unsigned
+LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
+{
+ const BYTE* const iStart = ip;
+ reg_t const pattern = (sizeof(pattern)==8) ?
+ (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
+
+ while (likely(ip < iEnd-(sizeof(pattern)-1))) {
+ reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
+ if (!diff) { ip+=sizeof(pattern); continue; }
+ ip += LZ4_NbCommonBytes(diff);
+ return (unsigned)(ip - iStart);
+ }
+
+ if (LZ4_isLittleEndian()) {
+ reg_t patternByte = pattern;
+ while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
+ ip++; patternByte >>= 8;
+ }
+ } else { /* big endian */
+ U32 bitOffset = (sizeof(pattern)*8) - 8;
+ while (ip < iEnd) {
+ BYTE const byte = (BYTE)(pattern >> bitOffset);
+ if (*ip != byte) break;
+ ip ++; bitOffset -= 8;
+ } }
+
+ return (unsigned)(ip - iStart);
+}
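+
+/* Worked example: with input "aaaaaaab" and pattern32 == 0x61616161 ("aaaa"),
+ * LZ4HC_countPattern returns 7, stopping at the 'b'. On 64-bit targets the
+ * pattern is first widened to 0x6161616161616161 so that 8 bytes are compared
+ * per iteration. */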
+
+/* LZ4HC_reverseCountPattern() :
+ * pattern must be a sample of a repetitive pattern of length 1, 2 or 4 (but not 3!),
+ * read using natural platform endianness */
+static unsigned
+LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
+{
+ const BYTE* const iStart = ip;
+
+ while (likely(ip >= iLow+4)) {
+ if (LZ4_read32(ip-4) != pattern) break;
+ ip -= 4;
+ }
+ { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
+ while (likely(ip>iLow)) {
+ if (ip[-1] != *bytePtr) break;
+ ip--; bytePtr--;
+ } }
+ return (unsigned)(iStart - ip);
+}
+
+/* LZ4HC_protectDictEnd() :
+ * Checks whether the match lies within the last 3 bytes of the dictionary,
+ * where reading the 4-byte MINMATCH would overrun the dictionary end.
+ * @returns true if the match index is okay (i.e. not within those last 3 bytes).
+ */
+static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
+{
+ return ((U32)((dictLimit - 1) - matchIndex) >= 3);
+}
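+
+/* Worked example: with dictLimit == 100, matchIndex == 98 gives
+ * (U32)(99 - 98) == 1 < 3, so the match is rejected (a 4-byte read at index
+ * 98 would cross the dictionary end); matchIndex == 96 gives 3 >= 3 and is
+ * accepted, since bytes 96..99 all lie below dictLimit. */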
+
+typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
+typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
+
+LZ4_FORCE_INLINE int
+LZ4HC_InsertAndGetWiderMatch (
+ LZ4HC_CCtx_internal* const hc4,
+ const BYTE* const ip,
+ const BYTE* const iLowLimit, const BYTE* const iHighLimit,
+ int longest,
+ const BYTE** matchpos,
+ const BYTE** startpos,
+ const int maxNbAttempts,
+ const int patternAnalysis, const int chainSwap,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const HashTable = hc4->hashTable;
+ const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
+ const BYTE* const prefixPtr = hc4->prefixStart;
+ const U32 prefixIdx = hc4->dictLimit;
+ const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
+ const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex);
+ const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
+ const BYTE* const dictStart = hc4->dictStart;
+ const U32 dictIdx = hc4->lowLimit;
+ const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
+ int const lookBackLength = (int)(ip-iLowLimit);
+ int nbAttempts = maxNbAttempts;
+ U32 matchChainPos = 0;
+ U32 const pattern = LZ4_read32(ip);
+ U32 matchIndex;
+ repeat_state_e repeat = rep_untested;
+ size_t srcPatternLength = 0;
+
+ DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
+ /* First Match */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+ DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
+ matchIndex, lowestMatchIndex);
+
+ while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
+ int matchLength=0;
+ nbAttempts--;
+ assert(matchIndex < ipIndex);
+ if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
+ /* do nothing */
+ } else if (matchIndex >= prefixIdx) { /* within current Prefix */
+ const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx;
+ assert(matchPtr < ip);
+ assert(longest >= 1);
+ if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
+ if (LZ4_read32(matchPtr) == pattern) {
+ int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
+ matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
+ } } }
+ } else { /* lowestMatchIndex <= matchIndex < dictLimit */
+ const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
+ assert(matchIndex >= dictIdx);
+ if ( likely(matchIndex <= prefixIdx - 4)
+ && (LZ4_read32(matchPtr) == pattern) ) {
+ int back = 0;
+ const BYTE* vLimit = ip + (prefixIdx - matchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
+ matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit);
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip + back;
+ } } }
+
+ if (chainSwap && matchLength==longest) { /* better match => select a better chain */
+ assert(lookBackLength==0); /* search forward only */
+ if (matchIndex + (U32)longest <= ipIndex) {
+ int const kTrigger = 4;
+ U32 distanceToNextMatch = 1;
+ int const end = longest - MINMATCH + 1;
+ int step = 1;
+ int accel = 1 << kTrigger;
+ int pos;
+ for (pos = 0; pos < end; pos += step) {
+ U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
+ step = (accel++ >> kTrigger);
+ if (candidateDist > distanceToNextMatch) {
+ distanceToNextMatch = candidateDist;
+ matchChainPos = (U32)pos;
+ accel = 1 << kTrigger;
+ } }
+ if (distanceToNextMatch > 1) {
+ if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
+ matchIndex -= distanceToNextMatch;
+ continue;
+ } } }
+
+ { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
+ if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
+ U32 const matchCandidateIdx = matchIndex-1;
+ /* may be a repeated pattern */
+ if (repeat == rep_untested) {
+ if ( ((pattern & 0xFFFF) == (pattern >> 16))
+ & ((pattern & 0xFF) == (pattern >> 24)) ) {
+ repeat = rep_confirmed;
+ srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
+ } else {
+ repeat = rep_not;
+ } }
+ if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
+ && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
+ const int extDict = matchCandidateIdx < prefixIdx;
+ const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx;
+ if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
+ const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
+ size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
+ if (extDict && matchPtr + forwardPatternLength == iLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
+ forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
+ }
+ { const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
+ size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
+ size_t currentSegmentLength;
+ if (!extDict
+ && matchPtr - backLength == prefixPtr
+ && dictIdx < prefixIdx) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
+ backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
+ }
+                    /* Limit backLength so it does not go further back than lowestMatchIndex */
+ backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
+ assert(matchCandidateIdx - backLength >= lowestMatchIndex);
+ currentSegmentLength = backLength + forwardPatternLength;
+                    /* Adjust to the end of the pattern if the source pattern fits, otherwise to the beginning of the pattern */
+ if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
+ && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
+ U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
+ if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
+ matchIndex = newMatchIndex;
+ else {
+ /* Can only happen if started in the prefix */
+ assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
+ matchIndex = prefixIdx;
+ }
+ } else {
+ U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
+ if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
+ assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
+ matchIndex = prefixIdx;
+ } else {
+ matchIndex = newMatchIndex;
+ if (lookBackLength==0) { /* no back possible */
+ size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
+ if ((size_t)longest < maxML) {
+ assert(prefixPtr - prefixIdx + matchIndex != ip);
+ if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
+ assert(maxML < 2 GB);
+ longest = (int)maxML;
+ *matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip;
+ }
+ { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
+ if (distToNextPattern > matchIndex) break; /* avoid overflow */
+ matchIndex -= distToNextPattern;
+ } } } } }
+ continue;
+ } }
+ } } /* PA optimization */
+
+ /* follow current chain */
+ matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
+
+    } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) */
+
+ if ( dict == usingDictCtxHc
+ && nbAttempts > 0
+ && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
+ size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
+ U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+ assert(dictEndOffset <= 1 GB);
+ matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
+ while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
+ const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;
+
+ if (LZ4_read32(matchPtr) == pattern) {
+ int mlt;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
+ mlt -= back;
+ if (mlt > longest) {
+ longest = mlt;
+ *matchpos = prefixPtr - prefixIdx + matchIndex + back;
+ *startpos = ip + back;
+ } }
+
+ { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
+ dictMatchIndex -= nextOffset;
+ matchIndex -= nextOffset;
+ } } }
+
+ return longest;
+}
+
+LZ4_FORCE_INLINE int
+LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE** matchpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const dictCtx_directive dict)
+{
+ const BYTE* uselessPtr = ip;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
+}
+
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ const BYTE* const match,
+ limitedOutput_directive limit,
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+ size_t length;
+ BYTE* const token = op++;
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+ static const BYTE* start = NULL;
+ static U32 totalCost = 0;
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
+ U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+ U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+ U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+ if (start==NULL) start = anchor; /* only works for single segment */
+ /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
+ pos,
+ (U32)(ip - anchor), matchLength, (U32)(ip-match),
+ cost, totalCost);
+ totalCost += cost;
+#endif
+
+ /* Encode Literal length */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
+ if (length >= RUN_MASK) {
+ size_t len = length - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(length << ML_BITS);
+ }
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
+
+ /* Encode Offset */
+ assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
+
+ /* Encode MatchLength */
+ assert(matchLength >= MINMATCH);
+ length = (size_t)matchLength - MINMATCH;
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
+ if (length >= ML_MASK) {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ } else {
+ *token += (BYTE)(length);
+ }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
+}
+#undef ip
+#undef op
+#undef anchor
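+
+/* For illustration, the sequence layout emitted above follows the LZ4 block
+ * format: token = (litLen << 4) | (matchLen - MINMATCH), each nibble
+ * saturating at 15 with 255-valued extension bytes following. Worked example,
+ * 20 literals followed by a 6-byte match at distance 48:
+ *
+ *     0xF2 0x05 <20 literal bytes> 0x30 0x00
+ *
+ * token 0xF2 : high nibble 15 (saturated; extension byte 0x05 == 20-15),
+ *              low nibble 2 (matchLen 6 - MINMATCH 4);
+ * offset 48  : stored little-endian on 2 bytes. */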
+
+LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const source,
+ char* const dest,
+ int* srcSizePtr,
+ int const maxOutputSize,
+ int maxNbAttempts,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ const int inputSize = *srcSizePtr;
+ const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = (iend - LASTLITERALS);
+
+ BYTE* optr = (BYTE*) dest;
+ BYTE* op = (BYTE*) dest;
+ BYTE* oend = op + maxOutputSize;
+
+ int ml0, ml, ml2, ml3;
+ const BYTE* start0;
+ const BYTE* ref0;
+ const BYTE* ref = NULL;
+ const BYTE* start2 = NULL;
+ const BYTE* ref2 = NULL;
+ const BYTE* start3 = NULL;
+ const BYTE* ref3 = NULL;
+
+ /* init */
+ *srcSizePtr = 0;
+    if (limit == fillOutput) oend -= LASTLITERALS;                  /* Hack to support the LZ4 format restriction */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
+ if (ml<MINMATCH) { ip++; continue; }
+
+        /* saved, in case we skip too much */
+ start0 = ip; ref0 = ref; ml0 = ml;
+
+_Search2:
+ if (ip+ml <= mflimit) {
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml2 = ml;
+ }
+
+ if (ml2 == ml) { /* No better match => encode ML1 */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ continue;
+ }
+
+ if (start0 < ip) { /* first match was skipped at least once */
+ if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
+ } }
+
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) { /* First Match too small : removed */
+ ml = ml2;
+ ip = start2;
+            ref = ref2;
+ goto _Search2;
+ }
+
+_Search3:
+ /* At this stage, we have :
+ * ml2 > ml1, and
+ * ip1+3 <= ip2 (usually < ip1+ml1) */
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = ml;
+ if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
+ if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = new_ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ }
+ /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
+
+ if (start2 + ml2 <= mflimit) {
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml3 = ml2;
+ }
+
+ if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip+ml) ml = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start2;
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip+ml) {
+ int correction = (int)(ip+ml - start2);
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ if (ml2 < MINMATCH) {
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ }
+ }
+
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start3;
+ ref = ref3;
+ ml = ml3;
+
+ start0 = start2;
+ ref0 = ref2;
+ ml0 = ml2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ goto _Search3;
+ }
+
+ /*
+ * OK, now we have 3 ascending matches;
+ * let's write the first one ML1.
+ * ip & ref are known; Now decide ml.
+ */
+ if (start2 < ip+ml) {
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
+ if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ } else {
+ ml = (int)(start2 - ip);
+ }
+ }
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+
+ /* ML2 becomes ML1 */
+ ip = start2; ref = ref2; ml = ml2;
+
+ /* ML3 becomes ML2 */
+ start2 = start3; ref2 = ref3; ml2 = ml3;
+
+ /* let's find a new ML3 */
+ goto _Search3;
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) return 0;
+ /* adapt lastRunSize to fill 'dest' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ return (int) (((char*)op)-dest);
+
+_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ml and ref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
+ op = optr; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ml >= 0);
+ if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ } }
+ goto _last_literals;
+ }
+ /* compression failed */
+ return 0;
+}
+
+
+static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
+ const char* const source, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ int const nbSearches, size_t sufficient_len,
+ const limitedOutput_directive limit, int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed);
+
+
+LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
+ typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+ } cParams_t;
+ static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
+ { lz4hc, 2, 16 }, /* 0, unused */
+ { lz4hc, 2, 16 }, /* 1, unused */
+ { lz4hc, 2, 16 }, /* 2, unused */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512,128 }, /*11 */
+ { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+ };
+
+    DEBUGLOG(4, "LZ4HC_compress_generic_internal(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ ctx, src, *srcSizePtr, limit);
+
+ if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
+
+ ctx->end += *srcSizePtr;
+ if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ { cParams_t const cParam = clTable[cLevel];
+ HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
+ int result;
+
+ if (cParam.strat == lz4hc) {
+ result = LZ4HC_compress_hashChain(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, limit, dict);
+ } else {
+ assert(cParam.strat == lz4opt);
+ result = LZ4HC_compress_optimal(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, cParam.targetLength, limit,
+ cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor);
+ }
+ if (result <= 0) ctx->dirty = 1;
+ return result;
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
+
+static int
+LZ4HC_compress_generic_noDictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
+}
+
+static int
+LZ4HC_compress_generic_dictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
+ assert(ctx->dictCtx != NULL);
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB) {
+ LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
+ }
+}
+
+static int
+LZ4HC_compress_generic (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ }
+}
+
+
+int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
+
+static size_t LZ4_streamHC_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+/* state is presumed correctly initialized,
+ * in which case its size and alignment have already been validated */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
+ LZ4HC_init_internal (ctx, (const BYTE*)src);
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
+}
+
+int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
+}
+
+int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ int cSize;
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
+ if (statePtr==NULL) return 0;
+#else
+ LZ4_streamHC_t state;
+ LZ4_streamHC_t* const statePtr = &state;
+#endif
+ cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ FREEMEM(statePtr);
+#endif
+ return cSize;
+}
+
+/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
+int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
+ LZ4_setCompressionLevel(ctx, cLevel);
+ return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
+}
+
+
+
+/**************************************
+* Streaming Functions
+**************************************/
+/* allocation */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_streamHC_t* LZ4_createStreamHC(void)
+{
+ LZ4_streamHC_t* const state =
+ (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL) return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
+}
+
+int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
+{
+ DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
+ if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
+ FREEMEM(LZ4_streamHCPtr);
+ return 0;
+}
+#endif
+
+
+LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
+{
+ LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
+ if (buffer == NULL) return NULL;
+ if (size < sizeof(LZ4_streamHC_t)) return NULL;
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+ /* init */
+ { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
+ return LZ4_streamHCPtr;
+}
+
+/* just a stub */
+void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (LZ4_streamHCPtr->internal_donotuse.dirty) {
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ } else {
+ /* preserve end - prefixStart : can trigger clearTable's threshold */
+ if (LZ4_streamHCPtr->internal_donotuse.end != NULL) {
+ LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.prefixStart;
+ } else {
+ assert(LZ4_streamHCPtr->internal_donotuse.prefixStart == NULL);
+ }
+ LZ4_streamHCPtr->internal_donotuse.prefixStart = NULL;
+ LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
+ }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
+ if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
+}
+
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
+{
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
+}
+
+/* LZ4_loadDictHC() :
+ * LZ4_streamHCPtr is presumed properly initialized */
+int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* dictionary, int dictSize)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
+ assert(LZ4_streamHCPtr != NULL);
+ if (dictSize > 64 KB) {
+ dictionary += (size_t)dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ /* need a full initialization, there are bad side-effects when using resetFast() */
+ { int const cLevel = ctxPtr->compressionLevel;
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
+ }
+ LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
+ ctxPtr->end = (const BYTE*)dictionary + dictSize;
+ if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+ return dictSize;
+}
+
+void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
+ working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
+}
+
+/* compression */
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
+{
+ DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
+ if (ctxPtr->end >= ctxPtr->prefixStart + 4)
+ LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
+
+ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
+ ctxPtr->prefixStart = newBlock;
+ ctxPtr->end = newBlock;
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
+
+ /* cannot reference an extDict and a dictCtx at the same time */
+ ctxPtr->dictCtx = NULL;
+}
+
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
+ assert(ctxPtr != NULL);
+ /* auto-init if forgotten */
+ if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE*)src != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
+
+ /* Check overlapping input/dictionary space */
+ { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
+ const BYTE* const dictBegin = ctxPtr->dictStart;
+ const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
+ if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
+ if (sourceEnd > dictEnd) sourceEnd = dictEnd;
+ ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
+ ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) {
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ } } }
+
+ return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
+}
+
+int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
+{
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
+}
+
+
+
+/* LZ4_saveDictHC :
+ * Saves the recent history content into a user-provided buffer,
+ * which is then used to continue compression. */
+int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
+{
+ LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ assert(prefixSize >= 0);
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ if (dictSize < 4) dictSize = 0;
+ if (dictSize > prefixSize) dictSize = prefixSize;
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ LZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
+ streamPtr->end = (const BYTE*)safeBuffer + dictSize;
+ streamPtr->prefixStart = streamPtr->end - dictSize;
+ streamPtr->dictLimit = endIndex - (U32)dictSize;
+ streamPtr->lowLimit = endIndex - (U32)dictSize;
+ streamPtr->dictStart = streamPtr->prefixStart;
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+
+
+/***************************************************
+* Deprecated Functions
+***************************************************/
+
+/* These functions currently generate deprecation warnings */
+
+/* Wrappers for deprecated compression functions */
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
+
+
+/* Deprecated streaming functions */
+int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }
+
+/* state is presumed correctly sized, i.e. >= sizeof(LZ4_streamHC_t)
+ * @return : 0 on success, !=0 if error */
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
+ if (hc4 == NULL) return 1; /* init failed */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void* LZ4_createHC (const char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
+ if (hc4 == NULL) return NULL; /* not enough memory */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return hc4;
+}
+
+int LZ4_freeHC (void* LZ4HC_Data)
+{
+ if (!LZ4HC_Data) return 0; /* support free on NULL */
+ FREEMEM(LZ4HC_Data);
+ return 0;
+}
+#endif
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
+ LZ4_streamHC_t* const ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+ const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit;
+ LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
+ /* avoid const char * -> char * conversion warning :( */
+ return (char*)(uptrval)bufferStart;
+}
+
+
+/* ================================================
+ * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
+ * ===============================================*/
+typedef struct {
+ int price;
+ int off;
+ int mlen;
+ int litlen;
+} LZ4HC_optimal_t;
+
+/* price in bytes */
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
+{
+ int price = litlen;
+ assert(litlen >= 0);
+ if (litlen >= (int)RUN_MASK)
+ price += 1 + ((litlen-(int)RUN_MASK) / 255);
+ return price;
+}
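+
+/* Worked examples: LZ4HC_literalsPrice(10) == 10 (the token nibble is free);
+ * LZ4HC_literalsPrice(20) == 20 + 1 == 21 (one extension byte once
+ * litlen >= RUN_MASK); LZ4HC_literalsPrice(300) == 300 + 1 + (300-15)/255 == 302. */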
+
+
+/* requires mlen >= MINMATCH */
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
+{
+ int price = 1 + 2 ; /* token + 16-bit offset */
+ assert(litlen >= 0);
+ assert(mlen >= MINMATCH);
+
+ price += LZ4HC_literalsPrice(litlen);
+
+ if (mlen >= (int)(ML_MASK+MINMATCH))
+ price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
+
+ return price;
+}
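+
+/* Worked example: LZ4HC_sequencePrice(5, 18) == 1 + 2 + 5 == 8 bytes
+ * (token + offset + literals; matchlen 18 < ML_MASK+MINMATCH == 19 fits in
+ * the token), while LZ4HC_sequencePrice(5, 300) == 8 + 1 + (300-19)/255 == 10. */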
+
+
+typedef struct {
+ int off;
+ int len;
+} LZ4HC_match_t;
+
+LZ4_FORCE_INLINE LZ4HC_match_t
+LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
+ const BYTE* ip, const BYTE* const iHighLimit,
+ int minLen, int nbSearches,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ LZ4HC_match_t match = { 0 , 0 };
+ const BYTE* matchPtr = NULL;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+ if (matchLength <= minLen) return match;
+ if (favorDecSpeed) {
+ if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
+ }
+ match.len = matchLength;
+ match.off = (int)(ip-matchPtr);
+ return match;
+}
+
+
+static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
+ const char* const source,
+ char* dst,
+ int* srcSizePtr,
+ int dstCapacity,
+ int const nbSearches,
+ size_t sufficient_len,
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ int retval = 0;
+#define TRAILING_LITERALS 3
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
+ LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
+#endif
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + *srcSizePtr;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+ BYTE* op = (BYTE*) dst;
+ BYTE* opSaved = (BYTE*) dst;
+ BYTE* oend = op + dstCapacity;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ const BYTE* ovref = NULL;
+
+ /* init */
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ if (opt == NULL) goto _return_label;
+#endif
+ DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
+ *srcSizePtr = 0;
+    if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack to support the LZ4 format restriction */
+ if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ int const llen = (int)(ip - anchor);
+ int best_mlen, best_off;
+ int cur, last_match_pos = 0;
+
+ LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ if (firstMatch.len==0) { ip++; continue; }
+
+ if ((size_t)firstMatch.len > sufficient_len) {
+ /* good enough solution : immediate encoding */
+ int const firstML = firstMatch.len;
+ const BYTE* const matchPos = ip - firstMatch.off;
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovref = matchPos;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ /* set prices for first positions (literals) */
+ { int rPos;
+ for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
+ int const cost = LZ4HC_literalsPrice(llen + rPos);
+ opt[rPos].mlen = 1;
+ opt[rPos].off = 0;
+ opt[rPos].litlen = llen + rPos;
+ opt[rPos].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ rPos, cost, opt[rPos].litlen);
+ } }
+ /* set prices using initial match */
+ { int mlen = MINMATCH;
+ int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
+ int const offset = firstMatch.off;
+ assert(matchML < LZ4_OPT_NUM);
+ for ( ; mlen <= matchML ; mlen++) {
+ int const cost = LZ4HC_sequencePrice(llen, mlen);
+ opt[mlen].mlen = mlen;
+ opt[mlen].off = offset;
+ opt[mlen].litlen = llen;
+ opt[mlen].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+ mlen, cost, mlen);
+ } }
+ last_match_pos = firstMatch.len;
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+
+ /* check further positions */
+ for (cur = 1; cur < last_match_pos; cur++) {
+ const BYTE* const curPtr = ip + cur;
+ LZ4HC_match_t newMatch;
+
+ if (curPtr > mflimit) break;
+ DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
+ cur, opt[cur].price, opt[cur+1].price, cur+1);
+ if (fullUpdate) {
+ /* not useful to search here if next position has same (or lower) cost */
+ if ( (opt[cur+1].price <= opt[cur].price)
+ /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+ && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
+ continue;
+ } else {
+ /* not useful to search here if next position has same (or lower) cost */
+ if (opt[cur+1].price <= opt[cur].price) continue;
+ }
+
+ DEBUGLOG(7, "search at rPos:%u", cur);
+ if (fullUpdate)
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ else
+                /* only consider matches long enough to extend past last_match_pos; slightly faster, but misses a few bytes */
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
+ if (!newMatch.len) continue;
+
+ if ( ((size_t)newMatch.len > sufficient_len)
+ || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
+ /* immediate encoding */
+ best_mlen = newMatch.len;
+ best_off = newMatch.off;
+ last_match_pos = cur + 1;
+ goto encode;
+ }
+
+ /* before match : set price with literals at beginning */
+ { int const baseLitlen = opt[cur].litlen;
+ int litlen;
+ for (litlen = 1; litlen < MINMATCH; litlen++) {
+ int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
+ int const pos = cur + litlen;
+ if (price < opt[pos].price) {
+ opt[pos].mlen = 1; /* literal */
+ opt[pos].off = 0;
+ opt[pos].litlen = baseLitlen+litlen;
+ opt[pos].price = price;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
+ pos, price, opt[pos].litlen);
+ } } }
+
+ /* set prices using match at position = cur */
+ { int const matchML = newMatch.len;
+ int ml = MINMATCH;
+
+ assert(cur + newMatch.len < LZ4_OPT_NUM);
+ for ( ; ml <= matchML ; ml++) {
+ int const pos = cur + ml;
+ int const offset = newMatch.off;
+ int price;
+ int ll;
+ DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
+ pos, last_match_pos);
+ if (opt[cur].mlen == 1) {
+ ll = opt[cur].litlen;
+ price = ((cur > ll) ? opt[cur - ll].price : 0)
+ + LZ4HC_sequencePrice(ll, ml);
+ } else {
+ ll = 0;
+ price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
+ }
+
+ assert((U32)favorDecSpeed <= 1);
+ if (pos > last_match_pos+TRAILING_LITERALS
+ || price <= opt[pos].price - (int)favorDecSpeed) {
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
+ pos, price, ml);
+ assert(pos < LZ4_OPT_NUM);
+ if ( (ml == matchML) /* last pos of last match */
+ && (last_match_pos < pos) )
+ last_match_pos = pos;
+ opt[pos].mlen = ml;
+ opt[pos].off = offset;
+ opt[pos].litlen = ll;
+ opt[pos].price = price;
+ } } }
+ /* complete following positions with literals */
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+        } /* for (cur = 1; cur < last_match_pos; cur++) */
+
+ assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
+ best_mlen = opt[last_match_pos].mlen;
+ best_off = opt[last_match_pos].off;
+ cur = last_match_pos - best_mlen;
+
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+ assert(cur < LZ4_OPT_NUM);
+ assert(last_match_pos >= 1); /* == 1 when only one candidate */
+ DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
+ { int candidate_pos = cur;
+ int selected_matchLength = best_mlen;
+ int selected_offset = best_off;
+ while (1) { /* from end to beginning */
+ int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */
+ int const next_offset = opt[candidate_pos].off;
+ DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
+ opt[candidate_pos].mlen = selected_matchLength;
+ opt[candidate_pos].off = selected_offset;
+ selected_matchLength = next_matchLength;
+ selected_offset = next_offset;
+ if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
+ assert(next_matchLength > 0); /* can be 1, means literal */
+ candidate_pos -= next_matchLength;
+ } }
+
+ /* encode all recorded sequences in order */
+ { int rPos = 0; /* relative position (to ip) */
+ while (rPos < last_match_pos) {
+ int const ml = opt[rPos].mlen;
+ int const offset = opt[rPos].off;
+ if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */
+ rPos += ml;
+ assert(ml >= MINMATCH);
+ assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovref = ip - offset;
+ goto _dest_overflow;
+ } } }
+ } /* while (ip <= mflimit) */
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ retval = (int) ((char*)op-dst);
+ goto _return_label;
+
+_dest_overflow:
+if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+ } }
+ goto _last_literals;
+}
+_return_label:
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ FREEMEM(opt);
+#endif
+ return retval;
+}
diff --git a/mfbt/lz4/lz4hc.h b/mfbt/lz4/lz4hc.h
new file mode 100644
index 0000000000..e937acfefd
--- /dev/null
+++ b/mfbt/lz4/lz4hc.h
@@ -0,0 +1,413 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Header File
+ Copyright (C) 2011-2020, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#ifndef LZ4_HC_H_19834876238432
+#define LZ4_HC_H_19834876238432
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+/* note : lz4hc requires lz4.h/lz4.c for compilation */
+#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
+
+
+/* --- Useful constants --- */
+#define LZ4HC_CLEVEL_MIN 3
+#define LZ4HC_CLEVEL_DEFAULT 9
+#define LZ4HC_CLEVEL_OPT_MIN 10
+#define LZ4HC_CLEVEL_MAX 12
+
+
+/*-************************************
+ * Block Compression
+ **************************************/
+/*! LZ4_compress_HC() :
+ * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
+ * `dst` must be already allocated.
+ * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
+ * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
+ * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
+ * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
+ * @return : the number of bytes written into 'dst'
+ * or 0 if compression fails.
+ */
+LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
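+
+/* Minimal usage sketch (for illustration; `data`/`dataSize` are placeholders
+ * and error handling is reduced to the essentials) :
+ *
+ *     int const bound = LZ4_compressBound(dataSize);
+ *     char* const dst = (char*)malloc((size_t)bound);
+ *     if (dst != NULL) {
+ *         int const cSize = LZ4_compress_HC(data, dst, dataSize, bound,
+ *                                           LZ4HC_CLEVEL_DEFAULT);
+ *         if (cSize > 0) { ... use dst[0..cSize-1] ... }
+ *         free(dst);
+ *     }
+ */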
+
+
+/* Note :
+ * Decompression functions are provided within "lz4.h" (BSD license)
+ */
+
+
+/*! LZ4_compress_HC_extStateHC() :
+ * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
+ * `state` size is provided by LZ4_sizeofStateHC().
+ * The memory segment must be aligned on 8-byte boundaries (which a normal malloc() should do properly).
+ */
+LZ4LIB_API int LZ4_sizeofStateHC(void);
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
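+
+/* A minimal external-state sketch (illustrative; `src`, `dst`, `srcSize` and
+ * `maxDstSize` are assumed caller-provided; malloc() satisfies the alignment
+ * requirement) :
+ *
+ *   #include <stdlib.h>
+ *   void* const state = malloc((size_t)LZ4_sizeofStateHC());
+ *   if (state != NULL) {
+ *       int const cSize = LZ4_compress_HC_extStateHC(state, src, dst, srcSize,
+ *                                                    maxDstSize, LZ4HC_CLEVEL_DEFAULT);
+ *       // ... use cSize bytes of dst; `state` is reusable until free() ...
+ *       free(state);
+ *   }
+ */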
+
+
+/*! LZ4_compress_HC_destSize() : v1.9.0+
+ * Will compress as much data as possible from `src`
+ * to fit into the `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how many bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize,
+ int compressionLevel);
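+
+/* A minimal destSize sketch (illustrative; `state` is assumed allocated as in
+ * the sketch above, `src`/`srcSize`/`dst` caller-provided) :
+ *
+ *   int consumed = srcSize;                 // in : bytes available in `src`
+ *   int const written = LZ4_compress_HC_destSize(state, src, dst,
+ *                                &consumed, 64 * 1024, LZ4HC_CLEVEL_DEFAULT);
+ *   // on success : `written` bytes stored in `dst` (<= the 64 KB budget),
+ *   //              `consumed` bytes read from `src`
+ */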
+
+
+/*-************************************
+ * Streaming Compression
+ * Bufferless synchronous API
+ **************************************/
+ typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
+
+/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
+ * These functions create and release memory for LZ4 HC streaming state.
+ * Newly created states are automatically initialized.
+ * The same state can be used multiple times consecutively,
+ * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
+LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
+
+/*
+ These functions compress data in successive blocks of any size,
+ using previous blocks as dictionary, to improve compression ratio.
+ One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
+ There is an exception for ring buffers, which can be smaller than 64 KB.
+ Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+ Before starting compression, state must be allocated and properly initialized.
+ LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+ Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+ or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+ LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+ which is automatically the case when state is created using LZ4_createStreamHC().
+
+ After reset, a first "fictional block" can be designated as the initial dictionary,
+ using LZ4_loadDictHC() (Optional).
+
+ Invoke LZ4_compress_HC_continue() to compress each successive block.
+ The number of blocks is unlimited.
+ Previous input blocks, including initial dictionary when present,
+ must remain accessible and unmodified during compression.
+
+ It's allowed to update compression level anytime between blocks,
+ using LZ4_setCompressionLevel() (experimental).
+
+ The 'dst' buffer should be sized to handle worst-case scenarios
+ (see LZ4_compressBound(); it ensures compression success).
+ In case of failure, the API does not guarantee recovery,
+ so the state _must_ be reset.
+ To ensure compression success
+ whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+ consider using LZ4_compress_HC_continue_destSize().
+
+ Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+ it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+ The return value of LZ4_saveDictHC() is the size of the dictionary effectively saved into 'safeBuffer' (<= 64 KB).
+
+ After completing a streaming compression,
+ it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+ just by resetting it, using LZ4_resetStreamHC_fast().
+*/
+
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */
+LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
+
+LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
+ const char* src, char* dst,
+ int srcSize, int maxDstSize);
+
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ * Similar to LZ4_compress_HC_continue(),
+ * but will read as much data as possible from `src`
+ * to fit into the `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how many bytes were read from `src`.
+ * Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
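+
+/* A minimal chained-block sketch (illustrative; BLOCK_SIZE, nextBlock() and
+ * writeBlock() are hypothetical, and error handling is reduced to a minimum) :
+ *
+ *   LZ4_streamHC_t* const sHC = LZ4_createStreamHC();
+ *   char dst[LZ4_COMPRESSBOUND(BLOCK_SIZE)];
+ *   const char* blk; int blkSize;
+ *   LZ4_resetStreamHC_fast(sHC, LZ4HC_CLEVEL_DEFAULT);
+ *   while (nextBlock(&blk, &blkSize)) {     // blkSize <= BLOCK_SIZE
+ *       int const cSize = LZ4_compress_HC_continue(sHC, blk, dst, blkSize,
+ *                                                  (int)sizeof(dst));
+ *       if (cSize <= 0) break;              // on failure, the state must be reset
+ *       writeBlock(dst, cSize);
+ *       // `blk` must stay valid while subsequent blocks are compressed
+ *   }
+ *   LZ4_freeStreamHC(sHC);
+ */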
+
+
+
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
+
+/*-******************************************************************
+ * PRIVATE DEFINITIONS :
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ * Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ * Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+
+#define LZ4HC_HASH_LOG 15
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+
+/* Never ever use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+**/
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
+{
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
+ const LZ4_byte* prefixStart; /* Indexes relative to this position */
+ const LZ4_byte* dictStart; /* alternate reference for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more dict */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+ const LZ4HC_CCtx_internal* dictCtx;
+};
+
+#define LZ4_STREAMHC_MINSIZE 262200 /* static size, for inter-version compatibility */
+union LZ4_streamHC_u {
+ char minStateSize[LZ4_STREAMHC_MINSIZE];
+ LZ4HC_CCtx_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically on the stack, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
+
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC(void* buffer, size_t size);
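+
+/* A minimal static-allocation sketch (illustrative; static linking only) :
+ *
+ *   LZ4_streamHC_t stateHC;
+ *   LZ4_streamHC_t* const sHC = LZ4_initStreamHC(&stateHC, sizeof(stateHC));
+ *   // sHC == &stateHC on success, or NULL if the buffer is unsuitable
+ */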
+
+
+/*-************************************
+* Deprecated Functions
+**************************************/
+/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
+
+/* deprecated compression functions */
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, use of
+ * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
+ * than preserve a window-sized chunk of history.
+ */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
+LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
+#endif
+LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
+LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
+
+
+/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
+ * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
+ * which is now the recommended function to start a new stream of blocks,
+ * but cannot be used to initialize a memory segment containing arbitrary garbage data.
+ *
+ * It is recommended to switch to LZ4_initStreamHC().
+ * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
+ */
+LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_H_19834876238432 */
+
+
+/*-**************************************************
+ * !!!!! STATIC LINKING ONLY !!!!!
+ * The following definitions are considered experimental.
+ * They should not be linked from a DLL,
+ * as there is no guarantee of API stability yet.
+ * Prototypes will be promoted to "stable" status
+ * after successful usage in real-life scenarios.
+ ***************************************************/
+#ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
+#ifndef LZ4_HC_SLO_098092834
+#define LZ4_HC_SLO_098092834
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
+#include "lz4.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
+ * It's possible to change compression level
+ * between successive invocations of LZ4_compress_HC_continue*()
+ * for dynamic adaptation.
+ */
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ * The optimal parser will favor decompression speed over compression ratio.
+ * Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
+ */
+LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
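+
+/* A minimal adaptation sketch (illustrative; `sHC` is an assumed,
+ * already-initialized stream) :
+ *
+ *   LZ4_setCompressionLevel(sHC, LZ4HC_CLEVEL_MAX);  // next blocks : best ratio
+ *   LZ4_favorDecompressionSpeed(sHC, 1);             // opt levels : favor decode speed
+ */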
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ * When an LZ4_streamHC_t is known to be in an internally coherent state,
+ * it can often be prepared for a new compression with almost no work, only
+ * sometimes falling back to the full, expensive reset that is always required
+ * when the stream is in an indeterminate state (i.e., the reset performed by
+ * LZ4_resetStreamHC()).
+ *
+ * LZ4_streamHCs are guaranteed to be in a valid state when:
+ * - returned from LZ4_createStreamHC()
+ * - reset by LZ4_resetStreamHC()
+ * - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ * - the stream was in a valid state and was then used in any compression call
+ * that returned success
+ * - the stream was in an indeterminate state and was used in a compression
+ * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ * returned success
+ *
+ * Note:
+ * A stream that was last used in a compression call that returned an error
+ * may be passed to this function. However, it will be fully reset, which will
+ * clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ * A variant of LZ4_compress_HC_extStateHC().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ * "correctly initialized"). From a high level, the difference is that this
+ * function initializes the provided state with a call to
+ * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ * call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
+ void* state,
+ const char* src, char* dst,
+ int srcSize, int dstCapacity,
+ int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ * This is an experimental API that allows for the efficient use of a
+ * static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
+ LZ4_streamHC_t *working_stream,
+ const LZ4_streamHC_t *dictionary_stream);
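+
+/* A minimal no-copy dictionary sketch (illustrative; `dict`/`dictSize` are an
+ * assumed caller-provided buffer that stays valid for the whole session) :
+ *
+ *   LZ4_streamHC_t* const dictStream = LZ4_createStreamHC();
+ *   LZ4_loadDictHC(dictStream, dict, dictSize);          // prepare once
+ *
+ *   LZ4_streamHC_t* const work = LZ4_createStreamHC();
+ *   LZ4_resetStreamHC_fast(work, LZ4HC_CLEVEL_DEFAULT);  // fresh history
+ *   LZ4_attach_HC_dictionary(work, dictStream);          // reference in place
+ *   // ... compress this session with LZ4_compress_HC_continue(work, ...) ...
+ */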
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_SLO_098092834 */
+#endif /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/mfbt/lz4/xxhash.c b/mfbt/lz4/xxhash.c
new file mode 100644
index 0000000000..083b039d70
--- /dev/null
+++ b/mfbt/lz4/xxhash.c
@@ -0,0 +1,43 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://www.xxhash.com
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+
+/*
+ * xxhash.c instantiates functions defined in xxhash.h
+ */
+
+#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */
+#define XXH_IMPLEMENTATION /* access definitions */
+
+#include "xxhash.h"
diff --git a/mfbt/lz4/xxhash.h b/mfbt/lz4/xxhash.h
new file mode 100644
index 0000000000..a18e8c762d
--- /dev/null
+++ b/mfbt/lz4/xxhash.h
@@ -0,0 +1,6773 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Header File
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://www.xxhash.com
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ * - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ * 32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ * 64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ * - Modern 64-bit and 128-bit hash function family which features improved
+ * strength and performance across the board, especially on smaller data.
+ * It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
+ * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
+ * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
+ * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
+ * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
+ * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
+ * | RAM sequential read | | N/A | 28.0 GB/s | N/A |
+ * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
+ * | City64 | | 64 | 22.0 GB/s | 76.6 |
+ * | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
+ * | City128 | | 128 | 21.7 GB/s | 57.7 |
+ * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
+ * | XXH64() | | 64 | 19.4 GB/s | 71.0 |
+ * | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
+ * | Mum | | 64 | 18.0 GB/s | 67.0 |
+ * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
+ * | XXH32() | | 32 | 9.7 GB/s | 71.9 |
+ * | City32 | | 32 | 9.1 GB/s | 66.0 |
+ * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
+ * | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
+ * | SipHash* | | 64 | 3.0 GB/s | 43.2 |
+ * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
+ * | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
+ * | FNV64 | | 64 | 1.2 GB/s | 62.7 |
+ * | Blake2* | | 256 | 1.1 GB/s | 5.1 |
+ * | SHA1* | | 160 | 0.8 GB/s | 5.6 |
+ * | MD5* | | 128 | 0.6 GB/s | 7.8 |
+ * @note
+ * - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ * even though it is mandatory on x64.
+ * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ * by modern standards.
+ * - Small data velocity is a rough average of the algorithm's efficiency for small
+ * data. For more accurate information, see the wiki.
+ * - More benchmarks and strength tests are found on the wiki:
+ * https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ * For functions which take an input and length parameter, the following
+ * requirements are assumed:
+ * - The range from [`input`, `input + length`) is valid, readable memory.
+ * - The only exception is if `length` is `0`; in that case, `input` may be `NULL`.
+ * - For C++, the objects must have the *TriviallyCopyable* property, as the
+ * functions access bytes directly as if they were an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a null terminated string with XXH32().
+ * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ * {
+ * // NULL pointers are only valid if the length is zero
+ * size_t length = (string == NULL) ? 0 : strlen(string);
+ * return XXH32(string, length, seed);
+ * }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of data of unknown size,
+ * even more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <assert.h>
+ * #include "xxhash.h"
+ * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ * XXH64_hash_t hashFile(FILE* f)
+ * {
+ * // Allocate a state struct. Do not just use malloc() or new.
+ * XXH3_state_t* state = XXH3_createState();
+ * assert(state != NULL && "Out of memory!");
+ * // Reset the state to start a new hashing session.
+ * XXH3_64bits_reset(state);
+ * char buffer[4096];
+ * size_t count;
+ * // Read the file in chunks
+ * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ * // Run update() as many times as necessary to process the data
+ * XXH3_64bits_update(state, buffer, count);
+ * }
+ * // Retrieve the finalized hash. This will not change the state.
+ * XXH64_hash_t result = XXH3_64bits_digest(state);
+ * // Free the state. Do not use free().
+ * XXH3_freeState(state);
+ * return result;
+ * }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ * INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #define XXH_IMPLEMENTATION
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
+ * Use these build macros to inline xxhash into the target unit.
+ * Inlining improves performance on small inputs, especially when the length is
+ * expressed as a compile-time constant:
+ *
+ * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ *
+ * It also keeps xxHash symbols private to the unit, so they are not exported.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * @endcode
+ * Do not compile and link xxhash.o as a separate object, as it is not useful.
+ */
+# define XXH_INLINE_ALL
+# undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline.
+ */
+# define XXH_PRIVATE_API
+# undef XXH_PRIVATE_API
+/*!
+ * @brief Emulate a namespace by transparently prefixing all symbols.
+ *
+ * If you want to include _and expose_ xxHash functions from within your own
+ * library, but also want to avoid symbol collisions with other libraries which
+ * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
+ * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
+ * (therefore, avoid empty or numeric values).
+ *
+ * Note that no change is required within the calling program as long as it
+ * includes `xxhash.h`: Regular symbol names will be automatically translated
+ * by this header.
+ */
+# define XXH_NAMESPACE /* YOUR NAME HERE */
+# undef XXH_NAMESPACE
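+
+/* A minimal namespacing sketch (illustrative; `mylib_` is a hypothetical
+ * prefix) :
+ *
+ *   // compile every unit with : -DXXH_NAMESPACE=mylib_
+ *   #include "xxhash.h"
+ *   // source code keeps calling XXH32(), XXH64(), ... unchanged,
+ *   // while the linker sees mylib_XXH32(), mylib_XXH64(), ...
+ */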
+#endif
+
+#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
+ && !defined(XXH_INLINE_ALL_31684351384)
+ /* this section should be traversed only once */
+# define XXH_INLINE_ALL_31684351384
+ /* give access to the advanced API, required to compile implementations */
+# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
+# define XXH_STATIC_LINKING_ONLY
+ /* make all functions private */
+# undef XXH_PUBLIC_API
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+ /* note: this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+
+ /*
+ * This part deals with the special case where a unit wants to inline xxHash,
+ * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
+ * such as part of some previously included *.h header file.
+ * Without further action, the new include would just be ignored,
+ * and functions would effectively _not_ be inlined (silent failure).
+ * The following macros solve this situation by prefixing all inlined names,
+ * avoiding naming collision with previous inclusions.
+ */
+ /* Before that, we unconditionally #undef all symbols,
+ * in case they were already defined with XXH_NAMESPACE.
+ * They will then be redefined for XXH_INLINE_ALL
+ */
+# undef XXH_versionNumber
+ /* XXH32 */
+# undef XXH32
+# undef XXH32_createState
+# undef XXH32_freeState
+# undef XXH32_reset
+# undef XXH32_update
+# undef XXH32_digest
+# undef XXH32_copyState
+# undef XXH32_canonicalFromHash
+# undef XXH32_hashFromCanonical
+ /* XXH64 */
+# undef XXH64
+# undef XXH64_createState
+# undef XXH64_freeState
+# undef XXH64_reset
+# undef XXH64_update
+# undef XXH64_digest
+# undef XXH64_copyState
+# undef XXH64_canonicalFromHash
+# undef XXH64_hashFromCanonical
+ /* XXH3_64bits */
+# undef XXH3_64bits
+# undef XXH3_64bits_withSecret
+# undef XXH3_64bits_withSeed
+# undef XXH3_64bits_withSecretandSeed
+# undef XXH3_createState
+# undef XXH3_freeState
+# undef XXH3_copyState
+# undef XXH3_64bits_reset
+# undef XXH3_64bits_reset_withSeed
+# undef XXH3_64bits_reset_withSecret
+# undef XXH3_64bits_update
+# undef XXH3_64bits_digest
+# undef XXH3_generateSecret
+ /* XXH3_128bits */
+# undef XXH128
+# undef XXH3_128bits
+# undef XXH3_128bits_withSeed
+# undef XXH3_128bits_withSecret
+# undef XXH3_128bits_reset
+# undef XXH3_128bits_reset_withSeed
+# undef XXH3_128bits_reset_withSecret
+# undef XXH3_128bits_reset_withSecretandSeed
+# undef XXH3_128bits_update
+# undef XXH3_128bits_digest
+# undef XXH128_isEqual
+# undef XXH128_cmp
+# undef XXH128_canonicalFromHash
+# undef XXH128_hashFromCanonical
+ /* Finally, free the namespace itself */
+# undef XXH_NAMESPACE
+
+ /* employ the namespace for XXH_INLINE_ALL */
+# define XXH_NAMESPACE XXH_INLINE_
+ /*
+ * Some identifiers (enums, type names) are not symbols,
+ * but they must nonetheless be renamed to avoid redeclaration.
+ * Alternative solution: do not redeclare them.
+ * However, this requires some #ifdefs, and has a more dispersed impact.
+ * Meanwhile, renaming can be achieved in a single place.
+ */
+# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
+# define XXH_OK XXH_IPREF(XXH_OK)
+# define XXH_ERROR XXH_IPREF(XXH_ERROR)
+# define XXH_errorcode XXH_IPREF(XXH_errorcode)
+# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
+# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
+# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
+# define XXH32_state_s XXH_IPREF(XXH32_state_s)
+# define XXH32_state_t XXH_IPREF(XXH32_state_t)
+# define XXH64_state_s XXH_IPREF(XXH64_state_s)
+# define XXH64_state_t XXH_IPREF(XXH64_state_t)
+# define XXH3_state_s XXH_IPREF(XXH3_state_s)
+# define XXH3_state_t XXH_IPREF(XXH3_state_t)
+# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
+ /* Ensure the header is parsed again, even if it was previously included */
+# undef XXHASH_H_5627135585666179
+# undef XXHASH_H_STATIC_13879238742
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/* ****************************************************************
+ * Stable API
+ *****************************************************************/
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+/*! @brief Marks a global symbol. */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+/* XXH32 */
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+/* XXH64 */
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+/* XXH3_64bits */
+# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
+# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
+# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 8
+#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded with two decimal digits per component */
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
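+
+/* A minimal version-check sketch (illustrative) : v0.8.2 encodes as
+ * 0*10000 + 8*100 + 2 == 802, so requiring at least v0.8.0 reads :
+ *
+ *   #if XXH_VERSION_NUMBER < 800
+ *   #  error "xxHash >= v0.8.0 required"
+ *   #endif
+ */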
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to the header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Common basic types
+******************************/
+#include <stddef.h> /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+ XXH_OK = 0, /*!< OK */
+ XXH_ERROR /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint32_t XXH32_hash_t;
+
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform: need a 32-bit type"
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ * XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ * and 64-bit systems, and offers true 64/128 bit hash results.
+ *
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
+ * @{
+ */
+
+/*!
+ * @brief Calculates the 32-bit hash of @p input using xxHash32.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * See @ref single_shot_example "Single Shot Example" for an example.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 32-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 32-bit hash value.
+ *
+ * @see
+ * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
+
+#ifndef XXH_NO_STREAM
+/*!
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * An XXH state must first be allocated using `XXH*_createState()`.
+ *
+ * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
+ *
+ * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
+ *
+ * The function returns an error code, with 0 meaning OK, and any other value
+ * meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
+ * This function returns the nn-bit hash value as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a
+ * digest, and generate new hash values later on by invoking `XXH*_digest()`.
+ *
+ * When done, release the state using `XXH*_freeState()`.
+ *
+ * @see streaming_example at the top of @ref xxhash.h for an example.
+ */
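+
+/* A minimal streaming sketch (illustrative; `chunk1`/`chunk2` and their sizes
+ * are assumed caller-provided) :
+ *
+ *   XXH32_state_t* const st = XXH32_createState();
+ *   if (st != NULL
+ *       && XXH32_reset(st, 0) == XXH_OK
+ *       && XXH32_update(st, chunk1, size1) == XXH_OK
+ *       && XXH32_update(st, chunk2, size2) == XXH_OK) {
+ *       XXH32_hash_t const h = XXH32_digest(st);
+ *       // h equals XXH32() computed over the concatenated chunks
+ *   }
+ *   XXH32_freeState(st);
+ */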
+
+/*!
+ * @typedef struct XXH32_state_s XXH32_state_t
+ * @brief The opaque state struct for the XXH32 streaming API.
+ *
+ * @see XXH32_state_s for details.
+ */
+typedef struct XXH32_state_s XXH32_state_t;
+
+/*!
+ * @brief Allocates an @ref XXH32_state_t.
+ *
+ * Must be freed with XXH32_freeState().
+ * @return An allocated XXH32_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
+/*!
+ * @brief Frees an @ref XXH32_state_t.
+ *
+ * Must be allocated with XXH32_createState().
+ * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+/*!
+ * @brief Copies one @ref XXH32_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH32_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH32_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ * Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what the order is on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+ unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
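+
+/* A minimal canonical-form sketch (illustrative; `hash` and the FILE* `out`
+ * are assumed caller-provided) :
+ *
+ *   XXH32_canonical_t c;
+ *   XXH32_canonicalFromHash(&c, hash);
+ *   fwrite(c.digest, 1, sizeof(c.digest), out);   // always 4 big-endian bytes
+ *   // later, after reading those 4 bytes back into `c` :
+ *   XXH32_hash_t const h = XXH32_hashFromCanonical(&c);
+ */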
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to the correct value once it has been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+ /* LP64 ABI says uint64_t is unsigned long */
+ typedef unsigned long XXH64_hash_t;
+# else
+ /* the following type must have a width of 64 bits */
+ typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ * XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results.
+ * It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit hash.
+ *
+ * @see
+ * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*!
+ * @brief The opaque state struct for the XXH64 streaming API.
+ *
+ * @see XXH64_state_s for details.
+ */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*!
+ * @brief Allocates an @ref XXH64_state_t.
+ *
+ * Must be freed with XXH64_freeState().
+ * @return An allocated XXH64_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
+
+/*!
+ * @brief Frees an @ref XXH64_state_t.
+ *
+ * Must be allocated with XXH64_createState().
+ * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH64_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH64_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH64_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH64_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH64_state_t.
+ *
+ * @note
+ * Calling XXH64_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash64 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+/******* Canonical representation *******/
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
+ */
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
+ *
+ * @param dst The @ref XXH64_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH64_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
+ *
+ * @param src The @ref XXH64_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
+
+#ifndef XXH_NO_XXH3
+
+/*!
+ * @}
+ * ************************************************************************
+ * @defgroup XXH3_family XXH3 family
+ * @ingroup public
+ * @{
+ *
+ * XXH3 is a more recent hash algorithm featuring:
+ * - Improved speed for both small and large inputs
+ * - True 64-bit and 128-bit outputs
+ * - SIMD acceleration
+ * - Improved 32-bit viability
+ *
+ * Speed analysis methodology is explained here:
+ *
+ * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
+ *
+ * Compared to XXH64, expect XXH3 to run approximately
+ * 2x faster on large inputs and >3x faster on small ones;
+ * exact differences vary depending on the platform.
+ *
+ * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
+ * but does not require it.
+ * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
+ * at competitive speeds, even without vector support. Further details are
+ * explained in the implementation.
+ *
+ * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
+ * implementations for many common platforms:
+ * - AVX512
+ * - AVX2
+ * - SSE2
+ * - ARM NEON
+ * - WebAssembly SIMD128
+ * - POWER8 VSX
+ * - s390x ZVector
+ * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
+ * selects the best version according to predefined macros. For the x86 family, an
+ * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
+ *
+ * The XXH3 implementation is portable:
+ * it has a generic C90 formulation that can be compiled on any platform,
+ * and all implementations generate exactly the same hash value on all platforms.
+ * Starting from v0.8.0, it's also labelled "stable", meaning that
+ * any future version will also generate the same hash value.
+ *
+ * XXH3 offers 2 variants, _64bits and _128bits.
+ *
+ * When only 64 bits are needed, prefer invoking the _64bits variant, as it
+ * reduces the amount of mixing, resulting in faster speed on small inputs.
+ * It's also generally simpler to manipulate a scalar return type than a struct.
+ *
+ * The API supports one-shot hashing, streaming mode, and custom secrets.
+ */
+/*-**********************************************************************
+* XXH3 64-bit variant
+************************************************************************/
+
+/*!
+ * @brief 64-bit unseeded variant of XXH3.
+ *
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
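+
+/*!
+ * A minimal usage sketch (illustrative only; names are arbitrary):
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t hash_string(const char* s)
+ * {
+ *     // One-shot, unseeded: the fastest path for a single buffer.
+ *     return XXH3_64bits(s, strlen(s));
+ * }
+ * @endcode
+ */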
+
+/*!
+ * @brief 64-bit seeded variant of XXH3.
+ *
+ * This variant generates a custom secret on the fly, based on the default
+ * secret altered using the `seed` value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * @param input The data to hash.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief 64-bit variant of XXH3 with a custom "secret".
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing "XXH3_generateSecret()" instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
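+
+/*!
+ * A hedged sketch of secret-based hashing (illustrative only; assumes
+ * `g_secret` was filled once with high-entropy bytes, e.g. by
+ * XXH3_generateSecret(), declared further below):
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * static unsigned char g_secret[XXH3_SECRET_SIZE_MIN]; // filled elsewhere, once
+ *
+ * XXH64_hash_t hash_with_secret(const void* data, size_t len)
+ * {
+ *     // sizeof(g_secret) satisfies the >= XXH3_SECRET_SIZE_MIN requirement.
+ *     return XXH3_64bits_withSecret(data, len, g_secret, sizeof(g_secret));
+ * }
+ * @endcode
+ */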
+
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ */
+
+/*!
+ * @brief The state struct for the XXH3 streaming API.
+ *
+ * @see XXH3_state_s for details.
+ */
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * The digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * The digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced; it _must outlive_ the hash streaming session.
+ * Similar to the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
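+
+/*!
+ * A minimal streaming sketch (illustrative only; error handling abbreviated,
+ * names arbitrary). It hashes a FILE* in chunks without loading the whole
+ * content into memory:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t hash_file(FILE* f)
+ * {
+ *     XXH3_state_t* state = XXH3_createState(); // heap allocated, properly aligned
+ *     XXH64_hash_t result = 0;
+ *     char buf[4096];
+ *     size_t n;
+ *     if (state != NULL && XXH3_64bits_reset(state) == XXH_OK) {
+ *         while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
+ *             XXH3_64bits_update(state, buf, n);
+ *         }
+ *         result = XXH3_64bits_digest(state); // state remains valid afterwards
+ *     }
+ *     XXH3_freeState(state);
+ *     return result;
+ * }
+ * @endcode
+ */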
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+ XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+ XXH64_hash_t high64; /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Unseeded 128-bit variant of XXH3
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
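+
+/*!
+ * A short illustrative sketch of consuming the two 64-bit halves of the
+ * result (names arbitrary):
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * void print_hash128(const char* s)
+ * {
+ *     XXH128_hash_t h = XXH3_128bits(s, strlen(s));
+ *     // high64 holds the upper half of the 128-bit value, low64 the lower half.
+ *     printf("%016llx%016llx\n",
+ *            (unsigned long long)h.high64, (unsigned long long)h.low64);
+ * }
+ * @endcode
+ */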
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have the same meaning as their 64-bit counterparts.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
+ * The digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
+ * The digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit streaming variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL. */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t values.
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return >0 if *h128_1 > *h128_2
+ *         =0 if *h128_1 == *h128_2
+ *         <0 if *h128_1 < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
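+
+/*!
+ * An illustrative sorting sketch (names arbitrary). XXH128_cmp() already has
+ * the `int (*)(const void*, const void*)` shape that `qsort()` expects:
+ * @code{.c}
+ * #include <stdlib.h>
+ * #include "xxhash.h"
+ *
+ * void sort_hashes(XXH128_hash_t* hashes, size_t count)
+ * {
+ *     qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
+ * }
+ * @endcode
+ */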
+
+
+/******* Canonical representation *******/
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
+ * Never **ever** access their members directly.
+ */
+
+/*!
+ * @internal
+ * @brief Structure for XXH32 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH32_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH64_state_s, XXH3_state_s
+ */
+struct XXH32_state_s {
+ XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+ XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+ XXH32_hash_t v[4]; /*!< Accumulator lanes */
+ XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
+ XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
+}; /* typedef'd to XXH32_state_t */
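+
+/*!
+ * An illustrative sketch of the static allocation this definition enables
+ * (assumes the streaming API is available; names arbitrary):
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY // expose the state definitions
+ * #include "xxhash.h"
+ *
+ * XXH32_hash_t hash_two_parts(const void* a, size_t alen,
+ *                             const void* b, size_t blen)
+ * {
+ *     XXH32_state_t state; // on the stack: no malloc(), no XXH32_createState()
+ *     XXH32_reset(&state, 0);
+ *     XXH32_update(&state, a, alen);
+ *     XXH32_update(&state, b, blen);
+ *     return XXH32_digest(&state);
+ * }
+ * @endcode
+ */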
+
+
+#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
+
+/*!
+ * @internal
+ * @brief Structure for XXH64 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH64_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH32_state_s, XXH3_state_s
+ */
+struct XXH64_state_s {
+ XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
+ XXH64_hash_t v[4]; /*!< Accumulator lanes */
+ XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
+   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway. */
+ XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
+}; /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+# include <stdalign.h>
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__GNUC__)
+# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+# define XXH_ALIGN(n) __declspec(align(n))
+#else
+# define XXH_ALIGN(n) /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+ && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+ && defined(__GNUC__)
+# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with a dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+ XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+ /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+ /*!< Used to store a custom secret generated from a seed. */
+ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+ /*!< The internal buffer. @see XXH32_state_s::mem32 */
+ XXH32_hash_t bufferedSize;
+ /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+ XXH32_hash_t useSeed;
+ /*!< Reserved field. Needed for padding on 64-bit. */
+ size_t nbStripesSoFar;
+  /*!< Number of stripes processed. */
+ XXH64_hash_t totalLen;
+ /*!< Total length hashed. 64-bit even on 32-bit targets. */
+ size_t nbStripesPerBlock;
+ /*!< Number of stripes per block. */
+ size_t secretLimit;
+ /*!< Size of @ref customSecret or @ref extSecret */
+ XXH64_hash_t seed;
+ /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+ XXH64_hash_t reserved64;
+ /*!< Reserved field. */
+ const unsigned char* extSecret;
+ /*!< Reference to an external secret for the _withSecret variants, NULL
+ * for other variants. */
+ /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on the stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * if its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) \
+ do { \
+ XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+ tmp_xxh3_state_ptr->seed = 0; \
+ tmp_xxh3_state_ptr->extSecret = NULL; \
+ } while(0)
+
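+/*!
+ * An illustrative sketch of stack allocation with XXH3_INITSTATE() (names
+ * arbitrary; assumes the streaming API is available):
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY // expose the XXH3_state_t definition
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t hash_seeded(const void* data, size_t len, XXH64_hash_t seed)
+ * {
+ *     XXH3_state_t state;     // the aligned type satisfies the 64-byte requirement
+ *     XXH3_INITSTATE(&state); // required because the first reset uses _withSeed
+ *     XXH3_64bits_reset_withSeed(&state, seed);
+ *     XXH3_64bits_update(&state, data, len);
+ *     return XXH3_64bits_digest(&state);
+ * }
+ * @endcode
+ */
+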
+
+/*!
+ * Simple alias to the pre-selected XXH3_128bits variant.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than a 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <string.h>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Hashes argv[2] using the entropy from argv[1].
+ * int main(int argc, char* argv[])
+ * {
+ * char secret[XXH3_SECRET_SIZE_MIN];
+ *     if (argc != 3) { return 1; }
+ * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ * XXH64_hash_t h = XXH3_64bits_withSecret(
+ * argv[2], strlen(argv[2]),
+ * secret, sizeof(secret)
+ * );
+ * printf("%016llx\n", (unsigned long long) h);
+ * }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ * #include <string>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Slow, seeds each time
+ * class HashSlow {
+ * XXH64_hash_t seed;
+ * public:
+ * HashSlow(XXH64_hash_t s) : seed{s} {}
+ * size_t operator()(const std::string& x) const {
+ * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ * }
+ * };
+ * // Fast, caches the seeded secret for future uses.
+ * class HashFast {
+ *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ * public:
+ * HashFast(XXH64_hash_t s) {
+ *         XXH3_generateSecret_fromSeed(secret, s);
+ * }
+ * size_t operator()(const std::string& x) const {
+ * return size_t{
+ * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ * };
+ * }
+ * };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes.
+ * @param seed The seed to seed the state.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance of flipping each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed);
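+
+/*!
+ * An illustrative sketch combining a seed-derived secret with the
+ * _withSecretandSeed() variant (names and the caching struct are arbitrary):
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ *
+ * typedef struct {
+ *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *     XXH64_hash_t  seed;
+ * } SeededCtx;
+ *
+ * void ctx_init(SeededCtx* ctx, XXH64_hash_t seed)
+ * {
+ *     ctx->seed = seed;
+ *     XXH3_generateSecret_fromSeed(ctx->secret, seed); // pay this cost once
+ * }
+ *
+ * XXH64_hash_t ctx_hash(const SeededCtx* ctx, const void* data, size_t len)
+ * {
+ *     // Same result as XXH3_64bits_withSeed(data, len, ctx->seed), but skips
+ *     // secret regeneration on "large" inputs.
+ *     return XXH3_64bits_withSecretandSeed(data, len,
+ *                                          ctx->secret, sizeof(ctx->secret),
+ *                                          ctx->seed);
+ * }
+ * @endcode
+ */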
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# define XXH_IMPLEMENTATION
+#endif
+
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires the implementation to be visible to the compiler,
+ * hence included alongside the header.
+ * Previously, xxhash.c was #included by the header when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in the /include directory.
+ *
+ * xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+ || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+# define XXH_IMPLEM_13a8737387
+
+/* *************************************
+* Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if you only use the @ref XXH32_family and have a strict C90 compiler.
+ */
+# define XXH_NO_LONG_LONG
+# undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The switch below allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ * @par
+ * Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ * eliminate the function call and treat it as an unaligned access.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ * @par
+ * Depends on compiler extensions and is therefore not portable.
+ * This method is safe _if_ your compiler supports it,
+ * and *generally* as fast or faster than `memcpy`.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ * @par
+ * Casts directly and dereferences. This method doesn't depend on the
+ * compiler, but it violates the C standard as it directly dereferences an
+ * unaligned pointer. It can generate buggy code on targets which do not
+ * support unaligned memory accesses, but in some circumstances, it's the
+ * only known way to get the most performance.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ * @par
+ * Also portable. This can generate the best code on old compilers which don't
+ * inline small `memcpy()` calls, and it might also be faster on big-endian
+ * systems which lack a native byteswap instruction. However, some compilers
+ * will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ * Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ * care, as what works on one compiler/platform/optimization level may cause
+ * another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+# define XXH_FORCE_MEMORY_ACCESS 0
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage of forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
+ * comes first.
+ * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ * conservative and disables hacks that increase code size. It implies the
+ * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
+ * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ * Performance may cry. For example, the single shot functions just use the
+ * streaming API.
+ */
+# define XXH_SIZE_OPT 0
+
+/*!
+ * @def XXH_FORCE_ALIGN_CHECK
+ * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
+ * and XXH64() only).
+ *
+ * This is an important performance trick for architectures without decent
+ * unaligned memory access performance.
+ *
+ * It checks for input alignment, and when conditions are met, uses a "fast
+ * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
+ * faster_ read speed.
+ *
+ * The check costs one initial branch per hash, which is generally negligible,
+ * but not zero.
+ *
+ * Moreover, it's not useful to generate an additional code path if memory
+ * access uses the same instruction for both aligned and unaligned
+ * addresses (e.g. x86 and aarch64).
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * The alignment check is automatically disabled on x86, x64, ARM64, and some ARM
+ * chips, which are platforms known to offer good unaligned memory access performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
+ */
+# define XXH_FORCE_ALIGN_CHECK 0
+
+/*!
+ * @def XXH_NO_INLINE_HINTS
+ * @brief When non-zero, sets all functions to `static`.
+ *
+ * By default, xxHash tries to force the compiler to inline almost all internal
+ * functions.
+ *
+ * This can usually improve performance due to reduced jumping and improved
+ * constant folding, but significantly increases the size of the binary which
+ * might not be favorable.
+ *
+ * Additionally, sometimes the forced inlining can be detrimental to performance,
+ * depending on the architecture.
+ *
+ * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
+ * compiler full control over whether to inline them or not.
+ *
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
+ */
+# define XXH_NO_INLINE_HINTS 0
+
+/*!
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
+ *
+ * However, if the secret size is not known, it doesn't have any benefit. This
+ * happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
+ *
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with function
+ * pointers that are *sometimes* force-inlined at -Og, an optimization level that
+ * is impossible to detect automatically.
+ */
+# define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable,
+ * but depending on the exact architecture, a jump may be preferable instead.
+ *
+ * This setting only possibly makes a difference for very small inputs.
+ */
+# define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+# define XXH_OLD_NAMES
+# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used,
+ * disabling them can improve code size significantly, especially with the
+ * @ref XXH3_family, which tends to make constant-folded copies of itself.
+ */
+# define XXH_NO_STREAM
+# undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+  /* Prefer __packed__ structures (method 1) for GCC.
+   * GCC targeting < ARMv7 with unaligned access (e.g. Raspbian armhf) still
+   * emits byte shifting for them, so we use memcpy instead, which for some
+   * reason does generate unaligned loads. */
+# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+ /* default to 1 for -Os or -Oz */
+# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+# define XXH_SIZE_OPT 1
+# else
+# define XXH_SIZE_OPT 0
+# endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+ /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+# if XXH_SIZE_OPT >= 1 || \
+ defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
+# define XXH_NO_INLINE_HINTS 1
+# else
+# define XXH_NO_INLINE_HINTS 0
+# endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+ || !defined(XXH_INLINE_ALL)
+# define XXH3_INLINE_SECRET 0
+# else
+# define XXH3_INLINE_SECRET 1
+# endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+# define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc / free.
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+ return memcpy(dest,src,size);
+}
+
+#include <limits.h> /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS /* disable inlining hints */
+# if defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __attribute__((unused))
+# else
+# define XXH_FORCE_INLINE static
+# endif
+# define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+# define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER) /* Visual Studio */
+# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
+# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
+#else
+# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+* Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+# ifdef DEBUGLEVEL /* backwards compat */
+# define XXH_DEBUGLEVEL DEBUGLEVEL
+# else
+# define XXH_DEBUGLEVEL 0
+# endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+# include <assert.h> /* note: can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# if defined(__INTEL_COMPILER)
+# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
+# else
+# define XXH_ASSERT(c) XXH_ASSUME(c)
+# endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+# else
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+# endif
+# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+# define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors which use the "w" constraint, on
+ * Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
+
+/* *************************************
+* Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+#ifdef XXH_OLD_NAMES
+# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+# define BYTE xxh_u8
+# define U8 xxh_u8
+# define U32 xxh_u32
+#endif
+
+/* *** Memory access *** */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_read32(const void* ptr)
+ * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit native endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readBE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit big endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
+ * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
+ * always @ref XXH_alignment::XXH_unaligned.
+ *
+ * @param ptr The pointer to read from.
+ * @param align Whether @p ptr is aligned.
+ * @pre
+ * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
+ * aligned.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/*
+ * Force direct memory access. Only works on CPU which support unaligned memory
+ * access in hardware.
+ */
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
+ return *((const xxh_unalign32*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+ xxh_u32 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* *** Endianness *** */
+
+/*!
+ * @ingroup tuning
+ * @def XXH_CPU_LITTLE_ENDIAN
+ * @brief Whether the target is little endian.
+ *
+ * Defined to 1 if the target is little endian, or 0 if it is big endian.
+ * It can be defined externally, for example on the compiler command line.
+ *
+ * If it is not defined,
+ * a runtime check (which is usually constant folded) is used instead.
+ *
+ * @note
+ * This is not necessarily defined to an integer constant.
+ *
+ * @see XXH_isLittleEndian() for the runtime check.
+ */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+/*
+ * Try to detect endianness automatically, to avoid the nonstandard behavior
+ * in `XXH_isLittleEndian()`
+ */
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
+/*!
+ * @internal
+ * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ *
+ * Most compilers will constant fold this.
+ */
+static int XXH_isLittleEndian(void)
+{
+ /*
+ * Portable and well-defined behavior.
+ * Don't use static: it is detrimental to performance.
+ */
+ const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
+#endif
+
+
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef __has_builtin
+# define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define XXH_HAS_BUILTIN(x) 0
+#endif
+
+
+
+/*
+ * C23 and future versions have standard "unreachable()".
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * # include <stddef.h>
+ * # ifdef unreachable
+ * # define XXH_UNREACHABLE() unreachable()
+ * # endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * # include <utility>
+ * # define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC12
+ */
+
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+# define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+# define XXH_UNREACHABLE() __assume(0)
+
+#else
+# define XXH_UNREACHABLE()
+#endif
+
+#if XXH_HAS_BUILTIN(__builtin_assume)
+# define XXH_ASSUME(c) __builtin_assume(c)
+#else
+# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ * @p r > 0 && @p r < 32
+ * @note
+ * @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+ && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
+#elif defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+typedef enum {
+ XXH_aligned, /*!< Aligned */
+ XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u32)bytePtr[1] << 8)
+ | ((xxh_u32)bytePtr[2] << 16)
+ | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[3]
+ | ((xxh_u32)bytePtr[2] << 8)
+ | ((xxh_u32)bytePtr[1] << 16)
+ | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+}
+
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned) {
+ return XXH_readLE32(ptr);
+ } else {
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+ }
+}
+
+
+/* *************************************
+* Misc
+***************************************/
+/*! @ingroup public */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @defgroup XXH32_impl XXH32 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH32 implementation.
+ * @{
+ */
+ /* #define instead of static const, to be used as initializers */
+#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
+#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
+#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
+#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
+#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME32_1 XXH_PRIME32_1
+# define PRIME32_2 XXH_PRIME32_2
+# define PRIME32_3 XXH_PRIME32_3
+# define PRIME32_4 XXH_PRIME32_4
+# define PRIME32_5 XXH_PRIME32_5
+#endif
+
+/*!
+ * @internal
+ * @brief Normal stripe processing routine.
+ *
+ * This shuffles the bits so that any bit from @p input impacts several bits in
+ * @p acc.
+ *
+ * @param acc The accumulator lane.
+ * @param input The stripe of input to mix.
+ * @return The mixed accumulator lane.
+ */
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+ acc += input * XXH_PRIME32_2;
+ acc = XXH_rotl32(acc, 13);
+ acc *= XXH_PRIME32_1;
+#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+ /*
+ * UGLY HACK:
+ * A compiler fence is the only thing that prevents GCC and Clang from
+ * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
+ * reason) without globally disabling SSE4.1.
+ *
+ * The reason we want to avoid vectorization is because despite working on
+ * 4 integers at a time, there are multiple factors slowing XXH32 down on
+ * SSE4:
+ * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
+ * newer chips!) making it slightly slower to multiply four integers at
+ * once compared to four integers independently. Even when pmulld was
+ * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
+ * just to multiply unless doing a long operation.
+ *
+ * - Four instructions are required to rotate,
+ *     movdqa tmp, v // not required with VEX encoding
+ * pslld tmp, 13 // tmp <<= 13
+ * psrld v, 19 // x >>= 19
+ * por v, tmp // x |= tmp
+ * compared to one for scalar:
+ * roll v, 13 // reliably fast across the board
+ * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+ *
+ * - Instruction level parallelism is actually more beneficial here because
+ * the SIMD actually serializes this operation: While v1 is rotating, v2
+ * can load data, while v3 can multiply. SSE forces them to operate
+ * together.
+ *
+ * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
+ * the loop. NEON is only faster on the A53, and with the newer cores, it is less
+ * than half the speed.
+ *
+ * Additionally, this is used on WASM SIMD128 because it JITs to the same
+ * SIMD instructions and has the same issue.
+ */
+ XXH_COMPILER_GUARD(acc);
+#endif
+ return acc;
+}
+
+/*!
+ * @internal
+ * @brief Mixes all bits to finalize the hash.
+ *
+ * The final mix ensures that all input bits have a chance to impact any bit in
+ * the output digest, resulting in an unbiased distribution.
+ *
+ * @param hash The hash to avalanche.
+ * @return The avalanched hash.
+ */
+static xxh_u32 XXH32_avalanche(xxh_u32 hash)
+{
+ hash ^= hash >> 15;
+ hash *= XXH_PRIME32_2;
+ hash ^= hash >> 13;
+ hash *= XXH_PRIME32_3;
+ hash ^= hash >> 16;
+ return hash;
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-15 bytes of @p ptr.
+ *
+ * There may be up to 15 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 16.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash.
+ * @see XXH64_finalize().
+ */
+static XXH_PUREF xxh_u32
+XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+#define XXH_PROCESS1 do { \
+ hash += (*ptr++) * XXH_PRIME32_5; \
+ hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
+} while (0)
+
+#define XXH_PROCESS4 do { \
+ hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
+ ptr += 4; \
+ hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
+} while (0)
+
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+
+ /* Compact rerolled version; generally faster */
+ if (!XXH32_ENDJMP) {
+ len &= 15;
+ while (len >= 4) {
+ XXH_PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ XXH_PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(hash);
+ } else {
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 8: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 4: XXH_PROCESS4;
+ return XXH32_avalanche(hash);
+
+ case 13: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 9: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 5: XXH_PROCESS4;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 14: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 10: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 6: XXH_PROCESS4;
+ XXH_PROCESS1;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 15: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 11: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 7: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 3: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 2: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 1: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 0: return XXH32_avalanche(hash);
+ }
+ XXH_ASSERT(0);
+ return hash; /* reaching this point is deemed impossible */
+ }
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1 XXH_PROCESS1
+# define PROCESS4 XXH_PROCESS4
+#else
+# undef XXH_PROCESS1
+# undef XXH_PROCESS4
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH32().
+ *
+ * @param input , len , seed Directly passed from @ref XXH32().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+ xxh_u32 h32;
+
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=16) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ xxh_u32 v2 = seed + XXH_PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - XXH_PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + XXH_PRIME32_5;
+ }
+
+ h32 += (xxh_u32)len;
+
+ return XXH32_finalize(h32, input, len&15, align);
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, (const xxh_u8*)input, len);
+ return XXH32_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+    if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
+}
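+
+/*
+ * Illustrative usage sketch, not part of upstream xxHash: a one-shot XXH32
+ * digest of a byte buffer, using the public function defined above. The
+ * buffer contents and the seed value 0 are arbitrary examples.
+ *
+ *   const char msg[] = "xxhash";
+ *   XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0);
+ *   // h is the 32-bit digest of "xxhash" with seed 0
+ */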
+
+
+
+/******* Hash streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ statePtr->v[1] = seed + XXH_PRIME32_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME32_1;
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len_32 += (XXH32_hash_t)len;
+ state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const xxh_u8* const limit = bEnd - 16;
+
+ do {
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
+{
+ xxh_u32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v[0], 1)
+ + XXH_rotl32(state->v[1], 7)
+ + XXH_rotl32(state->v[2], 12)
+ + XXH_rotl32(state->v[3], 18);
+ } else {
+ h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
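+
+/*
+ * Illustrative streaming sketch, not part of upstream xxHash: feeding input
+ * in chunks through the streaming API above. The chunk sizes and the seed
+ * are arbitrary, and error handling is elided for brevity.
+ *
+ *   XXH32_state_t* st = XXH32_createState();
+ *   if (st != NULL) {
+ *       XXH32_reset(st, 0);
+ *       XXH32_update(st, "hello ", 6);
+ *       XXH32_update(st, "world", 5);
+ *       XXH32_hash_t h = XXH32_digest(st);  // equals XXH32("hello world", 11, 0)
+ *       XXH32_freeState(st);
+ *   }
+ */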
+
+/******* Canonical representation *******/
+
+/*!
+ * @ingroup XXH32_family
+ * The default return values from XXH functions are unsigned 32- and 64-bit
+ * integers.
+ *
+ * The canonical representation uses the big-endian convention, the same
+ * convention as human-readable numbers (most significant digits first).
+ *
+ * This way, hash values can be written into a file or buffer, remaining
+ * comparable across different systems.
+ *
+ * The following functions allow transformation of hash values to and from their
+ * canonical format.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
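+
+/*
+ * Illustrative round-trip sketch, not part of upstream xxHash: serializing a
+ * digest in the canonical big-endian form described above, then reading it
+ * back. The values involved are arbitrary.
+ *
+ *   XXH32_hash_t h = XXH32("abc", 3, 0);
+ *   XXH32_canonical_t c;
+ *   XXH32_canonicalFromHash(&c, h);   // c now holds the big-endian bytes
+ *   XXH32_hash_t back = XXH32_hashFromCanonical(&c);
+ *   // back == h on both little- and big-endian hosts
+ */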
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @ingroup impl
+ * @{
+ */
+/******* Memory access *******/
+
+typedef XXH64_hash_t xxh_u64;
+
+#ifdef XXH_OLD_NAMES
+# define U64 xxh_u64
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE64 and XXH_readBE64.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ return *(const xxh_u64*) memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
+ return *((const xxh_unalign64*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ xxh_u64 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static xxh_u64 XXH_swap64(xxh_u64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u64)bytePtr[1] << 8)
+ | ((xxh_u64)bytePtr[2] << 16)
+ | ((xxh_u64)bytePtr[3] << 24)
+ | ((xxh_u64)bytePtr[4] << 32)
+ | ((xxh_u64)bytePtr[5] << 40)
+ | ((xxh_u64)bytePtr[6] << 48)
+ | ((xxh_u64)bytePtr[7] << 56);
+}
+
+XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[7]
+ | ((xxh_u64)bytePtr[6] << 8)
+ | ((xxh_u64)bytePtr[5] << 16)
+ | ((xxh_u64)bytePtr[4] << 24)
+ | ((xxh_u64)bytePtr[3] << 32)
+ | ((xxh_u64)bytePtr[2] << 40)
+ | ((xxh_u64)bytePtr[1] << 48)
+ | ((xxh_u64)bytePtr[0] << 56);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+}
+
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return XXH_readLE64(ptr);
+ else
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
+}
+
+
+/******* xxh64 *******/
+/*!
+ * @}
+ * @defgroup XXH64_impl XXH64 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH64 implementation.
+ * @{
+ */
+/* #define rather than static const, to be used as initializers */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME64_1 XXH_PRIME64_1
+# define PRIME64_2 XXH_PRIME64_2
+# define PRIME64_3 XXH_PRIME64_3
+# define PRIME64_4 XXH_PRIME64_4
+# define PRIME64_5 XXH_PRIME64_5
+#endif
+
+/*! @copydoc XXH32_round */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+ acc += input * XXH_PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= XXH_PRIME64_1;
+ return acc;
+}
+
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+ return acc;
+}
+
+/*! @copydoc XXH32_avalanche */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+ hash ^= hash >> 33;
+ hash *= XXH_PRIME64_2;
+ hash ^= hash >> 29;
+ hash *= XXH_PRIME64_3;
+ hash ^= hash >> 32;
+ return hash;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+ len &= 31;
+ while (len >= 8) {
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+ ptr += 8;
+ hash ^= k1;
+ hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ len -= 8;
+ }
+ if (len >= 4) {
+ hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+ ptr += 4;
+ hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ len -= 4;
+ }
+ while (len > 0) {
+ hash ^= (*ptr++) * XXH_PRIME64_5;
+ hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+ --len;
+ }
+ return XXH64_avalanche(hash);
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1_64 XXH_PROCESS1_64
+# define PROCESS4_64 XXH_PROCESS4_64
+# define PROCESS8_64 XXH_PROCESS8_64
+#else
+# undef XXH_PROCESS1_64
+# undef XXH_PROCESS4_64
+# undef XXH_PROCESS8_64
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+ xxh_u64 h64;
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=32) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 31;
+ xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ xxh_u64 v2 = seed + XXH_PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) len;
+
+ return XXH64_finalize(h64, input, len, align);
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, (const xxh_u8*)input, len);
+ return XXH64_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
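+
+/*
+ * Illustrative usage sketch, not part of upstream xxHash, mirroring the XXH32
+ * example: a one-shot 64-bit digest. The input and seed are arbitrary.
+ *
+ *   const char msg[] = "xxhash";
+ *   XXH64_hash_t h = XXH64(msg, sizeof(msg) - 1, 0);
+ */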
+
+/******* Hash Streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ statePtr->v[1] = seed + XXH_PRIME64_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME64_1;
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const xxh_u8* const limit = bEnd - 32;
+
+ do {
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
+{
+ xxh_u64 h64;
+
+ if (state->total_len >= 32) {
+ h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
+ h64 = XXH64_mergeRound(h64, state->v[0]);
+ h64 = XXH64_mergeRound(h64, state->v[1]);
+ h64 = XXH64_mergeRound(h64, state->v[2]);
+ h64 = XXH64_mergeRound(h64, state->v[3]);
+ } else {
+ h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) state->total_len;
+
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+#ifndef XXH_NO_XXH3
+
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+/*!
+ * @}
+ * @defgroup XXH3_impl XXH3 implementation
+ * @ingroup impl
+ * @{
+ */
+
+/* === Compiler specifics === */
+
+#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
+# define XXH_RESTRICT /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
+# define XXH_RESTRICT restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+ || (defined (__clang__)) \
+ || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+ || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+# define XXH_RESTRICT __restrict
+#else
+# define XXH_RESTRICT /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
+ || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+ || defined(__clang__)
+# define XXH_likely(x) __builtin_expect(x, 1)
+# define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+# define XXH_likely(x) (x)
+# define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+# ifdef __has_include
+# define XXH_HAS_INCLUDE(x) __has_include(x)
+# else
+# define XXH_HAS_INCLUDE(x) 0
+# endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# if defined(__ARM_FEATURE_SVE)
+# include <arm_sve.h>
+# endif
+# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+ || (defined(_M_ARM) && _M_ARM >= 7) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC) \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+# define inline __inline__ /* circumvent a clang bug */
+# include <arm_neon.h>
+# undef inline
+# elif defined(__AVX2__)
+# include <immintrin.h>
+# elif defined(__SSE2__)
+# include <emmintrin.h>
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ * xxh_u64 x;
+ * x ^= (x >> 47); // good
+ * x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ * x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ * // note: funnel shifts are not usually cheap.
+ * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ * x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ * - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ * 32 bits in the shift.
+ * - The shift result will always fit in the lower 32 bits, and therefore,
+ * we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ * - Usable unaligned access
+ * - A 32-bit or 64-bit ALU
+ * - If 32-bit, a decent ADC instruction
+ * - A 32 or 64-bit multiply with a 64-bit result
+ * - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
+ * Lo registers, and this shuffling results in thousands more MOVs than A32.
+ *
+ * A32 and T32 don't have this limitation. They can access all 14 registers,
+ * do a 32->64 multiply with UMULL, and the flexible operand allowing free
+ * shifts is helpful, too.
+ *
+ * Therefore, we do a quick sanity check.
+ *
+ * If compiling Thumb-1 for a target which supports ARM instructions, we will
+ * emit a warning, as it is not a "sane" platform to compile for.
+ *
+ * Usually, if this happens, it is because of an accident and you probably need
+ * to specify -march, as you likely meant to compile for a newer architecture.
+ *
+ * Credit: large sections of the vectorized and asm source code paths
+ * have been contributed by @easyaspi314.
+ */
+#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
+# warning "XXH3 is highly inefficient without ARM or Thumb-2."
+#endif
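+
+/*
+ * Illustrative sketch, not part of upstream xxHash, of the shift-width
+ * argument above, with a 64-bit value emulated as two 32-bit halves. The
+ * struct and function names are hypothetical.
+ *
+ *   typedef struct { xxh_u32 lo, hi; } u64_pair;
+ *
+ *   // x ^= x >> 47: the shifted bits come entirely from the high half and
+ *   // land entirely in the low half, so one shift and one XOR suffice.
+ *   static void xorshift47(u64_pair* x) { x->lo ^= x->hi >> (47 - 32); }
+ *
+ *   // x ^= x >> 13: bits cross the 32-bit boundary, so the low half needs
+ *   // a funnel shift (two shifts plus an OR) and the high half its own XOR.
+ *   static void xorshift13(u64_pair* x)
+ *   {
+ *       x->lo ^= (x->lo >> 13) | (x->hi << (32 - 13));
+ *       x->hi ^= (x->hi >> 13);
+ *   }
+ */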
+
+/* ==========================================
+ * Vectorization detection
+ * ========================================== */
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @ingroup tuning
+ * @brief Overrides the vectorization implementation chosen for XXH3.
+ *
+ * Can be defined to 0 to disable SIMD or any of the values mentioned in
+ * @ref XXH_VECTOR_TYPE.
+ *
+ * If this is not defined, it uses predefined macros to determine the best
+ * implementation.
+ */
+# define XXH_VECTOR XXH_SCALAR
+/*!
+ * @ingroup tuning
+ * @brief Possible values for @ref XXH_VECTOR.
+ *
+ * Note that these are actually implemented as macros.
+ *
+ * If this is not defined, it is detected automatically.
+ * The internal macro XXH_X86DISPATCH overrides this.
+ */
+enum XXH_VECTOR_TYPE /* fake enum */ {
+ XXH_SCALAR = 0, /*!< Portable scalar version */
+ XXH_SSE2 = 1, /*!<
+ * SSE2 for Pentium 4, Opteron, all x86_64.
+ *
+ * @note SSE2 is also guaranteed on Windows 10, macOS, and
+ * Android x86.
+ */
+ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
+ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
+ XXH_NEON = 4, /*!<
+ * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
+ * via the SIMDeverywhere polyfill provided with the
+ * Emscripten SDK.
+ */
+ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
+ XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
+};
+/*!
+ * @ingroup tuning
+ * @brief Selects the minimum alignment for XXH3's accumulators.
+ *
+ * When using SIMD, this should match the alignment required for said vector
+ * type, so, for example, 32 for AVX2.
+ *
+ * Default: Auto detected.
+ */
+# define XXH_ACC_ALIGN 8
+#endif
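+
+/*
+ * Illustrative override, not part of upstream xxHash: as noted below,
+ * XXH_VECTOR can be pinned from the build line, e.g. forcing the portable
+ * scalar path (the compiler invocation is a hypothetical example):
+ *
+ *   cc -DXXH_VECTOR=0 ...   // 0 == XXH_SCALAR
+ */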
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+# define XXH_SCALAR 0
+# define XXH_SSE2 1
+# define XXH_AVX2 2
+# define XXH_AVX512 3
+# define XXH_NEON 4
+# define XXH_VSX 5
+# define XXH_SVE 6
+#endif
+
+#ifndef XXH_VECTOR /* can be defined on command line */
+# if defined(__ARM_FEATURE_SVE)
+# define XXH_VECTOR XXH_SVE
+# elif ( \
+ defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+ || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+ ) && ( \
+ defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+ )
+# define XXH_VECTOR XXH_NEON
+# elif defined(__AVX512F__)
+# define XXH_VECTOR XXH_AVX512
+# elif defined(__AVX2__)
+# define XXH_VECTOR XXH_AVX2
+# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+# define XXH_VECTOR XXH_SSE2
+# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+ || (defined(__s390x__) && defined(__VEC__)) \
+ && defined(__GNUC__) /* TODO: IBM XL */
+# define XXH_VECTOR XXH_VSX
+# else
+# define XXH_VECTOR XXH_SCALAR
+# endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+# ifdef _MSC_VER
+# pragma warning(once : 4606)
+# else
+# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+# endif
+# undef XXH_VECTOR
+# define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+# if defined(XXH_X86DISPATCH)
+# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
+# elif XXH_VECTOR == XXH_SCALAR /* scalar */
+# define XXH_ACC_ALIGN 8
+# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
+# define XXH_ACC_ALIGN 32
+# elif XXH_VECTOR == XXH_NEON /* neon */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_VSX /* vsx */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
+# define XXH_ACC_ALIGN 64
+# elif XXH_VECTOR == XXH_SVE /* sve */
+# define XXH_ACC_ALIGN 64
+# endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+ || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+# define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_ALIASING __attribute__((may_alias))
+#else
+# define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ * -O2 -mavx2 -march=haswell
+ * or
+ * -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ *
+ * Fortunately, we can control the first one with a pragma that forces GCC into
+ * -O2, but the other one we can't control without "failed to inline always
+ * inline function due to target mismatch" warnings.
+ */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC push_options
+# pragma GCC optimize("-O2")
+#endif
+
+#if XXH_VECTOR == XXH_NEON
+
+/*
+ * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
+ * optimizes out the entire hashLong loop because of the aliasing violation.
+ *
+ * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
+ * so the only option is to mark it as aliasing.
+ */
+typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
+
+/*!
+ * @internal
+ * @brief `vld1q_u64` but faster and alignment-safe.
+ *
+ * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
+ * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
+ *
+ * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
+ * prohibits load-store optimizations. Therefore, a direct dereference is used.
+ *
+ * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
+ * unaligned load.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
+{
+ return *(xxh_aliasing_uint64x2_t const *)ptr;
+}
+#else
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
+{
+ return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
+}
+#endif
+
+/*!
+ * @internal
+ * @brief `vmlal_u32` on low and high halves of a vector.
+ *
+ * This is a workaround for AArch64 GCC < 11, which implemented arm_neon.h with
+ * inline assembly and was therefore incapable of merging the `vget_{low, high}_u32`
+ * with `vmlal_u32`.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* Inline assembly is the only way */
+ __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
+ return acc;
+}
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* This intrinsic works as expected */
+ return vmlal_high_u32(acc, lhs, rhs);
+}
+#else
+/* Portable intrinsic versions */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
+}
+/*! @copydoc XXH_vmlal_low_u32
+ * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
+}
+#endif
+
+/*!
+ * @ingroup tuning
+ * @brief Controls the NEON to scalar ratio for XXH3
+ *
+ * This can be set to 2, 4, 6, or 8.
+ *
+ * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
+ *
+ * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
+ * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
+ * bandwidth.
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
+ * |:----------------------|:--------------------|----------:|-----------:|------:|
+ * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
+ * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
+ * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
+ * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the
+ * lanes, meaning it effectively becomes a worse 4. (An override sketch
+ * follows this block.)
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+ && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+# define XXH3_NEON_LANES 6
+# else
+# define XXH3_NEON_LANES XXH_ACC_NB
+# endif
+# endif
+#endif /* XXH_VECTOR == XXH_NEON */
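+
+/*
+ * Illustrative override, not part of upstream xxHash: since XXH3_NEON_LANES
+ * above is guarded by #ifndef, a build that wants full NEON on a big AArch64
+ * core can define it before this code is compiled (the header name is a
+ * hypothetical example):
+ *
+ *   #define XXH3_NEON_LANES 8
+ *   #include "xxhash.h"
+ */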
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+# pragma push_macro("bool")
+# pragma push_macro("vector")
+# pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+# undef bool
+# undef vector
+# undef pixel
+
+# if defined(__s390x__)
+# include <s390intrin.h>
+# else
+# include <altivec.h>
+# endif
+
+/* Restore the original macro values, if applicable. */
+# pragma pop_macro("pixel")
+# pragma pop_macro("vector")
+# pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: s390x GCC has the same aliasing issue as AArch64 macOS GCC above.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+# if defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_VSX_BE 1
+# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+# warning "-maltivec=be is not recommended. Please use native endianness."
+# define XXH_VSX_BE 1
+# else
+# define XXH_VSX_BE 0
+# endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+# define XXH_vec_revb vec_revb
+# else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+ xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+ return vec_perm(val, val, vByteSwap);
+}
+# endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+ xxh_u64x2 ret;
+ XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+ ret = XXH_vec_revb(ret);
+# endif
+ return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on version.
+ */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+# define XXH_vec_mulo vec_mulo
+# define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
+  /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+# define XXH_vec_mulo __builtin_altivec_vmulouw
+# define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+ svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
+ svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
+ svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
+ svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
+ svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
+ svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
+ svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+ acc = svadd_u64_x(mask, acc, mul); \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+#else
+# if XXH_SIZE_OPT >= 1
+# define XXH_PREFETCH(ptr) (void)(ptr)
+# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# else
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
+# error "default keyset is not large enough"
+#endif
+
+/*! Pseudorandom secret taken directly from FARSH. */
+XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+ 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
+ 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
+ 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+ 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
+ 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
+ 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+ 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
+ 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
+ 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+ 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
+ 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
+ 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
+};
+
+static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
+static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
+
+#ifdef XXH_OLD_NAMES
+# define kSecret XXH3_kSecret
+#endif
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Calculates a 32-bit to 64-bit long multiply.
+ *
+ * Implemented as a macro.
+ *
+ * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
+ * need to (and it shouldn't need to anyway: a 64x64 multiply is only about
+ * 7 instructions...). Since we know that this will _always_ emit `MULL`, we
+ * use that instead of the normal method.
+ *
+ * If you are compiling for platforms like Thumb-1 and don't have a better option,
+ * you may also want to write your own long multiply routine here.
+ *
+ * @param x, y Numbers to be multiplied
+ * @return 64-bit product of the low 32 bits of @p x and @p y.
+ */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64(xxh_u64 x, xxh_u64 y)
+{
+ return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
+}
+#elif defined(_MSC_VER) && defined(_M_IX86)
+# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
+#else
+/*
+ * Downcast + upcast is usually better than masking on older compilers like
+ * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
+ * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
+ */
+# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
+#endif
+
+/*!
+ * @brief Calculates a 64->128-bit long multiply.
+ *
+ * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
+ * version.
+ *
+ * @param lhs , rhs The 64-bit integers to be multiplied
+ * @return The 128-bit result represented in an @ref XXH128_hash_t.
+ */
+static XXH128_hash_t
+XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
+{
+ /*
+ * GCC/Clang __uint128_t method.
+ *
+ * On most 64-bit targets, GCC and Clang define a __uint128_t type.
+ * This is usually the best way as it usually uses a native long 64-bit
+ * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
+ *
+ * Usually.
+ *
+     * On wasm, despite it being a 32-bit platform, Clang (and Emscripten)
+     * define this type without having native arithmetic for it. This results
+     * in a slow compiler-builtin call which calculates a full 128-bit
+     * multiply in software. In that case it is best to use the portable one.
+ * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
+ */
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
+ && defined(__SIZEOF_INT128__) \
+ || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+
+ __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
+ XXH128_hash_t r128;
+ r128.low64 = (xxh_u64)(product);
+ r128.high64 = (xxh_u64)(product >> 64);
+ return r128;
+
+ /*
+ * MSVC for x64's _umul128 method.
+ *
+ * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
+ *
+ * This compiles to single operand MUL on x64.
+ */
+#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(_umul128)
+#endif
+ xxh_u64 product_high;
+ xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
+ XXH128_hash_t r128;
+ r128.low64 = product_low;
+ r128.high64 = product_high;
+ return r128;
+
+ /*
+ * MSVC for ARM64's __umulh method.
+ *
+ * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
+ */
+#elif defined(_M_ARM64) || defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(__umulh)
+#endif
+ XXH128_hash_t r128;
+ r128.low64 = lhs * rhs;
+ r128.high64 = __umulh(lhs, rhs);
+ return r128;
+
+#else
+ /*
+ * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
+ *
+ * This is a fast and simple grade school multiply, which is shown below
+ * with base 10 arithmetic instead of base 0x100000000.
+ *
+ * 9 3 // D2 lhs = 93
+ * x 7 5 // D2 rhs = 75
+ * ----------
+ * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
+ * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
+ * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
+ * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
+ * ---------
+ * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
+ * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
+ * ---------
+ * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
+ *
+ * The reasons for adding the products like this are:
+ * 1. It avoids manual carry tracking. Just like how
+ * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
+ * This avoids a lot of complexity.
+ *
+ * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
+ * instruction available in ARM's Digital Signal Processing extension
+ * in 32-bit ARMv6 and later, which is shown below:
+ *
+ * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
+ * {
+ * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+ * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+ * *RdHi = (xxh_u32)(product >> 32);
+ * }
+ *
+ * This instruction was designed for efficient long multiplication, and
+ * allows this to be calculated in only 4 instructions at speeds
+ * comparable to some 64-bit ALUs.
+ *
+ * 3. It isn't terrible on other platforms. Usually this will be a couple
+ * of 32-bit ADD/ADCs.
+ */
+
+ /* First calculate all of the cross products. */
+ xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+ xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+ xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+ xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+
+ /* Now add the products together. These will never overflow. */
+ xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+ xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+ xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+ XXH128_hash_t r128;
+ r128.low64 = lower;
+ r128.high64 = upper;
+ return r128;
+#endif
+}
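+
+/*
+ * Illustrative check, not part of upstream xxHash: the scheme above
+ * reproduces an ordinary widening multiply. The operands are arbitrary.
+ *
+ *   XXH128_hash_t p = XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL, 2);
+ *   // 2 * (2^64 - 1) = 2^65 - 2:
+ *   //   p.high64 == 1, p.low64 == 0xFFFFFFFFFFFFFFFEULL
+ */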
+
+/*!
+ * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
+ *
+ * The reason for the separate function is to prevent passing too many structs
+ * around by value. This will hopefully inline the multiply, but we don't force it.
+ *
+ * @param lhs , rhs The 64-bit integers to multiply
+ * @return The low 64 bits of the product XOR'd by the high 64 bits.
+ * @see XXH_mult64to128()
+ */
+static xxh_u64
+XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
+{
+ XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
+ return product.low64 ^ product.high64;
+}
+
+/*! Seems to produce slightly better code on GCC for some reason. */
+XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
+{
+ XXH_ASSERT(0 <= shift && shift < 64);
+ return v64 ^ (v64 >> shift);
+}
+
+/*
+ * This is a fast avalanche stage,
+ * suitable when input bits are already partially mixed
+ */
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
+{
+ h64 = XXH_xorshift64(h64, 37);
+ h64 *= PRIME_MX1;
+ h64 = XXH_xorshift64(h64, 32);
+ return h64;
+}
+
+/*
+ * This is a stronger avalanche,
+ * inspired by Pelle Evensen's rrmxmx
+ * preferable when input has not been previously mixed
+ */
+static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
+{
+ /* this mix is inspired by Pelle Evensen's rrmxmx */
+ h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
+ h64 *= PRIME_MX2;
+ h64 ^= (h64 >> 35) + len ;
+ h64 *= PRIME_MX2;
+ return XXH_xorshift64(h64, 28);
+}
+
+
+/* ==========================================
+ * Short keys
+ * ==========================================
+ * One of the shortcomings of XXH32 and XXH64 was that their performance was
+ * sub-optimal on short lengths. They used an iterative algorithm which
+ * strongly favored lengths that were a multiple of 4 or 8.
+ *
+ * Instead of iterating over individual inputs, we use a set of single-shot
+ * functions which piece together a range of lengths and operate in constant time.
+ *
+ * Additionally, the number of multiplies has been significantly reduced. This
+ * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
+ *
+ * Depending on the platform, this may or may not be faster than XXH32, but it
+ * is almost guaranteed to be faster than XXH64.
+ */
+
+/*
+ * At very short lengths, there isn't enough input to fully hide secrets, or use
+ * the entire secret.
+ *
+ * There is also only a limited amount of mixing we can do before significantly
+ * impacting performance.
+ *
+ * Therefore, we use different sections of the secret and always mix two secret
+ * samples with an XOR. This should have no effect on performance on the
+ * seedless or withSeed variants because everything _should_ be constant folded
+ * by modern compilers.
+ *
+ * The XOR mixing hides individual parts of the secret and increases entropy.
+ *
+ * This adds an extra layer of strength for custom secrets.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combined = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combined = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combined = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
+ return XXH64_avalanche(keyed);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input1 = XXH_readLE32(input);
+ xxh_u32 const input2 = XXH_readLE32(input + len - 4);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
+ xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
+ xxh_u64 const keyed = input64 ^ bitflip;
+ return XXH3_rrmxmx(keyed, len);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
+ xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
+ xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
+ xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
+ xxh_u64 const acc = len
+ + XXH_swap64(input_lo) + input_hi
+ + XXH3_mul128_fold64(input_lo, input_hi);
+ return XXH3_avalanche(acc);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
+ if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
+ return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
+ }
+}
+
+/*
+ * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
+ * multiplication by zero, affecting hashes of lengths 17 to 240.
+ *
+ * However, they are very unlikely.
+ *
+ * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
+ * unseeded non-cryptographic hashes, it does not attempt to defend itself
+ * against specially crafted inputs, only random inputs.
+ *
+ * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
+ * cancelling out the secret is taken an arbitrary number of times (addressed
+ * in XXH3_accumulate_512), this collision is very unlikely with random inputs
+ * and/or proper seeding:
+ *
+ * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
+ * function that is only called up to 16 times per hash with up to 240 bytes of
+ * input.
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
+ /*
+ * UGLY HACK:
+ * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+ * slower code.
+ *
+ * By forcing seed64 into a register, we disrupt the cost model and
+ * cause it to scalarize. See `XXH32_round()`
+ *
+ * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+ * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+ * GCC 9.2, despite both emitting scalar code.
+ *
+     * GCC generates much better scalar code than Clang for the rest of XXH3,
+     * which is why finding a more optimal codepath is of interest.
+ */
+ XXH_COMPILER_GUARD(seed64);
+#endif
+ { xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 const input_hi = XXH_readLE64(input+8);
+ return XXH3_mul128_fold64(
+ input_lo ^ (XXH_readLE64(secret) + seed64),
+ input_hi ^ (XXH_readLE64(secret+8) - seed64)
+ );
+ }
+}
+
+/* For mid range keys, XXH3 uses a Mum-hash variant. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+ /* Smaller and cleaner, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+ acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+ } while (i-- != 0);
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc += XXH3_mix16B(input+48, secret+96, seed);
+ acc += XXH3_mix16B(input+len-64, secret+112, seed);
+ }
+ acc += XXH3_mix16B(input+32, secret+64, seed);
+ acc += XXH3_mix16B(input+len-48, secret+80, seed);
+ }
+ acc += XXH3_mix16B(input+16, secret+32, seed);
+ acc += XXH3_mix16B(input+len-32, secret+48, seed);
+ }
+ acc += XXH3_mix16B(input+0, secret+0, seed);
+ acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+ return XXH3_avalanche(acc);
+ }
+}
+
+#define XXH3_MIDSIZE_MAX 240
+
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ #define XXH3_MIDSIZE_STARTOFFSET 3
+ #define XXH3_MIDSIZE_LASTOFFSET 17
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+ xxh_u64 acc_end;
+ unsigned int const nbRounds = (unsigned int)len / 16;
+ unsigned int i;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+ for (i=0; i<8; i++) {
+ acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+ }
+ /* last bytes */
+ acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+ XXH_ASSERT(nbRounds >= 8);
+ acc = XXH3_avalanche(acc);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+ * Clang for ARMv7-A tries to vectorize this loop, similar to GCC on x86.
+ * Everywhere else, it uses scalar code.
+ *
+ * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+ * would still be slower than UMAAL (see XXH_mult64to128).
+ *
+ * Unfortunately, Clang doesn't handle the long multiplies properly and
+ * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+ * scalarized into an ugly mess of VMOV.32 instructions.
+ *
+ * Such messes are difficult to avoid without turning autovectorization
+ * off completely, but they are usually relatively minor and/or not
+ * worth fixing.
+ *
+ * This loop is the easiest to fix, as unlike XXH32, this pragma
+ * _actually works_ because it is a loop vectorization instead of an
+ * SLP vectorization.
+ */
+ #pragma clang loop vectorize(disable)
+#endif
+ for (i=8 ; i < nbRounds; i++) {
+ /*
+ * Prevents Clang from unrolling the acc loop and interleaving it with this one.
+ */
+ XXH_COMPILER_GUARD(acc);
+ acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+ }
+ return XXH3_avalanche(acc + acc_end);
+ }
+}
+
+
+/* ======= Long Keys ======= */
+
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+
+#ifdef XXH_OLD_NAMES
+# define STRIPE_LEN XXH_STRIPE_LEN
+# define ACC_NB XXH_ACC_NB
+#endif
+
+#ifndef XXH_PREFETCH_DIST
+# ifdef __clang__
+# define XXH_PREFETCH_DIST 320
+# else
+# if (XXH_VECTOR == XXH_AVX512)
+# define XXH_PREFETCH_DIST 512
+# else
+# define XXH_PREFETCH_DIST 384
+# endif
+# endif /* __clang__ */
+#endif /* XXH_PREFETCH_DIST */
+
+/*
+ * These macros are to generate an XXH3_accumulate() function.
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name) \
+void \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \
+ const xxh_u8* XXH_RESTRICT input, \
+ const xxh_u8* XXH_RESTRICT secret, \
+ size_t nbStripes) \
+{ \
+ size_t n; \
+ for (n = 0; n < nbStripes; n++ ) { \
+ const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \
+ XXH_PREFETCH(in + XXH_PREFETCH_DIST); \
+ XXH3_accumulate_512_##name( \
+ acc, \
+ in, \
+ secret + n*XXH_SECRET_CONSUME_RATE); \
+ } \
+}
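+/*
+ * For illustration (a hypothetical expansion, not emitted verbatim by the
+ * preprocessor): XXH3_ACCUMULATE_TEMPLATE(scalar) defines
+ *
+ *   void XXH3_accumulate_scalar(xxh_u64* acc, const xxh_u8* input,
+ *                               const xxh_u8* secret, size_t nbStripes);
+ *
+ * which prefetches each stripe and forwards it to XXH3_accumulate_512_scalar().
+ */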
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+ if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+ XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as an argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ typedef int64_t xxh_i64;
+#else
+ /* the following type must have a width of 64-bit */
+ typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based on FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input into the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
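+/*
+ * A minimal per-lane sketch of the operation each backend below implements
+ * (XXH3_scalarRound() further down is the canonical scalar form):
+ *
+ *   data_key     = input64[lane] ^ secret64[lane];
+ *   acc[lane]   += (data_key & 0xFFFFFFFF) * (data_key >> 32);
+ *   acc[lane^1] += input64[lane];  // the "swap" cross-pollination step
+ */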
+
+#if (XXH_VECTOR == XXH_AVX512) \
+ || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
+
+#ifndef XXH_TARGET_AVX512
+# define XXH_TARGET_AVX512 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ __m512i* const xacc = (__m512i *) acc;
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+
+ {
+ /* data_vec = input[0]; */
+ __m512i const data_vec = _mm512_loadu_si512 (input);
+ /* key_vec = secret[0]; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ /* data_key = data_vec ^ key_vec; */
+ __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
+ /* xacc[0] += swap(data_vec); */
+ __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
+ __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
+ /* xacc[0] += product; */
+ *xacc = _mm512_add_epi64(product, sum);
+ }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
+
+/*
+ * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
+ *
+ * Multiplication isn't perfect, as explained by Google in HighwayHash:
+ *
+ * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
+ * // varying degrees. In descending order of goodness, bytes
+ * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
+ * // As expected, the upper and lower bytes are much worse.
+ *
+ * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
+ *
+ * Since our algorithm uses a pseudorandom secret to add some variance into the
+ * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
+ *
+ * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
+ * extraction.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
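+/*
+ * A minimal per-lane sketch of the scramble each backend below implements
+ * (XXH3_scalarScrambleRound() further down is the canonical scalar form):
+ *
+ *   acc[lane] ^= acc[lane] >> 47;
+ *   acc[lane] ^= secret64[lane];
+ *   acc[lane] *= XXH_PRIME32_1;
+ */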
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+ { __m512i* const xacc = (__m512i*) acc;
+ const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
+
+ /* xacc[0] ^= (xacc[0] >> 47) */
+ __m512i const acc_vec = *xacc;
+ __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
+ /* xacc[0] ^= secret; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
+
+ /* xacc[0] *= XXH_PRIME32_1; */
+ __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
+ __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
+ __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
+ *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
+ XXH_ASSERT(((size_t)customSecret & 63) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
+ __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
+ __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
+
+ const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
+ __m512i* const dest = ( __m512i*) customSecret;
+ int i;
+ XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 63) == 0);
+ for (i=0; i < nbRounds; ++i) {
+ dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_AVX2) \
+ || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
+
+#ifndef XXH_TARGET_AVX2
+# define XXH_TARGET_AVX2 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xinput = (const __m256i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* data_vec = xinput[i]; */
+ __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+ __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm256_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+ const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m256i const acc_vec = xacc[i];
+ __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
+ __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
+ __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
+ __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+ }
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+ (void)(&XXH_writeLE64);
+ XXH_PREFETCH(customSecret);
+ { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+
+ const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
+ __m256i* dest = ( __m256i*) customSecret;
+
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+ * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+ * - not extract the secret from SSE registers in the internal loop
+ * - use fewer registers, and avoid pushing them onto the stack
+ */
+ XXH_COMPILER_GUARD(dest);
+# endif
+ XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 31) == 0);
+
+ /* GCC -O2 needs the loop unrolled manually */
+ dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+ dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+ dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+ dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+ dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+ dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
+ }
+}
+
+#endif
+
+/* x86dispatch always generates SSE2 */
+#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
+
+#ifndef XXH_TARGET_SSE2
+# define XXH_TARGET_SSE2 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* SSE2 is just a half-scale version of the AVX2 version. */
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xinput = (const __m128i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* data_vec = xinput[i]; */
+ __m128i const data_vec = _mm_loadu_si128 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
+ __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+ const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m128i const acc_vec = xacc[i];
+ __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
+ __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
+ __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+ }
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+ /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+ XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+ __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+# else
+ __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+# endif
+ int i;
+
+ const void* const src16 = XXH3_kSecret;
+ __m128i* dst16 = (__m128i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+ * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
+ * - not extract the secret from SSE registers in the internal loop
+ * - use fewer registers, and avoid pushing them onto the stack
+ */
+ XXH_COMPILER_GUARD(dst16);
+# endif
+ XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dst16 & 15) == 0);
+
+ for (i=0; i < nbRounds; ++i) {
+ dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_NEON)
+
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can yield up to a 15% speedup depending on
+ * the CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers, unlike the other platforms, which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the most optimal number of lanes for Cortex CPUs is 6,
+ * which is not a multiple of 4, *three* versions of the accumulate operation
+ * are needed: 4 NEON lanes at a time, 2 NEON lanes for the remainder, and
+ * the scalar rounds for the non-NEON lanes.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
+ */
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
+ { /* GCC for darwin arm64 does not like aliasing here */
+ xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
+ /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
+ uint8_t const* xinput = (const uint8_t *) input;
+ uint8_t const* xsecret = (const uint8_t *) secret;
+
+ size_t i;
+#ifdef __wasm_simd128__
+ /*
+ * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
+ * is constant-propagated, which results in it generating code like this
+ * inside the loop:
+ *
+ * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0)
+ * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
+ * ...
+ *
+ * This requires a full 32-bit address immediate (and therefore a 6 byte
+ * instruction) as well as an add for each offset.
+ *
+ * Putting an asm guard prevents it from folding (at the cost of losing
+ * the alignment hint), and uses the free offset in `v128.load` instead
+ * of adding secret_offset each time, which overall reduces code size by
+ * about a kilobyte and improves performance.
+ */
+ XXH_COMPILER_GUARD(xsecret);
+#endif
+ /* Scalar lanes use the normal scalarRound routine */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+ i = 0;
+ /* 4 NEON lanes at a time. */
+ for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16));
+ uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16));
+ /* data_swap = swap(data_vec) */
+ uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
+ uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
+ uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
+
+ /*
+ * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
+ * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
+ * get one vector with the low 32 bits of each lane, and one vector
+ * with the high 32 bits of each lane.
+ *
+ * The intrinsic returns a double vector because the original ARMv7-a
+ * instruction modified both arguments in place. AArch64 and SIMD128 emit
+ * two instructions from this intrinsic.
+ *
+ * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
+ * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
+ */
+ uint32x4x2_t unzipped = vuzpq_u32(
+ vreinterpretq_u32_u64(data_key_1),
+ vreinterpretq_u32_u64(data_key_2)
+ );
+ /* data_key_lo = data_key & 0xFFFFFFFF */
+ uint32x4_t data_key_lo = unzipped.val[0];
+ /* data_key_hi = data_key >> 32 */
+ uint32x4_t data_key_hi = unzipped.val[1];
+ /*
+ * Then, we can split the vectors horizontally and multiply, which, as with
+ * most widening intrinsics, has a variant that works on the high half
+ * vectors for free on AArch64. A similar instruction is available on SIMD128.
+ *
+ * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
+ */
+ uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
+ uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
+ /*
+ * Clang reorders
+ * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s
+ * d += a;     // add   acc.2d, acc.2d, swap.2d
+ * to
+ * d += a;     // add   acc.2d, acc.2d, swap.2d
+ * d += b * c; // umlal acc.2d, dkl.2s, dkh.2s
+ *
+ * While it would make sense in theory since the addition is faster,
+ * for reasons likely related to umlal being limited to certain NEON
+ * pipelines, this is worse. A compiler guard fixes this.
+ */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64(xacc[i], sum_1);
+ xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
+ }
+ /* Operate on the remaining NEON lanes 2 at a time. */
+ for (; i < XXH3_NEON_LANES / 2; i++) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ /* acc_vec_2 = swap(data_vec) */
+ uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* For two lanes, just use VMOVN and VSHRN. */
+ /* data_key_lo = data_key & 0xFFFFFFFF; */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* data_key_hi = data_key >> 32; */
+ uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
+ /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
+ uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
+ /* Same Clang workaround as before */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64 (xacc[i], sum);
+ }
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
+
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+ { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
+ uint8_t const* xsecret = (uint8_t const*) secret;
+
+ size_t i;
+ /* WASM uses operator overloads and doesn't need these. */
+#ifndef __wasm_simd128__
+ /* { prime32_1, prime32_1 } */
+ uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
+ /* { 0, prime32_1, 0, prime32_1 } */
+ uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
+#endif
+
+ /* AArch64 uses both scalar and neon at the same time */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+ for (i=0; i < XXH3_NEON_LANES / 2; i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ uint64x2_t acc_vec = xacc[i];
+ uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
+ uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
+
+ /* xacc[i] ^= xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* xacc[i] *= XXH_PRIME32_1 */
+#ifdef __wasm_simd128__
+ /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
+ xacc[i] = data_key * XXH_PRIME32_1;
+#else
+ /*
+ * Expanded version with portable NEON intrinsics
+ *
+ * lo(x) * lo(y) + (hi(x) * lo(y) << 32)
+ *
+ * prod_hi = hi(data_key) * lo(prime) << 32
+ *
+ * Since we only need 32 bits of this multiply, a trick can be used:
+ * reinterpret the vector as a uint32x4_t and multiply by { 0, prime, 0, prime }
+ * to cancel out the unwanted bits and avoid the shift.
+ */
+ uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
+ /* Extract low bits for vmlal_u32 */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
+ xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
+#endif
+ }
+ }
+}
+#endif
+
+#if (XXH_VECTOR == XXH_VSX)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* presumed aligned */
+ xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */
+ xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */
+ xxh_u64x2 const v32 = { 32, 32 };
+ size_t i;
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* data_vec = xinput[i]; */
+ xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
+ /* key_vec = xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+ /* shuffled = (data_key << 32) | (data_key >> 32); */
+ xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
+ /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
+ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
+ /* acc_vec = xacc[i]; */
+ xxh_u64x2 acc_vec = xacc[i];
+ acc_vec += product;
+
+ /* swap high and low halves */
+#ifdef __s390x__
+ acc_vec += vec_permi(data_vec, data_vec, 2);
+#else
+ acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
+#endif
+ xacc[i] = acc_vec;
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
+
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+ { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ const xxh_u8* const xsecret = (const xxh_u8*) secret;
+ /* constants */
+ xxh_u64x2 const v32 = { 32, 32 };
+ xxh_u64x2 const v47 = { 47, 47 };
+ xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
+ size_t i;
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ xxh_u64x2 const acc_vec = xacc[i];
+ xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
+
+ /* xacc[i] ^= xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+
+ /* xacc[i] *= XXH_PRIME32_1 */
+ /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
+ xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
+ /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
+ xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
+ xacc[i] = prod_odd + (prod_even << v32);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_SVE)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc);
+ ACCRND(vacc, 0);
+ svst1_u64(mask, xacc, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
+ }
+}
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret,
+ size_t nbStripes)
+{
+ if (nbStripes != 0) {
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc + 0);
+ do {
+ /* svprfd(svbool_t, void *, enum svfprop); */
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(vacc, 0);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
+ }
+ }
+}
+
+#endif
+
+/* scalar variants - universal */
+
+#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
+/*
+ * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
+ * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
+ *
+ * While this might not seem like much, as AArch64 is a 64-bit architecture, only
+ * big Cortex designs have a full 64-bit multiplier.
+ *
+ * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
+ * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
+ * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
+ *
+ * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
+ * not have this penalty and does the mask automatically.
+ */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ xxh_u64 ret;
+ /* note: %x = 64-bit register, %w = 32-bit register */
+ __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
+ return ret;
+}
+#else
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
+}
+#endif
+
+/*!
+ * @internal
+ * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* xacc = (xxh_u64*) acc;
+ xxh_u8 const* xinput = (xxh_u8 const*) input;
+ xxh_u8 const* xsecret = (xxh_u8 const*) secret;
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
+ {
+ xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
+ xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
+ xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
+ xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
+ }
+}
+
+/*!
+ * @internal
+ * @brief Processes a 64 byte block of data using the scalar path.
+ */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__arm__) || defined(__thumb2__)) \
+ && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
+ && XXH_SIZE_OPT <= 0
+# pragma GCC unroll 8
+#endif
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
+
+/*!
+ * @internal
+ * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
+ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
+ XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ {
+ xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+ xxh_u64 acc64 = xacc[lane];
+ acc64 = XXH_xorshift64(acc64, 47);
+ acc64 ^= key64;
+ acc64 *= XXH_PRIME32_1;
+ xacc[lane] = acc64;
+ }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ /*
+ * We need a separate pointer for the hack below,
+ * which requires a non-const pointer.
+ * Any decent compiler will optimize this out otherwise.
+ */
+ const xxh_u8* kSecretPtr = XXH3_kSecret;
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+ /*
+ * UGLY HACK:
+ * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+ * placed sequentially, in order, at the top of the unrolled loop.
+ *
+ * While MOVK is great for generating constants (2 cycles for a 64-bit
+ * constant compared to 4 cycles for LDR), it fights for bandwidth with
+ * the arithmetic instructions.
+ *
+ * I L S
+ * MOVK
+ * MOVK
+ * MOVK
+ * MOVK
+ * ADD
+ * SUB STR
+ * STR
+ * By forcing loads from memory (as the asm line causes the compiler to assume
+ * that kSecretPtr has been changed), the pipelines are used more
+ * efficiently:
+ * I L S
+ * LDR
+ * ADD LDR
+ * SUB STR
+ * STR
+ *
+ * See XXH3_NEON_LANES for details on the pipeline.
+ *
+ * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+ * without hack: 2654.4 MB/s
+ * with hack: 3202.9 MB/s
+ */
+ XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+ int i;
+ for (i=0; i < nbRounds; i++) {
+ /*
+ * The asm hack causes the compiler to assume that kSecretPtr aliases with
+ * customSecret, and on aarch64, this prevented LDP from merging two
+ * loads together for free. Putting the loads together before the stores
+ * properly generates LDP.
+ */
+ xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
+ xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
+ } }
+}
+
+
+typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
+typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
+typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
+
+
+#if (XXH_VECTOR == XXH_AVX512)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
+#define XXH3_accumulate XXH3_accumulate_avx512
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
+
+#elif (XXH_VECTOR == XXH_AVX2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
+#define XXH3_accumulate XXH3_accumulate_avx2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
+
+#elif (XXH_VECTOR == XXH_SSE2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
+#define XXH3_accumulate XXH3_accumulate_sse2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
+
+#elif (XXH_VECTOR == XXH_NEON)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_neon
+#define XXH3_accumulate XXH3_accumulate_neon
+#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_VSX)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
+#define XXH3_accumulate XXH3_accumulate_vsx
+#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_SVE)
+#define XXH3_accumulate_512 XXH3_accumulate_512_sve
+#define XXH3_accumulate XXH3_accumulate_sve
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#else /* scalar */
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
+#define XXH3_accumulate XXH3_accumulate_scalar
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#endif
+
+#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
+# undef XXH3_initCustomSecret
+# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+#endif
+
+XXH_FORCE_INLINE void
+XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+ size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+ size_t const nb_blocks = (len - 1) / block_len;
+
+ size_t n;
+
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+
+ for (n = 0; n < nb_blocks; n++) {
+ f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
+ f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
+ }
+
+ /* last partial block */
+ XXH_ASSERT(len > XXH_STRIPE_LEN);
+ { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+ XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
+ f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
+
+ /* last stripe */
+ { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
+#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
+ XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+ } }
+}
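+/*
+ * Editorial sketch of the control flow above: each full block consumes
+ * nbStripesPerBlock stripes and is followed by one scramble; the last
+ * partial block consumes its remaining whole stripes; finally, the last
+ * XXH_STRIPE_LEN bytes (which may overlap the previous stripe) are
+ * accumulated with a secret offset of XXH_SECRET_LASTACC_START.
+ */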
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+ return XXH3_mul128_fold64(
+ acc[0] ^ XXH_readLE64(secret),
+ acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+ xxh_u64 result64 = start;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++) {
+ result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+ * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+ * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+ * XXH3_64bits, len == 256, Snapdragon 835:
+ * without hack: 2063.7 MB/s
+ * with hack: 2560.7 MB/s
+ */
+ XXH_COMPILER_GUARD(result64);
+#endif
+ }
+
+ return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+ XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+ const void* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of vector loop.
+ */
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * XXH3_hashLong_64b_withSeed():
+ * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
+ * and then use this key for long mode hashing.
+ *
+ * This operation is decently fast but nonetheless costs a little bit of time.
+ * Try to avoid it whenever possible (typically when seed==0).
+ *
+ * It's important for performance that XXH3_hashLong is not inlined. Not sure
+ * why (uop cache maybe?), but the difference is large and easily measurable.
+ */
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
+ XXH64_hash_t seed,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+#if XXH_SIZE_OPT <= 0
+ if (seed == 0)
+ return XXH3_hashLong_64b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+#endif
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed);
+ return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+
+typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong64_f f_hashLong)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secretLen` condition is not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ * Also, note that function signature doesn't offer room to return an error.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
+}
+
+
+/* === Public entry point === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
+{
+ return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (length <= XXH3_MIDSIZE_MAX)
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
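+/*
+ * Illustrative usage of the one-shot entry points above (an editorial sketch;
+ * `buf` and `bufSize` are hypothetical caller variables):
+ *
+ * @code{.c}
+ *   XXH64_hash_t h1 = XXH3_64bits(buf, bufSize);
+ *   XXH64_hash_t h2 = XXH3_64bits_withSeed(buf, bufSize, (XXH64_hash_t)42);
+ * @endcode
+ */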
+
+
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * Allocates a pointer that is always aligned to align.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2
+ * or, on 32-bit, the 16-byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: Functions like posix_memalign or _mm_malloc
+ * are avoided: To maintain portability, we would have to write a fallback
+ * like this anyway, and besides, testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: Overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
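+/*
+ * Illustrative layout, assuming align == 64 (an editorial sketch):
+ *
+ *   base .................. [offset byte][ s usable, 64-byte aligned bytes ]
+ *                                        ^ returned pointer; ptr[-1] == offset,
+ *                                          letting XXH_alignedFree() recover base.
+ */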
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+ XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+ XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
+ XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
+ { /* Overallocate to make room for manual realignment and an offset byte */
+ xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+ if (base != NULL) {
+ /*
+ * Get the offset needed to align this pointer.
+ *
+ * Even if base is already aligned, the offset is at least one byte,
+ * which is enough to store the offset to the original pointer.
+ */
+ size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+ /* Add the offset for the now-aligned pointer */
+ xxh_u8* ptr = base + offset;
+
+ XXH_ASSERT((size_t)ptr % align == 0);
+
+ /* Store the offset immediately before the returned pointer. */
+ ptr[-1] = (xxh_u8)offset;
+ return ptr;
+ }
+ return NULL;
+ }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+ if (p != NULL) {
+ xxh_u8* ptr = (xxh_u8*)p;
+ /* Get the offset byte we added in XXH_alignedMalloc. */
+ xxh_u8 offset = ptr[-1];
+ /* Free the original malloc'd pointer */
+ xxh_u8* base = ptr - offset;
+ XXH_free(base);
+ }
+}
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * Must be freed with XXH3_freeState().
+ * @return An allocated XXH3_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+ XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+ if (state==NULL) return NULL;
+ XXH3_INITSTATE(state);
+ return state;
+}
+
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Frees an @ref XXH3_state_t.
+ *
+ * Must be allocated with XXH3_createState().
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
+{
+ XXH_alignedFree(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
+{
+ XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
+}
+
+static void
+XXH3_reset_internal(XXH3_state_t* statePtr,
+ XXH64_hash_t seed,
+ const void* secret, size_t secretSize)
+{
+ size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+ size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+ XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+ XXH_ASSERT(statePtr != NULL);
+ /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+ memset((char*)statePtr + initStart, 0, initLength);
+ statePtr->acc[0] = XXH_PRIME32_3;
+ statePtr->acc[1] = XXH_PRIME64_1;
+ statePtr->acc[2] = XXH_PRIME64_2;
+ statePtr->acc[3] = XXH_PRIME64_3;
+ statePtr->acc[4] = XXH_PRIME64_4;
+ statePtr->acc[5] = XXH_PRIME32_2;
+ statePtr->acc[6] = XXH_PRIME64_5;
+ statePtr->acc[7] = XXH_PRIME32_1;
+ statePtr->seed = seed;
+ statePtr->useSeed = (seed != 0);
+ statePtr->extSecret = (const unsigned char*)secret;
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+ statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ /* Validate arguments before resetting, so that the secretSize assertion
+ * inside XXH3_reset_internal() cannot fire on invalid input. */
+ XXH3_reset_internal(statePtr, 0, secret, secretSize);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (seed==0) return XXH3_64bits_reset(statePtr);
+ if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
+ XXH3_initCustomSecret(statePtr->customSecret, seed);
+ XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, seed64, secret, secretSize);
+ statePtr->useSeed = 1; /* always, even if seed64==0 */
+ return XXH_OK;
+}
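+/*
+ * Illustrative streaming usage (an editorial sketch; `chunk1`/`chunk2` and
+ * their sizes are hypothetical, and XXH3_64bits_update()/XXH3_64bits_digest()
+ * are defined further below in this file):
+ *
+ * @code{.c}
+ *   XXH3_state_t* st = XXH3_createState();
+ *   if (st != NULL && XXH3_64bits_reset(st) == XXH_OK) {
+ *       XXH3_64bits_update(st, chunk1, chunk1Size);
+ *       XXH3_64bits_update(st, chunk2, chunk2Size);
+ *       XXH64_hash_t h = XXH3_64bits_digest(st);
+ *       (void)h;
+ *   }
+ *   XXH3_freeState(st);
+ * @endcode
+ */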
+
+/*!
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+ size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+ const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+ /* Process full blocks */
+ if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+ /* Process the initial partial block... */
+ size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+ do {
+ /* Accumulate and scramble */
+ f_acc(acc, input, initialSecret, nbStripesThisIter);
+ f_scramble(acc, secret + secretLimit);
+ input += nbStripesThisIter * XXH_STRIPE_LEN;
+ nbStripes -= nbStripesThisIter;
+ /* Then continue the loop with the full block size */
+ nbStripesThisIter = nbStripesPerBlock;
+ initialSecret = secret;
+ } while (nbStripes >= nbStripesPerBlock);
+ *nbStripesSoFarPtr = 0;
+ }
+ /* Process a partial block */
+ if (nbStripes > 0) {
+ f_acc(acc, input, initialSecret, nbStripes);
+ input += nbStripes * XXH_STRIPE_LEN;
+ *nbStripesSoFarPtr += nbStripes;
+ }
+ /* Return end pointer */
+ return input;
+}
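+
+/*
+ * Illustrative trace of XXH3_consumeStripes() (numbers invented for the
+ * example, not taken from any real configuration):
+ *
+ *   nbStripesPerBlock = 16, *nbStripesSoFarPtr = 10, nbStripes = 40
+ *
+ *   1. Finish the current block: 16 - 10 = 6 stripes, then scramble.
+ *   2. 34 stripes remain (>= 16): one full block of 16, then scramble.
+ *   3. 18 stripes remain (>= 16): one more full block of 16, then scramble.
+ *   4. 2 stripes remain (< 16): partial block; *nbStripesSoFarPtr becomes 2.
+ */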
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+# define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ XXH_ASSERT(state != NULL);
+ { const xxh_u8* const bEnd = input + len;
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* For some reason, gcc and MSVC generate much worse code when the
+ * accumulators are updated directly in the state.
+ * Working in stack space enables proper optimization.
+ * clang, on the other hand, doesn't need this trick */
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
+ XXH_memcpy(acc, state->acc, sizeof(acc));
+#else
+ xxh_u64* XXH_RESTRICT const acc = state->acc;
+#endif
+ state->totalLen += len;
+ XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
+
+ /* small input : just fill in tmp buffer */
+ if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
+ XXH_memcpy(state->buffer + state->bufferedSize, input, len);
+ state->bufferedSize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ /* total input is now > XXH3_INTERNALBUFFER_SIZE */
+ #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
+ XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
+
+ /*
+ * Internal buffer is partially filled (always, except at the beginning).
+ * Complete it, then consume it.
+ */
+ if (state->bufferedSize) {
+ size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+ XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+ input += loadSize;
+ XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, XXH3_INTERNALBUFFER_STRIPES,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ state->bufferedSize = 0;
+ }
+ XXH_ASSERT(input < bEnd);
+ if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
+ size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
+ input = XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ input, nbStripes,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+ }
+ /* Some remaining input (always) : buffer it */
+ XXH_ASSERT(input < bEnd);
+ XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
+ XXH_ASSERT(state->bufferedSize == 0);
+ XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
+ state->bufferedSize = (XXH32_hash_t)(bEnd-input);
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* save stack accumulators into state */
+ XXH_memcpy(state->acc, acc, sizeof(acc));
+#endif
+ }
+
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_update(state, (const xxh_u8*)input, len,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
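+
+/*
+ * Minimal streaming sketch (illustrative only; error handling omitted).
+ * `readChunk`, `buf` and `n` are hypothetical stand-ins for the caller's I/O:
+ *
+ * @code{.c}
+ *   XXH3_state_t* const st = XXH3_createState();
+ *   XXH3_64bits_reset_withSeed(st, seed);
+ *   while ((n = readChunk(buf, sizeof(buf))) > 0)
+ *       XXH3_64bits_update(st, buf, (size_t)n);
+ *   XXH64_hash_t const h = XXH3_64bits_digest(st);
+ *   XXH3_freeState(st);
+ * @endcode
+ */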
+
+
+XXH_FORCE_INLINE void
+XXH3_digest_long (XXH64_hash_t* acc,
+ const XXH3_state_t* state,
+ const unsigned char* secret)
+{
+ xxh_u8 lastStripe[XXH_STRIPE_LEN];
+ const xxh_u8* lastStripePtr;
+
+ /*
+ * Digest on a local copy. This way, the state remains unaltered, and it can
+ * continue ingesting more input afterwards.
+ */
+ XXH_memcpy(acc, state->acc, sizeof(state->acc));
+ if (state->bufferedSize >= XXH_STRIPE_LEN) {
+ /* Consume remaining stripes then point to remaining data in buffer */
+ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+ size_t nbStripesSoFar = state->nbStripesSoFar;
+ XXH3_consumeStripes(acc,
+ &nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, nbStripes,
+ secret, state->secretLimit,
+ XXH3_accumulate, XXH3_scrambleAcc);
+ lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
+ } else { /* bufferedSize < XXH_STRIPE_LEN */
+ /* Copy to temp buffer */
+ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+ XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
+ XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
+ XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+ lastStripePtr = lastStripe;
+ }
+ /* Last stripe */
+ XXH3_accumulate_512(acc,
+ lastStripePtr,
+ secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ return XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ }
+ /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+ if (state->useSeed)
+ return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+
+
+/* ==========================================
+ * XXH3 128 bits (a.k.a XXH128)
+ * ==========================================
+ * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
+ * even without counting the significantly larger output size.
+ *
+ * For example, extra steps are taken to avoid the seed-dependent collisions
+ * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
+ *
+ * This strength naturally comes at the cost of some speed, especially on short
+ * lengths. Note that hashing long inputs is about as fast as the 64-bit variant,
+ * since the 128-bit loop is only a slight modification of the 64-bit one.
+ *
+ * XXH128 is also more oriented towards 64-bit machines. It is still extremely
+ * fast for a _128-bit_ hash on 32-bit (it usually outperforms XXH64).
+ */
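+
+/*
+ * One-shot usage sketch (illustrative; `buf`, `bufSize` and `seed` are
+ * placeholders supplied by the caller):
+ *
+ * @code{.c}
+ *   XXH128_hash_t const h = XXH128(buf, bufSize, seed);
+ *   // h.low64 and h.high64 hold the two 64-bit halves of the result.
+ * @endcode
+ */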
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ /* A doubled version of 1to3_64b with different constants. */
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+ xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
+ xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+ xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+ XXH128_hash_t h128;
+ h128.low64 = XXH64_avalanche(keyed_lo);
+ h128.high64 = XXH64_avalanche(keyed_hi);
+ return h128;
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input_lo = XXH_readLE32(input);
+ xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+ xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
+ xxh_u64 const keyed = input_64 ^ bitflip;
+
+ /* Shift len left so the added term is even: the sum with the odd prime stays odd, avoiding a multiply by an even constant. */
+ XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+ m128.high64 += (m128.low64 << 1);
+ m128.low64 ^= (m128.high64 >> 3);
+
+ m128.low64 = XXH_xorshift64(m128.low64, 35);
+ m128.low64 *= PRIME_MX2;
+ m128.low64 = XXH_xorshift64(m128.low64, 28);
+ m128.high64 = XXH3_avalanche(m128.high64);
+ return m128;
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
+ xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
+ xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 input_hi = XXH_readLE64(input + len - 8);
+ XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+ /*
+ * Put len in the middle of m128 to ensure that the length gets mixed to
+ * both the low and high bits in the 128x64 multiply below.
+ */
+ m128.low64 += (xxh_u64)(len - 1) << 54;
+ input_hi ^= bitfliph;
+ /*
+ * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+ * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+ * the high 64 bits of m128.
+ *
+ * The best approach to this operation is different on 32-bit and 64-bit.
+ */
+ if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
+ /*
+ * 32-bit optimized version, which is more readable.
+ *
+ * On 32-bit, it removes an ADC and delays a dependency between the two
+ * halves of m128.high64, but it generates an extra mask on 64-bit.
+ */
+ m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+ } else {
+ /*
+ * 64-bit optimized (albeit more confusing) version.
+ *
+ * Uses some properties of addition and multiplication to remove the mask:
+ *
+ * Let:
+ * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+ * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+ * c = XXH_PRIME32_2
+ *
+ * a + (b * c)
+ * Inverse Property: x + y - x == y
+ * a + (b * (1 + c - 1))
+ * Distributive Property: x * (y + z) == (x * y) + (x * z)
+ * a + (b * 1) + (b * (c - 1))
+ * Identity Property: x * 1 == x
+ * a + b + (b * (c - 1))
+ *
+ * Substitute a, b, and c:
+ * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ *
+ * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+ * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ */
+ m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+ }
+ /* m128 ^= XXH_swap64(m128 >> 64); */
+ m128.low64 ^= XXH_swap64(m128.high64);
+
+ { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+ XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+ h128.high64 += m128.high64 * XXH_PRIME64_2;
+
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = XXH3_avalanche(h128.high64);
+ return h128;
+ } }
+}
+
+/*
+ * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+ if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+ { XXH128_hash_t h128;
+ xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
+ xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
+ h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+ h128.high64 = XXH64_avalanche( seed ^ bitfliph);
+ return h128;
+ } }
+}
+
+/*
+ * A bit slower than XXH3_mix16B, but handles multiply by zero better.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
+ const xxh_u8* secret, XXH64_hash_t seed)
+{
+ acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
+ acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+ acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
+ acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+ return acc;
+}
+
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { XXH128_hash_t acc;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+
+#if XXH_SIZE_OPT >= 1
+ {
+ /* Smaller, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+ } while (i-- != 0);
+ }
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+ }
+ acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+ }
+ acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+ }
+ acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ { XXH128_hash_t acc;
+ unsigned i;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+ /*
+ * We set `i` to offset + 32 so that the unchanged `len` can be
+ * used as the upper bound. This reaches a sweet spot where both
+ * x86 and aarch64 get simple address generation and good codegen
+ * for the loop.
+ */
+ for (i = 32; i < 160; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + i - 32,
+ seed);
+ }
+ acc.low64 = XXH3_avalanche(acc.low64);
+ acc.high64 = XXH3_avalanche(acc.high64);
+ /*
+ * NB: `i <= len` will duplicate the last 32 bytes if
+ * len % 32 is zero. This is an unfortunate necessity to keep
+ * the hash result stable.
+ */
+ for (i=160; i <= len; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+ seed);
+ }
+ /* last bytes */
+ acc = XXH128_mix32B(acc,
+ input + len - 16,
+ input + len - 32,
+ secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+ (XXH64_hash_t)0 - seed);
+
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
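+
+/*
+ * Worked example of the loop bounds above, with len = 200 (illustrative):
+ * the first loop runs i = 32, 64, 96, 128 and mixes bytes [0, 128);
+ * the second runs i = 160, 192 and mixes bytes [128, 192);
+ * the final XXH128_mix32B covers the last 32 bytes [168, 200),
+ * overlapping the previous range as needed.
+ */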
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)len * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + secretSize
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)len * XXH_PRIME64_2));
+ return h128;
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
+ */
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's important for performance to pass @p secretLen (when it's static)
+ * to the compiler, so that it can properly optimize the vectorized loop.
+ *
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+ if (seed64 == 0)
+ return XXH3_hashLong_128b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed64);
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const void* XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_128bits_internal(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong128_f f_hl128)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If any action must be taken when the `secret` preconditions are not
+ * respected, it should be done here.
+ * For now, it's a contract precondition:
+ * adding a check and a branch here would cost performance at every hash.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hl128(input, len, seed64, secret, secretLen);
+}
+
+
+/* === Public XXH128 API === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_default);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ (const xxh_u8*)secret, secretSize,
+ XXH3_hashLong_128b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_internal(input, len, seed,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_withSeed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_withSeed(input, len, seed);
+}
+
+
+/* === XXH3 128-bit streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * All initialization and update functions are identical to the 64-bit
+ * streaming variant. The only difference is the finalization routine.
+ */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ return XXH3_64bits_reset(statePtr);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSeed(statePtr, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_64bits_update(state, input, len);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + state->secretLimit + XXH_STRIPE_LEN
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+ return h128;
+ }
+ }
+ /* len <= XXH3_MIDSIZE_MAX : short code */
+ if (state->seed)
+ return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+#include <string.h> /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+ /* note : XXH128_hash_t is compact: it has no padding bytes */
+ return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1 > *h128_2
+ * <0 if *h128_1 < *h128_2
+ * =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+ XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+ XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+ int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+ /* note : assumes that, in most cases, hash values are different */
+ if (hcmp) return hcmp;
+ return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
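+
+/*
+ * Sorting sketch (illustrative; `hashes` and `count` are placeholders):
+ * XXH128_cmp() matches the qsort() comparator signature, so an array of
+ * XXH128_hash_t can be sorted directly:
+ *
+ * @code{.c}
+ *   #include <stdlib.h>
+ *   qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
+ * @endcode
+ */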
+
+
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) {
+ hash.high64 = XXH_swap64(hash.high64);
+ hash.low64 = XXH_swap64(hash.low64);
+ }
+ XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+ XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+ XXH128_hash_t h;
+ h.high64 = XXH_readBE64(src);
+ h.low64 = XXH_readBE64(src->digest + 8);
+ return h;
+}
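+
+/*
+ * Round-trip sketch (illustrative): the canonical form is a fixed,
+ * big-endian byte layout, suitable for storage or transmission.
+ *
+ * @code{.c}
+ *   XXH128_canonical_t c;
+ *   XXH128_canonicalFromHash(&c, h);                        // serialize: 16 bytes, big-endian
+ *   XXH128_hash_t const h2 = XXH128_hashFromCanonical(&c);  // h2 == h
+ * @endcode
+ */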
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+ XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+ XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(secretBuffer != NULL);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+ /* production mode, assert() is disabled */
+ if (secretBuffer == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+ if (customSeedSize == 0) {
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+ }
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(customSeed != NULL);
+#else
+ if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ { size_t pos = 0;
+ while (pos < secretSize) {
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+ } }
+
+ { size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n=0; n<nbSeg16; n++) {
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char*)secretBuffer + n*16, h128);
+ }
+ /* last segment */
+ XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+ }
+ return XXH_OK;
+}
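+
+/*
+ * Usage sketch (illustrative; `seedBytes`, `seedLen`, `buf` and `bufSize`
+ * are placeholders): derive a full-size secret from arbitrary seed material,
+ * then hash with it.
+ *
+ * @code{.c}
+ *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ *   if (XXH3_generateSecret(secret, sizeof(secret), seedBytes, seedLen) == XXH_OK) {
+ *       XXH128_hash_t const h = XXH3_128bits_withSecret(buf, bufSize, secret, sizeof(secret));
+ *   }
+ * @endcode
+ */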
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
+{
+ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ XXH3_initCustomSecret(secret, seed);
+ XXH_ASSERT(secretBuffer != NULL);
+ memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
+}
+
+
+
+/* Pop our optimization override from above */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC pop_options
+#endif
+
+#endif /* XXH_NO_LONG_LONG */
+
+#endif /* XXH_NO_XXH3 */
+
+/*!
+ * @}
+ */
+#endif /* XXH_IMPLEMENTATION */
+
+
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
diff --git a/mfbt/moz.build b/mfbt/moz.build
new file mode 100644
index 0000000000..3a3ab46f71
--- /dev/null
+++ b/mfbt/moz.build
@@ -0,0 +1,214 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Core", "MFBT")
+
+Library("mfbt")
+
+EXPORTS += [
+ "../third_party/rust/encoding_c_mem/include/encoding_rs_mem.h",
+]
+
+EXPORTS.mozilla = [
+ "Algorithm.h",
+ "Alignment.h",
+ "AllocPolicy.h",
+ "AlreadyAddRefed.h",
+ "Array.h",
+ "ArrayUtils.h",
+ "Assertions.h",
+ "AtomicBitfields.h",
+ "Atomics.h",
+ "Attributes.h",
+ "BinarySearch.h",
+ "BitSet.h",
+ "BloomFilter.h",
+ "Buffer.h",
+ "BufferList.h",
+ "Casting.h",
+ "ChaosMode.h",
+ "Char16.h",
+ "CheckedInt.h",
+ "CompactPair.h",
+ "Compiler.h",
+ "Compression.h",
+ "DbgMacro.h",
+ "DebugOnly.h",
+ "DefineEnum.h",
+ "DoublyLinkedList.h",
+ "EndianUtils.h",
+ "EnumeratedArray.h",
+ "EnumeratedRange.h",
+ "EnumSet.h",
+ "EnumTypeTraits.h",
+ "fallible.h",
+ "FastBernoulliTrial.h",
+ "FloatingPoint.h",
+ "FStream.h",
+ "FunctionRef.h",
+ "FunctionTypeTraits.h",
+ "Fuzzing.h",
+ "HashFunctions.h",
+ "HashTable.h",
+ "HelperMacros.h",
+ "InitializedOnce.h",
+ "IntegerRange.h",
+ "IntegerTypeTraits.h",
+ "JSONWriter.h",
+ "JsRust.h",
+ "Latin1.h",
+ "Likely.h",
+ "LinkedList.h",
+ "Literals.h",
+ "MacroArgs.h",
+ "MacroForEach.h",
+ "MathAlgorithms.h",
+ "Maybe.h",
+ "MaybeOneOf.h",
+ "MaybeStorageBase.h",
+ "MemoryChecking.h",
+ "MemoryReporting.h",
+ "MoveOnlyFunction.h",
+ "MruCache.h",
+ "NeverDestroyed.h",
+ "NonDereferenceable.h",
+ "NotNull.h",
+ "Opaque.h",
+ "OperatorNewExtensions.h",
+ "PairHash.h",
+ "Path.h",
+ "PodOperations.h",
+ "Poison.h",
+ "RandomNum.h",
+ "Range.h",
+ "RangedArray.h",
+ "RangedPtr.h",
+ "ReentrancyGuard.h",
+ "RefCounted.h",
+ "RefCountType.h",
+ "RefPtr.h",
+ "Result.h",
+ "ResultExtensions.h",
+ "ResultVariant.h",
+ "ReverseIterator.h",
+ "RollingMean.h",
+ "Saturate.h",
+ "ScopeExit.h",
+ "SegmentedVector.h",
+ "SHA1.h",
+ "SharedLibrary.h",
+ "SmallPointerArray.h",
+ "Span.h",
+ "SplayTree.h",
+ "SPSCQueue.h",
+ "StaticAnalysisFunctions.h",
+ "TaggedAnonymousMemory.h",
+ "Tainting.h",
+ "TemplateLib.h",
+ "TextUtils.h",
+ "ThreadLocal.h",
+ "ThreadSafety.h",
+ "ThreadSafeWeakPtr.h",
+ "ToString.h",
+ "Try.h",
+ "TypedEnumBits.h",
+ "Types.h",
+ "UniquePtr.h",
+ "UniquePtrExtensions.h",
+ "Unused.h",
+ "Utf8.h",
+ "Variant.h",
+ "Vector.h",
+ "WeakPtr.h",
+ "WrappingOperations.h",
+ "XorShift128PlusRNG.h",
+]
+
+EXPORTS["double-conversion"] = [
+ "double-conversion/double-conversion/double-conversion.h",
+ "double-conversion/double-conversion/double-to-string.h",
+ "double-conversion/double-conversion/string-to-double.h",
+ "double-conversion/double-conversion/utils.h",
+]
+
+EXPORTS.function2 += [
+ "/third_party/function2/include/function2/function2.hpp",
+]
+
+LOCAL_INCLUDES += [
+ "/mfbt/double-conversion",
+]
+
+if CONFIG["OS_ARCH"] == "WINNT":
+ EXPORTS.mozilla += [
+ "WindowsVersion.h",
+ ]
+
+if CONFIG["OS_ARCH"] == "WASI":
+ EXPORTS.mozilla += [
+ "WasiAtomic.h",
+ ]
+
+if CONFIG["MOZ_TSAN"]:
+ EXPORTS.mozilla += [
+ "TsanOptions.h",
+ ]
+
+UNIFIED_SOURCES += [
+ "Assertions.cpp",
+ "ChaosMode.cpp",
+ "Compression.cpp",
+ "double-conversion/double-conversion/bignum-dtoa.cc",
+ "double-conversion/double-conversion/bignum.cc",
+ "double-conversion/double-conversion/cached-powers.cc",
+ "double-conversion/double-conversion/double-to-string.cc",
+ "double-conversion/double-conversion/fast-dtoa.cc",
+ "double-conversion/double-conversion/fixed-dtoa.cc",
+ "double-conversion/double-conversion/string-to-double.cc",
+ "double-conversion/double-conversion/strtod.cc",
+ "FloatingPoint.cpp",
+ "HashFunctions.cpp",
+ "JSONWriter.cpp",
+ "Poison.cpp",
+ "RandomNum.cpp",
+ "SHA1.cpp",
+ "TaggedAnonymousMemory.cpp",
+ "UniquePtrExtensions.cpp",
+ "Unused.cpp",
+ "Utf8.cpp",
+]
+
+if CONFIG["MOZ_BUILD_APP"] not in (
+ "memory",
+ "tools/update-programs",
+):
+ # Building MFBT tests adds a large overhead to the build.
+ TEST_DIRS += ["tests"]
+
+DEFINES["IMPL_MFBT"] = True
+
+SOURCES += [
+ "lz4/lz4.c",
+ "lz4/lz4file.c",
+ "lz4/lz4frame.c",
+ "lz4/lz4hc.c",
+ "lz4/xxhash.c",
+]
+
+SOURCES["lz4/xxhash.c"].flags += ["-Wno-unused-function"]
+
+DisableStlWrapping()
+
+if CONFIG["MOZ_NEEDS_LIBATOMIC"]:
+ OS_LIBS += ["atomic"]
+
+DEFINES["LZ4LIB_VISIBILITY"] = ""
+
+# This is kind of gross because this is not a subdirectory,
+# but pure_virtual requires mfbt to build and some projects
+# don't use mfbt.
+DIRS += ["../build/pure_virtual"]
diff --git a/mfbt/tests/TestAlgorithm.cpp b/mfbt/tests/TestAlgorithm.cpp
new file mode 100644
index 0000000000..c5b0ffff12
--- /dev/null
+++ b/mfbt/tests/TestAlgorithm.cpp
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Algorithm.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+
+#include <iterator>
+
+static constexpr bool even(int32_t n) { return !(n & 1); }
+static constexpr bool odd(int32_t n) { return (n & 1); }
+
+using namespace mozilla;
+
+void TestAllOf() {
+ using std::begin;
+ using std::end;
+
+ constexpr static int32_t arr1[3] = {1, 2, 3};
+ MOZ_RELEASE_ASSERT(!AllOf(begin(arr1), end(arr1), even));
+ MOZ_RELEASE_ASSERT(!AllOf(begin(arr1), end(arr1), odd));
+ static_assert(!AllOf(arr1, arr1 + ArrayLength(arr1), even), "1-1");
+ static_assert(!AllOf(arr1, arr1 + ArrayLength(arr1), odd), "1-2");
+
+ constexpr static int32_t arr2[3] = {1, 3, 5};
+ MOZ_RELEASE_ASSERT(!AllOf(begin(arr2), end(arr2), even));
+ MOZ_RELEASE_ASSERT(AllOf(begin(arr2), end(arr2), odd));
+ static_assert(!AllOf(arr2, arr2 + ArrayLength(arr2), even), "2-1");
+ static_assert(AllOf(arr2, arr2 + ArrayLength(arr2), odd), "2-2");
+
+ constexpr static int32_t arr3[3] = {2, 4, 6};
+ MOZ_RELEASE_ASSERT(AllOf(begin(arr3), end(arr3), even));
+ MOZ_RELEASE_ASSERT(!AllOf(begin(arr3), end(arr3), odd));
+ static_assert(AllOf(arr3, arr3 + ArrayLength(arr3), even), "3-1");
+ static_assert(!AllOf(arr3, arr3 + ArrayLength(arr3), odd), "3-2");
+}
+
+void TestAnyOf() {
+ using std::begin;
+ using std::end;
+
+ // The Android NDK's STL doesn't support `constexpr` `std::array::begin`, see
+ // bug 1677484. Hence using a raw array here.
+ constexpr int32_t arr1[1] = {0};
+ static_assert(!AnyOf(arr1, arr1, even));
+ static_assert(!AnyOf(arr1, arr1, odd));
+
+ constexpr int32_t arr2[] = {1};
+ static_assert(!AnyOf(begin(arr2), end(arr2), even));
+ static_assert(AnyOf(begin(arr2), end(arr2), odd));
+
+ constexpr int32_t arr3[] = {2};
+ static_assert(AnyOf(begin(arr3), end(arr3), even));
+ static_assert(!AnyOf(begin(arr3), end(arr3), odd));
+
+ constexpr int32_t arr4[] = {1, 2};
+ static_assert(AnyOf(begin(arr4), end(arr4), even));
+ static_assert(AnyOf(begin(arr4), end(arr4), odd));
+}
+
+int main() {
+ TestAllOf();
+ TestAnyOf();
+ return 0;
+}
diff --git a/mfbt/tests/TestArray.cpp b/mfbt/tests/TestArray.cpp
new file mode 100644
index 0000000000..ff41001e0a
--- /dev/null
+++ b/mfbt/tests/TestArray.cpp
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Array.h"
+
+void TestInitialValueByConstructor() {
+ using namespace mozilla;
+ // Style 1
+ Array<int32_t, 3> arr1(1, 2, 3);
+ MOZ_RELEASE_ASSERT(arr1[0] == 1);
+ MOZ_RELEASE_ASSERT(arr1[1] == 2);
+ MOZ_RELEASE_ASSERT(arr1[2] == 3);
+ // Style 2
+ Array<int32_t, 3> arr2{5, 6, 7};
+ MOZ_RELEASE_ASSERT(arr2[0] == 5);
+ MOZ_RELEASE_ASSERT(arr2[1] == 6);
+ MOZ_RELEASE_ASSERT(arr2[2] == 7);
+ // Style 3
+ Array<int32_t, 3> arr3({8, 9, 10});
+ MOZ_RELEASE_ASSERT(arr3[0] == 8);
+ MOZ_RELEASE_ASSERT(arr3[1] == 9);
+ MOZ_RELEASE_ASSERT(arr3[2] == 10);
+}
+
+int main() {
+ TestInitialValueByConstructor();
+ return 0;
+}
diff --git a/mfbt/tests/TestArrayUtils.cpp b/mfbt/tests/TestArrayUtils.cpp
new file mode 100644
index 0000000000..b50531a3a8
--- /dev/null
+++ b/mfbt/tests/TestArrayUtils.cpp
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+
+using mozilla::IsInRange;
+
+static void TestIsInRangeNonClass() {
+ void* nul = nullptr;
+ int* intBegin = nullptr;
+ int* intEnd = intBegin + 1;
+ int* intEnd2 = intBegin + 2;
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, intBegin, intEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, intEnd, intEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(intBegin, intBegin, intEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(intEnd, intBegin, intEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(intBegin, intBegin, intEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(intEnd, intBegin, intEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(intEnd2, intBegin, intEnd2));
+
+ uintptr_t uintBegin = uintptr_t(intBegin);
+ uintptr_t uintEnd = uintptr_t(intEnd);
+ uintptr_t uintEnd2 = uintptr_t(intEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, uintBegin, uintEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, uintEnd, uintEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(intBegin, uintBegin, uintEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(intEnd, uintBegin, uintEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(intBegin, uintBegin, uintEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(intEnd, uintBegin, uintEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(intEnd2, uintBegin, uintEnd2));
+}
+
+static void TestIsInRangeVoid() {
+ int* intBegin = nullptr;
+ int* intEnd = intBegin + 1;
+ int* intEnd2 = intBegin + 2;
+
+ void* voidBegin = intBegin;
+ void* voidEnd = intEnd;
+ void* voidEnd2 = intEnd2;
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, intBegin, intEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd, intBegin, intEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, voidBegin, voidEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd, voidBegin, voidEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, intBegin, intEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(voidEnd, intBegin, intEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd2, intBegin, intEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, voidBegin, voidEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(voidEnd, voidBegin, voidEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd2, voidBegin, voidEnd2));
+
+ uintptr_t uintBegin = uintptr_t(intBegin);
+ uintptr_t uintEnd = uintptr_t(intEnd);
+ uintptr_t uintEnd2 = uintptr_t(intEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, uintBegin, uintEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd, uintBegin, uintEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(voidBegin, uintBegin, uintEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(voidEnd, uintBegin, uintEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(voidEnd2, uintBegin, uintEnd2));
+}
+
+struct Base {
+ int mX;
+};
+
+static void TestIsInRangeClass() {
+ void* nul = nullptr;
+ Base* baseBegin = nullptr;
+ Base* baseEnd = baseBegin + 1;
+ Base* baseEnd2 = baseBegin + 2;
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, baseBegin, baseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, baseEnd, baseEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, baseBegin, baseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, baseBegin, baseEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, baseBegin, baseEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, baseBegin, baseEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, baseBegin, baseEnd2));
+
+ uintptr_t ubaseBegin = uintptr_t(baseBegin);
+ uintptr_t ubaseEnd = uintptr_t(baseEnd);
+ uintptr_t ubaseEnd2 = uintptr_t(baseEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, ubaseBegin, ubaseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, ubaseEnd, ubaseEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, ubaseBegin, ubaseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, ubaseBegin, ubaseEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, ubaseBegin, ubaseEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, ubaseBegin, ubaseEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, ubaseBegin, ubaseEnd2));
+}
+
+struct EmptyBase {};
+
+static void TestIsInRangeEmptyClass() {
+ void* nul = nullptr;
+ EmptyBase* baseBegin = nullptr;
+ EmptyBase* baseEnd = baseBegin + 1;
+ EmptyBase* baseEnd2 = baseBegin + 2;
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, baseBegin, baseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, baseEnd, baseEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, baseBegin, baseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, baseBegin, baseEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, baseBegin, baseEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, baseBegin, baseEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, baseBegin, baseEnd2));
+
+ uintptr_t ubaseBegin = uintptr_t(baseBegin);
+ uintptr_t ubaseEnd = uintptr_t(baseEnd);
+ uintptr_t ubaseEnd2 = uintptr_t(baseEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, ubaseBegin, ubaseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, ubaseEnd, ubaseEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, ubaseBegin, ubaseEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, ubaseBegin, ubaseEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, ubaseBegin, ubaseEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, ubaseBegin, ubaseEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, ubaseBegin, ubaseEnd2));
+}
+
+struct Derived : Base {};
+
+static void TestIsInRangeClassDerived() {
+ void* nul = nullptr;
+ Derived* derivedBegin = nullptr;
+ Derived* derivedEnd = derivedBegin + 1;
+ Derived* derivedEnd2 = derivedBegin + 2;
+
+ Base* baseBegin = static_cast<Base*>(derivedBegin);
+ Base* baseEnd = static_cast<Base*>(derivedEnd);
+ Base* baseEnd2 = static_cast<Base*>(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, derivedEnd, derivedEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, derivedBegin, derivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, derivedBegin, derivedEnd2));
+
+ uintptr_t uderivedBegin = uintptr_t(derivedBegin);
+ uintptr_t uderivedEnd = uintptr_t(derivedEnd);
+ uintptr_t uderivedEnd2 = uintptr_t(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd, uderivedBegin, uderivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(derivedEnd, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd2, uderivedBegin, uderivedEnd2));
+}
+
+struct DerivedEmpty : EmptyBase {};
+
+static void TestIsInRangeClassDerivedEmpty() {
+ void* nul = nullptr;
+ DerivedEmpty* derivedEmptyBegin = nullptr;
+ DerivedEmpty* derivedEmptyEnd = derivedEmptyBegin + 1;
+ DerivedEmpty* derivedEmptyEnd2 = derivedEmptyBegin + 2;
+
+ EmptyBase* baseBegin = static_cast<EmptyBase*>(derivedEmptyBegin);
+ EmptyBase* baseEnd = static_cast<EmptyBase*>(derivedEmptyEnd);
+ EmptyBase* baseEnd2 = static_cast<EmptyBase*>(derivedEmptyEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, derivedEmptyBegin, derivedEmptyEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, derivedEmptyEnd, derivedEmptyEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedEmptyBegin, derivedEmptyEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, derivedEmptyBegin, derivedEmptyEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedEmptyBegin, derivedEmptyEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, derivedEmptyBegin, derivedEmptyEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, derivedEmptyBegin, derivedEmptyEnd2));
+
+ uintptr_t uderivedEmptyBegin = uintptr_t(derivedEmptyBegin);
+ uintptr_t uderivedEmptyEnd = uintptr_t(derivedEmptyEnd);
+ uintptr_t uderivedEmptyEnd2 = uintptr_t(derivedEmptyEnd2);
+
+ MOZ_RELEASE_ASSERT(
+ IsInRange(derivedEmptyBegin, uderivedEmptyBegin, uderivedEmptyEnd));
+ MOZ_RELEASE_ASSERT(
+ !IsInRange(derivedEmptyEnd, uderivedEmptyBegin, uderivedEmptyEnd));
+
+ MOZ_RELEASE_ASSERT(
+ IsInRange(derivedEmptyBegin, uderivedEmptyBegin, uderivedEmptyEnd2));
+ MOZ_RELEASE_ASSERT(
+ IsInRange(derivedEmptyEnd, uderivedEmptyBegin, uderivedEmptyEnd2));
+ MOZ_RELEASE_ASSERT(
+ !IsInRange(derivedEmptyEnd2, uderivedEmptyBegin, uderivedEmptyEnd2));
+}
+
+struct ExtraDerived : Base {
+ int y;
+};
+
+static void TestIsInRangeClassExtraDerived() {
+ void* nul = nullptr;
+ ExtraDerived* derivedBegin = nullptr;
+ ExtraDerived* derivedEnd = derivedBegin + 1;
+ ExtraDerived* derivedEnd2 = derivedBegin + 2;
+
+ Base* baseBegin = static_cast<Base*>(derivedBegin);
+ Base* baseEnd = static_cast<Base*>(derivedEnd);
+ Base* baseEnd2 = static_cast<Base*>(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, derivedEnd, derivedEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, derivedBegin, derivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, derivedBegin, derivedEnd2));
+
+ uintptr_t uderivedBegin = uintptr_t(derivedBegin);
+ uintptr_t uderivedEnd = uintptr_t(derivedEnd);
+ uintptr_t uderivedEnd2 = uintptr_t(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd, uderivedBegin, uderivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(derivedEnd, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd2, uderivedBegin, uderivedEnd2));
+}
+
+struct ExtraDerivedEmpty : EmptyBase {
+ int y;
+};
+
+static void TestIsInRangeClassExtraDerivedEmpty() {
+ void* nul = nullptr;
+ ExtraDerivedEmpty* derivedBegin = nullptr;
+ ExtraDerivedEmpty* derivedEnd = derivedBegin + 1;
+ ExtraDerivedEmpty* derivedEnd2 = derivedBegin + 2;
+
+ EmptyBase* baseBegin = static_cast<EmptyBase*>(derivedBegin);
+ EmptyBase* baseEnd = static_cast<EmptyBase*>(derivedEnd);
+ EmptyBase* baseEnd2 = static_cast<EmptyBase*>(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(nul, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(nul, derivedEnd, derivedEnd2));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd, derivedBegin, derivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(baseBegin, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(baseEnd, derivedBegin, derivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(baseEnd2, derivedBegin, derivedEnd2));
+
+ uintptr_t uderivedBegin = uintptr_t(derivedBegin);
+ uintptr_t uderivedEnd = uintptr_t(derivedEnd);
+ uintptr_t uderivedEnd2 = uintptr_t(derivedEnd2);
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd, uderivedBegin, uderivedEnd));
+
+ MOZ_RELEASE_ASSERT(IsInRange(derivedBegin, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(IsInRange(derivedEnd, uderivedBegin, uderivedEnd2));
+ MOZ_RELEASE_ASSERT(!IsInRange(derivedEnd2, uderivedBegin, uderivedEnd2));
+}
+
+int main() {
+ TestIsInRangeNonClass();
+ TestIsInRangeVoid();
+ TestIsInRangeClass();
+ TestIsInRangeEmptyClass();
+ TestIsInRangeClassDerived();
+ TestIsInRangeClassDerivedEmpty();
+ TestIsInRangeClassExtraDerived();
+ TestIsInRangeClassExtraDerivedEmpty();
+ return 0;
+}
diff --git a/mfbt/tests/TestAtomicBitfields.cpp b/mfbt/tests/TestAtomicBitfields.cpp
new file mode 100644
index 0000000000..237dbde538
--- /dev/null
+++ b/mfbt/tests/TestAtomicBitfields.cpp
@@ -0,0 +1,189 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/AtomicBitfields.h"
+
+// This is a big macro mess, so let's summarize what's in here right up front:
+//
+// |TestDocumentationExample| is intended to be a copy-paste of the example
+// in the macro's documentation, to make sure it's correct.
+//
+//
+// |TestJammedWithFlags| tests using every bit of the type for bool flags.
+// 64-bit isn't tested due to macro limitations.
+//
+//
+// |TestLopsided| tests an instance with the following configuration:
+//
+// * a 1-bit boolean
+// * an (N-1)-bit uintN_t
+//
+// It tests both orderings of these fields.
+//
+// Hopefully these are enough to cover all the nasty boundary conditions
+// (that still compile).
+
+// ==================== TestDocumentationExample ========================
+
+struct MyType {
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, 8,
+ ((bool, IsDownloaded, 1), (uint32_t, SomeData, 2),
+ (uint8_t, OtherData, 5)))
+
+ int32_t aNormalInteger;
+
+ explicit MyType(uint32_t aSomeData) : aNormalInteger(7) {
+ StoreSomeData(aSomeData);
+ // Other bitfields were already default initialized to 0/false
+ }
+};
+
+void TestDocumentationExample() {
+ MyType val(3);
+
+ if (!val.LoadIsDownloaded()) {
+ val.StoreOtherData(2);
+ val.StoreIsDownloaded(true);
+ }
+}
+
+// ====================== TestJammedWithFlags =========================
+
+#define TIMES_8(aFunc, aSeparator, aArgs) \
+ MOZ_FOR_EACH_SEPARATED(aFunc, aSeparator, aArgs, (1, 2, 3, 4, 5, 6, 7, 8))
+#define TIMES_16(aFunc, aSeparator, aArgs) \
+ MOZ_FOR_EACH_SEPARATED( \
+ aFunc, aSeparator, aArgs, \
+ (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))
+#define TIMES_32(aFunc, aSeparator, aArgs) \
+ MOZ_FOR_EACH_SEPARATED( \
+ aFunc, aSeparator, aArgs, \
+ (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, \
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))
+
+#define CHECK_BOOL(aIndex) \
+ MOZ_ASSERT(val.LoadFlag##aIndex() == false); \
+ val.StoreFlag##aIndex(true); \
+ MOZ_ASSERT(val.LoadFlag##aIndex() == true); \
+ val.StoreFlag##aIndex(false); \
+ MOZ_ASSERT(val.LoadFlag##aIndex() == false);
+
+#define GENERATE_TEST_JAMMED_WITH_FLAGS(aSize) \
+ void TestJammedWithFlags##aSize() { \
+ JammedWithFlags##aSize val; \
+ TIMES_##aSize(CHECK_BOOL, (;), ()); \
+ }
+
+#define TEST_JAMMED_WITH_FLAGS(aSize) TestJammedWithFlags##aSize();
+
+// ========================= TestLopsided ===========================
+
+#define GENERATE_TEST_LOPSIDED_FUNC(aSide, aSize) \
+ void TestLopsided##aSide##aSize() { \
+ Lopsided##aSide##aSize val; \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == false); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == 0); \
+ val.StoreHappyLittleBit(true); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == true); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == 0); \
+ val.StoreLargeAndInCharge(1); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == true); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == 1); \
+ val.StoreLargeAndInCharge(0); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == true); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == 0); \
+ uint##aSize##_t size = aSize; \
+ uint##aSize##_t int_max = (~(1ull << (size - 1))) - 1; \
+ val.StoreLargeAndInCharge(int_max); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == true); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == int_max); \
+ val.StoreHappyLittleBit(false); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == false); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == int_max); \
+ val.StoreLargeAndInCharge(int_max); \
+ MOZ_ASSERT(val.LoadHappyLittleBit() == false); \
+ MOZ_ASSERT(val.LoadLargeAndInCharge() == int_max); \
+ }
+
+#define GENERATE_TEST_LOPSIDED(aSize) \
+ struct LopsidedA##aSize { \
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, aSize, \
+ ((bool, HappyLittleBit, 1), \
+ (uint##aSize##_t, LargeAndInCharge, ((aSize)-1)))) \
+ }; \
+ struct LopsidedB##aSize { \
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, aSize, \
+ ((uint##aSize##_t, LargeAndInCharge, ((aSize)-1)), \
+ (bool, HappyLittleBit, 1))) \
+ }; \
+ GENERATE_TEST_LOPSIDED_FUNC(A, aSize); \
+ GENERATE_TEST_LOPSIDED_FUNC(B, aSize);
+
+#define TEST_LOPSIDED(aSize) \
+ TestLopsidedA##aSize(); \
+ TestLopsidedB##aSize();
+
+// ==================== generate and run the tests ======================
+
+// There's an unknown bug in clang-cl-9 (used for win64-ccov) that makes
+// generating these with the TIMES_N macro not work. So these are written out
+// explicitly to unbork CI.
+struct JammedWithFlags8 {
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, 8,
+ ((bool, Flag1, 1), (bool, Flag2, 1), (bool, Flag3, 1),
+ (bool, Flag4, 1), (bool, Flag5, 1), (bool, Flag6, 1),
+ (bool, Flag7, 1), (bool, Flag8, 1)))
+};
+
+struct JammedWithFlags16 {
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, 16,
+ ((bool, Flag1, 1), (bool, Flag2, 1), (bool, Flag3, 1),
+ (bool, Flag4, 1), (bool, Flag5, 1), (bool, Flag6, 1),
+ (bool, Flag7, 1), (bool, Flag8, 1), (bool, Flag9, 1),
+ (bool, Flag10, 1), (bool, Flag11, 1), (bool, Flag12, 1),
+ (bool, Flag13, 1), (bool, Flag14, 1), (bool, Flag15, 1),
+ (bool, Flag16, 1)))
+};
+
+struct JammedWithFlags32 {
+ MOZ_ATOMIC_BITFIELDS(mAtomicFields, 32,
+ ((bool, Flag1, 1), (bool, Flag2, 1), (bool, Flag3, 1),
+ (bool, Flag4, 1), (bool, Flag5, 1), (bool, Flag6, 1),
+ (bool, Flag7, 1), (bool, Flag8, 1), (bool, Flag9, 1),
+ (bool, Flag10, 1), (bool, Flag11, 1), (bool, Flag12, 1),
+ (bool, Flag13, 1), (bool, Flag14, 1), (bool, Flag15, 1),
+ (bool, Flag16, 1), (bool, Flag17, 1), (bool, Flag18, 1),
+ (bool, Flag19, 1), (bool, Flag20, 1), (bool, Flag21, 1),
+ (bool, Flag22, 1), (bool, Flag23, 1), (bool, Flag24, 1),
+ (bool, Flag25, 1), (bool, Flag26, 1), (bool, Flag27, 1),
+ (bool, Flag28, 1), (bool, Flag29, 1), (bool, Flag30, 1),
+ (bool, Flag31, 1), (bool, Flag32, 1)))
+};
+
+GENERATE_TEST_JAMMED_WITH_FLAGS(8)
+GENERATE_TEST_JAMMED_WITH_FLAGS(16)
+GENERATE_TEST_JAMMED_WITH_FLAGS(32)
+// MOZ_FOR_EACH_64 doesn't exist :(
+
+GENERATE_TEST_LOPSIDED(8)
+GENERATE_TEST_LOPSIDED(16)
+GENERATE_TEST_LOPSIDED(32)
+GENERATE_TEST_LOPSIDED(64)
+
+int main() {
+ TestDocumentationExample();
+
+ TEST_JAMMED_WITH_FLAGS(8);
+ TEST_JAMMED_WITH_FLAGS(16);
+ TEST_JAMMED_WITH_FLAGS(32);
+
+ TEST_LOPSIDED(8);
+ TEST_LOPSIDED(16);
+ TEST_LOPSIDED(32);
+ TEST_LOPSIDED(64);
+ return 0;
+}
diff --git a/mfbt/tests/TestAtomics.cpp b/mfbt/tests/TestAtomics.cpp
new file mode 100644
index 0000000000..7d333d37c1
--- /dev/null
+++ b/mfbt/tests/TestAtomics.cpp
@@ -0,0 +1,274 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+
+#include <stdint.h>
+
+using mozilla::Atomic;
+using mozilla::MemoryOrdering;
+using mozilla::Relaxed;
+using mozilla::ReleaseAcquire;
+using mozilla::SequentiallyConsistent;
+
+#define A(a, b) MOZ_RELEASE_ASSERT(a, b)
+
+template <typename T, MemoryOrdering Order>
+static void TestTypeWithOrdering() {
+ Atomic<T, Order> atomic(5);
+ A(atomic == 5, "Atomic variable did not initialize");
+
+ // Test atomic increment
+ A(++atomic == T(6), "Atomic increment did not work");
+ A(atomic++ == T(6), "Atomic post-increment did not work");
+ A(atomic == T(7), "Atomic post-increment did not work");
+
+ // Test atomic decrement
+  A(--atomic == T(6), "Atomic decrement did not work");
+  A(atomic-- == T(6), "Atomic post-decrement did not work");
+  A(atomic == T(5), "Atomic post-decrement did not work");
+
+ // Test other arithmetic.
+ T result;
+ result = (atomic += T(5));
+ A(atomic == T(10), "Atomic += did not work");
+ A(result == T(10), "Atomic += returned the wrong value");
+ result = (atomic -= T(3));
+ A(atomic == T(7), "Atomic -= did not work");
+ A(result == T(7), "Atomic -= returned the wrong value");
+
+ // Test assignment
+ result = (atomic = T(5));
+ A(atomic == T(5), "Atomic assignment failed");
+ A(result == T(5), "Atomic assignment returned the wrong value");
+
+ // Test logical operations.
+ result = (atomic ^= T(2));
+ A(atomic == T(7), "Atomic ^= did not work");
+ A(result == T(7), "Atomic ^= returned the wrong value");
+ result = (atomic ^= T(4));
+ A(atomic == T(3), "Atomic ^= did not work");
+ A(result == T(3), "Atomic ^= returned the wrong value");
+ result = (atomic |= T(8));
+ A(atomic == T(11), "Atomic |= did not work");
+ A(result == T(11), "Atomic |= returned the wrong value");
+ result = (atomic |= T(8));
+ A(atomic == T(11), "Atomic |= did not work");
+ A(result == T(11), "Atomic |= returned the wrong value");
+ result = (atomic &= T(12));
+ A(atomic == T(8), "Atomic &= did not work");
+ A(result == T(8), "Atomic &= returned the wrong value");
+
+ // Test exchange.
+ atomic = T(30);
+ result = atomic.exchange(42);
+ A(atomic == T(42), "Atomic exchange did not work");
+ A(result == T(30), "Atomic exchange returned the wrong value");
+
+ // Test CAS.
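+  // compareExchange(expected, desired) stores |desired| and returns true only
+  // if the current value equals |expected|; otherwise the value is left
+  // unchanged and false is returned.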
+ atomic = T(1);
+ bool boolResult = atomic.compareExchange(0, 2);
+ A(!boolResult, "CAS should have returned false.");
+ A(atomic == T(1), "CAS shouldn't have done anything.");
+
+ boolResult = atomic.compareExchange(1, 42);
+ A(boolResult, "CAS should have succeeded.");
+ A(atomic == T(42), "CAS should have changed atomic's value.");
+}
+
+template <typename T, MemoryOrdering Order>
+static void TestPointerWithOrdering() {
+ T array1[10];
+ Atomic<T*, Order> atomic(array1);
+ A(atomic == array1, "Atomic variable did not initialize");
+
+ // Test atomic increment
+ A(++atomic == array1 + 1, "Atomic increment did not work");
+ A(atomic++ == array1 + 1, "Atomic post-increment did not work");
+ A(atomic == array1 + 2, "Atomic post-increment did not work");
+
+ // Test atomic decrement
+ A(--atomic == array1 + 1, "Atomic decrement did not work");
+ A(atomic-- == array1 + 1, "Atomic post-decrement did not work");
+ A(atomic == array1, "Atomic post-decrement did not work");
+
+ // Test other arithmetic operations
+ T* result;
+ result = (atomic += 2);
+ A(atomic == array1 + 2, "Atomic += did not work");
+ A(result == array1 + 2, "Atomic += returned the wrong value");
+ result = (atomic -= 1);
+ A(atomic == array1 + 1, "Atomic -= did not work");
+ A(result == array1 + 1, "Atomic -= returned the wrong value");
+
+ // Test stores
+ result = (atomic = array1);
+ A(atomic == array1, "Atomic assignment did not work");
+ A(result == array1, "Atomic assignment returned the wrong value");
+
+ // Test exchange
+ atomic = array1 + 2;
+ result = atomic.exchange(array1);
+ A(atomic == array1, "Atomic exchange did not work");
+ A(result == array1 + 2, "Atomic exchange returned the wrong value");
+
+ atomic = array1;
+ bool boolResult = atomic.compareExchange(array1 + 1, array1 + 2);
+ A(!boolResult, "CAS should have returned false.");
+ A(atomic == array1, "CAS shouldn't have done anything.");
+
+ boolResult = atomic.compareExchange(array1, array1 + 3);
+ A(boolResult, "CAS should have succeeded.");
+ A(atomic == array1 + 3, "CAS should have changed atomic's value.");
+}
+
+enum EnumType {
+ EnumType_0 = 0,
+ EnumType_1 = 1,
+ EnumType_2 = 2,
+ EnumType_3 = 3
+};
+
+template <MemoryOrdering Order>
+static void TestEnumWithOrdering() {
+ Atomic<EnumType, Order> atomic(EnumType_2);
+ A(atomic == EnumType_2, "Atomic variable did not initialize");
+
+ // Test assignment
+ EnumType result;
+ result = (atomic = EnumType_3);
+ A(atomic == EnumType_3, "Atomic assignment failed");
+ A(result == EnumType_3, "Atomic assignment returned the wrong value");
+
+ // Test exchange.
+ atomic = EnumType_1;
+ result = atomic.exchange(EnumType_2);
+ A(atomic == EnumType_2, "Atomic exchange did not work");
+ A(result == EnumType_1, "Atomic exchange returned the wrong value");
+
+ // Test CAS.
+ atomic = EnumType_1;
+ bool boolResult = atomic.compareExchange(EnumType_0, EnumType_2);
+ A(!boolResult, "CAS should have returned false.");
+ A(atomic == EnumType_1, "CAS shouldn't have done anything.");
+
+ boolResult = atomic.compareExchange(EnumType_1, EnumType_3);
+ A(boolResult, "CAS should have succeeded.");
+ A(atomic == EnumType_3, "CAS should have changed atomic's value.");
+}
+
+enum class EnumClass : uint32_t {
+ Value0 = 0,
+ Value1 = 1,
+ Value2 = 2,
+ Value3 = 3
+};
+
+template <MemoryOrdering Order>
+static void TestEnumClassWithOrdering() {
+ Atomic<EnumClass, Order> atomic(EnumClass::Value2);
+ A(atomic == EnumClass::Value2, "Atomic variable did not initialize");
+
+ // Test assignment
+ EnumClass result;
+ result = (atomic = EnumClass::Value3);
+ A(atomic == EnumClass::Value3, "Atomic assignment failed");
+ A(result == EnumClass::Value3, "Atomic assignment returned the wrong value");
+
+ // Test exchange.
+ atomic = EnumClass::Value1;
+ result = atomic.exchange(EnumClass::Value2);
+ A(atomic == EnumClass::Value2, "Atomic exchange did not work");
+ A(result == EnumClass::Value1, "Atomic exchange returned the wrong value");
+
+ // Test CAS.
+ atomic = EnumClass::Value1;
+ bool boolResult =
+ atomic.compareExchange(EnumClass::Value0, EnumClass::Value2);
+ A(!boolResult, "CAS should have returned false.");
+ A(atomic == EnumClass::Value1, "CAS shouldn't have done anything.");
+
+ boolResult = atomic.compareExchange(EnumClass::Value1, EnumClass::Value3);
+ A(boolResult, "CAS should have succeeded.");
+ A(atomic == EnumClass::Value3, "CAS should have changed atomic's value.");
+}
+
+template <MemoryOrdering Order>
+static void TestBoolWithOrdering() {
+ Atomic<bool, Order> atomic(false);
+ A(atomic == false, "Atomic variable did not initialize");
+
+ // Test assignment
+ bool result;
+ result = (atomic = true);
+ A(atomic == true, "Atomic assignment failed");
+ A(result == true, "Atomic assignment returned the wrong value");
+
+ // Test exchange.
+ atomic = false;
+ result = atomic.exchange(true);
+ A(atomic == true, "Atomic exchange did not work");
+ A(result == false, "Atomic exchange returned the wrong value");
+
+ // Test CAS.
+ atomic = false;
+ bool boolResult = atomic.compareExchange(true, false);
+ A(!boolResult, "CAS should have returned false.");
+ A(atomic == false, "CAS shouldn't have done anything.");
+
+ boolResult = atomic.compareExchange(false, true);
+ A(boolResult, "CAS should have succeeded.");
+ A(atomic == true, "CAS should have changed atomic's value.");
+}
+
+template <typename T>
+static void TestType() {
+ TestTypeWithOrdering<T, SequentiallyConsistent>();
+ TestTypeWithOrdering<T, ReleaseAcquire>();
+ TestTypeWithOrdering<T, Relaxed>();
+}
+
+template <typename T>
+static void TestPointer() {
+ TestPointerWithOrdering<T, SequentiallyConsistent>();
+ TestPointerWithOrdering<T, ReleaseAcquire>();
+ TestPointerWithOrdering<T, Relaxed>();
+}
+
+static void TestEnum() {
+ TestEnumWithOrdering<SequentiallyConsistent>();
+ TestEnumWithOrdering<ReleaseAcquire>();
+ TestEnumWithOrdering<Relaxed>();
+
+ TestEnumClassWithOrdering<SequentiallyConsistent>();
+ TestEnumClassWithOrdering<ReleaseAcquire>();
+ TestEnumClassWithOrdering<Relaxed>();
+}
+
+static void TestBool() {
+ TestBoolWithOrdering<SequentiallyConsistent>();
+ TestBoolWithOrdering<ReleaseAcquire>();
+ TestBoolWithOrdering<Relaxed>();
+}
+
+#undef A
+
+int main() {
+ TestType<uint32_t>();
+ TestType<int32_t>();
+ TestType<uint64_t>();
+ TestType<int64_t>();
+ TestType<intptr_t>();
+ TestType<uintptr_t>();
+ TestPointer<int>();
+ TestPointer<float>();
+ TestPointer<uint16_t*>();
+ TestPointer<uint32_t*>();
+ TestEnum();
+ TestBool();
+ return 0;
+}
diff --git a/mfbt/tests/TestBinarySearch.cpp b/mfbt/tests/TestBinarySearch.cpp
new file mode 100644
index 0000000000..3cd28b309f
--- /dev/null
+++ b/mfbt/tests/TestBinarySearch.cpp
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/Vector.h"
+
+#include <cstdlib>
+
+using mozilla::ArrayLength;
+using mozilla::BinarySearch;
+using mozilla::BinarySearchIf;
+using mozilla::Vector;
+
+#define A(a) MOZ_RELEASE_ASSERT(a)
+
+struct Person {
+ int mAge;
+ int mId;
+ Person(int aAge, int aId) : mAge(aAge), mId(aId) {}
+};
+
+struct GetAge {
+ Vector<Person>& mV;
+ explicit GetAge(Vector<Person>& aV) : mV(aV) {}
+ int operator[](size_t index) const { return mV[index].mAge; }
+};
+
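+// Comparator for BinarySearchIf: a result of 0 means the current element
+// matches; a negative result directs the search below the current element,
+// and a positive result directs it above.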
+struct RangeFinder {
+ const int mLower, mUpper;
+ RangeFinder(int lower, int upper) : mLower(lower), mUpper(upper) {}
+ int operator()(int val) const {
+ if (val >= mUpper) return -1;
+ if (val < mLower) return 1;
+ return 0;
+ }
+};
+
+static void TestBinarySearch() {
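+  // BinarySearch returns true and the matching index when the value is
+  // present; otherwise it returns false and the index at which the value
+  // would have to be inserted to keep the range sorted.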
+ size_t m;
+
+ Vector<int> v1;
+ MOZ_RELEASE_ASSERT(v1.append(2));
+ MOZ_RELEASE_ASSERT(v1.append(4));
+ MOZ_RELEASE_ASSERT(v1.append(6));
+ MOZ_RELEASE_ASSERT(v1.append(8));
+
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 1, &m) && m == 0);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 0, v1.length(), 2, &m) && m == 0);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 3, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 0, v1.length(), 4, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 5, &m) && m == 2);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 0, v1.length(), 6, &m) && m == 2);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 7, &m) && m == 3);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 0, v1.length(), 8, &m) && m == 3);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 9, &m) && m == 4);
+
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 1, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 2, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 3, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 1, 3, 4, &m) && m == 1);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 5, &m) && m == 2);
+ MOZ_RELEASE_ASSERT(BinarySearch(v1, 1, 3, 6, &m) && m == 2);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 7, &m) && m == 3);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 8, &m) && m == 3);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 9, &m) && m == 3);
+
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, 0, 0, &m) && m == 0);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, 0, 9, &m) && m == 0);
+
+ Vector<int> v2;
+ MOZ_RELEASE_ASSERT(!BinarySearch(v2, 0, 0, 0, &m) && m == 0);
+ MOZ_RELEASE_ASSERT(!BinarySearch(v2, 0, 0, 9, &m) && m == 0);
+
+ Vector<Person> v3;
+ MOZ_RELEASE_ASSERT(v3.append(Person(2, 42)));
+ MOZ_RELEASE_ASSERT(v3.append(Person(4, 13)));
+ MOZ_RELEASE_ASSERT(v3.append(Person(6, 360)));
+
+ A(!BinarySearch(GetAge(v3), 0, v3.length(), 1, &m) && m == 0);
+ A(BinarySearch(GetAge(v3), 0, v3.length(), 2, &m) && m == 0);
+ A(!BinarySearch(GetAge(v3), 0, v3.length(), 3, &m) && m == 1);
+ A(BinarySearch(GetAge(v3), 0, v3.length(), 4, &m) && m == 1);
+ A(!BinarySearch(GetAge(v3), 0, v3.length(), 5, &m) && m == 2);
+ A(BinarySearch(GetAge(v3), 0, v3.length(), 6, &m) && m == 2);
+ A(!BinarySearch(GetAge(v3), 0, v3.length(), 7, &m) && m == 3);
+}
+
+static void TestBinarySearchIf() {
+ const int v1[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ const size_t len = ArrayLength(v1);
+ size_t m;
+
+ A(BinarySearchIf(v1, 0, len, RangeFinder(2, 3), &m) && m == 2);
+ A(!BinarySearchIf(v1, 0, len, RangeFinder(-5, -2), &m) && m == 0);
+ A(BinarySearchIf(v1, 0, len, RangeFinder(3, 5), &m) && m >= 3 && m < 5);
+ A(!BinarySearchIf(v1, 0, len, RangeFinder(10, 12), &m) && m == 10);
+}
+
+static void TestEqualRange() {
+ struct CompareN {
+ int mVal;
+ explicit CompareN(int n) : mVal(n) {}
+ int operator()(int aVal) const { return mVal - aVal; }
+ };
+
+ constexpr int kMaxNumber = 100;
+ constexpr int kMaxRepeat = 2;
+
+ Vector<int> sortedArray;
+ MOZ_RELEASE_ASSERT(sortedArray.reserve(kMaxNumber * kMaxRepeat));
+
+  // Make a sorted array by appending each loop counter value a random number
+  // of times in [0, kMaxRepeat], so some values repeat and others are absent.
+  // The array will be something like:
+  // [0, 0, 1, 1, 2, 2, 8, 9, ..., kMaxNumber]
+ for (int i = 0; i <= kMaxNumber; ++i) {
+ int repeat = rand() % (kMaxRepeat + 1);
+ for (int j = 0; j < repeat; ++j) {
+ MOZ_RELEASE_ASSERT(sortedArray.emplaceBack(i));
+ }
+ }
+
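+  // EqualRange returns the half-open index range [first, second) of elements
+  // comparing equal to i, i.e. i's lower and upper bounds in the sorted
+  // array.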
+ for (int i = -1; i < kMaxNumber + 1; ++i) {
+ auto bounds = EqualRange(sortedArray, 0, sortedArray.length(), CompareN(i));
+
+ MOZ_RELEASE_ASSERT(bounds.first <= sortedArray.length());
+ MOZ_RELEASE_ASSERT(bounds.second <= sortedArray.length());
+ MOZ_RELEASE_ASSERT(bounds.first <= bounds.second);
+
+ if (bounds.first == 0) {
+ MOZ_RELEASE_ASSERT(sortedArray[0] >= i);
+ } else if (bounds.first == sortedArray.length()) {
+ MOZ_RELEASE_ASSERT(sortedArray[sortedArray.length() - 1] < i);
+ } else {
+ MOZ_RELEASE_ASSERT(sortedArray[bounds.first - 1] < i);
+ MOZ_RELEASE_ASSERT(sortedArray[bounds.first] >= i);
+ }
+
+ if (bounds.second == 0) {
+ MOZ_RELEASE_ASSERT(sortedArray[0] > i);
+ } else if (bounds.second == sortedArray.length()) {
+ MOZ_RELEASE_ASSERT(sortedArray[sortedArray.length() - 1] <= i);
+ } else {
+ MOZ_RELEASE_ASSERT(sortedArray[bounds.second - 1] <= i);
+ MOZ_RELEASE_ASSERT(sortedArray[bounds.second] > i);
+ }
+ }
+}
+
+int main() {
+ TestBinarySearch();
+ TestBinarySearchIf();
+ TestEqualRange();
+ return 0;
+}
diff --git a/mfbt/tests/TestBitSet.cpp b/mfbt/tests/TestBitSet.cpp
new file mode 100644
index 0000000000..2bd1923a15
--- /dev/null
+++ b/mfbt/tests/TestBitSet.cpp
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/BitSet.h"
+
+using mozilla::BitSet;
+
+template <typename Storage>
+class BitSetSuite {
+ template <size_t N>
+ using TestBitSet = BitSet<N, Storage>;
+
+ static constexpr size_t kBitsPerWord = sizeof(Storage) * 8;
+
+ static constexpr Storage kAllBitsSet = ~Storage{0};
+
+ public:
+ void testLength() {
+ MOZ_RELEASE_ASSERT(TestBitSet<1>().Storage().LengthBytes() ==
+ sizeof(Storage));
+
+ MOZ_RELEASE_ASSERT(TestBitSet<1>().Storage().Length() == 1);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord>().Storage().Length() == 1);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord + 1>().Storage().Length() == 2);
+ }
+
+ void testConstruct() {
+ MOZ_RELEASE_ASSERT(TestBitSet<1>().Storage()[0] == 0);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord>().Storage()[0] == 0);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord + 1>().Storage()[0] == 0);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord + 1>().Storage()[1] == 0);
+
+ TestBitSet<1> bitset1;
+ bitset1.SetAll();
+ TestBitSet<kBitsPerWord> bitsetW;
+ bitsetW.SetAll();
+ TestBitSet<kBitsPerWord + 1> bitsetW1;
+ bitsetW1.SetAll();
+
+ MOZ_RELEASE_ASSERT(bitset1.Storage()[0] == 1);
+ MOZ_RELEASE_ASSERT(bitsetW.Storage()[0] == kAllBitsSet);
+ MOZ_RELEASE_ASSERT(bitsetW1.Storage()[0] == kAllBitsSet);
+ MOZ_RELEASE_ASSERT(bitsetW1.Storage()[1] == 1);
+
+ MOZ_RELEASE_ASSERT(TestBitSet<1>(bitset1).Storage()[0] == 1);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord>(bitsetW).Storage()[0] ==
+ kAllBitsSet);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord + 1>(bitsetW1).Storage()[0] ==
+ kAllBitsSet);
+ MOZ_RELEASE_ASSERT(TestBitSet<kBitsPerWord + 1>(bitsetW1).Storage()[1] ==
+ 1);
+
+ MOZ_RELEASE_ASSERT(TestBitSet<1>(bitset1.Storage()).Storage()[0] == 1);
+ MOZ_RELEASE_ASSERT(
+ TestBitSet<kBitsPerWord>(bitsetW.Storage()).Storage()[0] ==
+ kAllBitsSet);
+ MOZ_RELEASE_ASSERT(
+ TestBitSet<kBitsPerWord + 1>(bitsetW1.Storage()).Storage()[0] ==
+ kAllBitsSet);
+ MOZ_RELEASE_ASSERT(
+ TestBitSet<kBitsPerWord + 1>(bitsetW1.Storage()).Storage()[1] == 1);
+ }
+
+ void testSetBit() {
+ TestBitSet<kBitsPerWord + 2> bitset;
+ MOZ_RELEASE_ASSERT(!bitset.Test(3));
+ MOZ_RELEASE_ASSERT(!bitset[3]);
+ MOZ_RELEASE_ASSERT(!bitset.Test(kBitsPerWord + 1));
+ MOZ_RELEASE_ASSERT(!bitset[kBitsPerWord + 1]);
+
+ bitset[3] = true;
+ MOZ_RELEASE_ASSERT(bitset.Test(3));
+ MOZ_RELEASE_ASSERT(bitset[3]);
+
+ bitset[kBitsPerWord + 1] = true;
+ MOZ_RELEASE_ASSERT(bitset.Test(3));
+ MOZ_RELEASE_ASSERT(bitset[3]);
+ MOZ_RELEASE_ASSERT(bitset.Test(kBitsPerWord + 1));
+ MOZ_RELEASE_ASSERT(bitset[kBitsPerWord + 1]);
+
+ bitset.ResetAll();
+ for (size_t i = 0; i < decltype(bitset)::Size(); i++) {
+ MOZ_RELEASE_ASSERT(!bitset[i]);
+ }
+
+ bitset.SetAll();
+ for (size_t i = 0; i < decltype(bitset)::Size(); i++) {
+ MOZ_RELEASE_ASSERT(bitset[i]);
+ }
+
+    // Test trailing unused bits are not set by SetAll(). The set holds
+    // kBitsPerWord + 2 bits, so only the low two bits of the second word
+    // should be set.
+ MOZ_RELEASE_ASSERT(bitset.Storage()[1] == 3);
+
+ bitset.ResetAll();
+ for (size_t i = 0; i < decltype(bitset)::Size(); i++) {
+ MOZ_RELEASE_ASSERT(!bitset[i]);
+ }
+ }
+
+ void runTests() {
+ testLength();
+ testConstruct();
+ testSetBit();
+ }
+};
+
+int main() {
+ BitSetSuite<uint8_t>().runTests();
+ BitSetSuite<uint32_t>().runTests();
+ BitSetSuite<uint64_t>().runTests();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestBloomFilter.cpp b/mfbt/tests/TestBloomFilter.cpp
new file mode 100644
index 0000000000..a233858826
--- /dev/null
+++ b/mfbt/tests/TestBloomFilter.cpp
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/BloomFilter.h"
+#include "mozilla/UniquePtr.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+using mozilla::BitBloomFilter;
+using mozilla::CountingBloomFilter;
+
+class FilterChecker {
+ public:
+ explicit FilterChecker(uint32_t aHash) : mHash(aHash) {}
+
+ uint32_t hash() const { return mHash; }
+
+ private:
+ uint32_t mHash;
+};
+
+void testBitBloomFilter() {
+ const mozilla::UniquePtr filter =
+ mozilla::MakeUnique<BitBloomFilter<12, FilterChecker>>();
+ MOZ_RELEASE_ASSERT(filter);
+
+ FilterChecker one(1);
+ FilterChecker two(0x20000);
+
+ filter->add(&one);
+ MOZ_RELEASE_ASSERT(filter->mightContain(&one), "Filter should contain 'one'");
+
+ MOZ_RELEASE_ASSERT(!filter->mightContain(&two),
+ "Filter claims to contain 'two' when it should not");
+
+ // Test multiple addition
+ filter->add(&two);
+ MOZ_RELEASE_ASSERT(filter->mightContain(&two),
+ "Filter should contain 'two' after 'two' is added");
+ filter->add(&two);
+ MOZ_RELEASE_ASSERT(filter->mightContain(&two),
+ "Filter should contain 'two' after 'two' is added again");
+
+ filter->clear();
+
+ MOZ_RELEASE_ASSERT(!filter->mightContain(&one), "clear() failed to work");
+ MOZ_RELEASE_ASSERT(!filter->mightContain(&two), "clear() failed to work");
+}
+
+void testCountingBloomFilter() {
+ const mozilla::UniquePtr filter =
+ mozilla::MakeUnique<CountingBloomFilter<12, FilterChecker>>();
+ MOZ_RELEASE_ASSERT(filter);
+
+ FilterChecker one(1);
+ FilterChecker two(0x20000);
+ FilterChecker many(0x10000);
+ FilterChecker multiple(0x20001);
+
+ filter->add(&one);
+ MOZ_RELEASE_ASSERT(filter->mightContain(&one), "Filter should contain 'one'");
+
+ MOZ_RELEASE_ASSERT(!filter->mightContain(&multiple),
+ "Filter claims to contain 'multiple' when it should not");
+
+ MOZ_RELEASE_ASSERT(filter->mightContain(&many),
+ "Filter should contain 'many' (false positive)");
+
+ filter->add(&two);
+ MOZ_RELEASE_ASSERT(filter->mightContain(&multiple),
+ "Filter should contain 'multiple' (false positive)");
+
+ // Test basic removals
+ filter->remove(&two);
+ MOZ_RELEASE_ASSERT(
+ !filter->mightContain(&multiple),
+ "Filter claims to contain 'multiple' when it should not after two "
+ "was removed");
+
+ // Test multiple addition/removal
+ const size_t FILTER_SIZE = 255;
+ for (size_t i = 0; i < FILTER_SIZE - 1; ++i) {
+ filter->add(&two);
+ }
+ MOZ_RELEASE_ASSERT(
+ filter->mightContain(&multiple),
+ "Filter should contain 'multiple' after 'two' added lots of times "
+ "(false positive)");
+
+ for (size_t i = 0; i < FILTER_SIZE - 1; ++i) {
+ filter->remove(&two);
+ }
+ MOZ_RELEASE_ASSERT(
+ !filter->mightContain(&multiple),
+ "Filter claims to contain 'multiple' when it should not after two "
+ "was removed lots of times");
+
+ // Test overflowing the filter buckets
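+  // (CountingBloomFilter's per-bucket counters are 8 bits wide; once a bucket
+  // saturates at 255 it can no longer be decremented, which the asserts after
+  // the removal loop below rely on.)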
+ for (size_t i = 0; i < FILTER_SIZE + 1; ++i) {
+ filter->add(&two);
+ }
+ MOZ_RELEASE_ASSERT(
+ filter->mightContain(&multiple),
+ "Filter should contain 'multiple' after 'two' added lots more "
+ "times (false positive)");
+
+ for (size_t i = 0; i < FILTER_SIZE + 1; ++i) {
+ filter->remove(&two);
+ }
+ MOZ_RELEASE_ASSERT(
+ filter->mightContain(&multiple),
+ "Filter claims to not contain 'multiple' even though we should "
+ "have run out of space in the buckets (false positive)");
+ MOZ_RELEASE_ASSERT(
+ filter->mightContain(&two),
+ "Filter claims to not contain 'two' even though we should have "
+ "run out of space in the buckets (false positive)");
+
+ filter->remove(&one);
+
+ MOZ_RELEASE_ASSERT(
+ !filter->mightContain(&one),
+ "Filter should not contain 'one', because we didn't overflow its "
+ "bucket");
+
+ filter->clear();
+
+ MOZ_RELEASE_ASSERT(!filter->mightContain(&multiple),
+ "clear() failed to work");
+}
+
+int main() {
+ testBitBloomFilter();
+ testCountingBloomFilter();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestBufferList.cpp b/mfbt/tests/TestBufferList.cpp
new file mode 100644
index 0000000000..9c0d69d7d6
--- /dev/null
+++ b/mfbt/tests/TestBufferList.cpp
@@ -0,0 +1,372 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This is included first to ensure it doesn't implicitly depend on anything
+// else.
+#include "mozilla/BufferList.h"
+
+// It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
+// but MFBT cannot use mozalloc.
+class InfallibleAllocPolicy {
+ public:
+ template <typename T>
+ T* pod_malloc(size_t aNumElems) {
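+    // MulOverflowMask<N> has the high bits set that, if set in aNumElems,
+    // would make aNumElems * N overflow size_t, so this cheaply rejects
+    // requests whose byte count would overflow.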
+ if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+ MOZ_CRASH("TestBufferList.cpp: overflow");
+ }
+ T* rv = static_cast<T*>(malloc(aNumElems * sizeof(T)));
+ if (!rv) {
+ MOZ_CRASH("TestBufferList.cpp: out of memory");
+ }
+ return rv;
+ }
+
+ template <typename T>
+ void free_(T* aPtr, size_t aNumElems = 0) {
+ free(aPtr);
+ }
+
+ void reportAllocOverflow() const {}
+
+ bool checkSimulatedOOM() const { return true; }
+};
+
+typedef mozilla::BufferList<InfallibleAllocPolicy> BufferList;
+
+int main(void) {
+ const size_t kInitialSize = 16;
+ const size_t kInitialCapacity = 24;
+ const size_t kStandardCapacity = 32;
+
+ BufferList bl(kInitialSize, kInitialCapacity, kStandardCapacity);
+
+ memset(bl.Start(), 0x0c, kInitialSize);
+ MOZ_RELEASE_ASSERT(bl.Size() == kInitialSize);
+
+ // Simple iteration and access.
+
+ BufferList::IterImpl iter(bl.Iter());
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == kInitialSize);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(kInitialSize));
+ MOZ_RELEASE_ASSERT(!iter.HasRoomFor(kInitialSize + 1));
+ MOZ_RELEASE_ASSERT(!iter.HasRoomFor(size_t(-1)));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0c);
+ MOZ_RELEASE_ASSERT(!iter.Done());
+
+ iter.Advance(bl, 4);
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == kInitialSize - 4);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(kInitialSize - 4));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0c);
+ MOZ_RELEASE_ASSERT(!iter.Done());
+
+ iter.Advance(bl, 11);
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == kInitialSize - 4 - 11);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(kInitialSize - 4 - 11));
+ MOZ_RELEASE_ASSERT(!iter.HasRoomFor(kInitialSize - 4 - 11 + 1));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0c);
+ MOZ_RELEASE_ASSERT(!iter.Done());
+
+ iter.Advance(bl, kInitialSize - 4 - 11);
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == 0);
+ MOZ_RELEASE_ASSERT(!iter.HasRoomFor(1));
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ // Writing to the buffer.
+
+ const size_t kSmallWrite = 16;
+
+ char toWrite[kSmallWrite];
+ memset(toWrite, 0x0a, kSmallWrite);
+ MOZ_ALWAYS_TRUE(bl.WriteBytes(toWrite, kSmallWrite));
+
+ MOZ_RELEASE_ASSERT(bl.Size() == kInitialSize + kSmallWrite);
+
+ iter = bl.Iter();
+ iter.Advance(bl, kInitialSize);
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() ==
+ kInitialCapacity - kInitialSize);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(kInitialCapacity - kInitialSize));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0a);
+
+ // AdvanceAcrossSegments.
+
+ iter = bl.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl, kInitialCapacity - 4));
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == 4);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(4));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0a);
+
+ iter = bl.Iter();
+ MOZ_RELEASE_ASSERT(
+ iter.AdvanceAcrossSegments(bl, kInitialSize + kSmallWrite - 4));
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == 4);
+ MOZ_RELEASE_ASSERT(iter.HasRoomFor(4));
+ MOZ_RELEASE_ASSERT(*iter.Data() == 0x0a);
+
+ MOZ_RELEASE_ASSERT(
+ bl.Iter().AdvanceAcrossSegments(bl, kInitialSize + kSmallWrite - 1));
+ MOZ_RELEASE_ASSERT(
+ bl.Iter().AdvanceAcrossSegments(bl, kInitialSize + kSmallWrite));
+ MOZ_RELEASE_ASSERT(
+ !bl.Iter().AdvanceAcrossSegments(bl, kInitialSize + kSmallWrite + 1));
+ MOZ_RELEASE_ASSERT(!bl.Iter().AdvanceAcrossSegments(bl, size_t(-1)));
+
+ // Reading non-contiguous bytes.
+
+ char toRead[kSmallWrite];
+ iter = bl.Iter();
+ iter.Advance(bl, kInitialSize);
+ bl.ReadBytes(iter, toRead, kSmallWrite);
+ MOZ_RELEASE_ASSERT(memcmp(toRead, toWrite, kSmallWrite) == 0);
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ // Make sure reading up to the end of a segment advances the iter to the next
+ // segment.
+ iter = bl.Iter();
+ bl.ReadBytes(iter, toRead, kInitialSize);
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() ==
+ kInitialCapacity - kInitialSize);
+
+ const size_t kBigWrite = 1024;
+
+ char* toWriteBig = static_cast<char*>(malloc(kBigWrite));
+ for (unsigned i = 0; i < kBigWrite; i++) {
+ toWriteBig[i] = i % 37;
+ }
+ MOZ_ALWAYS_TRUE(bl.WriteBytes(toWriteBig, kBigWrite));
+
+ char* toReadBig = static_cast<char*>(malloc(kBigWrite));
+ iter = bl.Iter();
+ MOZ_RELEASE_ASSERT(
+ iter.AdvanceAcrossSegments(bl, kInitialSize + kSmallWrite));
+ bl.ReadBytes(iter, toReadBig, kBigWrite);
+ MOZ_RELEASE_ASSERT(memcmp(toReadBig, toWriteBig, kBigWrite) == 0);
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ free(toReadBig);
+ free(toWriteBig);
+
+ // Currently bl contains these segments:
+ // #0: offset 0, [0x0c]*16 + [0x0a]*8, size 24
+ // #1: offset 24, [0x0a]*8 + [i%37 for i in 0..24], size 32
+  // #2: offset 56, [i%37 for i in 24..56], size 32
+ // ...
+ // #32: offset 1016, [i%37 for i in 984..1016], size 32
+ // #33: offset 1048, [i%37 for i in 1016..1024], size 8
+
+ static size_t kTotalSize = kInitialSize + kSmallWrite + kBigWrite;
+
+ MOZ_RELEASE_ASSERT(bl.Size() == kTotalSize);
+
+ static size_t kLastSegmentSize =
+ (kTotalSize - kInitialCapacity) % kStandardCapacity;
+
+ iter = bl.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(
+ bl, kTotalSize - kLastSegmentSize - kStandardCapacity));
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == kStandardCapacity);
+ iter.Advance(bl, kStandardCapacity);
+ MOZ_RELEASE_ASSERT(iter.RemainingInSegment() == kLastSegmentSize);
+ MOZ_RELEASE_ASSERT(
+ unsigned(*iter.Data()) ==
+ (kTotalSize - kLastSegmentSize - kInitialSize - kSmallWrite) % 37);
+
+ // Clear.
+
+ bl.Clear();
+ MOZ_RELEASE_ASSERT(bl.Size() == 0);
+ MOZ_RELEASE_ASSERT(bl.Iter().Done());
+
+ // Move assignment.
+
+ const size_t kSmallCapacity = 8;
+
+ BufferList bl2(0, kSmallCapacity, kSmallCapacity);
+ MOZ_ALWAYS_TRUE(bl2.WriteBytes(toWrite, kSmallWrite));
+ MOZ_ALWAYS_TRUE(bl2.WriteBytes(toWrite, kSmallWrite));
+ MOZ_ALWAYS_TRUE(bl2.WriteBytes(toWrite, kSmallWrite));
+
+ bl = std::move(bl2);
+ MOZ_RELEASE_ASSERT(bl2.Size() == 0);
+ MOZ_RELEASE_ASSERT(bl2.Iter().Done());
+
+ iter = bl.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl, kSmallWrite * 3));
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ // MoveFallible
+
+ bool success;
+ bl2 = bl.MoveFallible<InfallibleAllocPolicy>(&success);
+ MOZ_RELEASE_ASSERT(success);
+ MOZ_RELEASE_ASSERT(bl.Size() == 0);
+ MOZ_RELEASE_ASSERT(bl.Iter().Done());
+ MOZ_RELEASE_ASSERT(bl2.Size() == kSmallWrite * 3);
+
+ iter = bl2.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl2, kSmallWrite * 3));
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ bl = bl2.MoveFallible<InfallibleAllocPolicy>(&success);
+
+ // Borrowing.
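+  // Borrow creates a BufferList that references the same underlying segment
+  // memory instead of copying it, which is why the Data() pointers compared
+  // below are equal.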
+
+ const size_t kBorrowStart = 4;
+ const size_t kBorrowSize = 24;
+
+ iter = bl.Iter();
+ iter.Advance(bl, kBorrowStart);
+ bl2 = bl.Borrow<InfallibleAllocPolicy>(iter, kBorrowSize, &success);
+ MOZ_RELEASE_ASSERT(success);
+ MOZ_RELEASE_ASSERT(bl2.Size() == kBorrowSize);
+
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(
+ bl, kSmallWrite * 3 - kBorrowSize - kBorrowStart));
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ iter = bl2.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl2, kBorrowSize));
+ MOZ_RELEASE_ASSERT(iter.Done());
+
+ BufferList::IterImpl iter1(bl.Iter()), iter2(bl2.Iter());
+ iter1.Advance(bl, kBorrowStart);
+ MOZ_RELEASE_ASSERT(iter1.Data() == iter2.Data());
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl, kBorrowSize - 5));
+ MOZ_RELEASE_ASSERT(iter2.AdvanceAcrossSegments(bl2, kBorrowSize - 5));
+ MOZ_RELEASE_ASSERT(iter1.Data() == iter2.Data());
+
+ // RangeLength.
+
+ BufferList bl12(0, 0, 8);
+ MOZ_ALWAYS_TRUE(bl12.WriteBytes("abcdefgh", 8));
+ MOZ_ALWAYS_TRUE(bl12.WriteBytes("12345678", 8));
+
+ // |iter| is at position 0 (1st segment).
+ iter = bl12.Iter();
+ iter1 = bl12.Iter();
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 0);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 4);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 8);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 12);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 3));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 15);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(iter1.Done());
+
+ // |iter| is at position 1 (1st segment).
+ iter = bl12.Iter();
+ iter1 = bl12.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 0);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 4);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 8);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 12);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 2));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 14);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(iter1.Done());
+
+ // |iter| is at position 8 (2nd segment).
+ iter = bl12.Iter();
+ iter1 = bl12.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl12, 8));
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 8));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 0);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 4);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 3));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 7);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(iter1.Done());
+
+ // |iter| is at position 9 (2nd segment).
+ iter = bl12.Iter();
+ iter1 = bl12.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl12, 9));
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 9));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 0);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 4));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 4);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 2));
+ MOZ_RELEASE_ASSERT(bl12.RangeLength(iter, iter1) == 6);
+ MOZ_RELEASE_ASSERT(iter1.AdvanceAcrossSegments(bl12, 1));
+ MOZ_RELEASE_ASSERT(iter1.Done());
+
+ BufferList bl13(0, 0, 8);
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("abcdefgh", 8));
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("12345678", 8));
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("ABCDEFGH", 8));
+ MOZ_RELEASE_ASSERT(bl13.Size() == 24);
+
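+  // Truncate(iter) drops everything from the iterator's position to the end
+  // and returns the number of bytes removed.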
+ // At segment border
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl13, 8));
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 16);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 8);
+
+ // Restore state
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("12345678", 8));
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("ABCDEFGH", 8));
+ MOZ_RELEASE_ASSERT(bl13.Size() == 24);
+
+ // Before segment border
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl13, 7));
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 17);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 7);
+
+ // Restore state
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("h", 1));
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("12345678", 8));
+ MOZ_ALWAYS_TRUE(bl13.WriteBytes("ABCDEFGH", 8));
+ MOZ_RELEASE_ASSERT(bl13.Size() == 24);
+
+ // In last segment
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl13, 20));
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 4);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 20);
+
+ // No-op truncate
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 0);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 20);
+
+ // No-op truncate with fresh iterator
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(bl13, 20));
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 0);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 20);
+
+ // Truncate at start of buffer
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 20);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 0);
+
+ // No-op truncate at start of buffer
+ iter = bl13.Iter();
+ MOZ_RELEASE_ASSERT(bl13.Truncate(iter) == 0);
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(bl13.Size() == 0);
+
+ return 0;
+}
diff --git a/mfbt/tests/TestCasting.cpp b/mfbt/tests/TestCasting.cpp
new file mode 100644
index 0000000000..9b040956c7
--- /dev/null
+++ b/mfbt/tests/TestCasting.cpp
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Casting.h"
+#include "mozilla/ThreadSafety.h"
+
+#include <stdint.h>
+#include <cmath>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+using mozilla::AssertedCast;
+using mozilla::BitwiseCast;
+using mozilla::detail::IsInBounds;
+
+static const uint8_t floatMantissaBitsPlusOne = 24;
+static const uint8_t doubleMantissaBitsPlusOne = 53;
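+// Integers of magnitude up to 2^24 (float) or 2^53 (double) are exactly
+// representable: that is the mantissa width including the implicit leading
+// bit.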
+
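+// BitwiseCast requires its source and destination types to have the same
+// size, so the unsigned int -> unsigned long case below is only exercised on
+// platforms where the two types have equal widths.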
+template <typename Uint, typename Ulong, bool = (sizeof(Uint) == sizeof(Ulong))>
+struct UintUlongBitwiseCast;
+
+template <typename Uint, typename Ulong>
+struct UintUlongBitwiseCast<Uint, Ulong, true> {
+ static void test() {
+ MOZ_RELEASE_ASSERT(BitwiseCast<Ulong>(Uint(8675309)) == Ulong(8675309));
+ }
+};
+
+template <typename Uint, typename Ulong>
+struct UintUlongBitwiseCast<Uint, Ulong, false> {
+ static void test() {}
+};
+
+static void TestBitwiseCast() {
+ MOZ_RELEASE_ASSERT(BitwiseCast<int>(int(8675309)) == int(8675309));
+ UintUlongBitwiseCast<unsigned int, unsigned long>::test();
+}
+
+static void TestSameSize() {
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int16_t>(int16_t(0))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int16_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int16_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, uint16_t>(uint16_t(UINT16_MAX))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, int16_t>(uint16_t(0))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint16_t, int16_t>(uint16_t(-1))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint16_t>(int16_t(-1))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, uint16_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint16_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int32_t, uint32_t>(int32_t(INT32_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, uint32_t>(int32_t(INT32_MIN))));
+}
+
+static void TestToBiggerSize() {
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int32_t>(int16_t(0))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int32_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int32_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, uint32_t>(uint16_t(UINT16_MAX))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, int32_t>(uint16_t(0))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, int32_t>(uint16_t(-1))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint32_t>(int16_t(-1))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, uint32_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint32_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int32_t, uint64_t>(int32_t(INT32_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, uint64_t>(int32_t(INT32_MIN))));
+}
+
+static void TestToSmallerSize() {
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, int8_t>(int16_t(0))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, int8_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, int8_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint16_t, uint8_t>(uint16_t(UINT16_MAX))));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, int8_t>(uint16_t(0))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint16_t, int8_t>(uint16_t(-1))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint8_t>(int16_t(-1))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint8_t>(int16_t(INT16_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int16_t, uint8_t>(int16_t(INT16_MIN))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, uint16_t>(int32_t(INT32_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, uint16_t>(int32_t(INT32_MIN))));
+
+ // Boundary cases
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, int32_t>(int64_t(INT32_MIN) - 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, int32_t>(int64_t(INT32_MIN))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, int32_t>(int64_t(INT32_MIN) + 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, int32_t>(int64_t(INT32_MAX) - 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, int32_t>(int64_t(INT32_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, int32_t>(int64_t(INT32_MAX) + 1)));
+
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, uint32_t>(int64_t(-1))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, uint32_t>(int64_t(0))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, uint32_t>(int64_t(1))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, uint32_t>(int64_t(UINT32_MAX) - 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, uint32_t>(int64_t(UINT32_MAX))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, uint32_t>(int64_t(UINT32_MAX) + 1)));
+}
+
+template <typename In, typename Out>
+void checkBoundariesFloating(In aEpsilon = {}, Out aIntegerOffset = {}) {
+  // Check that the max value of the input float type can't be represented as
+  // an integer. This is true for all floating-point and integer widths.
+ MOZ_RELEASE_ASSERT((!IsInBounds<In, Out>(std::numeric_limits<In>::max())));
+ // Check that the max value of the integer, as a float, minus an offset that
+ // depends on the magnitude, can be represented as an integer.
+ MOZ_RELEASE_ASSERT((IsInBounds<In, Out>(
+ static_cast<In>(std::numeric_limits<Out>::max() - aIntegerOffset))));
+ // Check that the max value of the integer, plus a number that depends on the
+ // magnitude of the number, can't be represented as this integer (because it
+ // becomes too big).
+ MOZ_RELEASE_ASSERT((!IsInBounds<In, Out>(
+ aEpsilon + static_cast<In>(std::numeric_limits<Out>::max()))));
+ if constexpr (std::is_signed_v<In>) {
+ // Same for negative numbers.
+ MOZ_RELEASE_ASSERT(
+ (!IsInBounds<In, Out>(std::numeric_limits<In>::lowest())));
+ MOZ_RELEASE_ASSERT((IsInBounds<In, Out>(
+ static_cast<In>(std::numeric_limits<Out>::lowest()))));
+ MOZ_RELEASE_ASSERT((!IsInBounds<In, Out>(
+ static_cast<In>(std::numeric_limits<Out>::lowest()) - aEpsilon)));
+ } else {
+ // Check for negative floats and unsigned integer types.
+ MOZ_RELEASE_ASSERT((!IsInBounds<In, Out>(static_cast<In>(-1))));
+ }
+}
+
+void TestFloatConversion() {
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint64_t, float>(UINT64_MAX)));
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint32_t, float>(UINT32_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, float>(UINT16_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint8_t, float>(UINT8_MAX)));
+
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, float>(INT64_MAX)));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, float>(INT64_MIN)));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, float>(INT32_MAX)));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int32_t, float>(INT32_MIN)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, float>(INT16_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, float>(INT16_MIN)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int8_t, float>(INT8_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int8_t, float>(INT8_MIN)));
+
+ MOZ_RELEASE_ASSERT((!IsInBounds<uint64_t, double>(UINT64_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint32_t, double>(UINT32_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint16_t, double>(UINT16_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<uint8_t, double>(UINT8_MAX)));
+
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, double>(INT64_MAX)));
+ MOZ_RELEASE_ASSERT((!IsInBounds<int64_t, double>(INT64_MIN)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int32_t, double>(INT32_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int32_t, double>(INT32_MIN)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, double>(INT16_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int16_t, double>(INT16_MIN)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int8_t, double>(INT8_MAX)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int8_t, double>(INT8_MIN)));
+
+ // Floor check
+ MOZ_RELEASE_ASSERT((IsInBounds<float, uint64_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<uint64_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, uint32_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<uint32_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, uint16_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<uint16_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, uint8_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<uint8_t>(4.3f) == 4u));
+
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int64_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int64_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int32_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int32_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int16_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int16_t>(4.3f) == 4u));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int8_t>(4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int8_t>(4.3f) == 4u));
+
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int64_t>(-4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int64_t>(-4.3f) == -4));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int32_t>(-4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int32_t>(-4.3f) == -4));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int16_t>(-4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int16_t>(-4.3f) == -4));
+ MOZ_RELEASE_ASSERT((IsInBounds<float, int8_t>(-4.3)));
+ MOZ_RELEASE_ASSERT((AssertedCast<int8_t>(-4.3f) == -4));
+
+  // Bound checks for float-to-unsigned-integer conversions. The parameters
+  // are epsilons and offsets used to probe the boundaries; their values
+  // depend on the magnitude of the numbers involved.
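+  // E.g. for double -> uint64_t: near 2^64 consecutive doubles are 2048
+  // apart, so an epsilon greater than 2048 (here 2049) is needed to reach a
+  // value that rounds past uint64_t's maximum.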
+ checkBoundariesFloating<double, uint64_t>(2049.);
+ checkBoundariesFloating<double, uint32_t>(1.);
+ checkBoundariesFloating<double, uint16_t>(1.);
+ checkBoundariesFloating<double, uint8_t>(1.);
+ // Large number because of the lack of precision of floats at this magnitude
+ checkBoundariesFloating<float, uint64_t>(1.1e12f);
+ checkBoundariesFloating<float, uint32_t>(1.f, 128u);
+ checkBoundariesFloating<float, uint16_t>(1.f);
+ checkBoundariesFloating<float, uint8_t>(1.f);
+
+ checkBoundariesFloating<double, int64_t>(1025.);
+ checkBoundariesFloating<double, int32_t>(1.);
+ checkBoundariesFloating<double, int16_t>(1.);
+ checkBoundariesFloating<double, int8_t>(1.);
+ // Large number because of the lack of precision of floats at this magnitude
+ checkBoundariesFloating<float, int64_t>(1.1e12f);
+ checkBoundariesFloating<float, int32_t>(256.f, 64u);
+ checkBoundariesFloating<float, int16_t>(1.f);
+ checkBoundariesFloating<float, int8_t>(1.f);
+
+ // Integer to floating point, boundary cases
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, float>(
+ int64_t(std::pow(2, floatMantissaBitsPlusOne)) + 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, float>(
+ int64_t(std::pow(2, floatMantissaBitsPlusOne)))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, float>(
+ int64_t(std::pow(2, floatMantissaBitsPlusOne)) - 1)));
+
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, float>(
+ int64_t(-std::pow(2, floatMantissaBitsPlusOne)) - 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, float>(
+ int64_t(-std::pow(2, floatMantissaBitsPlusOne)))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, float>(
+ int64_t(-std::pow(2, floatMantissaBitsPlusOne)) + 1)));
+
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, double>(
+ uint64_t(std::pow(2, doubleMantissaBitsPlusOne)) + 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, double>(
+ uint64_t(std::pow(2, doubleMantissaBitsPlusOne)))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, double>(
+ uint64_t(std::pow(2, doubleMantissaBitsPlusOne)) - 1)));
+
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, double>(
+ int64_t(-std::pow(2, doubleMantissaBitsPlusOne)) - 1)));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, double>(
+ int64_t(-std::pow(2, doubleMantissaBitsPlusOne)))));
+ MOZ_RELEASE_ASSERT((IsInBounds<int64_t, double>(
+ int64_t(-std::pow(2, doubleMantissaBitsPlusOne)) + 1)));
+
+ MOZ_RELEASE_ASSERT(!(IsInBounds<uint64_t, double>(UINT64_MAX)));
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, double>(INT64_MAX)));
+ MOZ_RELEASE_ASSERT(!(IsInBounds<int64_t, double>(INT64_MIN)));
+
+ MOZ_RELEASE_ASSERT(
+ !(IsInBounds<double, float>(std::numeric_limits<double>::max())));
+ MOZ_RELEASE_ASSERT(
+ !(IsInBounds<double, float>(-std::numeric_limits<double>::max())));
+}
+
+int main() {
+ TestBitwiseCast();
+
+ TestSameSize();
+ TestToBiggerSize();
+ TestToSmallerSize();
+ TestFloatConversion();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestCeilingFloor.cpp b/mfbt/tests/TestCeilingFloor.cpp
new file mode 100644
index 0000000000..7bdd6ea27c
--- /dev/null
+++ b/mfbt/tests/TestCeilingFloor.cpp
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+using mozilla::CeilingLog2;
+using mozilla::FloorLog2;
+using mozilla::RoundUpPow2;
+
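+// The tests below rely on CeilingLog2(0) and FloorLog2(0) being defined as 0,
+// even though log2(0) is mathematically undefined.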
+static void TestCeiling() {
+ for (uint32_t i = 0; i <= 1; i++) {
+ MOZ_RELEASE_ASSERT(CeilingLog2(i) == 0);
+ }
+ for (uint32_t i = 2; i <= 2; i++) {
+ MOZ_RELEASE_ASSERT(CeilingLog2(i) == 1);
+ }
+ for (uint32_t i = 3; i <= 4; i++) {
+ MOZ_RELEASE_ASSERT(CeilingLog2(i) == 2);
+ }
+ for (uint32_t i = 5; i <= 8; i++) {
+ MOZ_RELEASE_ASSERT(CeilingLog2(i) == 3);
+ }
+ for (uint32_t i = 9; i <= 16; i++) {
+ MOZ_RELEASE_ASSERT(CeilingLog2(i) == 4);
+ }
+}
+
+static void TestFloor() {
+ for (uint32_t i = 0; i <= 1; i++) {
+ MOZ_RELEASE_ASSERT(FloorLog2(i) == 0);
+ }
+ for (uint32_t i = 2; i <= 3; i++) {
+ MOZ_RELEASE_ASSERT(FloorLog2(i) == 1);
+ }
+ for (uint32_t i = 4; i <= 7; i++) {
+ MOZ_RELEASE_ASSERT(FloorLog2(i) == 2);
+ }
+ for (uint32_t i = 8; i <= 15; i++) {
+ MOZ_RELEASE_ASSERT(FloorLog2(i) == 3);
+ }
+ for (uint32_t i = 16; i <= 31; i++) {
+ MOZ_RELEASE_ASSERT(FloorLog2(i) == 4);
+ }
+}
+
+static void TestRoundUpPow2() {
+ MOZ_RELEASE_ASSERT(RoundUpPow2(0) == 1);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(1) == 1);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(2) == 2);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(3) == 4);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(4) == 4);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(5) == 8);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(6) == 8);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(7) == 8);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(8) == 8);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(9) == 16);
+
+ MOZ_RELEASE_ASSERT(RoundUpPow2(15) == 16);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(16) == 16);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(17) == 32);
+
+ MOZ_RELEASE_ASSERT(RoundUpPow2(31) == 32);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(32) == 32);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(33) == 64);
+
+ size_t MaxPow2 = size_t(1) << (sizeof(size_t) * CHAR_BIT - 1);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(MaxPow2 - 1) == MaxPow2);
+ MOZ_RELEASE_ASSERT(RoundUpPow2(MaxPow2) == MaxPow2);
+ // not valid to round up when past the max power of two
+}
+
+int main() {
+ TestCeiling();
+ TestFloor();
+
+ TestRoundUpPow2();
+ return 0;
+}
diff --git a/mfbt/tests/TestCheckedInt.cpp b/mfbt/tests/TestCheckedInt.cpp
new file mode 100644
index 0000000000..309c882d3b
--- /dev/null
+++ b/mfbt/tests/TestCheckedInt.cpp
@@ -0,0 +1,615 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/CheckedInt.h"
+
+#include <iostream>
+#include <climits>
+#include <type_traits>
+
+using namespace mozilla;
+
+int gIntegerTypesTested = 0;
+int gTestsPassed = 0;
+int gTestsFailed = 0;
+
+void verifyImplFunction(bool aX, bool aExpected, const char* aFile, int aLine,
+ int aSize, bool aIsTSigned) {
+ if (aX == aExpected) {
+ gTestsPassed++;
+ } else {
+ gTestsFailed++;
+ std::cerr << "Test failed at " << aFile << ":" << aLine;
+ std::cerr << " with T a ";
+ if (aIsTSigned) {
+ std::cerr << "signed";
+ } else {
+ std::cerr << "unsigned";
+ }
+ std::cerr << " " << CHAR_BIT * aSize << "-bit integer type" << std::endl;
+ }
+}
+
+#define VERIFY_IMPL(x, expected) \
+ verifyImplFunction((x), (expected), __FILE__, __LINE__, sizeof(T), \
+ std::is_signed_v<T>)
+
+#define VERIFY(x) VERIFY_IMPL(x, true)
+#define VERIFY_IS_FALSE(x) VERIFY_IMPL(x, false)
+#define VERIFY_IS_VALID(x) VERIFY_IMPL((x).isValid(), true)
+#define VERIFY_IS_INVALID(x) VERIFY_IMPL((x).isValid(), false)
+#define VERIFY_IS_VALID_IF(x, condition) VERIFY_IMPL((x).isValid(), (condition))
+
+template <typename T, size_t Size = sizeof(T)>
+struct testTwiceBiggerType {
+ static void run() {
+ VERIFY(
+ detail::IsSupported<typename detail::TwiceBiggerType<T>::Type>::value);
+ VERIFY(sizeof(typename detail::TwiceBiggerType<T>::Type) == 2 * sizeof(T));
+ VERIFY(bool(std::is_signed_v<typename detail::TwiceBiggerType<T>::Type>) ==
+ bool(std::is_signed_v<T>));
+ }
+};
+
+template <typename T>
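+// For 8-byte integer types there is no supported twice-bigger (128-bit)
+// type, so TwiceBiggerType must yield an unsupported type.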
+struct testTwiceBiggerType<T, 8> {
+ static void run() {
+ VERIFY_IS_FALSE(
+ detail::IsSupported<typename detail::TwiceBiggerType<T>::Type>::value);
+ }
+};
+
+template <typename T>
+void test() {
+ static bool alreadyRun = false;
+ // Integer types from different families may just be typedefs for types from
+ // other families. E.g. int32_t might be just a typedef for int. No point
+ // re-running the same tests then.
+ if (alreadyRun) {
+ return;
+ }
+ alreadyRun = true;
+
+ VERIFY(detail::IsSupported<T>::value);
+ const bool isTSigned = std::is_signed_v<T>;
+ VERIFY(bool(isTSigned) == !bool(T(-1) > T(0)));
+
+ testTwiceBiggerType<T>::run();
+
+ using unsignedT = std::make_unsigned_t<T>;
+
+ VERIFY(sizeof(unsignedT) == sizeof(T));
+ VERIFY(std::is_signed_v<unsignedT> == false);
+
+ const CheckedInt<T> max(std::numeric_limits<T>::max());
+ const CheckedInt<T> min(std::numeric_limits<T>::min());
+
+ // Check MinValue and MaxValue, since they are custom implementations and a
+ // mistake there could potentially NOT be caught by any other tests... while
+ // making everything wrong!
+
+ unsignedT bit = 1;
+ unsignedT unsignedMinValue(min.value());
+ unsignedT unsignedMaxValue(max.value());
+ for (size_t i = 0; i < sizeof(T) * CHAR_BIT - 1; i++) {
+ VERIFY((unsignedMinValue & bit) == 0);
+ bit <<= 1;
+ }
+ VERIFY((unsignedMinValue & bit) == (isTSigned ? bit : unsignedT(0)));
+ VERIFY(unsignedMaxValue == unsignedT(~unsignedMinValue));
+
+ const CheckedInt<T> zero(0);
+ const CheckedInt<T> one(1);
+ const CheckedInt<T> two(2);
+ const CheckedInt<T> three(3);
+ const CheckedInt<T> four(4);
+
+ /* Addition / subtraction checks */
+
+ VERIFY_IS_VALID(zero + zero);
+ VERIFY(zero + zero == zero);
+ VERIFY_IS_FALSE(zero + zero == one); // Check == doesn't always return true
+ VERIFY_IS_VALID(zero + one);
+ VERIFY(zero + one == one);
+ VERIFY_IS_VALID(one + one);
+ VERIFY(one + one == two);
+
+ const CheckedInt<T> maxMinusOne = max - one;
+ const CheckedInt<T> maxMinusTwo = max - two;
+ VERIFY_IS_VALID(maxMinusOne);
+ VERIFY_IS_VALID(maxMinusTwo);
+ VERIFY_IS_VALID(maxMinusOne + one);
+ VERIFY_IS_VALID(maxMinusTwo + one);
+ VERIFY_IS_VALID(maxMinusTwo + two);
+ VERIFY(maxMinusOne + one == max);
+ VERIFY(maxMinusTwo + one == maxMinusOne);
+ VERIFY(maxMinusTwo + two == max);
+
+ VERIFY_IS_VALID(max + zero);
+ VERIFY_IS_VALID(max - zero);
+ VERIFY_IS_INVALID(max + one);
+ VERIFY_IS_INVALID(max + two);
+ VERIFY_IS_INVALID(max + maxMinusOne);
+ VERIFY_IS_INVALID(max + max);
+
+ const CheckedInt<T> minPlusOne = min + one;
+ const CheckedInt<T> minPlusTwo = min + two;
+ VERIFY_IS_VALID(minPlusOne);
+ VERIFY_IS_VALID(minPlusTwo);
+ VERIFY_IS_VALID(minPlusOne - one);
+ VERIFY_IS_VALID(minPlusTwo - one);
+ VERIFY_IS_VALID(minPlusTwo - two);
+ VERIFY(minPlusOne - one == min);
+ VERIFY(minPlusTwo - one == minPlusOne);
+ VERIFY(minPlusTwo - two == min);
+
+ const CheckedInt<T> minMinusOne = min - one;
+ VERIFY_IS_VALID(min + zero);
+ VERIFY_IS_VALID(min - zero);
+ VERIFY_IS_INVALID(min - one);
+ VERIFY_IS_INVALID(min - two);
+ VERIFY_IS_INVALID(min - minMinusOne);
+ VERIFY_IS_VALID(min - min);
+
+ const CheckedInt<T> maxOverTwo = max / two;
+ VERIFY_IS_VALID(maxOverTwo + maxOverTwo);
+ VERIFY_IS_VALID(maxOverTwo + one);
+ VERIFY((maxOverTwo + one) - one == maxOverTwo);
+ VERIFY_IS_VALID(maxOverTwo - maxOverTwo);
+ VERIFY(maxOverTwo - maxOverTwo == zero);
+
+ const CheckedInt<T> minOverTwo = min / two;
+ VERIFY_IS_VALID(minOverTwo + minOverTwo);
+ VERIFY_IS_VALID(minOverTwo + one);
+ VERIFY((minOverTwo + one) - one == minOverTwo);
+ VERIFY_IS_VALID(minOverTwo - minOverTwo);
+ VERIFY(minOverTwo - minOverTwo == zero);
+
+ VERIFY_IS_INVALID(min - one);
+ VERIFY_IS_INVALID(min - two);
+
+ if (isTSigned) {
+ VERIFY_IS_INVALID(min + min);
+ VERIFY_IS_INVALID(minOverTwo + minOverTwo + minOverTwo);
+ VERIFY_IS_INVALID(zero - min + min);
+ VERIFY_IS_INVALID(one - min + min);
+ }
+
+ /* Modulo checks */
+ VERIFY_IS_INVALID(zero % zero);
+ VERIFY_IS_INVALID(one % zero);
+ VERIFY_IS_VALID(zero % one);
+ VERIFY_IS_VALID(zero % max);
+ VERIFY_IS_VALID(one % max);
+ VERIFY_IS_VALID(max % one);
+ VERIFY_IS_VALID(max % max);
+ if (isTSigned) {
+ const CheckedInt<T> minusOne = zero - one;
+ VERIFY_IS_INVALID(minusOne % minusOne);
+ VERIFY_IS_INVALID(zero % minusOne);
+ VERIFY_IS_INVALID(one % minusOne);
+ VERIFY_IS_INVALID(minusOne % one);
+
+ VERIFY_IS_INVALID(min % min);
+ VERIFY_IS_INVALID(zero % min);
+ VERIFY_IS_INVALID(min % one);
+ }
+
+ /* Unary operator- checks */
+
+ const CheckedInt<T> negOne = -one;
+ const CheckedInt<T> negTwo = -two;
+
+ if (isTSigned) {
+ VERIFY_IS_VALID(-max);
+ VERIFY_IS_INVALID(-min);
+ VERIFY(-max - min == one);
+ VERIFY_IS_VALID(-max - one);
+ VERIFY_IS_VALID(negOne);
+ VERIFY_IS_VALID(-max + negOne);
+ VERIFY_IS_VALID(negOne + one);
+ VERIFY(negOne + one == zero);
+ VERIFY_IS_VALID(negTwo);
+ VERIFY_IS_VALID(negOne + negOne);
+ VERIFY(negOne + negOne == negTwo);
+ } else {
+ VERIFY_IS_INVALID(-max);
+ VERIFY_IS_VALID(-min);
+ VERIFY(min == zero);
+ VERIFY_IS_INVALID(negOne);
+ }
+
+ /* multiplication checks */
+
+ VERIFY_IS_VALID(zero * zero);
+ VERIFY(zero * zero == zero);
+ VERIFY_IS_VALID(zero * one);
+ VERIFY(zero * one == zero);
+ VERIFY_IS_VALID(one * zero);
+ VERIFY(one * zero == zero);
+ VERIFY_IS_VALID(one * one);
+ VERIFY(one * one == one);
+ VERIFY_IS_VALID(one * three);
+ VERIFY(one * three == three);
+ VERIFY_IS_VALID(two * two);
+ VERIFY(two * two == four);
+
+ VERIFY_IS_INVALID(max * max);
+ VERIFY_IS_INVALID(maxOverTwo * max);
+ VERIFY_IS_INVALID(maxOverTwo * maxOverTwo);
+
+ const CheckedInt<T> maxApproxSqrt(T(T(1) << (CHAR_BIT * sizeof(T) / 2)));
+
+ VERIFY_IS_VALID(maxApproxSqrt);
+ VERIFY_IS_VALID(maxApproxSqrt * two);
+ VERIFY_IS_INVALID(maxApproxSqrt * maxApproxSqrt);
+ VERIFY_IS_INVALID(maxApproxSqrt * maxApproxSqrt * maxApproxSqrt);
+
+ if (isTSigned) {
+ VERIFY_IS_INVALID(min * min);
+ VERIFY_IS_INVALID(minOverTwo * min);
+ VERIFY_IS_INVALID(minOverTwo * minOverTwo);
+
+ const CheckedInt<T> minApproxSqrt = -maxApproxSqrt;
+
+ VERIFY_IS_VALID(minApproxSqrt);
+ VERIFY_IS_VALID(minApproxSqrt * two);
+ VERIFY_IS_INVALID(minApproxSqrt * maxApproxSqrt);
+ VERIFY_IS_INVALID(minApproxSqrt * minApproxSqrt);
+ }
+
+  // Make sure to exercise all 4 sign paths of the signed multiplication
+  // validity check.
+  // test positive * positive
+ VERIFY_IS_VALID(max * one);
+ VERIFY(max * one == max);
+ VERIFY_IS_INVALID(max * two);
+ VERIFY_IS_VALID(maxOverTwo * two);
+ VERIFY((maxOverTwo + maxOverTwo) == (maxOverTwo * two));
+
+ if (isTSigned) {
+ // test positive * negative
+ VERIFY_IS_VALID(max * negOne);
+ VERIFY_IS_VALID(-max);
+ VERIFY(max * negOne == -max);
+ VERIFY_IS_VALID(one * min);
+ VERIFY_IS_INVALID(max * negTwo);
+ VERIFY_IS_VALID(maxOverTwo * negTwo);
+ VERIFY_IS_VALID(two * minOverTwo);
+ VERIFY_IS_VALID((maxOverTwo + one) * negTwo);
+ VERIFY_IS_INVALID((maxOverTwo + two) * negTwo);
+ VERIFY_IS_INVALID(two * (minOverTwo - one));
+
+ // test negative * positive
+ VERIFY_IS_VALID(min * one);
+ VERIFY_IS_VALID(minPlusOne * one);
+ VERIFY_IS_INVALID(min * two);
+ VERIFY_IS_VALID(minOverTwo * two);
+ VERIFY(minOverTwo * two == min);
+ VERIFY_IS_INVALID((minOverTwo - one) * negTwo);
+ VERIFY_IS_INVALID(negTwo * max);
+ VERIFY_IS_VALID(minOverTwo * two);
+ VERIFY(minOverTwo * two == min);
+ VERIFY_IS_VALID(negTwo * maxOverTwo);
+ VERIFY_IS_INVALID((minOverTwo - one) * two);
+ VERIFY_IS_VALID(negTwo * (maxOverTwo + one));
+ VERIFY_IS_INVALID(negTwo * (maxOverTwo + two));
+
+ // test negative * negative
+ VERIFY_IS_INVALID(min * negOne);
+ VERIFY_IS_VALID(minPlusOne * negOne);
+ VERIFY(minPlusOne * negOne == max);
+ VERIFY_IS_INVALID(min * negTwo);
+ VERIFY_IS_INVALID(minOverTwo * negTwo);
+ VERIFY_IS_INVALID(negOne * min);
+ VERIFY_IS_VALID(negOne * minPlusOne);
+ VERIFY(negOne * minPlusOne == max);
+ VERIFY_IS_INVALID(negTwo * min);
+ VERIFY_IS_INVALID(negTwo * minOverTwo);
+ }
+
+ /* Division checks */
+
+ VERIFY_IS_VALID(one / one);
+ VERIFY(one / one == one);
+ VERIFY_IS_VALID(three / three);
+ VERIFY(three / three == one);
+ VERIFY_IS_VALID(four / two);
+ VERIFY(four / two == two);
+ VERIFY((four * three) / four == three);
+
+ // Check that div by zero is invalid
+ VERIFY_IS_INVALID(zero / zero);
+ VERIFY_IS_INVALID(one / zero);
+ VERIFY_IS_INVALID(two / zero);
+ VERIFY_IS_INVALID(negOne / zero);
+ VERIFY_IS_INVALID(max / zero);
+ VERIFY_IS_INVALID(min / zero);
+
+ if (isTSigned) {
+ // Check that min / -1 is invalid
+ VERIFY_IS_INVALID(min / negOne);
+
+    // Check that the check for division by -1 doesn't reject numerators
+    // other than min
+ VERIFY_IS_VALID(one / negOne);
+ VERIFY_IS_VALID(zero / negOne);
+ VERIFY_IS_VALID(negOne / negOne);
+ VERIFY_IS_VALID(max / negOne);
+ }
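+
+  // Note: in two's complement, min has no representable negation (e.g. for
+  // int8_t, -(-128) > 127), which is why min / -1 -- and -min above -- are
+  // the lone overflow cases that division and unary minus must reject.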
+
+ /* Check that invalidity is correctly preserved by arithmetic ops */
+
+ const CheckedInt<T> someInvalid = max + max;
+ VERIFY_IS_INVALID(someInvalid + zero);
+ VERIFY_IS_INVALID(someInvalid - zero);
+ VERIFY_IS_INVALID(zero + someInvalid);
+ VERIFY_IS_INVALID(zero - someInvalid);
+ VERIFY_IS_INVALID(-someInvalid);
+ VERIFY_IS_INVALID(someInvalid * zero);
+ VERIFY_IS_INVALID(someInvalid * one);
+ VERIFY_IS_INVALID(zero * someInvalid);
+ VERIFY_IS_INVALID(one * someInvalid);
+ VERIFY_IS_INVALID(someInvalid / zero);
+ VERIFY_IS_INVALID(someInvalid / one);
+ VERIFY_IS_INVALID(zero / someInvalid);
+ VERIFY_IS_INVALID(one / someInvalid);
+ VERIFY_IS_INVALID(someInvalid % zero);
+ VERIFY_IS_INVALID(someInvalid % one);
+ VERIFY_IS_INVALID(zero % someInvalid);
+ VERIFY_IS_INVALID(one % someInvalid);
+ VERIFY_IS_INVALID(someInvalid + someInvalid);
+ VERIFY_IS_INVALID(someInvalid - someInvalid);
+ VERIFY_IS_INVALID(someInvalid * someInvalid);
+ VERIFY_IS_INVALID(someInvalid / someInvalid);
+ VERIFY_IS_INVALID(someInvalid % someInvalid);
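+
+  // A minimal usage sketch (an illustration, not part of the matrix above):
+  // callers are expected to consult isValid() before using value(), which is
+  // exactly what an invalid result like this one would fail.
+  {
+    CheckedInt<T> total = max;
+    total += one;  // max + 1 overflows for every T, so |total| goes invalid
+    VERIFY_IS_INVALID(total);
+  }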
+
+ // Check that mixing checked integers with plain integers in expressions is
+ // allowed
+
+ VERIFY(one + T(2) == three);
+ VERIFY(2 + one == three);
+ {
+ CheckedInt<T> x = one;
+ x += 2;
+ VERIFY(x == three);
+ }
+ VERIFY(two - 1 == one);
+ VERIFY(2 - one == one);
+ {
+ CheckedInt<T> x = two;
+ x -= 1;
+ VERIFY(x == one);
+ }
+ VERIFY(one * 2 == two);
+ VERIFY(2 * one == two);
+ {
+ CheckedInt<T> x = one;
+ x *= 2;
+ VERIFY(x == two);
+ }
+ VERIFY(four / 2 == two);
+ VERIFY(4 / two == two);
+ {
+ CheckedInt<T> x = four;
+ x /= 2;
+ VERIFY(x == two);
+ }
+ VERIFY(three % 2 == one);
+ VERIFY(3 % two == one);
+ {
+ CheckedInt<T> x = three;
+ x %= 2;
+ VERIFY(x == one);
+ }
+
+ VERIFY(one == 1);
+ VERIFY(1 == one);
+ VERIFY_IS_FALSE(two == 1);
+ VERIFY_IS_FALSE(1 == two);
+ VERIFY_IS_FALSE(someInvalid == 1);
+ VERIFY_IS_FALSE(1 == someInvalid);
+
+ // Check that compound operators work when both sides of the expression
+ // are checked integers
+ {
+ CheckedInt<T> x = one;
+ x += two;
+ VERIFY(x == three);
+ }
+ {
+ CheckedInt<T> x = two;
+ x -= one;
+ VERIFY(x == one);
+ }
+ {
+ CheckedInt<T> x = one;
+ x *= two;
+ VERIFY(x == two);
+ }
+ {
+ CheckedInt<T> x = four;
+ x /= two;
+ VERIFY(x == two);
+ }
+ {
+ CheckedInt<T> x = three;
+ x %= two;
+ VERIFY(x == one);
+ }
+
+ // Check that compound operators work when both sides of the expression
+ // are checked integers and the right-hand side is invalid
+ {
+ CheckedInt<T> x = one;
+ x += someInvalid;
+ VERIFY_IS_INVALID(x);
+ }
+ {
+ CheckedInt<T> x = two;
+ x -= someInvalid;
+ VERIFY_IS_INVALID(x);
+ }
+ {
+ CheckedInt<T> x = one;
+ x *= someInvalid;
+ VERIFY_IS_INVALID(x);
+ }
+ {
+ CheckedInt<T> x = four;
+ x /= someInvalid;
+ VERIFY_IS_INVALID(x);
+ }
+ {
+ CheckedInt<T> x = three;
+ x %= someInvalid;
+ VERIFY_IS_INVALID(x);
+ }
+
+ // Check simple casting between different signedness and sizes.
+ {
+ CheckedInt<uint8_t> foo = CheckedInt<uint16_t>(2).toChecked<uint8_t>();
+ VERIFY_IS_VALID(foo);
+ VERIFY(foo == 2);
+ }
+ {
+ CheckedInt<uint8_t> foo = CheckedInt<uint16_t>(255).toChecked<uint8_t>();
+ VERIFY_IS_VALID(foo);
+ VERIFY(foo == 255);
+ }
+ {
+ CheckedInt<uint8_t> foo = CheckedInt<uint16_t>(256).toChecked<uint8_t>();
+ VERIFY_IS_INVALID(foo);
+ }
+ {
+ CheckedInt<uint8_t> foo = CheckedInt<int8_t>(-2).toChecked<uint8_t>();
+ VERIFY_IS_INVALID(foo);
+ }
+
+  // Check that construction of CheckedInt from an integer value of a
+  // mismatched type is range-checked. Also check casting between all types.
+
+#define VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE2(U, V, PostVExpr) \
+ { \
+ bool isUSigned = std::is_signed_v<U>; \
+ VERIFY_IS_VALID(CheckedInt<T>(V(0) PostVExpr)); \
+ VERIFY_IS_VALID(CheckedInt<T>(V(1) PostVExpr)); \
+ VERIFY_IS_VALID(CheckedInt<T>(V(100) PostVExpr)); \
+ if (isUSigned) { \
+ VERIFY_IS_VALID_IF(CheckedInt<T>(V(-1) PostVExpr), isTSigned); \
+ } \
+ if (sizeof(U) > sizeof(T)) { \
+ VERIFY_IS_INVALID(CheckedInt<T>( \
+ V(std::numeric_limits<T>::max()) PostVExpr + one.value())); \
+ } \
+ VERIFY_IS_VALID_IF( \
+ CheckedInt<T>(std::numeric_limits<U>::max()), \
+ (sizeof(T) > sizeof(U) || \
+ ((sizeof(T) == sizeof(U)) && (isUSigned || !isTSigned)))); \
+ VERIFY_IS_VALID_IF(CheckedInt<T>(std::numeric_limits<U>::min()), \
+ isUSigned == false ? 1 \
+ : bool(isTSigned) == false ? 0 \
+ : sizeof(T) >= sizeof(U)); \
+ }
+#define VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(U) \
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE2(U, U, +zero) \
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE2(U, CheckedInt<U>, .toChecked<T>())
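+
+  // Each integer type U below is exercised in two forms: constructing
+  // CheckedInt<T> from a plain U value (where the |+zero| suffix also forces
+  // mixed-type arithmetic through the checked operators), and converting a
+  // CheckedInt<U> via toChecked<T>().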
+
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(int8_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(uint8_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(int16_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(uint16_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(int32_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(uint32_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(int64_t)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(uint64_t)
+
+ typedef signed char signedChar;
+ typedef unsigned char unsignedChar;
+ typedef unsigned short unsignedShort;
+ typedef unsigned int unsignedInt;
+ typedef unsigned long unsignedLong;
+ typedef long long longLong;
+ typedef unsigned long long unsignedLongLong;
+
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(char)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(signedChar)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(unsignedChar)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(short)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(unsignedShort)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(int)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(unsignedInt)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(long)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(unsignedLong)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(longLong)
+ VERIFY_CONSTRUCTION_FROM_INTEGER_TYPE(unsignedLongLong)
+
+ /* Test increment/decrement operators */
+
+ CheckedInt<T> x, y;
+ x = one;
+ y = x++;
+ VERIFY(x == two);
+ VERIFY(y == one);
+ x = one;
+ y = ++x;
+ VERIFY(x == two);
+ VERIFY(y == two);
+ x = one;
+ y = x--;
+ VERIFY(x == zero);
+ VERIFY(y == one);
+ x = one;
+ y = --x;
+ VERIFY(x == zero);
+ VERIFY(y == zero);
+ x = max;
+ VERIFY_IS_VALID(x++);
+ x = max;
+ VERIFY_IS_INVALID(++x);
+ x = min;
+ VERIFY_IS_VALID(x--);
+ x = min;
+ VERIFY_IS_INVALID(--x);
+
+ gIntegerTypesTested++;
+}
+
+int main() {
+ test<int8_t>();
+ test<uint8_t>();
+ test<int16_t>();
+ test<uint16_t>();
+ test<int32_t>();
+ test<uint32_t>();
+ test<int64_t>();
+ test<uint64_t>();
+
+ test<char>();
+ test<signed char>();
+ test<unsigned char>();
+ test<short>();
+ test<unsigned short>();
+ test<int>();
+ test<unsigned int>();
+ test<long>();
+ test<unsigned long>();
+ test<long long>();
+ test<unsigned long long>();
+
+ const int MIN_TYPES_TESTED = 9;
+ if (gIntegerTypesTested < MIN_TYPES_TESTED) {
+ std::cerr << "Only " << gIntegerTypesTested << " have been tested. "
+ << "This should not be less than " << MIN_TYPES_TESTED << "."
+ << std::endl;
+ gTestsFailed++;
+ }
+
+ std::cerr << gTestsFailed << " tests failed, " << gTestsPassed
+ << " tests passed out of " << gTestsFailed + gTestsPassed
+ << " tests, covering " << gIntegerTypesTested
+ << " distinct integer types." << std::endl;
+
+ return gTestsFailed > 0;
+}
diff --git a/mfbt/tests/TestCompactPair.cpp b/mfbt/tests/TestCompactPair.cpp
new file mode 100644
index 0000000000..66300c338a
--- /dev/null
+++ b/mfbt/tests/TestCompactPair.cpp
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/CompactPair.h"
+
+using mozilla::CompactPair;
+using mozilla::MakeCompactPair;
+
+// Sizes aren't part of the guaranteed CompactPair interface, but we want to
+// verify our attempts at compactness through EBO are moderately functional,
+// *somewhere*.
+#define INSTANTIATE(T1, T2, name, size) \
+ CompactPair<T1, T2> name##_1(T1(0), T2(0)); \
+ static_assert(sizeof(name##_1.first()) > 0, \
+ "first method should work on CompactPair<" #T1 ", " #T2 ">"); \
+ \
+ static_assert(sizeof(name##_1.second()) > 0, \
+ "second method should work on CompactPair<" #T1 ", " #T2 ">"); \
+ \
+ static_assert(sizeof(name##_1) == (size), \
+ "CompactPair<" #T1 ", " #T2 "> has an unexpected size"); \
+ \
+ CompactPair<T2, T1> name##_2(T2(0), T1(0)); \
+ static_assert(sizeof(name##_2.first()) > 0, \
+ "first method should work on CompactPair<" #T2 ", " #T1 ">"); \
+ \
+ static_assert(sizeof(name##_2.second()) > 0, \
+ "second method should work on CompactPair<" #T2 ", " #T1 ">"); \
+ \
+ static_assert(sizeof(name##_2) == (size), \
+ "CompactPair<" #T2 ", " #T1 "> has an unexpected size");
+
+static constexpr std::size_t sizemax(std::size_t a, std::size_t b) {
+ return (a > b) ? a : b;
+}
+
+INSTANTIATE(int, int, prim1, 2 * sizeof(int));
+INSTANTIATE(int, long, prim2,
+ sizeof(long) + sizemax(sizeof(int), alignof(long)));
+
+struct EmptyClass {
+ explicit EmptyClass(int) {}
+};
+struct NonEmpty {
+ char mC;
+ explicit NonEmpty(int) : mC('\0') {}
+};
+
+INSTANTIATE(int, EmptyClass, both1, sizeof(int));
+INSTANTIATE(int, NonEmpty, both2, sizeof(int) + alignof(int));
+INSTANTIATE(EmptyClass, NonEmpty, both3, 1);
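+
+// A minimal illustration (the |EboDerived| name is local to this sketch) of
+// the EBO effect the sizes above rely on: an empty base class subobject may
+// occupy zero bytes, so deriving from an empty type adds no storage. This
+// holds on the compilers these size assertions already assume.
+struct EboDerived : EmptyClass {
+  char mC;
+  explicit EboDerived(int aI) : EmptyClass(aI), mC('\0') {}
+};
+static_assert(sizeof(EboDerived) == sizeof(char),
+              "an empty base should add no storage to the derived object");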
+
+struct A {
+ char dummy;
+ explicit A(int) : dummy('\0') {}
+};
+struct B : A {
+ explicit B(int aI) : A(aI) {}
+};
+
+INSTANTIATE(A, A, class1, 2);
+INSTANTIATE(A, B, class2, 2);
+INSTANTIATE(A, EmptyClass, class3, 1);
+
+struct EmptyNonMovableNonDefaultConstructible {
+ explicit EmptyNonMovableNonDefaultConstructible(int) {}
+
+ EmptyNonMovableNonDefaultConstructible(
+ const EmptyNonMovableNonDefaultConstructible&) = delete;
+ EmptyNonMovableNonDefaultConstructible(
+ EmptyNonMovableNonDefaultConstructible&&) = delete;
+ EmptyNonMovableNonDefaultConstructible& operator=(
+ const EmptyNonMovableNonDefaultConstructible&) = delete;
+ EmptyNonMovableNonDefaultConstructible& operator=(
+ EmptyNonMovableNonDefaultConstructible&&) = delete;
+};
+
+static void TestInPlaceConstruction() {
+ constexpr int firstValue = 42;
+ constexpr int secondValue = 43;
+
+ {
+ const CompactPair<EmptyNonMovableNonDefaultConstructible, int> pair{
+ std::piecewise_construct, std::tuple(firstValue),
+ std::tuple(secondValue)};
+ MOZ_RELEASE_ASSERT(pair.second() == secondValue);
+ }
+
+ {
+ const CompactPair<int, EmptyNonMovableNonDefaultConstructible> pair{
+ std::piecewise_construct, std::tuple(firstValue),
+ std::tuple(secondValue)};
+ MOZ_RELEASE_ASSERT(pair.first() == firstValue);
+ }
+
+ {
+ const CompactPair<int, int> pair{std::piecewise_construct,
+ std::tuple(firstValue),
+ std::tuple(secondValue)};
+ MOZ_RELEASE_ASSERT(pair.first() == firstValue);
+ MOZ_RELEASE_ASSERT(pair.second() == secondValue);
+ }
+
+ {
+ const CompactPair<EmptyNonMovableNonDefaultConstructible,
+ EmptyNonMovableNonDefaultConstructible>
+ pair{std::piecewise_construct, std::tuple(firstValue),
+ std::tuple(secondValue)};
+
+    // Nothing to assert here; successfully constructing both non-movable
+    // elements in place is the test.
+ }
+}
+
+struct OtherEmpty : EmptyClass {
+ explicit OtherEmpty(int aI) : EmptyClass(aI) {}
+};
+
+// C++11 requires distinct objects of the same type, within the same "most
+// derived object", to have different addresses. CompactPair allocates its
+// elements as two bases, a base and a member, or two members. If the two
+// elements have non-zero size or are unrelated, no big deal. But if they're
+// both empty and related, something -- possibly both -- must be inflated.
+// Exactly which are inflated depends which CompactPairHelper specialization is
+// used. We could potentially assert something about size for this case, but
+// whatever we could assert would be very finicky. Plus it's two empty classes
+// -- hardly likely. So don't bother trying to assert anything about this case.
+// INSTANTIATE(EmptyClass, OtherEmpty, class4, ...something finicky...);
+
+int main() {
+ A a(0);
+ B b(0);
+ const A constA(0);
+ const B constB(0);
+
+ // Check that MakeCompactPair generates CompactPair objects of the correct
+ // types.
+ static_assert(
+ std::is_same_v<decltype(MakeCompactPair(A(0), B(0))), CompactPair<A, B>>,
+ "MakeCompactPair should strip rvalue references");
+ static_assert(
+ std::is_same_v<decltype(MakeCompactPair(a, b)), CompactPair<A, B>>,
+ "MakeCompactPair should strip lvalue references");
+ static_assert(std::is_same_v<decltype(MakeCompactPair(constA, constB)),
+ CompactPair<A, B>>,
+ "MakeCompactPair should strip CV-qualifiers");
+
+ // Check that copy assignment and move assignment work.
+ a = constA;
+ a = A(0);
+
+ TestInPlaceConstruction();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestCountPopulation.cpp b/mfbt/tests/TestCountPopulation.cpp
new file mode 100644
index 0000000000..23234bbe5a
--- /dev/null
+++ b/mfbt/tests/TestCountPopulation.cpp
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+using mozilla::CountPopulation32;
+
+static void TestCountPopulation32() {
+ MOZ_RELEASE_ASSERT(CountPopulation32(0xFFFFFFFF) == 32);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0xF0FF1000) == 13);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x7F8F0001) == 13);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x3FFF0100) == 15);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x1FF50010) == 12);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00800000) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00400000) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00008000) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00004000) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00000080) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00000040) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00000001) == 1);
+ MOZ_RELEASE_ASSERT(CountPopulation32(0x00000000) == 0);
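+
+  // A reference cross-check (a local sketch, not part of the original
+  // fixed-value list): Kernighan's loop clears the lowest set bit once per
+  // iteration, so its count must agree with CountPopulation32.
+  const uint32_t kValues[] = {0x00000000, 0x00000001, 0x80000000, 0xF0FF1000,
+                              0xFFFFFFFF};
+  for (uint32_t v : kValues) {
+    uint32_t naive = 0;
+    for (uint32_t bits = v; bits; bits &= bits - 1) {
+      ++naive;  // one iteration per set bit
+    }
+    MOZ_RELEASE_ASSERT(CountPopulation32(v) == naive);
+  }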
+}
+
+int main() {
+ TestCountPopulation32();
+ return 0;
+}
diff --git a/mfbt/tests/TestCountZeroes.cpp b/mfbt/tests/TestCountZeroes.cpp
new file mode 100644
index 0000000000..4c8effc9cd
--- /dev/null
+++ b/mfbt/tests/TestCountZeroes.cpp
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+using mozilla::CountLeadingZeroes32;
+using mozilla::CountLeadingZeroes64;
+using mozilla::CountTrailingZeroes32;
+using mozilla::CountTrailingZeroes64;
+
+static void TestLeadingZeroes32() {
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0xF0FF1000) == 0);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x7F8F0001) == 1);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x3FFF0100) == 2);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x1FF50010) == 3);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00800000) == 8);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00400000) == 9);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00008000) == 16);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00004000) == 17);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00000080) == 24);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00000040) == 25);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes32(0x00000001) == 31);
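+
+  // A reference cross-check (a local sketch): for nonzero input, the
+  // leading-zero count is 31 minus the index of the highest set bit, which
+  // the shift loop below locates directly.
+  const uint32_t kValues[] = {0x00000001, 0x00000040, 0x00008000, 0xF0FF1000};
+  for (uint32_t v : kValues) {
+    uint32_t highest = 0;
+    for (uint32_t bits = v >> 1; bits; bits >>= 1) {
+      ++highest;  // index of the highest set bit
+    }
+    MOZ_RELEASE_ASSERT(CountLeadingZeroes32(v) == 31 - highest);
+  }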
+}
+
+static void TestLeadingZeroes64() {
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0xF000F0F010000000) == 0);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x70F080F000000001) == 1);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x30F0F0F000100000) == 2);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x10F0F05000000100) == 3);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0080000000000001) == 8);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0040000010001000) == 9);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x000080F010000000) == 16);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x000040F010000000) == 17);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000008000100100) == 24);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000004100010010) == 25);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000080100100) == 32);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000041001010) == 33);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000800100) == 40);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000411010) == 41);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000008001) == 48);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000004010) == 49);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000000081) == 56);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000000040) == 57);
+ MOZ_RELEASE_ASSERT(CountLeadingZeroes64(0x0000000000000001) == 63);
+}
+
+static void TestTrailingZeroes32() {
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x0100FFFF) == 0);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x7000FFFE) == 1);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x0080FFFC) == 2);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x0080FFF8) == 3);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x010FFF00) == 8);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x7000FE00) == 9);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x10CF0000) == 16);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x0BDE0000) == 17);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x0F000000) == 24);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0xDE000000) == 25);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes32(0x80000000) == 31);
+}
+
+static void TestTrailingZeroes64() {
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x000100000F0F0F0F) == 0);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x070000000F0F0F0E) == 1);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x000008000F0F0F0C) == 2);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x000008000F0F0F08) == 3);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0xC001000F0F0F0F00) == 8);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x0200000F0F0F0E00) == 9);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0xB0C10F0FEFDF0000) == 16);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x0AAA00F0FF0E0000) == 17);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0xD010F0FEDF000000) == 24);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x7AAF0CF0BE000000) == 25);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x20F0A5D100000000) == 32);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x489BF0B200000000) == 33);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0xE0F0D10000000000) == 40);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x97F0B20000000000) == 41);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x2C07000000000000) == 48);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x1FBA000000000000) == 49);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x0100000000000000) == 56);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x0200000000000000) == 57);
+ MOZ_RELEASE_ASSERT(CountTrailingZeroes64(0x8000000000000000) == 63);
+}
+
+int main() {
+ TestLeadingZeroes32();
+ TestLeadingZeroes64();
+ TestTrailingZeroes32();
+ TestTrailingZeroes64();
+ return 0;
+}
diff --git a/mfbt/tests/TestDefineEnum.cpp b/mfbt/tests/TestDefineEnum.cpp
new file mode 100644
index 0000000000..b5fbe3a0fd
--- /dev/null
+++ b/mfbt/tests/TestDefineEnum.cpp
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DefineEnum.h"
+
+// Sanity test for MOZ_DEFINE_ENUM.
+
+MOZ_DEFINE_ENUM(TestEnum1, (EnumeratorA, EnumeratorB, EnumeratorC));
+
+static_assert(EnumeratorA == 0, "Unexpected enumerator value");
+static_assert(EnumeratorB == 1, "Unexpected enumerator value");
+static_assert(EnumeratorC == 2, "Unexpected enumerator value");
+static_assert(kHighestTestEnum1 == EnumeratorC, "Incorrect highest value");
+static_assert(kTestEnum1Count == 3, "Incorrect enumerator count");
+
+// Sanity test for MOZ_DEFINE_ENUM_CLASS.
+
+MOZ_DEFINE_ENUM_CLASS(TestEnum2, (A, B, C));
+
+static_assert(TestEnum2::A == TestEnum2(0), "Unexpected enumerator value");
+static_assert(TestEnum2::B == TestEnum2(1), "Unexpected enumerator value");
+static_assert(TestEnum2::C == TestEnum2(2), "Unexpected enumerator value");
+static_assert(kHighestTestEnum2 == TestEnum2::C, "Incorrect highest value");
+static_assert(kTestEnum2Count == 3, "Incorrect enumerator count");
+
+// TODO: Test that the _WITH_BASE variants generate enumerators with the
+// correct underlying types. To do this, we need an |UnderlyingType|
+// type trait, which needs compiler support (recent versions of
+// compilers in the GCC family provide an |__underlying_type| builtin
+//       for this purpose).
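+//
+// As a sketch of what such a test could look like (assuming a hypothetical
+// |TestEnumWithBase| defined via a _WITH_BASE variant with base uint8_t),
+// std::underlying_type_t from <type_traits> could stand in for that trait:
+//
+//   static_assert(std::is_same_v<std::underlying_type_t<TestEnumWithBase>,
+//                                uint8_t>,
+//                 "Unexpected underlying type");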
+
+// Sanity test for MOZ_DEFINE_ENUM[_CLASS]_AT_CLASS_SCOPE.
+
+struct TestClass {
+ // clang-format off
+ MOZ_DEFINE_ENUM_AT_CLASS_SCOPE(
+ TestEnum3, (
+ EnumeratorA,
+ EnumeratorB,
+ EnumeratorC
+ ));
+
+ MOZ_DEFINE_ENUM_CLASS_AT_CLASS_SCOPE(
+ TestEnum4, (
+ A,
+ B,
+ C
+ ));
+ // clang-format on
+
+ static_assert(EnumeratorA == 0, "Unexpected enumerator value");
+ static_assert(EnumeratorB == 1, "Unexpected enumerator value");
+ static_assert(EnumeratorC == 2, "Unexpected enumerator value");
+ static_assert(sHighestTestEnum3 == EnumeratorC, "Incorrect highest value");
+ static_assert(sTestEnum3Count == 3, "Incorrect enumerator count");
+
+ static_assert(TestEnum4::A == TestEnum4(0), "Unexpected enumerator value");
+ static_assert(TestEnum4::B == TestEnum4(1), "Unexpected enumerator value");
+ static_assert(TestEnum4::C == TestEnum4(2), "Unexpected enumerator value");
+ static_assert(sHighestTestEnum4 == TestEnum4::C, "Incorrect highest value");
+ static_assert(sTestEnum4Count == 3, "Incorrect enumerator count");
+};
+
+// Test that MOZ_DEFINE_ENUM doesn't allow giving enumerators initializers.
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+MOZ_DEFINE_ENUM_CLASS(EnumWithInitializer1, (A = -1, B, C))
+MOZ_DEFINE_ENUM_CLASS(EnumWithInitializer2, (A = 1, B, C))
+MOZ_DEFINE_ENUM_CLASS(EnumWithInitializer3, (A, B = 6, C))
+#endif
+
+int main() {
+ // Nothing to do here, all tests are static_asserts.
+ return 0;
+}
diff --git a/mfbt/tests/TestDoublyLinkedList.cpp b/mfbt/tests/TestDoublyLinkedList.cpp
new file mode 100644
index 0000000000..3065b15ddb
--- /dev/null
+++ b/mfbt/tests/TestDoublyLinkedList.cpp
@@ -0,0 +1,306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/DoublyLinkedList.h"
+
+using mozilla::DoublyLinkedList;
+using mozilla::DoublyLinkedListElement;
+
+struct SomeClass : public DoublyLinkedListElement<SomeClass> {
+ unsigned int mValue;
+ explicit SomeClass(int aValue) : mValue(aValue) {}
+ void incr() { ++mValue; }
+ bool operator==(const SomeClass& other) const {
+ return mValue == other.mValue;
+ }
+};
+
+template <typename ListType, size_t N>
+static void CheckListValues(ListType& list, unsigned int (&values)[N]) {
+ size_t count = 0;
+ for (auto& x : list) {
+ MOZ_RELEASE_ASSERT(x.mValue == values[count]);
+ ++count;
+ }
+ MOZ_RELEASE_ASSERT(count == N);
+}
+
+static void TestDoublyLinkedList() {
+ DoublyLinkedList<SomeClass> list;
+
+ SomeClass one(1), two(2), three(3);
+
+ MOZ_RELEASE_ASSERT(list.isEmpty());
+ MOZ_RELEASE_ASSERT(!list.begin());
+ MOZ_RELEASE_ASSERT(!list.end());
+
+ for (SomeClass& x : list) {
+ MOZ_RELEASE_ASSERT(x.mValue);
+ MOZ_RELEASE_ASSERT(false);
+ }
+
+ list.pushFront(&one);
+ {
+ unsigned int check[]{1};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(list.contains(one));
+ MOZ_RELEASE_ASSERT(!list.contains(two));
+ MOZ_RELEASE_ASSERT(!list.contains(three));
+
+ MOZ_RELEASE_ASSERT(!list.isEmpty());
+ MOZ_RELEASE_ASSERT(list.begin()->mValue == 1);
+ MOZ_RELEASE_ASSERT(!list.end());
+
+ list.pushFront(&two);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(list.begin()->mValue == 2);
+ MOZ_RELEASE_ASSERT(!list.end());
+ MOZ_RELEASE_ASSERT(!list.contains(three));
+
+ list.pushBack(&three);
+ {
+ unsigned int check[]{2, 1, 3};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(list.begin()->mValue == 2);
+ MOZ_RELEASE_ASSERT(!list.end());
+
+ list.remove(&one);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ list.insertBefore(list.find(three), &one);
+ {
+ unsigned int check[]{2, 1, 3};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&three);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ list.insertBefore(list.find(two), &three);
+ {
+ unsigned int check[]{3, 2, 1};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&three);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ list.insertBefore(++list.find(two), &three);
+ {
+ unsigned int check[]{2, 3, 1};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&one);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&two);
+ {
+ unsigned int check[]{3};
+ CheckListValues(list, check);
+ }
+
+ list.insertBefore(list.find(three), &two);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&three);
+ {
+ unsigned int check[]{2};
+ CheckListValues(list, check);
+ }
+
+ list.remove(&two);
+ MOZ_RELEASE_ASSERT(list.isEmpty());
+
+ list.pushBack(&three);
+ {
+ unsigned int check[]{3};
+ CheckListValues(list, check);
+ }
+
+ list.pushFront(&two);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ // This should modify the values of |two| and |three| as pointers to them are
+ // stored in the list, not copies.
+ for (SomeClass& x : list) {
+ x.incr();
+ }
+
+ MOZ_RELEASE_ASSERT(*list.begin() == two);
+ MOZ_RELEASE_ASSERT(*++list.begin() == three);
+
+ SomeClass four(4);
+ MOZ_RELEASE_ASSERT(++list.begin() == list.find(four));
+}
+
+struct InTwoLists {
+ explicit InTwoLists(unsigned int aValue) : mValue(aValue) {}
+ DoublyLinkedListElement<InTwoLists> mListOne;
+ DoublyLinkedListElement<InTwoLists> mListTwo;
+ unsigned int mValue;
+
+ struct GetListOneTrait {
+ static DoublyLinkedListElement<InTwoLists>& Get(InTwoLists* aThis) {
+ return aThis->mListOne;
+ }
+ };
+};
+
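+// DoublyLinkedList consults the GetDoublyLinkedListElement trait by default,
+// so specializing it for InTwoLists (below) lets the second list be declared
+// without naming an accessor trait, while the first list passes
+// GetListOneTrait explicitly.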
+namespace mozilla {
+
+template <>
+struct GetDoublyLinkedListElement<InTwoLists> {
+ static DoublyLinkedListElement<InTwoLists>& Get(InTwoLists* aThis) {
+ return aThis->mListTwo;
+ }
+};
+
+} // namespace mozilla
+
+static void TestCustomAccessor() {
+ DoublyLinkedList<InTwoLists, InTwoLists::GetListOneTrait> listOne;
+ DoublyLinkedList<InTwoLists> listTwo;
+
+ InTwoLists one(1);
+ InTwoLists two(2);
+
+ listOne.pushBack(&one);
+ listOne.pushBack(&two);
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(listOne, check);
+ }
+
+ listTwo.pushBack(&one);
+ listTwo.pushBack(&two);
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(listOne, check);
+ }
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(listTwo, check);
+ }
+
+ (void)listTwo.popBack();
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(listOne, check);
+ }
+ {
+ unsigned int check[]{1};
+ CheckListValues(listTwo, check);
+ }
+
+ (void)listOne.popBack();
+ {
+ unsigned int check[]{1};
+ CheckListValues(listOne, check);
+ }
+ {
+ unsigned int check[]{1};
+ CheckListValues(listTwo, check);
+ }
+}
+
+static void TestSafeDoublyLinkedList() {
+ mozilla::SafeDoublyLinkedList<SomeClass> list;
+ auto* elt1 = new SomeClass(0);
+ auto* elt2 = new SomeClass(0);
+ auto* elt3 = new SomeClass(0);
+ auto* elt4 = new SomeClass(0);
+ list.pushBack(elt1);
+ list.pushBack(elt2);
+ list.pushBack(elt3);
+ auto iter = list.begin();
+
+ // basic tests for iterator validity
+ MOZ_RELEASE_ASSERT(
+ &*iter == elt1,
+ "iterator returned by begin() must point to the first element!");
+ MOZ_RELEASE_ASSERT(
+ &*(iter.next()) == elt2,
+ "iterator returned by begin() must have the second element as 'next'!");
+ list.remove(elt2);
+ MOZ_RELEASE_ASSERT(
+ &*(iter.next()) == elt3,
+ "After removal of the 2nd element 'next' must point to the 3rd element!");
+ ++iter;
+ MOZ_RELEASE_ASSERT(
+ &*iter == elt3,
+ "After advancing one step the current element must be the 3rd one!");
+ MOZ_RELEASE_ASSERT(!iter.next(), "This is the last element of the list!");
+ list.pushBack(elt4);
+ MOZ_RELEASE_ASSERT(&*(iter.next()) == elt4,
+ "After adding an element at the end of the list the "
+ "iterator must be updated!");
+
+ // advance to last element, then remove last element
+ ++iter;
+ list.popBack();
+ MOZ_RELEASE_ASSERT(bool(iter) == false,
+ "After removing the last element, the iterator pointing "
+ "to the last element must be empty!");
+
+ // iterate the whole remaining list, increment values
+ for (auto& el : list) {
+ el.incr();
+ }
+ MOZ_RELEASE_ASSERT(elt1->mValue == 1);
+ MOZ_RELEASE_ASSERT(elt2->mValue == 0);
+ MOZ_RELEASE_ASSERT(elt3->mValue == 1);
+ MOZ_RELEASE_ASSERT(elt4->mValue == 0);
+
+ // Removing the first element of the list while iterating must empty the
+ // iterator
+ for (auto it = list.begin(); it != list.end(); ++it) {
+ MOZ_RELEASE_ASSERT(bool(it) == true, "The iterator must contain a value!");
+ list.popFront();
+ MOZ_RELEASE_ASSERT(
+ bool(it) == false,
+ "After removing the first element, the iterator must be empty!");
+ }
+
+ delete elt1;
+ delete elt2;
+ delete elt3;
+ delete elt4;
+}
+
+int main() {
+ TestDoublyLinkedList();
+ TestCustomAccessor();
+  TestSafeDoublyLinkedList();
+ return 0;
+}
diff --git a/mfbt/tests/TestEndian.cpp b/mfbt/tests/TestEndian.cpp
new file mode 100644
index 0000000000..7f275375f6
--- /dev/null
+++ b/mfbt/tests/TestEndian.cpp
@@ -0,0 +1,501 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/EndianUtils.h"
+
+#include <stddef.h>
+
+using mozilla::BigEndian;
+using mozilla::LittleEndian;
+using mozilla::NativeEndian;
+
+template <typename T>
+void TestSingleSwap(T aValue, T aSwappedValue) {
+#if MOZ_LITTLE_ENDIAN()
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToBigEndian(aValue) == aSwappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromBigEndian(aValue) == aSwappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToNetworkOrder(aValue) == aSwappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromNetworkOrder(aValue) ==
+ aSwappedValue);
+#else
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToLittleEndian(aValue) == aSwappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromLittleEndian(aValue) ==
+ aSwappedValue);
+#endif
+}
+
+template <typename T>
+void TestSingleNoSwap(T aValue, T aUnswappedValue) {
+#if MOZ_LITTLE_ENDIAN()
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToLittleEndian(aValue) ==
+ aUnswappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromLittleEndian(aValue) ==
+ aUnswappedValue);
+#else
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToBigEndian(aValue) == aUnswappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromBigEndian(aValue) ==
+ aUnswappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapToNetworkOrder(aValue) ==
+ aUnswappedValue);
+ MOZ_RELEASE_ASSERT(NativeEndian::swapFromNetworkOrder(aValue) ==
+ aUnswappedValue);
+#endif
+}
+
+// EndianUtils.h functions are declared as protected in a base class and
+// then re-exported as public in public derived classes. The
+// standardese around explicit instantiation of templates is not clear
+// in such cases. Provide these wrappers to make things more explicit.
+// For your own enlightenment, you may wish to peruse:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=56152 and subsequently
+// http://j.mp/XosS6S .
+#define WRAP_COPYTO(NAME) \
+ template <typename T> \
+ void NAME(void* aDst, const T* aSrc, size_t aCount) { \
+ NativeEndian::NAME<T>(aDst, aSrc, aCount); \
+ }
+
+WRAP_COPYTO(copyAndSwapToLittleEndian)
+WRAP_COPYTO(copyAndSwapToBigEndian)
+WRAP_COPYTO(copyAndSwapToNetworkOrder)
+
+#define WRAP_COPYFROM(NAME) \
+ template <typename T> \
+ void NAME(T* aDst, const void* aSrc, size_t aCount) { \
+ NativeEndian::NAME<T>(aDst, aSrc, aCount); \
+ }
+
+WRAP_COPYFROM(copyAndSwapFromLittleEndian)
+WRAP_COPYFROM(copyAndSwapFromBigEndian)
+WRAP_COPYFROM(copyAndSwapFromNetworkOrder)
+
+#define WRAP_IN_PLACE(NAME) \
+ template <typename T> \
+ void NAME(T* aP, size_t aCount) { \
+ NativeEndian::NAME<T>(aP, aCount); \
+ }
+WRAP_IN_PLACE(swapToLittleEndianInPlace)
+WRAP_IN_PLACE(swapFromLittleEndianInPlace)
+WRAP_IN_PLACE(swapToBigEndianInPlace)
+WRAP_IN_PLACE(swapFromBigEndianInPlace)
+WRAP_IN_PLACE(swapToNetworkOrderInPlace)
+WRAP_IN_PLACE(swapFromNetworkOrderInPlace)
+
+enum SwapExpectation { Swap, NoSwap };
+
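+// Each TestBulk*Sub driver writes aValues through the supplied swapper into
+// the middle of a buffer pre-filled with a guard pattern, at every offset
+// and length it supports, then verifies that (a) bytes outside the written
+// range still hold the guard pattern, (b) under a NoSwap expectation the
+// written bytes are byte-identical to the input, and (c) reading each
+// element back through aReaderFunc recovers the original values.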
+template <typename T, size_t Count>
+void TestBulkSwapToSub(enum SwapExpectation aExpectSwap,
+ const T (&aValues)[Count],
+ void (*aSwapperFunc)(void*, const T*, size_t),
+ T (*aReaderFunc)(const void*)) {
+ const size_t arraySize = 2 * Count;
+ const size_t bufferSize = arraySize * sizeof(T);
+ static uint8_t buffer[bufferSize];
+ const uint8_t fillValue = 0xa5;
+ static uint8_t checkBuffer[bufferSize];
+
+ MOZ_RELEASE_ASSERT(bufferSize > 2 * sizeof(T));
+
+ memset(checkBuffer, fillValue, bufferSize);
+
+ for (size_t startPosition = 0; startPosition < sizeof(T); ++startPosition) {
+ for (size_t nValues = 0; nValues < Count; ++nValues) {
+ memset(buffer, fillValue, bufferSize);
+ aSwapperFunc(buffer + startPosition, aValues, nValues);
+
+ MOZ_RELEASE_ASSERT(memcmp(buffer, checkBuffer, startPosition) == 0);
+ size_t valuesEndPosition = startPosition + sizeof(T) * nValues;
+ MOZ_RELEASE_ASSERT(memcmp(buffer + valuesEndPosition,
+ checkBuffer + valuesEndPosition,
+ bufferSize - valuesEndPosition) == 0);
+ if (aExpectSwap == NoSwap) {
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer + startPosition, aValues, nValues * sizeof(T)) == 0);
+ }
+ for (size_t i = 0; i < nValues; ++i) {
+ MOZ_RELEASE_ASSERT(
+ aReaderFunc(buffer + startPosition + sizeof(T) * i) == aValues[i]);
+ }
+ }
+ }
+}
+
+template <typename T, size_t Count>
+void TestBulkSwapFromSub(enum SwapExpectation aExpectSwap,
+ const T (&aValues)[Count],
+ void (*aSwapperFunc)(T*, const void*, size_t),
+ T (*aReaderFunc)(const void*)) {
+ const size_t arraySize = 2 * Count;
+ const size_t bufferSize = arraySize * sizeof(T);
+ static T buffer[arraySize];
+ const uint8_t fillValue = 0xa5;
+ static T checkBuffer[arraySize];
+
+ memset(checkBuffer, fillValue, bufferSize);
+
+ for (size_t startPosition = 0; startPosition < Count; ++startPosition) {
+ for (size_t nValues = 0; nValues < (Count - startPosition); ++nValues) {
+ memset(buffer, fillValue, bufferSize);
+ aSwapperFunc(buffer + startPosition, aValues, nValues);
+
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer, checkBuffer, startPosition * sizeof(T)) == 0);
+ size_t valuesEndPosition = startPosition + nValues;
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer + valuesEndPosition, checkBuffer + valuesEndPosition,
+ (arraySize - valuesEndPosition) * sizeof(T)) == 0);
+ if (aExpectSwap == NoSwap) {
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer + startPosition, aValues, nValues * sizeof(T)) == 0);
+ }
+ for (size_t i = 0; i < nValues; ++i) {
+ MOZ_RELEASE_ASSERT(aReaderFunc(buffer + startPosition + i) ==
+ aValues[i]);
+ }
+ }
+ }
+}
+
+template <typename T, size_t Count>
+void TestBulkInPlaceSub(enum SwapExpectation aExpectSwap,
+ const T (&aValues)[Count],
+ void (*aSwapperFunc)(T*, size_t),
+ T (*aReaderFunc)(const void*)) {
+ const size_t bufferCount = 4 * Count;
+ const size_t bufferSize = bufferCount * sizeof(T);
+ static T buffer[bufferCount];
+ const T fillValue = 0xa5;
+ static T checkBuffer[bufferCount];
+
+ MOZ_RELEASE_ASSERT(bufferSize > 2 * sizeof(T));
+
+ memset(checkBuffer, fillValue, bufferSize);
+
+ for (size_t startPosition = 0; startPosition < Count; ++startPosition) {
+ for (size_t nValues = 0; nValues < Count; ++nValues) {
+ memset(buffer, fillValue, bufferSize);
+ memcpy(buffer + startPosition, aValues, nValues * sizeof(T));
+ aSwapperFunc(buffer + startPosition, nValues);
+
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer, checkBuffer, startPosition * sizeof(T)) == 0);
+ size_t valuesEndPosition = startPosition + nValues;
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer + valuesEndPosition, checkBuffer + valuesEndPosition,
+ bufferSize - valuesEndPosition * sizeof(T)) == 0);
+ if (aExpectSwap == NoSwap) {
+ MOZ_RELEASE_ASSERT(
+ memcmp(buffer + startPosition, aValues, nValues * sizeof(T)) == 0);
+ }
+ for (size_t i = 0; i < nValues; ++i) {
+ MOZ_RELEASE_ASSERT(aReaderFunc(buffer + startPosition + i) ==
+ aValues[i]);
+ }
+ }
+ }
+}
+
+template <typename T>
+struct Reader {};
+
+#define SPECIALIZE_READER(TYPE, READ_FUNC) \
+ template <> \
+ struct Reader<TYPE> { \
+ static TYPE readLE(const void* aP) { return LittleEndian::READ_FUNC(aP); } \
+ static TYPE readBE(const void* aP) { return BigEndian::READ_FUNC(aP); } \
+ };
+
+SPECIALIZE_READER(uint16_t, readUint16)
+SPECIALIZE_READER(uint32_t, readUint32)
+SPECIALIZE_READER(uint64_t, readUint64)
+SPECIALIZE_READER(int16_t, readInt16)
+SPECIALIZE_READER(int32_t, readInt32)
+SPECIALIZE_READER(int64_t, readInt64)
+
+template <typename T, size_t Count>
+void TestBulkSwap(const T (&aBytes)[Count]) {
+#if MOZ_LITTLE_ENDIAN()
+ TestBulkSwapToSub(Swap, aBytes, copyAndSwapToBigEndian<T>, Reader<T>::readBE);
+ TestBulkSwapFromSub(Swap, aBytes, copyAndSwapFromBigEndian<T>,
+ Reader<T>::readBE);
+ TestBulkSwapToSub(Swap, aBytes, copyAndSwapToNetworkOrder<T>,
+ Reader<T>::readBE);
+ TestBulkSwapFromSub(Swap, aBytes, copyAndSwapFromNetworkOrder<T>,
+ Reader<T>::readBE);
+#else
+ TestBulkSwapToSub(Swap, aBytes, copyAndSwapToLittleEndian<T>,
+ Reader<T>::readLE);
+ TestBulkSwapFromSub(Swap, aBytes, copyAndSwapFromLittleEndian<T>,
+ Reader<T>::readLE);
+#endif
+}
+
+template <typename T, size_t Count>
+void TestBulkNoSwap(const T (&aBytes)[Count]) {
+#if MOZ_LITTLE_ENDIAN()
+ TestBulkSwapToSub(NoSwap, aBytes, copyAndSwapToLittleEndian<T>,
+ Reader<T>::readLE);
+ TestBulkSwapFromSub(NoSwap, aBytes, copyAndSwapFromLittleEndian<T>,
+ Reader<T>::readLE);
+#else
+ TestBulkSwapToSub(NoSwap, aBytes, copyAndSwapToBigEndian<T>,
+ Reader<T>::readBE);
+ TestBulkSwapFromSub(NoSwap, aBytes, copyAndSwapFromBigEndian<T>,
+ Reader<T>::readBE);
+ TestBulkSwapToSub(NoSwap, aBytes, copyAndSwapToNetworkOrder<T>,
+ Reader<T>::readBE);
+ TestBulkSwapFromSub(NoSwap, aBytes, copyAndSwapFromNetworkOrder<T>,
+ Reader<T>::readBE);
+#endif
+}
+
+template <typename T, size_t Count>
+void TestBulkInPlaceSwap(const T (&aBytes)[Count]) {
+#if MOZ_LITTLE_ENDIAN()
+ TestBulkInPlaceSub(Swap, aBytes, swapToBigEndianInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(Swap, aBytes, swapFromBigEndianInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(Swap, aBytes, swapToNetworkOrderInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(Swap, aBytes, swapFromNetworkOrderInPlace<T>,
+ Reader<T>::readBE);
+#else
+ TestBulkInPlaceSub(Swap, aBytes, swapToLittleEndianInPlace<T>,
+ Reader<T>::readLE);
+ TestBulkInPlaceSub(Swap, aBytes, swapFromLittleEndianInPlace<T>,
+ Reader<T>::readLE);
+#endif
+}
+
+template <typename T, size_t Count>
+void TestBulkInPlaceNoSwap(const T (&aBytes)[Count]) {
+#if MOZ_LITTLE_ENDIAN()
+ TestBulkInPlaceSub(NoSwap, aBytes, swapToLittleEndianInPlace<T>,
+ Reader<T>::readLE);
+ TestBulkInPlaceSub(NoSwap, aBytes, swapFromLittleEndianInPlace<T>,
+ Reader<T>::readLE);
+#else
+ TestBulkInPlaceSub(NoSwap, aBytes, swapToBigEndianInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(NoSwap, aBytes, swapFromBigEndianInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(NoSwap, aBytes, swapToNetworkOrderInPlace<T>,
+ Reader<T>::readBE);
+ TestBulkInPlaceSub(NoSwap, aBytes, swapFromNetworkOrderInPlace<T>,
+ Reader<T>::readBE);
+#endif
+}
+
+int main() {
+ static const uint8_t unsigned_bytes[16] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08};
+ static const int8_t signed_bytes[16] = {
+ -0x0f, -0x0e, -0x0d, -0x0c, -0x0b, -0x0a, -0x09, -0x08,
+ -0x0f, -0x0e, -0x0d, -0x0c, -0x0b, -0x0a, -0x09, -0x08};
+ static const uint16_t uint16_values[8] = {0x0102, 0x0304, 0x0506, 0x0708,
+ 0x0102, 0x0304, 0x0506, 0x0708};
+ static const int16_t int16_values[8] = {
+ int16_t(0xf1f2), int16_t(0xf3f4), int16_t(0xf5f6), int16_t(0xf7f8),
+ int16_t(0xf1f2), int16_t(0xf3f4), int16_t(0xf5f6), int16_t(0xf7f8)};
+ static const uint32_t uint32_values[4] = {0x01020304, 0x05060708, 0x01020304,
+ 0x05060708};
+ static const int32_t int32_values[4] = {
+ int32_t(0xf1f2f3f4), int32_t(0xf5f6f7f8), int32_t(0xf1f2f3f4),
+ int32_t(0xf5f6f7f8)};
+ static const uint64_t uint64_values[2] = {0x0102030405060708,
+ 0x0102030405060708};
+ static const int64_t int64_values[2] = {int64_t(0xf1f2f3f4f5f6f7f8),
+ int64_t(0xf1f2f3f4f5f6f7f8)};
+ uint8_t buffer[8];
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint16(&unsigned_bytes[0]) == 0x0201);
+ MOZ_RELEASE_ASSERT(BigEndian::readUint16(&unsigned_bytes[0]) == 0x0102);
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint32(&unsigned_bytes[0]) ==
+ 0x04030201U);
+ MOZ_RELEASE_ASSERT(BigEndian::readUint32(&unsigned_bytes[0]) == 0x01020304U);
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint64(&unsigned_bytes[0]) ==
+ 0x0807060504030201ULL);
+ MOZ_RELEASE_ASSERT(BigEndian::readUint64(&unsigned_bytes[0]) ==
+ 0x0102030405060708ULL);
+
+ if (sizeof(uintptr_t) == 8) {
+ // MSVC warning C4309 is "'static_cast': truncation of constant value" and
+ // will hit for the literal casts below in 32-bit builds -- in dead code,
+ // because only the other arm of this |if| runs. Turn off the warning for
+ // these two uses in dead code.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4309)
+#endif
+ MOZ_RELEASE_ASSERT(LittleEndian::readUintptr(&unsigned_bytes[0]) ==
+ static_cast<uintptr_t>(0x0807060504030201ULL));
+ MOZ_RELEASE_ASSERT(BigEndian::readUintptr(&unsigned_bytes[0]) ==
+ static_cast<uintptr_t>(0x0102030405060708ULL));
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+ } else {
+ MOZ_RELEASE_ASSERT(LittleEndian::readUintptr(&unsigned_bytes[0]) ==
+ 0x04030201U);
+ MOZ_RELEASE_ASSERT(BigEndian::readUintptr(&unsigned_bytes[0]) ==
+ 0x01020304U);
+ }
+
+ LittleEndian::writeUint16(&buffer[0], 0x0201);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint16_t)) ==
+ 0);
+ BigEndian::writeUint16(&buffer[0], 0x0102);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint16_t)) ==
+ 0);
+
+ LittleEndian::writeUint32(&buffer[0], 0x04030201U);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint32_t)) ==
+ 0);
+ BigEndian::writeUint32(&buffer[0], 0x01020304U);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint32_t)) ==
+ 0);
+
+ LittleEndian::writeUint64(&buffer[0], 0x0807060504030201ULL);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint64_t)) ==
+ 0);
+ BigEndian::writeUint64(&buffer[0], 0x0102030405060708ULL);
+ MOZ_RELEASE_ASSERT(memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uint64_t)) ==
+ 0);
+
+ memset(&buffer[0], 0xff, sizeof(buffer));
+ LittleEndian::writeUintptr(&buffer[0], uintptr_t(0x0807060504030201ULL));
+ MOZ_RELEASE_ASSERT(
+ memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uintptr_t)) == 0);
+ if (sizeof(uintptr_t) == 4) {
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint32(&buffer[4]) == 0xffffffffU);
+ }
+
+ memset(&buffer[0], 0xff, sizeof(buffer));
+ if (sizeof(uintptr_t) == 8) {
+ BigEndian::writeUintptr(&buffer[0], uintptr_t(0x0102030405060708ULL));
+ } else {
+ BigEndian::writeUintptr(&buffer[0], uintptr_t(0x01020304U));
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint32(&buffer[4]) == 0xffffffffU);
+ }
+ MOZ_RELEASE_ASSERT(
+ memcmp(&unsigned_bytes[0], &buffer[0], sizeof(uintptr_t)) == 0);
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readInt16(&signed_bytes[0]) ==
+ int16_t(0xf2f1));
+ MOZ_RELEASE_ASSERT(BigEndian::readInt16(&signed_bytes[0]) == int16_t(0xf1f2));
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readInt32(&signed_bytes[0]) ==
+ int32_t(0xf4f3f2f1));
+ MOZ_RELEASE_ASSERT(BigEndian::readInt32(&signed_bytes[0]) ==
+ int32_t(0xf1f2f3f4));
+
+ MOZ_RELEASE_ASSERT(LittleEndian::readInt64(&signed_bytes[0]) ==
+ int64_t(0xf8f7f6f5f4f3f2f1LL));
+ MOZ_RELEASE_ASSERT(BigEndian::readInt64(&signed_bytes[0]) ==
+ int64_t(0xf1f2f3f4f5f6f7f8LL));
+
+ if (sizeof(uintptr_t) == 8) {
+ MOZ_RELEASE_ASSERT(LittleEndian::readIntptr(&signed_bytes[0]) ==
+ intptr_t(0xf8f7f6f5f4f3f2f1LL));
+ MOZ_RELEASE_ASSERT(BigEndian::readIntptr(&signed_bytes[0]) ==
+ intptr_t(0xf1f2f3f4f5f6f7f8LL));
+ } else {
+ MOZ_RELEASE_ASSERT(LittleEndian::readIntptr(&signed_bytes[0]) ==
+ intptr_t(0xf4f3f2f1));
+ MOZ_RELEASE_ASSERT(BigEndian::readIntptr(&signed_bytes[0]) ==
+ intptr_t(0xf1f2f3f4));
+ }
+
+ LittleEndian::writeInt16(&buffer[0], int16_t(0xf2f1));
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int16_t)) ==
+ 0);
+ BigEndian::writeInt16(&buffer[0], int16_t(0xf1f2));
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int16_t)) ==
+ 0);
+
+ LittleEndian::writeInt32(&buffer[0], 0xf4f3f2f1);
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int32_t)) ==
+ 0);
+ BigEndian::writeInt32(&buffer[0], 0xf1f2f3f4);
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int32_t)) ==
+ 0);
+
+ LittleEndian::writeInt64(&buffer[0], 0xf8f7f6f5f4f3f2f1LL);
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int64_t)) ==
+ 0);
+ BigEndian::writeInt64(&buffer[0], 0xf1f2f3f4f5f6f7f8LL);
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(int64_t)) ==
+ 0);
+
+ memset(&buffer[0], 0xff, sizeof(buffer));
+ LittleEndian::writeIntptr(&buffer[0], intptr_t(0xf8f7f6f5f4f3f2f1LL));
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(intptr_t)) ==
+ 0);
+ if (sizeof(intptr_t) == 4) {
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint32(&buffer[4]) == 0xffffffffU);
+ }
+
+ memset(&buffer[0], 0xff, sizeof(buffer));
+ if (sizeof(intptr_t) == 8) {
+ BigEndian::writeIntptr(&buffer[0], intptr_t(0xf1f2f3f4f5f6f7f8LL));
+ } else {
+ BigEndian::writeIntptr(&buffer[0], intptr_t(0xf1f2f3f4));
+ MOZ_RELEASE_ASSERT(LittleEndian::readUint32(&buffer[4]) == 0xffffffffU);
+ }
+ MOZ_RELEASE_ASSERT(memcmp(&signed_bytes[0], &buffer[0], sizeof(intptr_t)) ==
+ 0);
+
+ TestSingleSwap(uint16_t(0xf2f1), uint16_t(0xf1f2));
+ TestSingleSwap(uint32_t(0xf4f3f2f1), uint32_t(0xf1f2f3f4));
+ TestSingleSwap(uint64_t(0xf8f7f6f5f4f3f2f1), uint64_t(0xf1f2f3f4f5f6f7f8));
+
+ TestSingleSwap(int16_t(0xf2f1), int16_t(0xf1f2));
+ TestSingleSwap(int32_t(0xf4f3f2f1), int32_t(0xf1f2f3f4));
+ TestSingleSwap(int64_t(0xf8f7f6f5f4f3f2f1), int64_t(0xf1f2f3f4f5f6f7f8));
+
+ TestSingleNoSwap(uint16_t(0xf2f1), uint16_t(0xf2f1));
+ TestSingleNoSwap(uint32_t(0xf4f3f2f1), uint32_t(0xf4f3f2f1));
+ TestSingleNoSwap(uint64_t(0xf8f7f6f5f4f3f2f1), uint64_t(0xf8f7f6f5f4f3f2f1));
+
+ TestSingleNoSwap(int16_t(0xf2f1), int16_t(0xf2f1));
+ TestSingleNoSwap(int32_t(0xf4f3f2f1), int32_t(0xf4f3f2f1));
+ TestSingleNoSwap(int64_t(0xf8f7f6f5f4f3f2f1), int64_t(0xf8f7f6f5f4f3f2f1));
+
+ TestBulkSwap(uint16_values);
+ TestBulkSwap(int16_values);
+ TestBulkSwap(uint32_values);
+ TestBulkSwap(int32_values);
+ TestBulkSwap(uint64_values);
+ TestBulkSwap(int64_values);
+
+ TestBulkNoSwap(uint16_values);
+ TestBulkNoSwap(int16_values);
+ TestBulkNoSwap(uint32_values);
+ TestBulkNoSwap(int32_values);
+ TestBulkNoSwap(uint64_values);
+ TestBulkNoSwap(int64_values);
+
+ TestBulkInPlaceSwap(uint16_values);
+ TestBulkInPlaceSwap(int16_values);
+ TestBulkInPlaceSwap(uint32_values);
+ TestBulkInPlaceSwap(int32_values);
+ TestBulkInPlaceSwap(uint64_values);
+ TestBulkInPlaceSwap(int64_values);
+
+ TestBulkInPlaceNoSwap(uint16_values);
+ TestBulkInPlaceNoSwap(int16_values);
+ TestBulkInPlaceNoSwap(uint32_values);
+ TestBulkInPlaceNoSwap(int32_values);
+ TestBulkInPlaceNoSwap(uint64_values);
+ TestBulkInPlaceNoSwap(int64_values);
+
+ return 0;
+}
diff --git a/mfbt/tests/TestEnumSet.cpp b/mfbt/tests/TestEnumSet.cpp
new file mode 100644
index 0000000000..c47710a715
--- /dev/null
+++ b/mfbt/tests/TestEnumSet.cpp
@@ -0,0 +1,306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/BitSet.h"
+#include "mozilla/EnumSet.h"
+#include "mozilla/Vector.h"
+
+#include <type_traits>
+
+using namespace mozilla;
+
+enum SeaBird {
+ PENGUIN,
+ ALBATROSS,
+ FULMAR,
+ PRION,
+ SHEARWATER,
+ GADFLY_PETREL,
+ TRUE_PETREL,
+ DIVING_PETREL,
+ STORM_PETREL,
+ PELICAN,
+ GANNET,
+ BOOBY,
+ CORMORANT,
+ FRIGATEBIRD,
+ TROPICBIRD,
+ SKUA,
+ GULL,
+ TERN,
+ SKIMMER,
+ AUK,
+
+ SEA_BIRD_COUNT
+};
+
+enum class SmallEnum : uint8_t {
+ Foo,
+ Bar,
+};
+
+enum class BigEnum : uint64_t {
+ Foo,
+ Bar = 35,
+};
+
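+// The suite is a template over the EnumSet storage type, so the same checks
+// can be instantiated both with the default storage (the enum's unsigned
+// underlying type, as defaulted below) and with any wider storage a caller
+// picks.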
+template <typename Storage = typename std::make_unsigned<
+ typename std::underlying_type<SeaBird>::type>::type>
+class EnumSetSuite {
+ public:
+ using EnumSetSeaBird = EnumSet<SeaBird, Storage>;
+
+ EnumSetSuite()
+ : mAlcidae(),
+ mDiomedeidae(ALBATROSS),
+ mPetrelProcellariidae(GADFLY_PETREL, TRUE_PETREL),
+ mNonPetrelProcellariidae(FULMAR, PRION, SHEARWATER),
+ mPetrels(GADFLY_PETREL, TRUE_PETREL, DIVING_PETREL, STORM_PETREL) {}
+
+ void runTests() {
+    testEnumSetLayout();
+    testSize();
+    testContains();
+    testCopy();
+    testAddTo();
+    testAdd();
+    testAddAll();
+    testUnion();
+    testRemoveFrom();
+    testRemove();
+    testRemoveAllFrom();
+    testRemoveAll();
+    testIntersect();
+    testIntersection();
+ testEquality();
+ testDuplicates();
+ testIteration();
+ testInitializerListConstuctor();
+ testBigEnum();
+ }
+
+ private:
+ void testEnumSetLayout() {
+#ifndef DEBUG
+ static_assert(sizeof(EnumSet<SmallEnum>) == sizeof(SmallEnum),
+ "EnumSet should be no bigger than the enum by default");
+ static_assert(sizeof(EnumSet<SmallEnum, uint32_t>) == sizeof(uint32_t),
+ "EnumSet should be able to have its size overriden.");
+ static_assert(std::is_trivially_copyable_v<EnumSet<SmallEnum>>,
+ "EnumSet should be lightweight outside of debug.");
+#endif
+ }
+
+ void testSize() {
+ MOZ_RELEASE_ASSERT(mAlcidae.size() == 0);
+ MOZ_RELEASE_ASSERT(mDiomedeidae.size() == 1);
+ MOZ_RELEASE_ASSERT(mPetrelProcellariidae.size() == 2);
+ MOZ_RELEASE_ASSERT(mNonPetrelProcellariidae.size() == 3);
+ MOZ_RELEASE_ASSERT(mPetrels.size() == 4);
+ }
+
+ void testContains() {
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(PENGUIN));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(ALBATROSS));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(FULMAR));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(PRION));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(SHEARWATER));
+ MOZ_RELEASE_ASSERT(mPetrels.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(mPetrels.contains(TRUE_PETREL));
+ MOZ_RELEASE_ASSERT(mPetrels.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(mPetrels.contains(STORM_PETREL));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(PELICAN));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(GANNET));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(BOOBY));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(CORMORANT));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(FRIGATEBIRD));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(TROPICBIRD));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(SKUA));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(GULL));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(TERN));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(SKIMMER));
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(AUK));
+ }
+
+ void testCopy() {
+ EnumSetSeaBird likes = mPetrels;
+ likes -= TRUE_PETREL;
+ MOZ_RELEASE_ASSERT(mPetrels.size() == 4);
+ MOZ_RELEASE_ASSERT(mPetrels.contains(TRUE_PETREL));
+
+ MOZ_RELEASE_ASSERT(likes.size() == 3);
+ MOZ_RELEASE_ASSERT(likes.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(STORM_PETREL));
+ }
+
+ void testAddTo() {
+ EnumSetSeaBird seen = mPetrels;
+ seen += CORMORANT;
+ seen += TRUE_PETREL;
+ MOZ_RELEASE_ASSERT(mPetrels.size() == 4);
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(CORMORANT));
+ MOZ_RELEASE_ASSERT(seen.size() == 5);
+ MOZ_RELEASE_ASSERT(seen.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(TRUE_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(STORM_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(CORMORANT));
+ }
+
+ void testAdd() {
+ EnumSetSeaBird seen = mPetrels + CORMORANT + STORM_PETREL;
+ MOZ_RELEASE_ASSERT(mPetrels.size() == 4);
+ MOZ_RELEASE_ASSERT(!mPetrels.contains(CORMORANT));
+ MOZ_RELEASE_ASSERT(seen.size() == 5);
+ MOZ_RELEASE_ASSERT(seen.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(TRUE_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(STORM_PETREL));
+ MOZ_RELEASE_ASSERT(seen.contains(CORMORANT));
+ }
+
+ void testAddAll() {
+ EnumSetSeaBird procellariidae;
+ procellariidae += mPetrelProcellariidae;
+ procellariidae += mNonPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(procellariidae.size() == 5);
+
+ // Both procellariidae and mPetrels include GADFLY_PETREL and TRUE_PETREL
+ EnumSetSeaBird procellariiformes;
+ procellariiformes += mDiomedeidae;
+ procellariiformes += procellariidae;
+ procellariiformes += mPetrels;
+ MOZ_RELEASE_ASSERT(procellariiformes.size() == 8);
+ }
+
+ void testUnion() {
+ EnumSetSeaBird procellariidae =
+ mPetrelProcellariidae + mNonPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(procellariidae.size() == 5);
+
+ // Both procellariidae and mPetrels include GADFLY_PETREL and TRUE_PETREL
+ EnumSetSeaBird procellariiformes = mDiomedeidae + procellariidae + mPetrels;
+ MOZ_RELEASE_ASSERT(procellariiformes.size() == 8);
+ }
+
+ void testRemoveFrom() {
+ EnumSetSeaBird likes = mPetrels;
+ likes -= TRUE_PETREL;
+ likes -= DIVING_PETREL;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(STORM_PETREL));
+ }
+
+ void testRemove() {
+ EnumSetSeaBird likes = mPetrels - TRUE_PETREL - DIVING_PETREL;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(STORM_PETREL));
+ }
+
+ void testRemoveAllFrom() {
+ EnumSetSeaBird likes = mPetrels;
+ likes -= mPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(STORM_PETREL));
+ }
+
+ void testRemoveAll() {
+ EnumSetSeaBird likes = mPetrels - mPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(DIVING_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(STORM_PETREL));
+ }
+
+ void testIntersect() {
+ EnumSetSeaBird likes = mPetrels;
+ likes &= mPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(TRUE_PETREL));
+ }
+
+ void testIntersection() {
+ EnumSetSeaBird likes = mPetrels & mPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(likes.size() == 2);
+ MOZ_RELEASE_ASSERT(likes.contains(GADFLY_PETREL));
+ MOZ_RELEASE_ASSERT(likes.contains(TRUE_PETREL));
+ }
+
+ void testEquality() {
+ EnumSetSeaBird likes = mPetrels & mPetrelProcellariidae;
+ MOZ_RELEASE_ASSERT(likes == EnumSetSeaBird(GADFLY_PETREL, TRUE_PETREL));
+ }
+
+ void testDuplicates() {
+ EnumSetSeaBird likes = mPetrels;
+ likes += GADFLY_PETREL;
+ likes += TRUE_PETREL;
+ likes += DIVING_PETREL;
+ likes += STORM_PETREL;
+ MOZ_RELEASE_ASSERT(likes.size() == 4);
+ MOZ_RELEASE_ASSERT(likes == mPetrels);
+ }
+
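+ // Iteration is expected to yield members in enum-value order, not insertion
+ // order; the position assertions below rely on that.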
+ void testIteration() {
+ EnumSetSeaBird birds;
+ Vector<SeaBird> vec;
+
+ for (auto bird : birds) {
+ MOZ_RELEASE_ASSERT(vec.append(bird));
+ }
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+
+ birds += DIVING_PETREL;
+ birds += GADFLY_PETREL;
+ birds += STORM_PETREL;
+ birds += TRUE_PETREL;
+ for (auto bird : birds) {
+ MOZ_RELEASE_ASSERT(vec.append(bird));
+ }
+
+ MOZ_RELEASE_ASSERT(vec.length() == 4);
+ MOZ_RELEASE_ASSERT(vec[0] == GADFLY_PETREL);
+ MOZ_RELEASE_ASSERT(vec[1] == TRUE_PETREL);
+ MOZ_RELEASE_ASSERT(vec[2] == DIVING_PETREL);
+ MOZ_RELEASE_ASSERT(vec[3] == STORM_PETREL);
+ }
+
+ void testInitializerListConstructor() {
+ EnumSetSeaBird empty{};
+ MOZ_RELEASE_ASSERT(empty.size() == 0);
+ MOZ_RELEASE_ASSERT(empty.isEmpty());
+
+ EnumSetSeaBird someBirds{SKIMMER, GULL, BOOBY};
+ MOZ_RELEASE_ASSERT(someBirds.size() == 3);
+ MOZ_RELEASE_ASSERT(someBirds.contains(SKIMMER));
+ MOZ_RELEASE_ASSERT(someBirds.contains(GULL));
+ MOZ_RELEASE_ASSERT(someBirds.contains(BOOBY));
+ }
+
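+ // serialize() exposes the raw bit representation: member N occupies bit N of
+ // the storage word, so BigEnum::Bar (= 35) requires the uint64_t storage
+ // declared above and must come back as bit 35.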
+ void testBigEnum() {
+ EnumSet<BigEnum> set;
+ set += BigEnum::Bar;
+ MOZ_RELEASE_ASSERT(set.serialize() ==
+ (uint64_t(1) << uint64_t(BigEnum::Bar)));
+ }
+
+ EnumSetSeaBird mAlcidae;
+ EnumSetSeaBird mDiomedeidae;
+ EnumSetSeaBird mPetrelProcellariidae;
+ EnumSetSeaBird mNonPetrelProcellariidae;
+ EnumSetSeaBird mPetrels;
+};
+
+int main() {
+ EnumSetSuite<uint32_t> suite1;
+ suite1.runTests();
+
+ EnumSetSuite<BitSet<SEA_BIRD_COUNT>> suite2;
+ suite2.runTests();
+ return 0;
+}
diff --git a/mfbt/tests/TestEnumTypeTraits.cpp b/mfbt/tests/TestEnumTypeTraits.cpp
new file mode 100644
index 0000000000..1065c92a7b
--- /dev/null
+++ b/mfbt/tests/TestEnumTypeTraits.cpp
@@ -0,0 +1,159 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/EnumTypeTraits.h"
+
+#include <cstdint>
+#include <type_traits>
+
+using namespace mozilla;
+
+/* Feature check for EnumTypeFitsWithin. */
+
+#define MAKE_FIXED_ENUM_FOR_TYPE(IntType) \
+ enum FixedEnumFor_##IntType : IntType { \
+ A_##IntType, \
+ B_##IntType, \
+ C_##IntType, \
+ };
+
+template <typename EnumType, typename IntType>
+static void TestShouldFit() {
+ static_assert(EnumTypeFitsWithin<EnumType, IntType>::value,
+ "Should fit within exact/promoted integral type");
+}
+
+template <typename EnumType, typename IntType>
+static void TestShouldNotFit() {
+ static_assert(!EnumTypeFitsWithin<EnumType, IntType>::value,
+ "Should not fit within");
+}
+
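+// The matrix below encodes the rule EnumTypeFitsWithin is expected to apply
+// to fixed-underlying-type enums: the enum fits iff every value of its
+// underlying type is representable in the target type. So a signed enum
+// never fits an unsigned type, and an unsigned enum of width N fits a signed
+// type only if it is strictly wider than N bits.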
+void TestFitForTypes() {
+ // check for int8_t
+ MAKE_FIXED_ENUM_FOR_TYPE(int8_t);
+ TestShouldFit<FixedEnumFor_int8_t, int8_t>();
+ TestShouldFit<FixedEnumFor_int8_t, int16_t>();
+ TestShouldFit<FixedEnumFor_int8_t, int32_t>();
+ TestShouldFit<FixedEnumFor_int8_t, int64_t>();
+
+ TestShouldNotFit<FixedEnumFor_int8_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_int8_t, uint16_t>();
+ TestShouldNotFit<FixedEnumFor_int8_t, uint32_t>();
+ TestShouldNotFit<FixedEnumFor_int8_t, uint64_t>();
+
+ // check for uint8_t
+ MAKE_FIXED_ENUM_FOR_TYPE(uint8_t);
+ TestShouldFit<FixedEnumFor_uint8_t, uint8_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, uint16_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, uint32_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, uint64_t>();
+
+ TestShouldNotFit<FixedEnumFor_uint8_t, int8_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, int16_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, int32_t>();
+ TestShouldFit<FixedEnumFor_uint8_t, int64_t>();
+
+ // check for int16_t
+ MAKE_FIXED_ENUM_FOR_TYPE(int16_t);
+ TestShouldNotFit<FixedEnumFor_int16_t, int8_t>();
+ TestShouldFit<FixedEnumFor_int16_t, int16_t>();
+ TestShouldFit<FixedEnumFor_int16_t, int32_t>();
+ TestShouldFit<FixedEnumFor_int16_t, int64_t>();
+
+ TestShouldNotFit<FixedEnumFor_int16_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_int16_t, uint16_t>();
+ TestShouldNotFit<FixedEnumFor_int16_t, uint32_t>();
+ TestShouldNotFit<FixedEnumFor_int16_t, uint64_t>();
+
+ // check for uint16_t
+ MAKE_FIXED_ENUM_FOR_TYPE(uint16_t);
+ TestShouldNotFit<FixedEnumFor_uint16_t, uint8_t>();
+ TestShouldFit<FixedEnumFor_uint16_t, uint16_t>();
+ TestShouldFit<FixedEnumFor_uint16_t, uint32_t>();
+ TestShouldFit<FixedEnumFor_uint16_t, uint64_t>();
+
+ TestShouldNotFit<FixedEnumFor_uint16_t, int8_t>();
+ TestShouldNotFit<FixedEnumFor_uint16_t, int16_t>();
+ TestShouldFit<FixedEnumFor_uint16_t, int32_t>();
+ TestShouldFit<FixedEnumFor_uint16_t, int64_t>();
+
+ // check for int32_t
+ MAKE_FIXED_ENUM_FOR_TYPE(int32_t);
+ TestShouldNotFit<FixedEnumFor_int32_t, int8_t>();
+ TestShouldNotFit<FixedEnumFor_int32_t, int16_t>();
+ TestShouldFit<FixedEnumFor_int32_t, int32_t>();
+ TestShouldFit<FixedEnumFor_int32_t, int64_t>();
+
+ TestShouldNotFit<FixedEnumFor_int32_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_int32_t, uint16_t>();
+ TestShouldNotFit<FixedEnumFor_int32_t, uint32_t>();
+ TestShouldNotFit<FixedEnumFor_int32_t, uint64_t>();
+
+ // check for uint32_t
+ MAKE_FIXED_ENUM_FOR_TYPE(uint32_t);
+ TestShouldNotFit<FixedEnumFor_uint32_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_uint32_t, uint16_t>();
+ TestShouldFit<FixedEnumFor_uint32_t, uint32_t>();
+ TestShouldFit<FixedEnumFor_uint32_t, uint64_t>();
+
+ TestShouldNotFit<FixedEnumFor_uint32_t, int8_t>();
+ TestShouldNotFit<FixedEnumFor_uint32_t, int16_t>();
+ TestShouldNotFit<FixedEnumFor_uint32_t, int32_t>();
+ TestShouldFit<FixedEnumFor_uint32_t, int64_t>();
+
+ // check for int64_t
+ MAKE_FIXED_ENUM_FOR_TYPE(int64_t);
+ TestShouldNotFit<FixedEnumFor_int64_t, int8_t>();
+ TestShouldNotFit<FixedEnumFor_int64_t, int16_t>();
+ TestShouldNotFit<FixedEnumFor_int64_t, int32_t>();
+ TestShouldFit<FixedEnumFor_int64_t, int64_t>();
+
+ TestShouldNotFit<FixedEnumFor_int64_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_int64_t, uint16_t>();
+ TestShouldNotFit<FixedEnumFor_int64_t, uint32_t>();
+ TestShouldNotFit<FixedEnumFor_int64_t, uint64_t>();
+
+ // check for uint64_t
+ MAKE_FIXED_ENUM_FOR_TYPE(uint64_t);
+ TestShouldNotFit<FixedEnumFor_uint64_t, uint8_t>();
+ TestShouldNotFit<FixedEnumFor_uint64_t, uint16_t>();
+ TestShouldNotFit<FixedEnumFor_uint64_t, uint32_t>();
+ TestShouldFit<FixedEnumFor_uint64_t, uint64_t>();
+
+ TestShouldNotFit<FixedEnumFor_uint64_t, int8_t>();
+ TestShouldNotFit<FixedEnumFor_uint64_t, int16_t>();
+ TestShouldNotFit<FixedEnumFor_uint64_t, int32_t>();
+ TestShouldNotFit<FixedEnumFor_uint64_t, int64_t>();
+}
+
+// -
+
+template <typename T, typename U>
+static constexpr void AssertSameTypeAndValue(T a, U b) {
+ static_assert(std::is_same_v<T, U>);
+ MOZ_ASSERT(a == b);
+}
+
+void TestUnderlyingValue() {
+ enum class Pet : int16_t { Cat, Dog, Fish };
+ enum class Plant { Flower, Tree, Vine };
+
+ AssertSameTypeAndValue(UnderlyingValue(Pet::Cat), int16_t(0));
+ AssertSameTypeAndValue(UnderlyingValue(Pet::Dog), int16_t(1));
+ AssertSameTypeAndValue(UnderlyingValue(Pet::Fish), int16_t(2));
+
+ AssertSameTypeAndValue(UnderlyingValue(Plant::Flower), int(0));
+ AssertSameTypeAndValue(UnderlyingValue(Plant::Tree), int(1));
+ AssertSameTypeAndValue(UnderlyingValue(Plant::Vine), int(2));
+}
+
+// -
+
+int main() {
+ TestFitForTypes();
+ TestUnderlyingValue();
+ return 0;
+}
diff --git a/mfbt/tests/TestEnumeratedArray.cpp b/mfbt/tests/TestEnumeratedArray.cpp
new file mode 100644
index 0000000000..dfc1a37f17
--- /dev/null
+++ b/mfbt/tests/TestEnumeratedArray.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/EnumeratedArray.h"
+
+using mozilla::EnumeratedArray;
+
+enum class AnimalSpecies { Cow, Sheep, Pig, Count };
+
+using TestArray = EnumeratedArray<AnimalSpecies, AnimalSpecies::Count, int>;
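+
+// EnumeratedArray wraps a fixed-size array indexed directly by enum class
+// values; AnimalSpecies::Count fixes the length, so no casts are needed at
+// the call sites below.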
+
+void TestInitialValueByConstructor() {
+ // Style 1
+ TestArray headCount(1, 2, 3);
+ MOZ_RELEASE_ASSERT(headCount[AnimalSpecies::Cow] == 1);
+ MOZ_RELEASE_ASSERT(headCount[AnimalSpecies::Sheep] == 2);
+ MOZ_RELEASE_ASSERT(headCount[AnimalSpecies::Pig] == 3);
+ // Style 2
+ TestArray headCount2{5, 6, 7};
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Cow] == 5);
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Sheep] == 6);
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Pig] == 7);
+ // Style 3
+ TestArray headCount3({8, 9, 10});
+ MOZ_RELEASE_ASSERT(headCount3[AnimalSpecies::Cow] == 8);
+ MOZ_RELEASE_ASSERT(headCount3[AnimalSpecies::Sheep] == 9);
+ MOZ_RELEASE_ASSERT(headCount3[AnimalSpecies::Pig] == 10);
+}
+
+void TestAssignment() {
+ TestArray headCount{8, 9, 10};
+ TestArray headCount2;
+ headCount2 = headCount;
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Cow] == 8);
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Sheep] == 9);
+ MOZ_RELEASE_ASSERT(headCount2[AnimalSpecies::Pig] == 10);
+}
+
+int main() {
+ TestInitialValueByConstructor();
+ TestAssignment();
+ return 0;
+}
diff --git a/mfbt/tests/TestFastBernoulliTrial.cpp b/mfbt/tests/TestFastBernoulliTrial.cpp
new file mode 100644
index 0000000000..f85d33b2db
--- /dev/null
+++ b/mfbt/tests/TestFastBernoulliTrial.cpp
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FastBernoulliTrial.h"
+
+#include <math.h>
+
+// Note that because we always provide FastBernoulliTrial with a fixed
+// pseudorandom seed in these tests, the results here are completely
+// deterministic.
+//
+// A non-optimized version of this test runs in 0.009s on my laptop. Using
+// larger sample sizes lets us meet tighter bounds on the counts.
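+//
+// For reference, the shape of the API under test (a sketch, not part of the
+// suite): construct with (probability, seed0, seed1), then ask trial() per
+// event, or trial(n) to batch n events:
+//
+//   mozilla::FastBernoulliTrial sampler(0.01, seed0, seed1);
+//   if (sampler.trial()) {
+//     // sample this event
+//   }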
+
+static void TestProportions() {
+ mozilla::FastBernoulliTrial bernoulli(1.0, 698079309544035222ULL,
+ 6012389156611637584ULL);
+
+ for (size_t i = 0; i < 100; i++) MOZ_RELEASE_ASSERT(bernoulli.trial());
+
+ {
+ bernoulli.setProbability(0.5);
+ size_t count = 0;
+ for (size_t i = 0; i < 1000; i++) count += bernoulli.trial();
+ MOZ_RELEASE_ASSERT(count == 496);
+ }
+
+ {
+ bernoulli.setProbability(0.001);
+ size_t count = 0;
+ for (size_t i = 0; i < 1000; i++) count += bernoulli.trial();
+ MOZ_RELEASE_ASSERT(count == 2);
+ }
+
+ {
+ bernoulli.setProbability(0.85);
+ size_t count = 0;
+ for (size_t i = 0; i < 1000; i++) count += bernoulli.trial();
+ MOZ_RELEASE_ASSERT(count == 852);
+ }
+
+ bernoulli.setProbability(0.0);
+ for (size_t i = 0; i < 100; i++) MOZ_RELEASE_ASSERT(!bernoulli.trial());
+}
+
+static void TestHarmonics() {
+ mozilla::FastBernoulliTrial bernoulli(0.1, 698079309544035222ULL,
+ 6012389156611637584ULL);
+
+ const size_t n = 100000;
+ bool trials[n];
+ for (size_t i = 0; i < n; i++) trials[i] = bernoulli.trial();
+
+ // For each harmonic and phase, check that the proportion sampled is
+ // within acceptable bounds.
+ for (size_t harmonic = 1; harmonic < 20; harmonic++) {
+ size_t expected = n / harmonic / 10;
+ size_t low_expected = expected * 85 / 100;
+ size_t high_expected = expected * 115 / 100;
+
+ for (size_t phase = 0; phase < harmonic; phase++) {
+ size_t count = 0;
+ for (size_t i = phase; i < n; i += harmonic) count += trials[i];
+
+ MOZ_RELEASE_ASSERT(low_expected <= count && count <= high_expected);
+ }
+ }
+}
+
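+// trial(n) reports whether any of n consecutive trials would have succeeded,
+// so for probability p the expected hit rate is 1 - (1 - p)**n, as the
+// expected-value comments below spell out.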
+static void TestTrialN() {
+ mozilla::FastBernoulliTrial bernoulli(0.01, 0x67ff17e25d855942ULL,
+ 0x74f298193fe1c5b1ULL);
+
+ {
+ size_t count = 0;
+ for (size_t i = 0; i < 10000; i++) count += bernoulli.trial(1);
+
+ // Expected value: 0.01 * 10000 == 100
+ MOZ_RELEASE_ASSERT(count == 97);
+ }
+
+ {
+ size_t count = 0;
+ for (size_t i = 0; i < 10000; i++) count += bernoulli.trial(3);
+
+ // Expected value: (1 - (1 - 0.01) ** 3) == 0.0297,
+ // 0.0297 * 10000 == 297
+ MOZ_RELEASE_ASSERT(count == 304);
+ }
+
+ {
+ size_t count = 0;
+ for (size_t i = 0; i < 10000; i++) count += bernoulli.trial(10);
+
+ // Expected value: (1 - (1 - 0.01) ** 10) == 0.0956,
+ // 0.0956 * 10000 == 956
+ MOZ_RELEASE_ASSERT(count == 936);
+ }
+
+ {
+ size_t count = 0;
+ for (size_t i = 0; i < 10000; i++) count += bernoulli.trial(100);
+
+ // Expected value: (1 - (1 - 0.01) ** 100) == 0.6339
+ // 0.6339 * 10000 == 6339
+ MOZ_RELEASE_ASSERT(count == 6372);
+ }
+
+ {
+ size_t count = 0;
+ for (size_t i = 0; i < 10000; i++) count += bernoulli.trial(1000);
+
+ // Expected value: (1 - (1 - 0.01) ** 1000) == 0.9999
+ // 0.9999 * 10000 == 9999
+ MOZ_RELEASE_ASSERT(count == 9998);
+ }
+}
+
+static void TestChangeProbability() {
+ mozilla::FastBernoulliTrial bernoulli(1.0, 0x67ff17e25d855942ULL,
+ 0x74f298193fe1c5b1ULL);
+
+ // Establish a very high skip count.
+ bernoulli.setProbability(0.0);
+
+ // This should re-establish a zero skip count.
+ bernoulli.setProbability(1.0);
+
+ // So this should return true.
+ MOZ_RELEASE_ASSERT(bernoulli.trial());
+}
+
+static void TestCuspProbabilities() {
+ /*
+ * FastBernoulliTrial takes care to avoid screwing up on edge cases. The
+ * checks here all look pretty dumb, but they exercise paths in the code that
+ * could exhibit undefined behavior if coded naïvely.
+ */
+
+ /*
+ * This should not be perceptibly different from 1; for 64-bit doubles, this
+ * is a one in ten trillion chance of the trial not succeeding. Overflows
+ * converting doubles to size_t skip counts may change this, though.
+ */
+ mozilla::FastBernoulliTrial bernoulli(nextafter(1, 0), 0x67ff17e25d855942ULL,
+ 0x74f298193fe1c5b1ULL);
+
+ for (size_t i = 0; i < 1000; i++) MOZ_RELEASE_ASSERT(bernoulli.trial());
+
+ /*
+ * This should not be perceptibly different from 0; for 64-bit doubles,
+ * the FastBernoulliTrial will actually treat this as exactly zero.
+ */
+ bernoulli.setProbability(nextafter(0, 1));
+ for (size_t i = 0; i < 1000; i++) MOZ_RELEASE_ASSERT(!bernoulli.trial());
+
+ /*
+ * This should be a vanishingly low probability which FastBernoulliTrial does
+ * *not* treat as exactly zero.
+ */
+ bernoulli.setProbability(1 - nextafter(1, 0));
+ for (size_t i = 0; i < 1000; i++) MOZ_RELEASE_ASSERT(!bernoulli.trial());
+}
+
+int main() {
+ TestProportions();
+ TestHarmonics();
+ TestTrialN();
+ TestChangeProbability();
+ TestCuspProbabilities();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestFloatingPoint.cpp b/mfbt/tests/TestFloatingPoint.cpp
new file mode 100644
index 0000000000..44918cd1c5
--- /dev/null
+++ b/mfbt/tests/TestFloatingPoint.cpp
@@ -0,0 +1,730 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FloatingPoint.h"
+
+#include <math.h>
+#include <stdint.h>
+
+using mozilla::ExponentComponent;
+using mozilla::FloatingPoint;
+using mozilla::FuzzyEqualsAdditive;
+using mozilla::FuzzyEqualsMultiplicative;
+using mozilla::IsFloat32Representable;
+using mozilla::IsNegative;
+using mozilla::IsNegativeZero;
+using mozilla::IsPositiveZero;
+using mozilla::NegativeInfinity;
+using mozilla::NumberEqualsInt32;
+using mozilla::NumberEqualsInt64;
+using mozilla::NumberIsInt32;
+using mozilla::NumberIsInt64;
+using mozilla::NumbersAreIdentical;
+using mozilla::PositiveInfinity;
+using mozilla::SpecificNaN;
+using mozilla::UnspecifiedNaN;
+using std::exp2;
+using std::exp2f;
+
+#define A(a) MOZ_RELEASE_ASSERT(a)
+
+template <typename T>
+static void ShouldBeIdentical(T aD1, T aD2) {
+ A(NumbersAreIdentical(aD1, aD2));
+ A(NumbersAreIdentical(aD2, aD1));
+}
+
+template <typename T>
+static void ShouldNotBeIdentical(T aD1, T aD2) {
+ A(!NumbersAreIdentical(aD1, aD2));
+ A(!NumbersAreIdentical(aD2, aD1));
+}
+
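+// NumbersAreIdentical distinguishes +0.0 from -0.0 but treats every NaN as
+// identical to every other NaN, whatever its sign or payload.
+// SpecificNaN(signBit, significand) builds a NaN carrying the given payload,
+// so all the NaN pairings below must compare identical.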
+static void TestDoublesAreIdentical() {
+ ShouldBeIdentical(+0.0, +0.0);
+ ShouldBeIdentical(-0.0, -0.0);
+ ShouldNotBeIdentical(+0.0, -0.0);
+
+ ShouldBeIdentical(1.0, 1.0);
+ ShouldNotBeIdentical(-1.0, 1.0);
+ ShouldBeIdentical(4294967295.0, 4294967295.0);
+ ShouldNotBeIdentical(-4294967295.0, 4294967295.0);
+ ShouldBeIdentical(4294967296.0, 4294967296.0);
+ ShouldBeIdentical(4294967297.0, 4294967297.0);
+ ShouldBeIdentical(1e300, 1e300);
+
+ ShouldBeIdentical(PositiveInfinity<double>(), PositiveInfinity<double>());
+ ShouldBeIdentical(NegativeInfinity<double>(), NegativeInfinity<double>());
+ ShouldNotBeIdentical(PositiveInfinity<double>(), NegativeInfinity<double>());
+
+ ShouldNotBeIdentical(-0.0, NegativeInfinity<double>());
+ ShouldNotBeIdentical(+0.0, NegativeInfinity<double>());
+ ShouldNotBeIdentical(1e300, NegativeInfinity<double>());
+ ShouldNotBeIdentical(3.141592654, NegativeInfinity<double>());
+
+ ShouldBeIdentical(UnspecifiedNaN<double>(), UnspecifiedNaN<double>());
+ ShouldBeIdentical(-UnspecifiedNaN<double>(), UnspecifiedNaN<double>());
+ ShouldBeIdentical(UnspecifiedNaN<double>(), -UnspecifiedNaN<double>());
+
+ ShouldBeIdentical(SpecificNaN<double>(0, 17), SpecificNaN<double>(0, 42));
+ ShouldBeIdentical(SpecificNaN<double>(1, 17), SpecificNaN<double>(1, 42));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17), SpecificNaN<double>(1, 42));
+ ShouldBeIdentical(SpecificNaN<double>(1, 17), SpecificNaN<double>(0, 42));
+
+ const uint64_t Mask = 0xfffffffffffffULL;
+ for (unsigned i = 0; i < 52; i++) {
+ for (unsigned j = 0; j < 52; j++) {
+ for (unsigned sign = 0; sign < 2; sign++) {
+ ShouldBeIdentical(SpecificNaN<double>(0, 1ULL << i),
+ SpecificNaN<double>(sign, 1ULL << j));
+ ShouldBeIdentical(SpecificNaN<double>(1, 1ULL << i),
+ SpecificNaN<double>(sign, 1ULL << j));
+
+ ShouldBeIdentical(SpecificNaN<double>(0, Mask & ~(1ULL << i)),
+ SpecificNaN<double>(sign, Mask & ~(1ULL << j)));
+ ShouldBeIdentical(SpecificNaN<double>(1, Mask & ~(1ULL << i)),
+ SpecificNaN<double>(sign, Mask & ~(1ULL << j)));
+ }
+ }
+ }
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x8000000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x4000000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x2000000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x1000000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0800000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0400000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0200000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0100000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0080000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0040000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0020000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(0, 17),
+ SpecificNaN<double>(0, 0x0010000000000ULL));
+ ShouldBeIdentical(SpecificNaN<double>(1, 17),
+ SpecificNaN<double>(0, 0xff0ffffffffffULL));
+ ShouldBeIdentical(SpecificNaN<double>(1, 17),
+ SpecificNaN<double>(0, 0xfffffffffff0fULL));
+
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), +0.0);
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), -0.0);
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), 1.0);
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), -1.0);
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), PositiveInfinity<double>());
+ ShouldNotBeIdentical(UnspecifiedNaN<double>(), NegativeInfinity<double>());
+}
+
+static void TestFloatsAreIdentical() {
+ ShouldBeIdentical(+0.0f, +0.0f);
+ ShouldBeIdentical(-0.0f, -0.0f);
+ ShouldNotBeIdentical(+0.0f, -0.0f);
+
+ ShouldBeIdentical(1.0f, 1.0f);
+ ShouldNotBeIdentical(-1.0f, 1.0f);
+ ShouldBeIdentical(8388607.0f, 8388607.0f);
+ ShouldNotBeIdentical(-8388607.0f, 8388607.0f);
+ ShouldBeIdentical(8388608.0f, 8388608.0f);
+ ShouldBeIdentical(8388609.0f, 8388609.0f);
+ ShouldBeIdentical(1e36f, 1e36f);
+
+ ShouldBeIdentical(PositiveInfinity<float>(), PositiveInfinity<float>());
+ ShouldBeIdentical(NegativeInfinity<float>(), NegativeInfinity<float>());
+ ShouldNotBeIdentical(PositiveInfinity<float>(), NegativeInfinity<float>());
+
+ ShouldNotBeIdentical(-0.0f, NegativeInfinity<float>());
+ ShouldNotBeIdentical(+0.0f, NegativeInfinity<float>());
+ ShouldNotBeIdentical(1e36f, NegativeInfinity<float>());
+ ShouldNotBeIdentical(3.141592654f, NegativeInfinity<float>());
+
+ ShouldBeIdentical(UnspecifiedNaN<float>(), UnspecifiedNaN<float>());
+ ShouldBeIdentical(-UnspecifiedNaN<float>(), UnspecifiedNaN<float>());
+ ShouldBeIdentical(UnspecifiedNaN<float>(), -UnspecifiedNaN<float>());
+
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 42));
+ ShouldBeIdentical(SpecificNaN<float>(1, 17), SpecificNaN<float>(1, 42));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(1, 42));
+ ShouldBeIdentical(SpecificNaN<float>(1, 17), SpecificNaN<float>(0, 42));
+
+ const uint32_t Mask = 0x7fffffUL;
+ for (unsigned i = 0; i < 23; i++) {
+ for (unsigned j = 0; j < 23; j++) {
+ for (unsigned sign = 0; sign < 2; sign++) {
+ ShouldBeIdentical(SpecificNaN<float>(0, 1UL << i),
+ SpecificNaN<float>(sign, 1UL << j));
+ ShouldBeIdentical(SpecificNaN<float>(1, 1UL << i),
+ SpecificNaN<float>(sign, 1UL << j));
+
+ ShouldBeIdentical(SpecificNaN<float>(0, Mask & ~(1UL << i)),
+ SpecificNaN<float>(sign, Mask & ~(1UL << j)));
+ ShouldBeIdentical(SpecificNaN<float>(1, Mask & ~(1UL << i)),
+ SpecificNaN<float>(sign, Mask & ~(1UL << j)));
+ }
+ }
+ }
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x700000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x400000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x200000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x100000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x080000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x040000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x020000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x010000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x008000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x004000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x002000));
+ ShouldBeIdentical(SpecificNaN<float>(0, 17), SpecificNaN<float>(0, 0x001000));
+ ShouldBeIdentical(SpecificNaN<float>(1, 17), SpecificNaN<float>(0, 0x7f0fff));
+ ShouldBeIdentical(SpecificNaN<float>(1, 17), SpecificNaN<float>(0, 0x7fff0f));
+
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), +0.0f);
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), -0.0f);
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), 1.0f);
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), -1.0f);
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), PositiveInfinity<float>());
+ ShouldNotBeIdentical(UnspecifiedNaN<float>(), NegativeInfinity<float>());
+}
+
+static void TestAreIdentical() {
+ TestDoublesAreIdentical();
+ TestFloatsAreIdentical();
+}
+
+static void TestDoubleExponentComponent() {
+ A(ExponentComponent(0.0) ==
+ -int_fast16_t(FloatingPoint<double>::kExponentBias));
+ A(ExponentComponent(-0.0) ==
+ -int_fast16_t(FloatingPoint<double>::kExponentBias));
+ A(ExponentComponent(0.125) == -3);
+ A(ExponentComponent(0.5) == -1);
+ A(ExponentComponent(1.0) == 0);
+ A(ExponentComponent(1.5) == 0);
+ A(ExponentComponent(2.0) == 1);
+ A(ExponentComponent(7.0) == 2);
+ A(ExponentComponent(PositiveInfinity<double>()) ==
+ FloatingPoint<double>::kExponentBias + 1);
+ A(ExponentComponent(NegativeInfinity<double>()) ==
+ FloatingPoint<double>::kExponentBias + 1);
+ A(ExponentComponent(UnspecifiedNaN<double>()) ==
+ FloatingPoint<double>::kExponentBias + 1);
+}
+
+static void TestFloatExponentComponent() {
+ A(ExponentComponent(0.0f) ==
+ -int_fast16_t(FloatingPoint<float>::kExponentBias));
+ A(ExponentComponent(-0.0f) ==
+ -int_fast16_t(FloatingPoint<float>::kExponentBias));
+ A(ExponentComponent(0.125f) == -3);
+ A(ExponentComponent(0.5f) == -1);
+ A(ExponentComponent(1.0f) == 0);
+ A(ExponentComponent(1.5f) == 0);
+ A(ExponentComponent(2.0f) == 1);
+ A(ExponentComponent(7.0f) == 2);
+ A(ExponentComponent(PositiveInfinity<float>()) ==
+ FloatingPoint<float>::kExponentBias + 1);
+ A(ExponentComponent(NegativeInfinity<float>()) ==
+ FloatingPoint<float>::kExponentBias + 1);
+ A(ExponentComponent(UnspecifiedNaN<float>()) ==
+ FloatingPoint<float>::kExponentBias + 1);
+}
+
+static void TestExponentComponent() {
+ TestDoubleExponentComponent();
+ TestFloatExponentComponent();
+}
+
+// Used to test Number{Is,Equals}{Int32,Int64} for -0.0, the only case where
+// NumberEquals* and NumberIs* aren't equivalent.
+template <typename T>
+static void TestEqualsIsForNegativeZero() {
+ T negZero = T(-0.0);
+
+ int32_t i32;
+ A(!NumberIsInt32(negZero, &i32));
+ A(NumberEqualsInt32(negZero, &i32));
+ A(i32 == 0);
+
+ int64_t i64;
+ A(!NumberIsInt64(negZero, &i64));
+ A(NumberEqualsInt64(negZero, &i64));
+ A(i64 == 0);
+}
+
+// Used to test Number{Is,Equals}{Int32,Int64} for int32 values.
+template <typename T>
+static void TestEqualsIsForInt32(T aVal) {
+ int32_t i32;
+ A(NumberIsInt32(aVal, &i32));
+ MOZ_ASSERT(i32 == aVal);
+ A(NumberEqualsInt32(aVal, &i32));
+ MOZ_ASSERT(i32 == aVal);
+
+ int64_t i64;
+ A(NumberIsInt64(aVal, &i64));
+ MOZ_ASSERT(i64 == aVal);
+ A(NumberEqualsInt64(aVal, &i64));
+ MOZ_ASSERT(i64 == aVal);
+}
+
+// Used to test Number{Is,Equals}{Int32,Int64} for values that fit in int64 but
+// not int32.
+template <typename T>
+static void TestEqualsIsForInt64(T aVal) {
+ int32_t i32;
+ A(!NumberIsInt32(aVal, &i32));
+ A(!NumberEqualsInt32(aVal, &i32));
+
+ int64_t i64;
+ A(NumberIsInt64(aVal, &i64));
+ MOZ_ASSERT(i64 == aVal);
+ A(NumberEqualsInt64(aVal, &i64));
+ MOZ_ASSERT(i64 == aVal);
+}
+
+// Used to test Number{Is,Equals}{Int32,Int64} for values that aren't equal to
+// any int32 or int64.
+template <typename T>
+static void TestEqualsIsForNonInteger(T aVal) {
+ int32_t i32;
+ A(!NumberIsInt32(aVal, &i32));
+ A(!NumberEqualsInt32(aVal, &i32));
+
+ int64_t i64;
+ A(!NumberIsInt64(aVal, &i64));
+ A(!NumberEqualsInt64(aVal, &i64));
+}
+
+static void TestDoublesPredicates() {
+ A(std::isnan(UnspecifiedNaN<double>()));
+ A(std::isnan(SpecificNaN<double>(1, 17)));
+ A(std::isnan(SpecificNaN<double>(0, 0xfffffffffff0fULL)));
+ A(!std::isnan(PositiveInfinity<double>()));
+ A(!std::isnan(NegativeInfinity<double>()));
+
+ A(std::isinf(PositiveInfinity<double>()));
+ A(std::isinf(NegativeInfinity<double>()));
+ A(!std::isinf(UnspecifiedNaN<double>()));
+
+ A(!std::isfinite(PositiveInfinity<double>()));
+ A(!std::isfinite(NegativeInfinity<double>()));
+ A(!std::isfinite(UnspecifiedNaN<double>()));
+
+ A(!IsNegative(PositiveInfinity<double>()));
+ A(IsNegative(NegativeInfinity<double>()));
+ A(IsNegative(-0.0));
+ A(!IsNegative(0.0));
+ A(IsNegative(-1.0));
+ A(!IsNegative(1.0));
+
+ A(!IsNegativeZero(PositiveInfinity<double>()));
+ A(!IsNegativeZero(NegativeInfinity<double>()));
+ A(!IsNegativeZero(SpecificNaN<double>(1, 17)));
+ A(!IsNegativeZero(SpecificNaN<double>(1, 0xfffffffffff0fULL)));
+ A(!IsNegativeZero(SpecificNaN<double>(0, 17)));
+ A(!IsNegativeZero(SpecificNaN<double>(0, 0xfffffffffff0fULL)));
+ A(!IsNegativeZero(UnspecifiedNaN<double>()));
+ A(IsNegativeZero(-0.0));
+ A(!IsNegativeZero(0.0));
+ A(!IsNegativeZero(-1.0));
+ A(!IsNegativeZero(1.0));
+
+ // Edge case: negative zero.
+ TestEqualsIsForNegativeZero<double>();
+
+ // Int32 values.
+ auto testInt32 = TestEqualsIsForInt32<double>;
+ testInt32(0.0);
+ testInt32(1.0);
+ testInt32(INT32_MIN);
+ testInt32(INT32_MAX);
+
+ // Int64 values that don't fit in int32.
+ auto testInt64 = TestEqualsIsForInt64<double>;
+ testInt64(2147483648);
+ testInt64(2147483649);
+ testInt64(-2147483649);
+ testInt64(INT64_MIN);
+ // Note: INT64_MAX can't be represented exactly as double. Use a large double
+ // very close to it.
+ testInt64(9223372036854772000.0);
+
+ constexpr double MinSafeInteger = -9007199254740991.0;
+ constexpr double MaxSafeInteger = 9007199254740991.0;
+ testInt64(MinSafeInteger);
+ testInt64(MaxSafeInteger);
+
+ // Doubles that aren't equal to any int32 or int64.
+ auto testNonInteger = TestEqualsIsForNonInteger<double>;
+ testNonInteger(NegativeInfinity<double>());
+ testNonInteger(PositiveInfinity<double>());
+ testNonInteger(UnspecifiedNaN<double>());
+ testNonInteger(-double(1ULL << 52) + 0.5);
+ testNonInteger(double(1ULL << 52) - 0.5);
+ testNonInteger(double(INT32_MAX) + 0.1);
+ testNonInteger(double(INT32_MIN) - 0.1);
+ testNonInteger(0.5);
+ testNonInteger(-0.0001);
+ testNonInteger(-9223372036854778000.0);
+ testNonInteger(9223372036854776000.0);
+
+ // Sanity-check that the IEEE-754 double-precision-derived literals used in
+ // testing here work as we intend them to.
+ A(exp2(-1075.0) == 0.0);
+ A(exp2(-1074.0) != 0.0);
+ testNonInteger(exp2(-1074.0));
+ testNonInteger(2 * exp2(-1074.0));
+
+ A(1.0 - exp2(-54.0) == 1.0);
+ A(1.0 - exp2(-53.0) != 1.0);
+ testNonInteger(1.0 - exp2(-53.0));
+ testNonInteger(1.0 - exp2(-52.0));
+
+ A(1.0 + exp2(-53.0) == 1.0);
+ A(1.0 + exp2(-52.0) != 1.0);
+ testNonInteger(1.0 + exp2(-52.0));
+}
+
+static void TestFloatsPredicates() {
+ A(std::isnan(UnspecifiedNaN<float>()));
+ A(std::isnan(SpecificNaN<float>(1, 17)));
+ A(std::isnan(SpecificNaN<float>(0, 0x7fff0fUL)));
+ A(!std::isnan(PositiveInfinity<float>()));
+ A(!std::isnan(NegativeInfinity<float>()));
+
+ A(std::isinf(PositiveInfinity<float>()));
+ A(std::isinf(NegativeInfinity<float>()));
+ A(!std::isinf(UnspecifiedNaN<float>()));
+
+ A(!std::isfinite(PositiveInfinity<float>()));
+ A(!std::isfinite(NegativeInfinity<float>()));
+ A(!std::isfinite(UnspecifiedNaN<float>()));
+
+ A(!IsNegative(PositiveInfinity<float>()));
+ A(IsNegative(NegativeInfinity<float>()));
+ A(IsNegative(-0.0f));
+ A(!IsNegative(0.0f));
+ A(IsNegative(-1.0f));
+ A(!IsNegative(1.0f));
+
+ A(!IsNegativeZero(PositiveInfinity<float>()));
+ A(!IsNegativeZero(NegativeInfinity<float>()));
+ A(!IsNegativeZero(SpecificNaN<float>(1, 17)));
+ A(!IsNegativeZero(SpecificNaN<float>(1, 0x7fff0fUL)));
+ A(!IsNegativeZero(SpecificNaN<float>(0, 17)));
+ A(!IsNegativeZero(SpecificNaN<float>(0, 0x7fff0fUL)));
+ A(!IsNegativeZero(UnspecifiedNaN<float>()));
+ A(IsNegativeZero(-0.0f));
+ A(!IsNegativeZero(0.0f));
+ A(!IsNegativeZero(-1.0f));
+ A(!IsNegativeZero(1.0f));
+
+ A(!IsPositiveZero(PositiveInfinity<float>()));
+ A(!IsPositiveZero(NegativeInfinity<float>()));
+ A(!IsPositiveZero(SpecificNaN<float>(1, 17)));
+ A(!IsPositiveZero(SpecificNaN<float>(1, 0x7fff0fUL)));
+ A(!IsPositiveZero(SpecificNaN<float>(0, 17)));
+ A(!IsPositiveZero(SpecificNaN<float>(0, 0x7fff0fUL)));
+ A(!IsPositiveZero(UnspecifiedNaN<float>()));
+ A(IsPositiveZero(0.0f));
+ A(!IsPositiveZero(-0.0f));
+ A(!IsPositiveZero(-1.0f));
+ A(!IsPositiveZero(1.0f));
+
+ // Edge case: negative zero.
+ TestEqualsIsForNegativeZero<float>();
+
+ // Int32 values.
+ auto testInt32 = TestEqualsIsForInt32<float>;
+ testInt32(0.0f);
+ testInt32(1.0f);
+ testInt32(INT32_MIN);
+ testInt32(float(2147483648 - 128)); // max int32_t fitting in float
+ const int32_t BIG = 2097151;
+ testInt32(BIG);
+
+ // Int64 values that don't fit in int32.
+ auto testInt64 = TestEqualsIsForInt64<float>;
+ testInt64(INT64_MIN);
+ testInt64(9007199254740992.0f);
+ testInt64(-float(2147483648) - 256);
+ testInt64(float(2147483648));
+ testInt64(float(2147483648) + 256);
+
+ // Floats that aren't equal to any int32 or int64.
+ auto testNonInteger = TestEqualsIsForNonInteger<float>;
+ testNonInteger(NegativeInfinity<float>());
+ testNonInteger(PositiveInfinity<float>());
+ testNonInteger(UnspecifiedNaN<float>());
+ testNonInteger(0.5f);
+ testNonInteger(1.5f);
+ testNonInteger(-0.0001f);
+ testNonInteger(-19223373116872850000.0f);
+ testNonInteger(19223373116872850000.0f);
+ testNonInteger(float(BIG) + 0.1f);
+
+ A(powf(2.0f, -150.0f) == 0.0f);
+ A(powf(2.0f, -149.0f) != 0.0f);
+ testNonInteger(powf(2.0f, -149.0f));
+ testNonInteger(2 * powf(2.0f, -149.0f));
+
+ A(1.0f - powf(2.0f, -25.0f) == 1.0f);
+ A(1.0f - powf(2.0f, -24.0f) != 1.0f);
+ testNonInteger(1.0f - powf(2.0f, -24.0f));
+ testNonInteger(1.0f - powf(2.0f, -23.0f));
+
+ A(1.0f + powf(2.0f, -24.0f) == 1.0f);
+ A(1.0f + powf(2.0f, -23.0f) != 1.0f);
+ testNonInteger(1.0f + powf(2.0f, -23.0f));
+}
+
+static void TestPredicates() {
+ TestFloatsPredicates();
+ TestDoublesPredicates();
+}
+
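+// FuzzyEqualsAdditive compares the absolute difference against epsilon, so it
+// is only meaningful for values near the epsilon's own magnitude;
+// FuzzyEqualsMultiplicative scales the tolerance by the operands' magnitude,
+// which is why the same default epsilon works at 1e30 and at 1e-30 below.
+// NaN is never fuzzily equal to anything, not even itself.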
+static void TestFloatsAreApproximatelyEqual() {
+ float epsilon = mozilla::detail::FuzzyEqualsEpsilon<float>::value();
+ float lessThanEpsilon = epsilon / 2.0f;
+ float moreThanEpsilon = epsilon * 2.0f;
+
+ // Additive tests using the default epsilon
+ // ... around 1.0
+ A(FuzzyEqualsAdditive(1.0f, 1.0f + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0f, 1.0f - lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0f, 1.0f + epsilon));
+ A(FuzzyEqualsAdditive(1.0f, 1.0f - epsilon));
+ A(!FuzzyEqualsAdditive(1.0f, 1.0f + moreThanEpsilon));
+ A(!FuzzyEqualsAdditive(1.0f, 1.0f - moreThanEpsilon));
+ // ... around 1.0e2 (this is near the upper bound of the range where
+ // adding moreThanEpsilon will still be representable and return false)
+ A(FuzzyEqualsAdditive(1.0e2f, 1.0e2f + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0e2f, 1.0e2f + epsilon));
+ A(!FuzzyEqualsAdditive(1.0e2f, 1.0e2f + moreThanEpsilon));
+ // ... around 1.0e-10
+ A(FuzzyEqualsAdditive(1.0e-10f, 1.0e-10f + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0e-10f, 1.0e-10f + epsilon));
+ A(!FuzzyEqualsAdditive(1.0e-10f, 1.0e-10f + moreThanEpsilon));
+ // ... straddling 0
+ A(FuzzyEqualsAdditive(1.0e-6f, -1.0e-6f));
+ A(!FuzzyEqualsAdditive(1.0e-5f, -1.0e-5f));
+ // Using a small epsilon
+ A(FuzzyEqualsAdditive(1.0e-5f, 1.0e-5f + 1.0e-10f, 1.0e-9f));
+ A(!FuzzyEqualsAdditive(1.0e-5f, 1.0e-5f + 1.0e-10f, 1.0e-11f));
+ // Using a big epsilon
+ A(FuzzyEqualsAdditive(1.0e20f, 1.0e20f + 1.0e15f, 1.0e16f));
+ A(!FuzzyEqualsAdditive(1.0e20f, 1.0e20f + 1.0e15f, 1.0e14f));
+
+ // Multiplicative tests using the default epsilon
+ // ... around 1.0
+ A(FuzzyEqualsMultiplicative(1.0f, 1.0f + lessThanEpsilon));
+ A(FuzzyEqualsMultiplicative(1.0f, 1.0f - lessThanEpsilon));
+ A(FuzzyEqualsMultiplicative(1.0f, 1.0f + epsilon));
+ A(!FuzzyEqualsMultiplicative(1.0f, 1.0f - epsilon));
+ A(!FuzzyEqualsMultiplicative(1.0f, 1.0f + moreThanEpsilon));
+ A(!FuzzyEqualsMultiplicative(1.0f, 1.0f - moreThanEpsilon));
+ // ... around 1.0e10
+ A(FuzzyEqualsMultiplicative(1.0e10f, 1.0e10f + (lessThanEpsilon * 1.0e10f)));
+ A(!FuzzyEqualsMultiplicative(1.0e10f, 1.0e10f + (moreThanEpsilon * 1.0e10f)));
+ // ... around 1.0e-10
+ A(FuzzyEqualsMultiplicative(1.0e-10f,
+ 1.0e-10f + (lessThanEpsilon * 1.0e-10f)));
+ A(!FuzzyEqualsMultiplicative(1.0e-10f,
+ 1.0e-10f + (moreThanEpsilon * 1.0e-10f)));
+ // ... straddling 0
+ A(!FuzzyEqualsMultiplicative(1.0e-6f, -1.0e-6f));
+ A(FuzzyEqualsMultiplicative(1.0e-6f, -1.0e-6f, 1.0e2f));
+ // Using a small epsilon
+ A(FuzzyEqualsMultiplicative(1.0e-5f, 1.0e-5f + 1.0e-10f, 1.0e-4f));
+ A(!FuzzyEqualsMultiplicative(1.0e-5f, 1.0e-5f + 1.0e-10f, 1.0e-5f));
+ // Using a big epsilon
+ A(FuzzyEqualsMultiplicative(1.0f, 2.0f, 1.0f));
+ A(!FuzzyEqualsMultiplicative(1.0f, 2.0f, 0.1f));
+
+ // "real world case"
+ float oneThird = 10.0f / 3.0f;
+ A(FuzzyEqualsAdditive(10.0f, 3.0f * oneThird));
+ A(FuzzyEqualsMultiplicative(10.0f, 3.0f * oneThird));
+ // NaN check
+ A(!FuzzyEqualsAdditive(SpecificNaN<float>(1, 1), SpecificNaN<float>(1, 1)));
+ A(!FuzzyEqualsAdditive(SpecificNaN<float>(1, 2), SpecificNaN<float>(0, 8)));
+ A(!FuzzyEqualsMultiplicative(SpecificNaN<float>(1, 1),
+ SpecificNaN<float>(1, 1)));
+ A(!FuzzyEqualsMultiplicative(SpecificNaN<float>(1, 2),
+ SpecificNaN<float>(0, 200)));
+}
+
+static void TestDoublesAreApproximatelyEqual() {
+ double epsilon = mozilla::detail::FuzzyEqualsEpsilon<double>::value();
+ double lessThanEpsilon = epsilon / 2.0;
+ double moreThanEpsilon = epsilon * 2.0;
+
+ // Additive tests using the default epsilon
+ // ... around 1.0
+ A(FuzzyEqualsAdditive(1.0, 1.0 + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0, 1.0 - lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0, 1.0 + epsilon));
+ A(FuzzyEqualsAdditive(1.0, 1.0 - epsilon));
+ A(!FuzzyEqualsAdditive(1.0, 1.0 + moreThanEpsilon));
+ A(!FuzzyEqualsAdditive(1.0, 1.0 - moreThanEpsilon));
+ // ... around 1.0e4 (this is near the upper bound of the range where
+ // adding moreThanEpsilon will still be representable and return false)
+ A(FuzzyEqualsAdditive(1.0e4, 1.0e4 + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0e4, 1.0e4 + epsilon));
+ A(!FuzzyEqualsAdditive(1.0e4, 1.0e4 + moreThanEpsilon));
+ // ... around 1.0e-25
+ A(FuzzyEqualsAdditive(1.0e-25, 1.0e-25 + lessThanEpsilon));
+ A(FuzzyEqualsAdditive(1.0e-25, 1.0e-25 + epsilon));
+ A(!FuzzyEqualsAdditive(1.0e-25, 1.0e-25 + moreThanEpsilon));
+ // ... straddling 0
+ A(FuzzyEqualsAdditive(1.0e-13, -1.0e-13));
+ A(!FuzzyEqualsAdditive(1.0e-12, -1.0e-12));
+ // Using a small epsilon
+ A(FuzzyEqualsAdditive(1.0e-15, 1.0e-15 + 1.0e-30, 1.0e-29));
+ A(!FuzzyEqualsAdditive(1.0e-15, 1.0e-15 + 1.0e-30, 1.0e-31));
+ // Using a big epsilon
+ A(FuzzyEqualsAdditive(1.0e40, 1.0e40 + 1.0e25, 1.0e26));
+ A(!FuzzyEqualsAdditive(1.0e40, 1.0e40 + 1.0e25, 1.0e24));
+
+ // Multiplicative tests using the default epsilon
+ // ... around 1.0
+ A(FuzzyEqualsMultiplicative(1.0, 1.0 + lessThanEpsilon));
+ A(FuzzyEqualsMultiplicative(1.0, 1.0 - lessThanEpsilon));
+ A(FuzzyEqualsMultiplicative(1.0, 1.0 + epsilon));
+ A(!FuzzyEqualsMultiplicative(1.0, 1.0 - epsilon));
+ A(!FuzzyEqualsMultiplicative(1.0, 1.0 + moreThanEpsilon));
+ A(!FuzzyEqualsMultiplicative(1.0, 1.0 - moreThanEpsilon));
+ // ... around 1.0e30
+ A(FuzzyEqualsMultiplicative(1.0e30, 1.0e30 + (lessThanEpsilon * 1.0e30)));
+ A(!FuzzyEqualsMultiplicative(1.0e30, 1.0e30 + (moreThanEpsilon * 1.0e30)));
+ // ... around 1.0e-30
+ A(FuzzyEqualsMultiplicative(1.0e-30, 1.0e-30 + (lessThanEpsilon * 1.0e-30)));
+ A(!FuzzyEqualsMultiplicative(1.0e-30, 1.0e-30 + (moreThanEpsilon * 1.0e-30)));
+ // ... straddling 0
+ A(!FuzzyEqualsMultiplicative(1.0e-6, -1.0e-6));
+ A(FuzzyEqualsMultiplicative(1.0e-6, -1.0e-6, 1.0e2));
+ // Using a small epsilon
+ A(FuzzyEqualsMultiplicative(1.0e-15, 1.0e-15 + 1.0e-30, 1.0e-15));
+ A(!FuzzyEqualsMultiplicative(1.0e-15, 1.0e-15 + 1.0e-30, 1.0e-16));
+ // Using a big epsilon
+ A(FuzzyEqualsMultiplicative(1.0e40, 2.0e40, 1.0));
+ A(!FuzzyEqualsMultiplicative(1.0e40, 2.0e40, 0.1));
+
+ // "real world case"
+ double oneThird = 10.0 / 3.0;
+ A(FuzzyEqualsAdditive(10.0, 3.0 * oneThird));
+ A(FuzzyEqualsMultiplicative(10.0, 3.0 * oneThird));
+ // NaN check
+ A(!FuzzyEqualsAdditive(SpecificNaN<double>(1, 1), SpecificNaN<double>(1, 1)));
+ A(!FuzzyEqualsAdditive(SpecificNaN<double>(1, 2), SpecificNaN<double>(0, 8)));
+ A(!FuzzyEqualsMultiplicative(SpecificNaN<double>(1, 1),
+ SpecificNaN<double>(1, 1)));
+ A(!FuzzyEqualsMultiplicative(SpecificNaN<double>(1, 2),
+ SpecificNaN<double>(0, 200)));
+}
+
+static void TestAreApproximatelyEqual() {
+ TestFloatsAreApproximatelyEqual();
+ TestDoublesAreApproximatelyEqual();
+}
+
+static void TestIsFloat32Representable() {
+ // Zeroes are representable.
+ A(IsFloat32Representable(+0.0));
+ A(IsFloat32Representable(-0.0));
+
+ // NaN and infinities are representable.
+ A(IsFloat32Representable(UnspecifiedNaN<double>()));
+ A(IsFloat32Representable(SpecificNaN<double>(0, 1)));
+ A(IsFloat32Representable(SpecificNaN<double>(0, 71389)));
+ A(IsFloat32Representable(SpecificNaN<double>(0, (uint64_t(1) << 52) - 2)));
+ A(IsFloat32Representable(SpecificNaN<double>(1, 1)));
+ A(IsFloat32Representable(SpecificNaN<double>(1, 71389)));
+ A(IsFloat32Representable(SpecificNaN<double>(1, (uint64_t(1) << 52) - 2)));
+ A(IsFloat32Representable(PositiveInfinity<double>()));
+ A(IsFloat32Representable(NegativeInfinity<double>()));
+
+ // Sanity-check that the IEEE-754 double-precision-derived literals used in
+ // testing here work as we intend them to.
+ A(exp2(-1075.0) == 0.0);
+ A(exp2(-1074.0) != 0.0);
+
+ for (double littleExp = -1074.0; littleExp < -149.0; littleExp++) {
+ // Powers of two representable as doubles but not as floats aren't
+ // representable.
+ A(!IsFloat32Representable(exp2(littleExp)));
+ }
+
+ // Sanity-check that the IEEE-754 single-precision-derived literals used in
+ // testing here work as we intend them to.
+ A(exp2f(-150.0f) == 0.0);
+ A(exp2f(-149.0f) != 0.0);
+
+ // Exact powers of two within the available range are representable.
+ for (double exponent = -149.0; exponent < 128.0; exponent++) {
+ A(IsFloat32Representable(exp2(exponent)));
+ }
+
+ // Powers of two above the available range aren't representable.
+ for (double bigExp = 128.0; bigExp < 1024.0; bigExp++) {
+ A(!IsFloat32Representable(exp2(bigExp)));
+ }
+
+ // Various denormal (i.e. super-small) doubles with MSB and LSB as far apart
+ // as possible are representable (but taken one bit further apart are not
+ // representable).
+ //
+ // Note that the final iteration tests non-denormal with exponent field
+ // containing (biased) 1, as |oneTooSmall| and |widestPossible| happen still
+ // to be correct for that exponent due to the extra bit of precision in the
+ // implicit-one bit.
+ double oneTooSmall = exp2(-150.0);
+ for (double denormExp = -149.0;
+ denormExp < 1 - double(FloatingPoint<double>::kExponentBias) + 1;
+ denormExp++) {
+ double baseDenorm = exp2(denormExp);
+ double tooWide = baseDenorm + oneTooSmall;
+ A(!IsFloat32Representable(tooWide));
+
+ double widestPossible = baseDenorm;
+ if (oneTooSmall * 2.0 != baseDenorm) {
+ widestPossible += oneTooSmall * 2.0;
+ }
+
+ A(IsFloat32Representable(widestPossible));
+ }
+
+ // Finally, check certain interesting/special values for basic sanity.
+ A(!IsFloat32Representable(2147483647.0));
+ A(!IsFloat32Representable(-2147483647.0));
+}
+
+#undef A
+
+int main() {
+ TestAreIdentical();
+ TestExponentComponent();
+ TestPredicates();
+ TestAreApproximatelyEqual();
+ TestIsFloat32Representable();
+ return 0;
+}
diff --git a/mfbt/tests/TestFunctionRef.cpp b/mfbt/tests/TestFunctionRef.cpp
new file mode 100644
index 0000000000..0ae1d4f193
--- /dev/null
+++ b/mfbt/tests/TestFunctionRef.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FunctionRef.h"
+#include "mozilla/UniquePtr.h"
+
+using mozilla::FunctionRef;
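+
+// FunctionRef is a non-owning, type-erased reference to a callable; it is
+// cheap to pass by value, but the referenced callable must outlive the call.
+// Every case below satisfies that, since each target lives for at least the
+// full statement.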
+
+#define CHECK(c) \
+ do { \
+ bool cond = !!(c); \
+ MOZ_RELEASE_ASSERT(cond, "Failed assertion: " #c); \
+ } while (false)
+
+int addConstRefs(const int& arg1, const int& arg2) { return arg1 + arg2; }
+
+void incrementPointer(int* arg) { (*arg)++; }
+
+int increment(int arg) { return arg + 1; }
+
+int incrementUnique(mozilla::UniquePtr<int> ptr) { return *ptr + 1; }
+
+static bool helloWorldCalled = false;
+
+void helloWorld() { helloWorldCalled = true; }
+
+struct S {
+ static int increment(int arg) { return arg + 1; }
+};
+
+struct Incrementor {
+ int operator()(int arg) { return arg + 1; }
+};
+
+
+template <typename Fn, typename... Params>
+std::invoke_result_t<Fn, Params...> CallFunctionRef(FunctionRef<Fn> aRef,
+ Params... aParams) {
+ return aRef(std::forward<Params>(aParams)...);
+}
+
+static void TestNonmemberFunction() {
+ CHECK(CallFunctionRef<int(int)>(increment, 42) == 43);
+}
+
+static void TestStaticMemberFunction() {
+ CHECK(CallFunctionRef<int(int)>(&S::increment, 42) == 43);
+}
+
+static void TestFunctionObject() {
+ auto incrementor = Incrementor();
+ CHECK(CallFunctionRef<int(int)>(incrementor, 42) == 43);
+}
+
+static void TestFunctionObjectTemporary() {
+ CHECK(CallFunctionRef<int(int)>(Incrementor(), 42) == 43);
+}
+
+static void TestLambda() {
+ // Test non-capturing lambda
+ auto lambda1 = [](int arg) { return arg + 1; };
+ CHECK(CallFunctionRef<int(int)>(lambda1, 42) == 43);
+
+ // Test capturing lambda
+ int one = 1;
+ auto lambda2 = [one](int arg) { return arg + one; };
+ CHECK(CallFunctionRef<int(int)>(lambda2, 42) == 43);
+
+ CHECK(CallFunctionRef<int(int)>([](int arg) { return arg + 1; }, 42) == 43);
+}
+
+static void TestOperatorBool() {
+ auto ToBool = [](FunctionRef<int(int)> aRef) {
+ return static_cast<bool>(aRef);
+ };
+ CHECK(!ToBool({}));
+ CHECK(ToBool(increment));
+ CHECK(!ToBool(nullptr));
+}
+
+static void TestReferenceParameters() {
+ int x = 1;
+ int y = 2;
+ CHECK(CallFunctionRef<int(const int&, const int&)>(addConstRefs, x, y) == 3);
+}
+
+static void TestVoidNoParameters() {
+ CHECK(!helloWorldCalled);
+ CallFunctionRef<void()>(helloWorld);
+ CHECK(helloWorldCalled);
+}
+
+static void TestPointerParameters() {
+ int x = 1;
+ CallFunctionRef<void(int*)>(incrementPointer, &x);
+ CHECK(x == 2);
+}
+
+static void TestImplicitFunctorTypeConversion() {
+ auto incrementor = Incrementor();
+ short x = 1;
+ CHECK(CallFunctionRef<long(short)>(incrementor, x) == 2);
+}
+
+static void TestImplicitLambdaTypeConversion() {
+ short x = 1;
+ CHECK(CallFunctionRef<long(short)>([](short arg) { return arg + 1; }, x) ==
+ 2);
+}
+
+static void TestImplicitFunctionPointerTypeConversion() {
+ short x = 1;
+ CHECK(CallFunctionRef<long(short)>(&increment, x) == 2);
+}
+
+static void TestMoveOnlyArguments() {
+ CHECK(CallFunctionRef<int(mozilla::UniquePtr<int>)>(
+ &incrementUnique, mozilla::MakeUnique<int>(5)) == 6);
+}
+
+int main() {
+ TestNonmemberFunction();
+ TestStaticMemberFunction();
+ TestFunctionObject();
+ TestFunctionObjectTemporary();
+ TestLambda();
+ TestOperatorBool();
+ TestReferenceParameters();
+ TestPointerParameters();
+ TestVoidNoParameters();
+ TestImplicitFunctorTypeConversion();
+ TestImplicitLambdaTypeConversion();
+ TestImplicitFunctionPointerTypeConversion();
+ TestMoveOnlyArguments();
+
+ printf("TestFunctionRef OK!\n");
+ return 0;
+}
diff --git a/mfbt/tests/TestFunctionTypeTraits.cpp b/mfbt/tests/TestFunctionTypeTraits.cpp
new file mode 100644
index 0000000000..eb9593fbbf
--- /dev/null
+++ b/mfbt/tests/TestFunctionTypeTraits.cpp
@@ -0,0 +1,232 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/FunctionTypeTraits.h"
+
+#include <functional>
+#include <type_traits>
+
+using mozilla::FunctionTypeTraits;
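+
+// FunctionTypeTraits decomposes a function, member-function pointer, functor,
+// or lambda type into its ReturnType, its arity, and each ParameterType<N>
+// (yielding void where no such parameter exists). The static_asserts below
+// exercise all of these, including NS_STDCALL variants where available.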
+
+void f0() {}
+
+int f1(char) { return 0; }
+
+#ifdef NS_HAVE_STDCALL
+void NS_STDCALL f0s() {}
+
+int NS_STDCALL f1s(char) { return 0; }
+#endif // NS_HAVE_STDCALL
+
+struct S {
+ void f0() {}
+ void f0c() const {}
+ int f1(char) { return 0; }
+ int f1c(char) const { return 0; }
+#ifdef NS_HAVE_STDCALL
+ void NS_STDCALL f0s() {}
+ void NS_STDCALL f0cs() const {}
+ int NS_STDCALL f1s(char) { return 0; }
+ int NS_STDCALL f1cs(char) const { return 0; }
+#endif // NS_HAVE_STDCALL
+};
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(f0)>::ReturnType,
+ void>::value,
+ "f0 returns void");
+static_assert(FunctionTypeTraits<decltype(f0)>::arity == 0,
+ "f0 takes no parameters");
+static_assert(
+ std::is_same<
+ typename FunctionTypeTraits<decltype(f0)>::template ParameterType<0>,
+ void>::value,
+ "f0 has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f0)>::ReturnType,
+ void>::value,
+ "S::f0 returns void");
+static_assert(FunctionTypeTraits<decltype(&S::f0)>::arity == 0,
+ "S::f0 takes no parameters");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f0)>::template ParameterType<0>,
+ void>::value,
+ "S::f0 has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f0c)>::ReturnType,
+ void>::value,
+ "S::f0c returns void");
+static_assert(FunctionTypeTraits<decltype(&S::f0c)>::arity == 0,
+ "S::f0c takes no parameters");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f0c)>::template ParameterType<0>,
+ void>::value,
+ "S::f0c has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(f1)>::ReturnType,
+ int>::value,
+ "f1 returns int");
+static_assert(FunctionTypeTraits<decltype(f1)>::arity == 1,
+ "f1 takes one parameter");
+static_assert(
+ std::is_same<
+ typename FunctionTypeTraits<decltype(f1)>::template ParameterType<0>,
+ char>::value,
+ "f1 takes a char");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f1)>::ReturnType,
+ int>::value,
+ "S::f1 returns int");
+static_assert(FunctionTypeTraits<decltype(&S::f1)>::arity == 1,
+ "S::f1 takes one parameter");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f1)>::template ParameterType<0>,
+ char>::value,
+ "S::f1 takes a char");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f1c)>::ReturnType,
+ int>::value,
+ "S::f1c returns int");
+static_assert(FunctionTypeTraits<decltype(&S::f1c)>::arity == 1,
+ "S::f1c takes one parameter");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f1c)>::template ParameterType<0>,
+ char>::value,
+ "S::f1c takes a char");
+
+#ifdef NS_HAVE_STDCALL
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(f0s)>::ReturnType,
+ void>::value,
+ "f0s returns void");
+static_assert(FunctionTypeTraits<decltype(f0s)>::arity == 0,
+ "f0s takes no parameters");
+static_assert(
+ std::is_same<
+ typename FunctionTypeTraits<decltype(f0s)>::template ParameterType<0>,
+ void>::value,
+ "f0s has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f0s)>::ReturnType,
+ void>::value,
+ "S::f0s returns void");
+static_assert(FunctionTypeTraits<decltype(&S::f0s)>::arity == 0,
+ "S::f0s takes no parameters");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f0s)>::template ParameterType<0>,
+ void>::value,
+ "S::f0s has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f0cs)>::ReturnType,
+ void>::value,
+ "S::f0cs returns void");
+static_assert(FunctionTypeTraits<decltype(&S::f0cs)>::arity == 0,
+ "S::f0cs takes no parameters");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f0cs)>::template ParameterType<0>,
+ void>::value,
+ "S::f0cs has no first parameter");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(f1s)>::ReturnType,
+ int>::value,
+ "f1s returns int");
+static_assert(FunctionTypeTraits<decltype(f1s)>::arity == 1,
+ "f1s takes one parameter");
+static_assert(
+ std::is_same<
+ typename FunctionTypeTraits<decltype(f1s)>::template ParameterType<0>,
+ char>::value,
+ "f1s takes a char");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f1s)>::ReturnType,
+ int>::value,
+ "S::f1s returns int");
+static_assert(FunctionTypeTraits<decltype(&S::f1s)>::arity == 1,
+ "S::f1s takes one parameter");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f1s)>::template ParameterType<0>,
+ char>::value,
+ "S::f1s takes a char");
+
+static_assert(
+ std::is_same<typename FunctionTypeTraits<decltype(&S::f1cs)>::ReturnType,
+ int>::value,
+ "S::f1cs returns int");
+static_assert(FunctionTypeTraits<decltype(&S::f1cs)>::arity == 1,
+ "S::f1cs takes one parameter");
+static_assert(std::is_same<typename FunctionTypeTraits<
+ decltype(&S::f1cs)>::template ParameterType<0>,
+ char>::value,
+ "S::f1cs takes a char");
+#endif // NS_HAVE_STDCALL
+
+template <typename F>
+void TestVoidVoid(F&&) {
+ static_assert(
+ std::is_same<typename FunctionTypeTraits<F>::ReturnType, void>::value,
+ "Should return void");
+ static_assert(FunctionTypeTraits<F>::arity == 0, "Should take no parameters");
+ static_assert(
+ std::is_same<typename FunctionTypeTraits<F>::template ParameterType<0>,
+ void>::value,
+ "Should have no first parameter");
+}
+
+template <typename F>
+void TestIntChar(F&&) {
+ static_assert(
+ std::is_same<typename FunctionTypeTraits<F>::ReturnType, int>::value,
+ "Should return int");
+ static_assert(FunctionTypeTraits<F>::arity == 1, "Should take one parameter");
+ static_assert(
+ std::is_same<typename FunctionTypeTraits<F>::template ParameterType<0>,
+ char>::value,
+ "Should take a char");
+}
+
+int main() {
+ TestVoidVoid(f0);
+ TestVoidVoid(&f0);
+ TestVoidVoid(&S::f0);
+ TestVoidVoid(&S::f0c);
+ TestVoidVoid([]() {});
+ std::function<void()> ff0 = f0;
+ TestVoidVoid(ff0);
+
+ TestIntChar(f1);
+ TestIntChar(&f1);
+ TestIntChar(&S::f1);
+ TestIntChar(&S::f1c);
+ TestIntChar([](char) { return 0; });
+ std::function<int(char)> ff1 = f1;
+ TestIntChar(ff1);
+
+#ifdef NS_HAVE_STDCALL
+ TestVoidVoid(f0s);
+ TestVoidVoid(&f0s);
+ TestVoidVoid(&S::f0s);
+ TestVoidVoid(&S::f0cs);
+ std::function<void()> ff0s = f0s;
+ TestVoidVoid(ff0s);
+
+ TestIntChar(f1s);
+ TestIntChar(&f1s);
+ TestIntChar(&S::f1s);
+ TestIntChar(&S::f1cs);
+ std::function<int(char)> ff1s = f1s;
+ TestIntChar(ff1s);
+#endif // NS_HAVE_STDCALL
+
+ return 0;
+}
diff --git a/mfbt/tests/TestHashTable.cpp b/mfbt/tests/TestHashTable.cpp
new file mode 100644
index 0000000000..c648184040
--- /dev/null
+++ b/mfbt/tests/TestHashTable.cpp
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/HashTable.h"
+#include "mozilla/PairHash.h"
+
+#include <utility>
+
+void TestMoveConstructor() {
+ using namespace mozilla;
+
+ HashMap<int, int> map;
+ MOZ_RELEASE_ASSERT(map.putNew(3, 32));
+ MOZ_RELEASE_ASSERT(map.putNew(4, 42));
+ MOZ_RELEASE_ASSERT(map.count() == 2);
+ MOZ_RELEASE_ASSERT(!map.empty());
+ MOZ_RELEASE_ASSERT(!map.lookup(2));
+ MOZ_RELEASE_ASSERT(map.lookup(3)->value() == 32);
+ MOZ_RELEASE_ASSERT(map.lookup(4)->value() == 42);
+
+ HashMap<int, int> moved = std::move(map);
+ MOZ_RELEASE_ASSERT(moved.count() == 2);
+ MOZ_RELEASE_ASSERT(!moved.empty());
+ MOZ_RELEASE_ASSERT(!moved.lookup(2));
+ MOZ_RELEASE_ASSERT(moved.lookup(3)->value() == 32);
+ MOZ_RELEASE_ASSERT(moved.lookup(4)->value() == 42);
+
+ MOZ_RELEASE_ASSERT(map.empty());
+ MOZ_RELEASE_ASSERT(!map.count());
+}
+
+enum SimpleEnum { SIMPLE_1, SIMPLE_2 };
+
+enum class ClassEnum : int {
+ CLASS_ENUM_1,
+ CLASS_ENUM_2,
+};
+
+void TestEnumHash() {
+ using namespace mozilla;
+
+ HashMap<SimpleEnum, int> map;
+ MOZ_RELEASE_ASSERT(map.put(SIMPLE_1, 1));
+ MOZ_RELEASE_ASSERT(map.put(SIMPLE_2, 2));
+
+ MOZ_RELEASE_ASSERT(map.lookup(SIMPLE_1)->value() == 1);
+ MOZ_RELEASE_ASSERT(map.lookup(SIMPLE_2)->value() == 2);
+
+ HashMap<ClassEnum, int> map2;
+ MOZ_RELEASE_ASSERT(map2.put(ClassEnum::CLASS_ENUM_1, 1));
+ MOZ_RELEASE_ASSERT(map2.put(ClassEnum::CLASS_ENUM_2, 2));
+
+ MOZ_RELEASE_ASSERT(map2.lookup(ClassEnum::CLASS_ENUM_1)->value() == 1);
+ MOZ_RELEASE_ASSERT(map2.lookup(ClassEnum::CLASS_ENUM_2)->value() == 2);
+}
+
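+// A note on the hashers exercised below: PairHasher and CompactPairHasher
+// (from mozilla/PairHash.h) hash both members of the pair, so keys that
+// differ in either member -- (1, true) vs. (1, false) here -- are distinct
+// entries, which is what the putNew calls rely on.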
+void TestHashPair() {
+ using namespace mozilla;
+
+ // Test with std::pair
+ {
+ HashMap<std::pair<int, bool>, int, PairHasher<int, bool>> map;
+ std::pair<int, bool> key1 = std::make_pair(1, true);
+ MOZ_RELEASE_ASSERT(map.putNew(key1, 1));
+ MOZ_RELEASE_ASSERT(map.has(key1));
+ std::pair<int, bool> key2 = std::make_pair(1, false);
+ MOZ_RELEASE_ASSERT(map.putNew(key2, 1));
+ std::pair<int, bool> key3 = std::make_pair(2, false);
+ MOZ_RELEASE_ASSERT(map.putNew(key3, 2));
+ MOZ_RELEASE_ASSERT(map.has(key3));
+
+ MOZ_RELEASE_ASSERT(map.lookup(key1)->value() == 1);
+ MOZ_RELEASE_ASSERT(map.lookup(key2)->value() == 1);
+ MOZ_RELEASE_ASSERT(map.lookup(key3)->value() == 2);
+ }
+ // Test with CompactPair
+ {
+ HashMap<mozilla::CompactPair<int, bool>, int, CompactPairHasher<int, bool>>
+ map;
+ mozilla::CompactPair<int, bool> key1 = mozilla::MakeCompactPair(1, true);
+ MOZ_RELEASE_ASSERT(map.putNew(key1, 1));
+ MOZ_RELEASE_ASSERT(map.has(key1));
+ mozilla::CompactPair<int, bool> key2 = mozilla::MakeCompactPair(1, false);
+ MOZ_RELEASE_ASSERT(map.putNew(key2, 1));
+ mozilla::CompactPair<int, bool> key3 = mozilla::MakeCompactPair(2, false);
+ MOZ_RELEASE_ASSERT(map.putNew(key3, 2));
+ MOZ_RELEASE_ASSERT(map.has(key3));
+
+ MOZ_RELEASE_ASSERT(map.lookup(key1)->value() == 1);
+ MOZ_RELEASE_ASSERT(map.lookup(key2)->value() == 1);
+ MOZ_RELEASE_ASSERT(map.lookup(key3)->value() == 2);
+ }
+}
+
+int main() {
+ TestMoveConstructor();
+ TestEnumHash();
+ TestHashPair();
+ return 0;
+}
diff --git a/mfbt/tests/TestIntegerRange.cpp b/mfbt/tests/TestIntegerRange.cpp
new file mode 100644
index 0000000000..3aad90fcc1
--- /dev/null
+++ b/mfbt/tests/TestIntegerRange.cpp
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/IntegerRange.h"
+
+#include <stddef.h>
+#include <stdlib.h>  // for rand()
+#include <type_traits>
+
+using mozilla::IntegerRange;
+using mozilla::Reversed;
+
+const size_t kMaxNumber = 50;
+const size_t kArraySize = 256;
+
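+// GenerateNumber below returns a value in [1, kMaxNumber]. Note that rand()
+// is never seeded here; an unseeded rand() behaves as if srand(1) had been
+// called, so every run exercises the same pseudo-random sequence.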
+template <typename IntType>
+static IntType GenerateNumber() {
+ return static_cast<IntType>(rand() % kMaxNumber + 1);
+}
+
+template <typename IntType>
+static void TestSingleParamRange(const IntType aN) {
+ IntType array[kArraySize];
+ IntType* ptr = array;
+ for (auto i : IntegerRange(aN)) {
+ static_assert(std::is_same_v<decltype(i), IntType>,
+ "type of the loop var and the param should be the same");
+ *ptr++ = i;
+ }
+
+ MOZ_RELEASE_ASSERT(ptr - array == static_cast<ptrdiff_t>(aN),
+ "Should iterates N items");
+ for (size_t i = 0; i < static_cast<size_t>(aN); i++) {
+ MOZ_RELEASE_ASSERT(array[i] == static_cast<IntType>(i),
+ "Values should equal to the index");
+ }
+}
+
+template <typename IntType>
+static void TestSingleParamReverseRange(const IntType aN) {
+ IntType array[kArraySize];
+ IntType* ptr = array;
+ for (auto i : Reversed(IntegerRange(aN))) {
+ static_assert(std::is_same_v<decltype(i), IntType>,
+ "type of the loop var and the param should be the same");
+ *ptr++ = i;
+ }
+
+ MOZ_RELEASE_ASSERT(ptr - array == static_cast<ptrdiff_t>(aN),
+ "Should iterates N items");
+ for (size_t i = 0; i < static_cast<size_t>(aN); i++) {
+ MOZ_RELEASE_ASSERT(array[i] == static_cast<IntType>(aN - i - 1),
+ "Values should be the reverse of their index");
+ }
+}
+
+template <typename IntType>
+static void TestSingleParamIntegerRange() {
+ const auto kN = GenerateNumber<IntType>();
+ TestSingleParamRange<IntType>(0);
+ TestSingleParamReverseRange<IntType>(0);
+ TestSingleParamRange<IntType>(kN);
+ TestSingleParamReverseRange<IntType>(kN);
+}
+
+template <typename IntType1, typename IntType2>
+static void TestDoubleParamRange(const IntType1 aBegin, const IntType2 aEnd) {
+ IntType2 array[kArraySize];
+ IntType2* ptr = array;
+ for (auto i : IntegerRange(aBegin, aEnd)) {
+ static_assert(std::is_same_v<decltype(i), IntType2>,
+ "type of the loop var "
+ "should be same as that of the second param");
+ *ptr++ = i;
+ }
+
+ MOZ_RELEASE_ASSERT(ptr - array == static_cast<ptrdiff_t>(aEnd - aBegin),
+ "Should iterates (aEnd - aBegin) times");
+ for (size_t i = 0; i < static_cast<size_t>(aEnd - aBegin); i++) {
+ MOZ_RELEASE_ASSERT(array[i] == static_cast<IntType2>(aBegin + i),
+ "Should iterate integers in [aBegin, aEnd) in order");
+ }
+}
+
+template <typename IntType1, typename IntType2>
+static void TestDoubleParamReverseRange(const IntType1 aBegin,
+ const IntType2 aEnd) {
+ IntType2 array[kArraySize];
+ IntType2* ptr = array;
+ for (auto i : Reversed(IntegerRange(aBegin, aEnd))) {
+ static_assert(std::is_same_v<decltype(i), IntType2>,
+ "type of the loop var "
+ "should be same as that of the second param");
+ *ptr++ = i;
+ }
+
+ MOZ_RELEASE_ASSERT(ptr - array == static_cast<ptrdiff_t>(aEnd - aBegin),
+ "Should iterates (aEnd - aBegin) times");
+ for (size_t i = 0; i < static_cast<size_t>(aEnd - aBegin); i++) {
+ MOZ_RELEASE_ASSERT(
+ array[i] == static_cast<IntType2>(aEnd - i - 1),
+ "Should iterate integers in [aBegin, aEnd) in reverse order");
+ }
+}
+
+template <typename IntType1, typename IntType2>
+static void TestDoubleParamIntegerRange() {
+ const auto kStart = GenerateNumber<IntType1>();
+ const auto kEnd = static_cast<IntType2>(kStart + GenerateNumber<IntType2>());
+ TestDoubleParamRange(kStart, static_cast<IntType2>(kStart));
+ TestDoubleParamReverseRange(kStart, static_cast<IntType2>(kStart));
+ TestDoubleParamRange(kStart, kEnd);
+ TestDoubleParamReverseRange(kStart, kEnd);
+}
+
+int main() {
+ TestSingleParamIntegerRange<int8_t>();
+ TestSingleParamIntegerRange<int16_t>();
+ TestSingleParamIntegerRange<int32_t>();
+ TestSingleParamIntegerRange<int64_t>();
+
+ TestSingleParamIntegerRange<uint8_t>();
+ TestSingleParamIntegerRange<uint16_t>();
+ TestSingleParamIntegerRange<uint32_t>();
+ TestSingleParamIntegerRange<uint64_t>();
+
+ TestDoubleParamIntegerRange<int8_t, int8_t>();
+ TestDoubleParamIntegerRange<int16_t, int16_t>();
+ TestDoubleParamIntegerRange<int32_t, int32_t>();
+ TestDoubleParamIntegerRange<int64_t, int64_t>();
+
+ TestDoubleParamIntegerRange<uint8_t, uint8_t>();
+ TestDoubleParamIntegerRange<uint16_t, uint16_t>();
+ TestDoubleParamIntegerRange<uint32_t, uint32_t>();
+ TestDoubleParamIntegerRange<uint64_t, uint64_t>();
+
+ TestDoubleParamIntegerRange<int8_t, int16_t>();
+ TestDoubleParamIntegerRange<int16_t, int32_t>();
+ TestDoubleParamIntegerRange<int32_t, int64_t>();
+ TestDoubleParamIntegerRange<int64_t, int8_t>();
+
+ TestDoubleParamIntegerRange<uint8_t, uint64_t>();
+ TestDoubleParamIntegerRange<uint16_t, uint8_t>();
+ TestDoubleParamIntegerRange<uint32_t, uint16_t>();
+ TestDoubleParamIntegerRange<uint64_t, uint32_t>();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestJSONWriter.cpp b/mfbt/tests/TestJSONWriter.cpp
new file mode 100644
index 0000000000..a90732396f
--- /dev/null
+++ b/mfbt/tests/TestJSONWriter.cpp
@@ -0,0 +1,657 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/JSONWriter.h"
+#include "mozilla/UniquePtr.h"
+#include <stdio.h>
+#include <string>
+#include <string.h>
+
+using mozilla::JSONWriteFunc;
+using mozilla::JSONWriter;
+using mozilla::MakeStringSpan;
+using mozilla::MakeUnique;
+using mozilla::Span;
+
+// This writes all the output into a big buffer.
+struct StringWriteFunc final : public JSONWriteFunc {
+ std::string mString;
+
+ void Write(const mozilla::Span<const char>& aStr) final {
+ mString.append(aStr.data(), aStr.size());
+ }
+};
+
+void Check(JSONWriter& aWriter, const char* aExpected) {
+ JSONWriteFunc& func = aWriter.WriteFunc();
+ const std::string& actual = static_cast<StringWriteFunc&>(func).mString;
+ if (strcmp(aExpected, actual.c_str()) != 0) {
+ fprintf(stderr,
+ "---- EXPECTED ----\n<<<%s>>>\n"
+ "---- ACTUAL ----\n<<<%s>>>\n",
+ aExpected, actual.c_str());
+ MOZ_RELEASE_ASSERT(false, "expected and actual output don't match");
+ }
+}
+
+// Note: to convert actual output into |expected| strings that C++ can handle,
+// apply the following substitutions, in order, to each line.
+// - s/\\/\\\\/g # escapes backslashes
+// - s/"/\\"/g # escapes quotes
+// - s/$/\\n\\/ # adds a newline and string continuation char to each line
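+//
+// For example, the raw output line
+//   "string2": "1234",
+// becomes, after those three substitutions, the |expected| source line
+//   \"string2\": \"1234\",\n\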
+
+void TestBasicProperties() {
+ const char* expected =
+ "\
+{\n\
+ \"null\": null,\n\
+ \"bool1\": true,\n\
+ \"bool2\": false,\n\
+ \"int1\": 123,\n\
+ \"int2\": -123,\n\
+ \"int3\": -123456789000,\n\
+ \"double1\": 1.2345,\n\
+ \"double2\": -3,\n\
+ \"double3\": 1e-7,\n\
+ \"double4\": 1.1111111111111111e+21,\n\
+ \"string1\": \"\",\n\
+ \"string2\": \"1234\",\n\
+ \"string3\": \"hello\",\n\
+ \"string4\": \"\\\" \\\\ \\u0007 \\b \\t \\n \\u000b \\f \\r\",\n\
+ \"string5\": \"hello\",\n\
+ \"string6\": \"\\\" \\\\ \\u0007 \\b \\t \",\n\
+ \"span1\": \"buf1\",\n\
+ \"span2\": \"buf2\",\n\
+ \"span3\": \"buf3\",\n\
+ \"span4\": \"buf\\n4\",\n\
+ \"span5\": \"MakeStringSpan\",\n\
+ \"len 0 array, multi-line\": [\n\
+ ],\n\
+ \"len 0 array, single-line\": [],\n\
+ \"len 1 array\": [\n\
+ 1\n\
+ ],\n\
+ \"len 5 array, multi-line\": [\n\
+ 1,\n\
+ 2,\n\
+ 3,\n\
+ 4,\n\
+ 5\n\
+ ],\n\
+ \"len 3 array, single-line\": [1, [{}, 2, []], 3],\n\
+ \"len 0 object, multi-line\": {\n\
+ },\n\
+ \"len 0 object, single-line\": {},\n\
+ \"len 1 object\": {\n\
+ \"one\": 1\n\
+ },\n\
+ \"len 5 object\": {\n\
+ \"one\": 1,\n\
+ \"two\": 2,\n\
+ \"three\": 3,\n\
+ \"four\": 4,\n\
+ \"five\": 5\n\
+ },\n\
+ \"len 3 object, single-line\": {\"a\": 1, \"b\": [{}, 2, []], \"c\": 3}\n\
+}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ w.Start();
+ {
+ w.NullProperty("null");
+
+ w.BoolProperty("bool1", true);
+ w.BoolProperty("bool2", false);
+
+ w.IntProperty("int1", 123);
+ w.IntProperty("int2", -0x7b);
+ w.IntProperty("int3", -123456789000ll);
+
+ w.DoubleProperty("double1", 1.2345);
+ w.DoubleProperty("double2", -3);
+ w.DoubleProperty("double3", 1e-7);
+ w.DoubleProperty("double4", 1.1111111111111111e+21);
+
+ w.StringProperty("string1", "");
+ w.StringProperty("string2", "1234");
+ w.StringProperty("string3", "hello");
+ w.StringProperty("string4", "\" \\ \a \b \t \n \v \f \r");
+ w.StringProperty("string5", "hello\0cut"); // '\0' marks the end.
+ w.StringProperty("string6", "\" \\ \a \b \t \0 \n \v \f \r");
+
+ const char buf1[] = {'b', 'u', 'f', '1'};
+ w.StringProperty("span1", buf1);
+ const char buf2[] = {'b', 'u', 'f', '2', '\0'};
+ w.StringProperty("span2", buf2);
+ const char buf3[] = {'b', 'u', 'f', '3', '\0', '?'};
+ w.StringProperty("span3", buf3);
+ const char buf4[] = {'b', 'u', 'f', '\n', '4', '\0', '?'};
+ w.StringProperty("span4", buf4);
+ w.StringProperty("span5", MakeStringSpan("MakeStringSpan"));
+
+ w.StartArrayProperty("len 0 array, multi-line", w.MultiLineStyle);
+ w.EndArray();
+
+ w.StartArrayProperty("len 0 array, single-line", w.SingleLineStyle);
+ w.EndArray();
+
+ w.StartArrayProperty("len 1 array");
+ { w.IntElement(1); }
+ w.EndArray();
+
+ w.StartArrayProperty("len 5 array, multi-line", w.MultiLineStyle);
+ {
+ w.IntElement(1);
+ w.IntElement(2);
+ w.IntElement(3);
+ w.IntElement(4);
+ w.IntElement(5);
+ }
+ w.EndArray();
+
+ w.StartArrayProperty("len 3 array, single-line", w.SingleLineStyle);
+ {
+ w.IntElement(1);
+ w.StartArrayElement();
+ {
+ w.StartObjectElement(w.SingleLineStyle);
+ w.EndObject();
+
+ w.IntElement(2);
+
+ w.StartArrayElement(w.MultiLineStyle); // style overridden from above
+ w.EndArray();
+ }
+ w.EndArray();
+ w.IntElement(3);
+ }
+ w.EndArray();
+
+ w.StartObjectProperty("len 0 object, multi-line");
+ w.EndObject();
+
+ w.StartObjectProperty("len 0 object, single-line", w.SingleLineStyle);
+ w.EndObject();
+
+ w.StartObjectProperty("len 1 object");
+ { w.IntProperty("one", 1); }
+ w.EndObject();
+
+ w.StartObjectProperty("len 5 object");
+ {
+ w.IntProperty("one", 1);
+ w.IntProperty("two", 2);
+ w.IntProperty("three", 3);
+ w.IntProperty("four", 4);
+ w.IntProperty("five", 5);
+ }
+ w.EndObject();
+
+ w.StartObjectProperty("len 3 object, single-line", w.SingleLineStyle);
+ {
+ w.IntProperty("a", 1);
+ w.StartArrayProperty("b");
+ {
+ w.StartObjectElement();
+ w.EndObject();
+
+ w.IntElement(2);
+
+ w.StartArrayElement(w.SingleLineStyle);
+ w.EndArray();
+ }
+ w.EndArray();
+ w.IntProperty("c", 3);
+ }
+ w.EndObject();
+ }
+ w.End();
+
+ Check(w, expected);
+}
+
+void TestBasicElements() {
+ const char* expected =
+ "\
+{\n\
+ \"array\": [\n\
+ null,\n\
+ true,\n\
+ false,\n\
+ 123,\n\
+ -123,\n\
+ -123456789000,\n\
+ 1.2345,\n\
+ -3,\n\
+ 1e-7,\n\
+ 1.1111111111111111e+21,\n\
+ \"\",\n\
+ \"1234\",\n\
+ \"hello\",\n\
+ \"\\\" \\\\ \\u0007 \\b \\t \\n \\u000b \\f \\r\",\n\
+ \"hello\",\n\
+ \"\\\" \\\\ \\u0007 \\b \\t \",\n\
+ \"buf1\",\n\
+ \"buf2\",\n\
+ \"buf3\",\n\
+ \"buf\\n4\",\n\
+ \"MakeStringSpan\",\n\
+ [\n\
+ ],\n\
+ [],\n\
+ [\n\
+ 1\n\
+ ],\n\
+ [\n\
+ 1,\n\
+ 2,\n\
+ 3,\n\
+ 4,\n\
+ 5\n\
+ ],\n\
+ [1, [{}, 2, []], 3],\n\
+ {\n\
+ },\n\
+ {},\n\
+ {\n\
+ \"one\": 1\n\
+ },\n\
+ {\n\
+ \"one\": 1,\n\
+ \"two\": 2,\n\
+ \"three\": 3,\n\
+ \"four\": 4,\n\
+ \"five\": 5\n\
+ },\n\
+ {\"a\": 1, \"b\": [{}, 2, []], \"c\": 3}\n\
+ ]\n\
+}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ w.Start();
+ w.StartArrayProperty("array");
+ {
+ w.NullElement();
+
+ w.BoolElement(true);
+ w.BoolElement(false);
+
+ w.IntElement(123);
+ w.IntElement(-0x7b);
+ w.IntElement(-123456789000ll);
+
+ w.DoubleElement(1.2345);
+ w.DoubleElement(-3);
+ w.DoubleElement(1e-7);
+ w.DoubleElement(1.1111111111111111e+21);
+
+ w.StringElement("");
+ w.StringElement("1234");
+ w.StringElement("hello");
+ w.StringElement("\" \\ \a \b \t \n \v \f \r");
+ w.StringElement("hello\0cut"); // '\0' marks the end.
+ w.StringElement("\" \\ \a \b \t \0 \n \v \f \r");
+
+ const char buf1[] = {'b', 'u', 'f', '1'};
+ w.StringElement(buf1);
+ const char buf2[] = {'b', 'u', 'f', '2', '\0'};
+ w.StringElement(buf2);
+ const char buf3[] = {'b', 'u', 'f', '3', '\0', '?'};
+ w.StringElement(buf3);
+ const char buf4[] = {'b', 'u', 'f', '\n', '4', '\0', '?'};
+ w.StringElement(buf4);
+ w.StringElement(MakeStringSpan("MakeStringSpan"));
+
+ w.StartArrayElement();
+ w.EndArray();
+
+ w.StartArrayElement(w.SingleLineStyle);
+ w.EndArray();
+
+ w.StartArrayElement();
+ { w.IntElement(1); }
+ w.EndArray();
+
+ w.StartArrayElement();
+ {
+ w.IntElement(1);
+ w.IntElement(2);
+ w.IntElement(3);
+ w.IntElement(4);
+ w.IntElement(5);
+ }
+ w.EndArray();
+
+ w.StartArrayElement(w.SingleLineStyle);
+ {
+ w.IntElement(1);
+ w.StartArrayElement();
+ {
+ w.StartObjectElement(w.SingleLineStyle);
+ w.EndObject();
+
+ w.IntElement(2);
+
+ w.StartArrayElement(w.MultiLineStyle); // style overridden from above
+ w.EndArray();
+ }
+ w.EndArray();
+ w.IntElement(3);
+ }
+ w.EndArray();
+
+ w.StartObjectElement();
+ w.EndObject();
+
+ w.StartObjectElement(w.SingleLineStyle);
+ w.EndObject();
+
+ w.StartObjectElement();
+ { w.IntProperty("one", 1); }
+ w.EndObject();
+
+ w.StartObjectElement();
+ {
+ w.IntProperty("one", 1);
+ w.IntProperty("two", 2);
+ w.IntProperty("three", 3);
+ w.IntProperty("four", 4);
+ w.IntProperty("five", 5);
+ }
+ w.EndObject();
+
+ w.StartObjectElement(w.SingleLineStyle);
+ {
+ w.IntProperty("a", 1);
+ w.StartArrayProperty("b");
+ {
+ w.StartObjectElement();
+ w.EndObject();
+
+ w.IntElement(2);
+
+ w.StartArrayElement(w.SingleLineStyle);
+ w.EndArray();
+ }
+ w.EndArray();
+ w.IntProperty("c", 3);
+ }
+ w.EndObject();
+ }
+ w.EndArray();
+ w.End();
+
+ Check(w, expected);
+}
+
+void TestOneLineObject() {
+ const char* expected =
+ "\
+{\"i\": 1, \"array\": [null, [{}], {\"o\": {}}, \"s\"], \"d\": 3.33}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ w.Start(w.SingleLineStyle);
+
+ w.IntProperty("i", 1);
+
+ w.StartArrayProperty("array");
+ {
+ w.NullElement();
+
+ w.StartArrayElement(w.MultiLineStyle); // style overridden from above
+ {
+ w.StartObjectElement();
+ w.EndObject();
+ }
+ w.EndArray();
+
+ w.StartObjectElement();
+ {
+ w.StartObjectProperty("o");
+ w.EndObject();
+ }
+ w.EndObject();
+
+ w.StringElement("s");
+ }
+ w.EndArray();
+
+ w.DoubleProperty("d", 3.33);
+
+ w.End();
+
+ Check(w, expected);
+}
+
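+// Here the writer itself is constructed with SingleLineStyle; the "style
+// overridden from above" comments below show that an enclosing single-line
+// style wins over any MultiLineStyle requested further down the nesting.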
+void TestOneLineJson() {
+ const char* expected =
+ "\
+{\"i\":1,\"array\":[null,[{}],{\"o\":{}},\"s\"],\"d\":3.33}\
+";
+
+ StringWriteFunc func;
+ JSONWriter w(func, JSONWriter::SingleLineStyle);
+
+ w.Start(w.MultiLineStyle); // style overridden from above
+
+ w.IntProperty("i", 1);
+
+ w.StartArrayProperty("array");
+ {
+ w.NullElement();
+
+ w.StartArrayElement(w.MultiLineStyle); // style overridden from above
+ {
+ w.StartObjectElement();
+ w.EndObject();
+ }
+ w.EndArray();
+
+ w.StartObjectElement();
+ {
+ w.StartObjectProperty("o");
+ w.EndObject();
+ }
+ w.EndObject();
+
+ w.StringElement("s");
+ }
+ w.EndArray();
+
+ w.DoubleProperty("d", 3.33);
+
+ w.End(); // No newline in this case.
+
+ Check(w, expected);
+}
+
+void TestStringEscaping() {
+ // This test uses hexadecimal character escapes because UTF-8 literals cause
+ // problems for some compilers (see bug 1069726).
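+ // For instance, "\xE4\xBD\xA0\xE5\xA5\xBD" below is the UTF-8 encoding of
+ // the two CJK characters U+4F60 U+597D, which the writer emits unescaped.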
+ const char* expected =
+ "\
+{\n\
+ \"ascii\": \"\x7F~}|{zyxwvutsrqponmlkjihgfedcba`_^]\\\\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#\\\"! \\u001f\\u001e\\u001d\\u001c\\u001b\\u001a\\u0019\\u0018\\u0017\\u0016\\u0015\\u0014\\u0013\\u0012\\u0011\\u0010\\u000f\\u000e\\r\\f\\u000b\\n\\t\\b\\u0007\\u0006\\u0005\\u0004\\u0003\\u0002\\u0001\",\n\
+ \"\xD9\x85\xD8\xB1\xD8\xAD\xD8\xA8\xD8\xA7 \xD9\x87\xD9\x86\xD8\xA7\xD9\x83\": true,\n\
+ \"\xD5\xA2\xD5\xA1\xD6\x80\xD5\xA5\xD6\x82 \xD5\xB9\xD5\xAF\xD5\xA1\": -123,\n\
+ \"\xE4\xBD\xA0\xE5\xA5\xBD\": 1.234,\n\
+ \"\xCE\xB3\xCE\xB5\xCE\xB9\xCE\xB1 \xCE\xB5\xCE\xBA\xCE\xB5\xCE\xAF\": \"\xD8\xB3\xD9\x84\xD8\xA7\xD9\x85\",\n\
+ \"hall\xC3\xB3 \xC3\xBE"
+ "arna\": 4660,\n\
+ \"\xE3\x81\x93\xE3\x82\x93\xE3\x81\xAB\xE3\x81\xA1\xE3\x81\xAF\": {\n\
+ \"\xD0\xBF\xD1\x80\xD0\xB8\xD0\xB2\xD0\xB5\xD1\x82\": [\n\
+ ]\n\
+ }\n\
+}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ // Test the string escaping behaviour.
+ w.Start();
+ {
+ // Test all 127 non-NUL ASCII values. Fill the buffer in reverse order so
+ // that the 0 at the end serves as the terminating NUL.
+ char buf[128];
+ for (int i = 0; i < 128; i++) {
+ buf[i] = 127 - i;
+ }
+ w.StringProperty("ascii", buf);
+
+ // Test lots of unicode stuff. Note that this file is encoded as UTF-8.
+ w.BoolProperty(
+ "\xD9\x85\xD8\xB1\xD8\xAD\xD8\xA8\xD8\xA7 "
+ "\xD9\x87\xD9\x86\xD8\xA7\xD9\x83",
+ true);
+ w.IntProperty(
+ "\xD5\xA2\xD5\xA1\xD6\x80\xD5\xA5\xD6\x82 \xD5\xB9\xD5\xAF\xD5\xA1",
+ -123);
+ w.DoubleProperty("\xE4\xBD\xA0\xE5\xA5\xBD", 1.234);
+ w.StringProperty(
+ "\xCE\xB3\xCE\xB5\xCE\xB9\xCE\xB1 \xCE\xB5\xCE\xBA\xCE\xB5\xCE\xAF",
+ "\xD8\xB3\xD9\x84\xD8\xA7\xD9\x85");
+ w.IntProperty(
+ "hall\xC3\xB3 \xC3\xBE"
+ "arna",
+ 0x1234);
+ w.StartObjectProperty(
+ "\xE3\x81\x93\xE3\x82\x93\xE3\x81\xAB\xE3\x81\xA1\xE3\x81\xAF");
+ {
+ w.StartArrayProperty("\xD0\xBF\xD1\x80\xD0\xB8\xD0\xB2\xD0\xB5\xD1\x82");
+ w.EndArray();
+ }
+ w.EndObject();
+ }
+ w.End();
+
+ Check(w, expected);
+}
+
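+// The loops below open ten alternating array/object levels and then close
+// them all; the hand-written |expected| string spells out the resulting
+// ten-level nesting that the writer must reproduce.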
+void TestDeepNesting() {
+ const char* expected =
+ "\
+{\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ \"a\": [\n\
+ {\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+ }\n\
+ ]\n\
+}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ w.Start();
+ {
+ static const int n = 10;
+ for (int i = 0; i < n; i++) {
+ w.StartArrayProperty("a");
+ w.StartObjectElement();
+ }
+ for (int i = 0; i < n; i++) {
+ w.EndObject();
+ w.EndArray();
+ }
+ }
+ w.End();
+
+ Check(w, expected);
+}
+
+void TestEscapedPropertyNames() {
+ const char* expected =
+ "\
+{\"i\\t\": 1, \"array\\t\": [null, [{}], {\"o\\t\": {}}, \"s\"], \"d\": 3.33}\n\
+";
+
+ JSONWriter w(MakeUnique<StringWriteFunc>());
+
+ w.Start(w.SingleLineStyle);
+
+ w.IntProperty("i\t\0cut", 1); // '\0' marks the end.
+
+ w.StartArrayProperty("array\t");
+ {
+ w.NullElement();
+
+ w.StartArrayElement(w.MultiLineStyle); // style overridden from above
+ {
+ w.StartObjectElement();
+ w.EndObject();
+ }
+ w.EndArray();
+
+ w.StartObjectElement();
+ {
+ w.StartObjectProperty("o\t");
+ w.EndObject();
+ }
+ w.EndObject();
+
+ w.StringElement("s");
+ }
+ w.EndArray();
+
+ w.DoubleProperty("d\0\t", 3.33);
+
+ w.End();
+
+ Check(w, expected);
+}
+
+int main(void) {
+ TestBasicProperties();
+ TestBasicElements();
+ TestOneLineObject();
+ TestOneLineJson();
+ TestStringEscaping();
+ TestDeepNesting();
+ TestEscapedPropertyNames();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestLinkedList.cpp b/mfbt/tests/TestLinkedList.cpp
new file mode 100644
index 0000000000..bb1ffe08c0
--- /dev/null
+++ b/mfbt/tests/TestLinkedList.cpp
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/LinkedList.h"
+
+using mozilla::AutoCleanLinkedList;
+using mozilla::LinkedList;
+using mozilla::LinkedListElement;
+
+struct SomeClass : public LinkedListElement<SomeClass> {
+ unsigned int mValue;
+ explicit SomeClass(int aValue = 0) : mValue(aValue) {}
+ SomeClass(SomeClass&&) = default;
+ SomeClass& operator=(SomeClass&&) = default;
+ void incr() { ++mValue; }
+};
+
+template <size_t N>
+static void CheckListValues(LinkedList<SomeClass>& list,
+ unsigned int (&values)[N]) {
+ size_t count = 0;
+ for (SomeClass* x : list) {
+ MOZ_RELEASE_ASSERT(x->mValue == values[count]);
+ ++count;
+ }
+ MOZ_RELEASE_ASSERT(count == N);
+}
+
+static void TestList() {
+ LinkedList<SomeClass> list;
+
+ SomeClass one(1), two(2), three(3);
+
+ MOZ_RELEASE_ASSERT(list.isEmpty());
+ MOZ_RELEASE_ASSERT(list.length() == 0);
+ MOZ_RELEASE_ASSERT(!list.getFirst());
+ MOZ_RELEASE_ASSERT(!list.getLast());
+ MOZ_RELEASE_ASSERT(!list.popFirst());
+ MOZ_RELEASE_ASSERT(!list.popLast());
+
+ for (SomeClass* x : list) {
+ MOZ_RELEASE_ASSERT(x);
+ MOZ_RELEASE_ASSERT(false);
+ }
+
+ list.insertFront(&one);
+ {
+ unsigned int check[]{1};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(one.isInList());
+ MOZ_RELEASE_ASSERT(!two.isInList());
+ MOZ_RELEASE_ASSERT(!three.isInList());
+
+ MOZ_RELEASE_ASSERT(list.contains(&one));
+ MOZ_RELEASE_ASSERT(!list.contains(&two));
+ MOZ_RELEASE_ASSERT(!list.contains(&three));
+
+ MOZ_RELEASE_ASSERT(!list.isEmpty());
+ MOZ_RELEASE_ASSERT(list.length() == 1);
+ MOZ_RELEASE_ASSERT(list.getFirst()->mValue == 1);
+ MOZ_RELEASE_ASSERT(list.getLast()->mValue == 1);
+
+ list.insertFront(&two);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(list.length() == 2);
+ MOZ_RELEASE_ASSERT(list.getFirst()->mValue == 2);
+ MOZ_RELEASE_ASSERT(list.getLast()->mValue == 1);
+
+ list.insertBack(&three);
+ {
+ unsigned int check[]{2, 1, 3};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(list.length() == 3);
+ MOZ_RELEASE_ASSERT(list.getFirst()->mValue == 2);
+ MOZ_RELEASE_ASSERT(list.getLast()->mValue == 3);
+
+ one.removeFrom(list);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ three.setPrevious(&one);
+ {
+ unsigned int check[]{2, 1, 3};
+ CheckListValues(list, check);
+ }
+
+ three.removeFrom(list);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ two.setPrevious(&three);
+ {
+ unsigned int check[]{3, 2, 1};
+ CheckListValues(list, check);
+ }
+
+ three.removeFrom(list);
+ {
+ unsigned int check[]{2, 1};
+ CheckListValues(list, check);
+ }
+
+ two.setNext(&three);
+ {
+ unsigned int check[]{2, 3, 1};
+ CheckListValues(list, check);
+ }
+
+ one.remove();
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ two.remove();
+ {
+ unsigned int check[]{3};
+ CheckListValues(list, check);
+ }
+
+ three.setPrevious(&two);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ three.remove();
+ {
+ unsigned int check[]{2};
+ CheckListValues(list, check);
+ }
+
+ two.remove();
+
+ list.insertBack(&three);
+ {
+ unsigned int check[]{3};
+ CheckListValues(list, check);
+ }
+
+ list.insertFront(&two);
+ {
+ unsigned int check[]{2, 3};
+ CheckListValues(list, check);
+ }
+
+ for (SomeClass* x : list) {
+ x->incr();
+ }
+
+ MOZ_RELEASE_ASSERT(list.length() == 2);
+ MOZ_RELEASE_ASSERT(list.getFirst() == &two);
+ MOZ_RELEASE_ASSERT(list.getLast() == &three);
+ MOZ_RELEASE_ASSERT(list.getFirst()->mValue == 3);
+ MOZ_RELEASE_ASSERT(list.getLast()->mValue == 4);
+
+ const LinkedList<SomeClass>& constList = list;
+ for (const SomeClass* x : constList) {
+ MOZ_RELEASE_ASSERT(x);
+ }
+}
+
+static void TestExtendLists() {
+ AutoCleanLinkedList<SomeClass> list1, list2;
+
+ constexpr unsigned int N = 5;
+ for (unsigned int i = 0; i < N; ++i) {
+ list1.insertBack(new SomeClass(static_cast<int>(i)));
+
+ AutoCleanLinkedList<SomeClass> singleItemList;
+ singleItemList.insertFront(new SomeClass(static_cast<int>(i + N)));
+ list2.extendBack(std::move(singleItemList));
+ }
+ // list1 = { 0, 1, 2, 3, 4 }
+ // list2 = { 5, 6, 7, 8, 9 }
+
+ list1.extendBack(AutoCleanLinkedList<SomeClass>());
+ list1.extendBack(std::move(list2));
+
+ // Make sure the line above has properly emptied |list2|.
+ MOZ_RELEASE_ASSERT(list2.isEmpty()); // NOLINT(bugprone-use-after-move)
+
+ size_t i = 0;
+ for (SomeClass* x : list1) {
+ MOZ_RELEASE_ASSERT(x->mValue == i++);
+ }
+ MOZ_RELEASE_ASSERT(i == N * 2);
+}
+
+void TestSplice() {
+ AutoCleanLinkedList<SomeClass> list1, list2;
+ for (unsigned int i = 1; i <= 5; ++i) {
+ list1.insertBack(new SomeClass(static_cast<int>(i)));
+
+ AutoCleanLinkedList<SomeClass> singleItemList;
+ singleItemList.insertFront(new SomeClass(static_cast<int>(i * 10)));
+ list2.extendBack(std::move(singleItemList));
+ }
+ // list1 = { 1, 2, 3, 4, 5 }
+ // list2 = { 10, 20, 30, 40, 50 }
+
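+ // As exercised below, splice(aDestinationPos, aSource, aSourcePos,
+ // aSourceLen) moves up to aSourceLen elements of aSource, starting at
+ // aSourcePos, into |this| at aDestinationPos; out-of-range positions and
+ // lengths are clamped to the ends of the respective lists.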
+ list1.splice(2, list2, 0, 5);
+
+ MOZ_RELEASE_ASSERT(list2.isEmpty());
+ unsigned int kExpected1[]{1, 2, 10, 20, 30, 40, 50, 3, 4, 5};
+ CheckListValues(list1, kExpected1);
+
+ // Since aSourceLen=100 exceeds list1's end, the function transfers
+ // three items [3, 4, 5].
+ list2.splice(0, list1, 7, 100);
+
+ unsigned int kExpected2[]{1, 2, 10, 20, 30, 40, 50};
+ unsigned int kExpected3[]{3, 4, 5};
+ CheckListValues(list1, kExpected2);
+ CheckListValues(list2, kExpected3);
+
+ // Since aDestinationPos=100 exceeds list2's end, the function transfers
+ // items to list2's end.
+ list2.splice(100, list1, 1, 1);
+
+ unsigned int kExpected4[]{1, 10, 20, 30, 40, 50};
+ unsigned int kExpected5[]{3, 4, 5, 2};
+ CheckListValues(list1, kExpected4);
+ CheckListValues(list2, kExpected5);
+}
+
+static void TestMove() {
+ auto MakeSomeClass = [](unsigned int aValue) -> SomeClass {
+ return SomeClass(aValue);
+ };
+
+ LinkedList<SomeClass> list1;
+
+ // Test move constructor for LinkedListElement.
+ SomeClass c1(MakeSomeClass(1));
+ list1.insertBack(&c1);
+
+ // Test move assignment for LinkedListElement from an element not in a
+ // list.
+ SomeClass c2;
+ c2 = MakeSomeClass(2);
+ list1.insertBack(&c2);
+
+ // Test move assignment of LinkedListElement from an element already in a
+ // list.
+ SomeClass c3;
+ c3 = std::move(c2);
+ MOZ_RELEASE_ASSERT(!c2.isInList());
+ MOZ_RELEASE_ASSERT(c3.isInList());
+
+ // Test move constructor for LinkedList.
+ LinkedList<SomeClass> list2(std::move(list1));
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(list2, check);
+ }
+ MOZ_RELEASE_ASSERT(list1.isEmpty());
+
+ // Test move assignment for LinkedList.
+ LinkedList<SomeClass> list3;
+ list3 = std::move(list2);
+ {
+ unsigned int check[]{1, 2};
+ CheckListValues(list3, check);
+ }
+ MOZ_RELEASE_ASSERT(list2.isEmpty());
+
+ list3.clear();
+}
+
+static void TestRemoveAndGet() {
+ LinkedList<SomeClass> list;
+
+ SomeClass one(1), two(2), three(3);
+ list.insertBack(&one);
+ list.insertBack(&two);
+ list.insertBack(&three);
+ {
+ unsigned int check[]{1, 2, 3};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(two.removeAndGetNext() == &three);
+ {
+ unsigned int check[]{1, 3};
+ CheckListValues(list, check);
+ }
+
+ MOZ_RELEASE_ASSERT(three.removeAndGetPrevious() == &one);
+ {
+ unsigned int check[]{1};
+ CheckListValues(list, check);
+ }
+}
+
+struct PrivateClass : private LinkedListElement<PrivateClass> {
+ friend class mozilla::LinkedList<PrivateClass>;
+ friend class mozilla::LinkedListElement<PrivateClass>;
+};
+
+static void TestPrivate() {
+ LinkedList<PrivateClass> list;
+ PrivateClass one, two;
+ list.insertBack(&one);
+ list.insertBack(&two);
+
+ size_t count = 0;
+ for (PrivateClass* p : list) {
+ MOZ_RELEASE_ASSERT(p, "cannot have null elements in list");
+ count++;
+ }
+ MOZ_RELEASE_ASSERT(count == 2);
+}
+
+struct CountedClass : public LinkedListElement<RefPtr<CountedClass>> {
+ int mCount;
+ void AddRef() { mCount++; }
+ void Release() { mCount--; }
+
+ CountedClass() : mCount(0) {}
+ ~CountedClass() { MOZ_RELEASE_ASSERT(mCount == 0); }
+};
+
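+// Refcount expectations below: the list holds one reference per linked
+// element (mCount == 1); a live RefPtr in the loop body or a local adds a
+// second (mCount == 2); removing an element and dropping every RefPtr brings
+// the count back to zero, so the destructor's assertion holds.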
+static void TestRefPtrList() {
+ LinkedList<RefPtr<CountedClass>> list;
+ CountedClass* elt1 = new CountedClass;
+ CountedClass* elt2 = new CountedClass;
+
+ list.insertBack(elt1);
+ list.insertBack(elt2);
+
+ MOZ_RELEASE_ASSERT(elt1->mCount == 1);
+ MOZ_RELEASE_ASSERT(elt2->mCount == 1);
+
+ for (RefPtr<CountedClass> p : list) {
+ MOZ_RELEASE_ASSERT(p->mCount == 2);
+ }
+
+ RefPtr<CountedClass> ptr = list.getFirst();
+ while (ptr) {
+ MOZ_RELEASE_ASSERT(ptr->mCount == 2);
+ RefPtr<CountedClass> next = ptr->getNext();
+ ptr->remove();
+ ptr = std::move(next);
+ }
+ ptr = nullptr;
+
+ MOZ_RELEASE_ASSERT(elt1->mCount == 0);
+ MOZ_RELEASE_ASSERT(elt2->mCount == 0);
+
+ list.insertBack(elt1);
+ elt1->setNext(elt2);
+
+ MOZ_RELEASE_ASSERT(elt1->mCount == 1);
+ MOZ_RELEASE_ASSERT(elt2->mCount == 1);
+
+ RefPtr<CountedClass> first = list.popFirst();
+
+ MOZ_RELEASE_ASSERT(elt1->mCount == 1);
+ MOZ_RELEASE_ASSERT(elt2->mCount == 1);
+
+ RefPtr<CountedClass> second = list.popFirst();
+
+ MOZ_RELEASE_ASSERT(elt1->mCount == 1);
+ MOZ_RELEASE_ASSERT(elt2->mCount == 1);
+
+ first = second = nullptr;
+
+ delete elt1;
+ delete elt2;
+}
+
+int main() {
+ TestList();
+ TestExtendLists();
+ TestSplice();
+ TestPrivate();
+ TestMove();
+ TestRemoveAndGet();
+ TestRefPtrList();
+ return 0;
+}
diff --git a/mfbt/tests/TestMacroArgs.cpp b/mfbt/tests/TestMacroArgs.cpp
new file mode 100644
index 0000000000..097ac9efa3
--- /dev/null
+++ b/mfbt/tests/TestMacroArgs.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MacroArgs.h"
+
+static_assert(MOZ_ARG_COUNT() == 0, "");
+static_assert(MOZ_ARG_COUNT(a) == 1, "");
+static_assert(MOZ_ARG_COUNT(a, b) == 2, "");
+static_assert(MOZ_ARG_COUNT(a, b, c) == 3, "");
+
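+// MOZ_PASTE_PREFIX_AND_ARG_COUNT(p, ...) token-pastes |p| onto the argument
+// count, so a prefix of 100 with two arguments yields the literal 1002, and
+// an empty prefix yields the bare count.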
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(100) == 1000, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(100, a) == 1001, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(100, a, b) == 1002, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(100, a, b, c) == 1003, "");
+
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(, a, b, c) == 3, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(, a) == 1, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(, !a) == 1, "");
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(, (a, b)) == 1, "");
+
+static_assert(MOZ_PASTE_PREFIX_AND_ARG_COUNT(, MOZ_ARGS_AFTER_1(a, b, c)) == 2,
+ "MOZ_ARGS_AFTER_1(a, b, c) should expand to 'b, c'");
+static_assert(MOZ_ARGS_AFTER_2(a, b, 3) == 3,
+ "MOZ_ARGS_AFTER_2(a, b, 3) should expand to '3'");
+
+static_assert(MOZ_ARG_1(10, 20, 30, 40, 50, 60, 70, 80, 90) == 10, "");
+static_assert(MOZ_ARG_2(10, 20, 30, 40, 50, 60, 70, 80, 90) == 20, "");
+static_assert(MOZ_ARG_3(10, 20, 30, 40, 50, 60, 70, 80, 90) == 30, "");
+static_assert(MOZ_ARG_4(10, 20, 30, 40, 50, 60, 70, 80, 90) == 40, "");
+static_assert(MOZ_ARG_5(10, 20, 30, 40, 50, 60, 70, 80, 90) == 50, "");
+static_assert(MOZ_ARG_6(10, 20, 30, 40, 50, 60, 70, 80, 90) == 60, "");
+static_assert(MOZ_ARG_7(10, 20, 30, 40, 50, 60, 70, 80, 90) == 70, "");
+static_assert(MOZ_ARG_8(10, 20, 30, 40, 50, 60, 70, 80, 90) == 80, "");
+
+int main() { return 0; }
diff --git a/mfbt/tests/TestMacroForEach.cpp b/mfbt/tests/TestMacroForEach.cpp
new file mode 100644
index 0000000000..11b75be810
--- /dev/null
+++ b/mfbt/tests/TestMacroForEach.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MacroForEach.h"
+
+#define HELPER_IDENTITY(x) x
+#define HELPER_IDENTITY_PLUS(x) x +
+static_assert(MOZ_FOR_EACH(HELPER_IDENTITY_PLUS, (), (10)) 0 == 10, "");
+static_assert(MOZ_FOR_EACH(HELPER_IDENTITY_PLUS, (), (1, 1, 1)) 0 == 3, "");
+static_assert(MOZ_FOR_EACH_SEPARATED(HELPER_IDENTITY, (+), (), (10)) == 10, "");
+static_assert(MOZ_FOR_EACH_SEPARATED(HELPER_IDENTITY, (+), (), (1, 1, 1)) == 3,
+ "");
+
+#define HELPER_ONE_PLUS(x) HELPER_IDENTITY_PLUS(1)
+static_assert(MOZ_FOR_EACH(HELPER_ONE_PLUS, (), ()) 0 == 0, "");
+
+#define HELPER_DEFINE_VAR(x) const int test1_##x = x;
+MOZ_FOR_EACH(HELPER_DEFINE_VAR, (), (10, 20))
+static_assert(test1_10 == 10 && test1_20 == 20, "");
+
+#define HELPER_DEFINE_VAR2(k, x) const int test2_##x = k + x;
+MOZ_FOR_EACH(HELPER_DEFINE_VAR2, (5, ), (10, 20))
+static_assert(test2_10 == 15 && test2_20 == 25, "");
+
+#define HELPER_DEFINE_PARAM(t, n) t n
+constexpr int test(MOZ_FOR_EACH_SEPARATED(HELPER_DEFINE_PARAM, (, ), (int, ),
+ (a, b, c))) {
+ return a + b + c;
+}
+static_assert(test(1, 2, 3) == 6, "");
+
+int main() {
+#define HELPER_IDENTITY_COMMA(k1, k2, x) k1, k2, x,
+ const int a[] = {MOZ_FOR_EACH(HELPER_IDENTITY_COMMA, (1, 2, ), (10, 20, 30))};
+ MOZ_RELEASE_ASSERT(a[0] == 1 && a[1] == 2 && a[2] == 10 && a[3] == 1 &&
+ a[4] == 2 && a[5] == 20 && a[6] == 1 && a[7] == 2 &&
+ a[8] == 30,
+ "MOZ_FOR_EACH args enumerated in incorrect order");
+ return 0;
+}
diff --git a/mfbt/tests/TestMathAlgorithms.cpp b/mfbt/tests/TestMathAlgorithms.cpp
new file mode 100644
index 0000000000..a21b286d0f
--- /dev/null
+++ b/mfbt/tests/TestMathAlgorithms.cpp
@@ -0,0 +1,545 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <stdint.h>
+
+using mozilla::Clamp;
+using mozilla::IsPowerOfTwo;
+
+static void TestClamp() {
+ MOZ_RELEASE_ASSERT(Clamp(0, 0, 0) == 0);
+ MOZ_RELEASE_ASSERT(Clamp(1, 0, 0) == 0);
+ MOZ_RELEASE_ASSERT(Clamp(-1, 0, 0) == 0);
+
+ MOZ_RELEASE_ASSERT(Clamp(0, 1, 1) == 1);
+ MOZ_RELEASE_ASSERT(Clamp(0, 1, 2) == 1);
+
+ MOZ_RELEASE_ASSERT(Clamp(0, -1, -1) == -1);
+ MOZ_RELEASE_ASSERT(Clamp(0, -2, -1) == -1);
+
+ MOZ_RELEASE_ASSERT(Clamp(0, 1, 3) == 1);
+ MOZ_RELEASE_ASSERT(Clamp(1, 1, 3) == 1);
+ MOZ_RELEASE_ASSERT(Clamp(2, 1, 3) == 2);
+ MOZ_RELEASE_ASSERT(Clamp(3, 1, 3) == 3);
+ MOZ_RELEASE_ASSERT(Clamp(4, 1, 3) == 3);
+ MOZ_RELEASE_ASSERT(Clamp(5, 1, 3) == 3);
+
+ MOZ_RELEASE_ASSERT(Clamp<uint8_t>(UINT8_MAX, 0, UINT8_MAX) == UINT8_MAX);
+ MOZ_RELEASE_ASSERT(Clamp<uint8_t>(0, 0, UINT8_MAX) == 0);
+
+ MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MIN, INT8_MIN, INT8_MAX) == INT8_MIN);
+ MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MIN, 0, INT8_MAX) == 0);
+ MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MAX, INT8_MIN, INT8_MAX) == INT8_MAX);
+ MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MAX, INT8_MIN, 0) == 0);
+}
+
+static void TestIsPowerOfTwo() {
+ static_assert(!IsPowerOfTwo(0u), "0 isn't a power of two");
+ static_assert(IsPowerOfTwo(1u), "1 is a power of two");
+ static_assert(IsPowerOfTwo(2u), "2 is a power of two");
+ static_assert(!IsPowerOfTwo(3u), "3 isn't a power of two");
+ static_assert(IsPowerOfTwo(4u), "4 is a power of two");
+ static_assert(!IsPowerOfTwo(5u), "5 isn't a power of two");
+ static_assert(!IsPowerOfTwo(6u), "6 isn't a power of two");
+ static_assert(!IsPowerOfTwo(7u), "7 isn't a power of two");
+ static_assert(IsPowerOfTwo(8u), "8 is a power of two");
+ static_assert(!IsPowerOfTwo(9u), "9 isn't a power of two");
+
+ static_assert(!IsPowerOfTwo(uint8_t(UINT8_MAX / 2)),
+ "127, 0x7f isn't a power of two");
+ static_assert(IsPowerOfTwo(uint8_t(UINT8_MAX / 2 + 1)),
+ "128, 0x80 is a power of two");
+ static_assert(!IsPowerOfTwo(uint8_t(UINT8_MAX / 2 + 2)),
+ "129, 0x81 isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint8_t(UINT8_MAX - 1)),
+ "254, 0xfe isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint8_t(UINT8_MAX)),
+ "255, 0xff isn't a power of two");
+
+ static_assert(!IsPowerOfTwo(uint16_t(UINT16_MAX / 2)),
+ "0x7fff isn't a power of two");
+ static_assert(IsPowerOfTwo(uint16_t(UINT16_MAX / 2 + 1)),
+ "0x8000 is a power of two");
+ static_assert(!IsPowerOfTwo(uint16_t(UINT16_MAX / 2 + 2)),
+ "0x8001 isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint16_t(UINT16_MAX - 1)),
+ "0xfffe isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint16_t(UINT16_MAX)),
+ "0xffff isn't a power of two");
+
+ static_assert(!IsPowerOfTwo(uint32_t(UINT32_MAX / 2)),
+ "0x7fffffff isn't a power of two");
+ static_assert(IsPowerOfTwo(uint32_t(UINT32_MAX / 2 + 1)),
+ "0x80000000 is a power of two");
+ static_assert(!IsPowerOfTwo(uint32_t(UINT32_MAX / 2 + 2)),
+ "0x80000001 isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint32_t(UINT32_MAX - 1)),
+ "0xfffffffe isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint32_t(UINT32_MAX)),
+ "0xffffffff isn't a power of two");
+
+ static_assert(!IsPowerOfTwo(uint64_t(UINT64_MAX / 2)),
+ "0x7fffffffffffffff isn't a power of two");
+ static_assert(IsPowerOfTwo(uint64_t(UINT64_MAX / 2 + 1)),
+ "0x8000000000000000 is a power of two");
+ static_assert(!IsPowerOfTwo(uint64_t(UINT64_MAX / 2 + 2)),
+ "0x8000000000000001 isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint64_t(UINT64_MAX - 1)),
+ "0xfffffffffffffffe isn't a power of two");
+ static_assert(!IsPowerOfTwo(uint64_t(UINT64_MAX)),
+ "0xffffffffffffffff isn't a power of two");
+}
+
+void TestGCD() {
+ MOZ_ASSERT(mozilla::GCD(0, 0) == 0);
+
+ // clang-format off
+ // import random
+ // import math
+ //
+ // j = 0
+ // testcases = [
+ // { "name": "signed 64-bits integers", "upper": (2**63)-1, "suffix": "" },
+ // { "name": "unsigned 64-bits integers", "upper": (2**64)-1, "suffix": "u" },
+ // { "name": "signed 32-bits integers", "upper": (2**31)-1, "suffix": "" },
+ // { "name": "unsigned 32-bits integers", "upper": (2**32)-1, "suffix": "u" },
+ // ]
+ // for case in testcases:
+ // print("")
+ // print(f"// {case['name']}")
+ // while True:
+ // a = random.randrange(0, case["upper"])
+ // b = random.randrange(0, a)
+ // res = math.gcd(a, b)
+ // j+=1
+ // suffix = case["suffix"]
+ // print(f'MOZ_ASSERT(mozilla::GCD({a}{suffix}, {b}{suffix}) == {res}{suffix});')
+ // if j == 100:
+ // j = 0
+ // break
+ //
+ // clang-format on
+
+ // signed 64-bit integers
+ MOZ_ASSERT(mozilla::GCD(6855423437784447881, 5744152981668854128) == 1);
+ MOZ_ASSERT(mozilla::GCD(2560787397587345465, 208780102238346432) == 1);
+ MOZ_ASSERT(mozilla::GCD(5577889716064657494, 2159469434101077254) == 2);
+ MOZ_ASSERT(mozilla::GCD(5349904765384950054, 1944688623103480392) == 18);
+ MOZ_ASSERT(mozilla::GCD(6510887230309733540, 4404045615056449988) == 4);
+ MOZ_ASSERT(mozilla::GCD(5153663464686238190, 3495293373406661950) == 10);
+ MOZ_ASSERT(mozilla::GCD(8640438456651239176, 6172550763106125918) == 2);
+ MOZ_ASSERT(mozilla::GCD(4636330475123995525, 2504439215041170117) == 1);
+ MOZ_ASSERT(mozilla::GCD(3049680828923698889, 1798896016456058960) == 1);
+ MOZ_ASSERT(mozilla::GCD(6857469018143857254, 839235513850919013) == 3);
+ MOZ_ASSERT(mozilla::GCD(1667993323500460751, 403448480939209779) == 1);
+ MOZ_ASSERT(mozilla::GCD(2756773685517793960, 1001994517356200529) == 1);
+ MOZ_ASSERT(mozilla::GCD(5809484314452898314, 252378426271103138) == 2);
+ MOZ_ASSERT(mozilla::GCD(5756566734144094840, 3050839541929564330) == 10);
+ MOZ_ASSERT(mozilla::GCD(2669472117169059649, 1053394704248223342) == 1);
+ MOZ_ASSERT(mozilla::GCD(8486335744011214524, 4866724521619209633) == 3);
+ MOZ_ASSERT(mozilla::GCD(4841597191067437171, 1862876789330567260) == 1);
+ MOZ_ASSERT(mozilla::GCD(8940692064089049746, 6136664682975600685) == 1);
+ MOZ_ASSERT(mozilla::GCD(6274111242168941448, 688426762929457484) == 4);
+ MOZ_ASSERT(mozilla::GCD(7715132980994738435, 1456592620536615117) == 1);
+ MOZ_ASSERT(mozilla::GCD(5650339953233205545, 4406664870835551648) == 1);
+ MOZ_ASSERT(mozilla::GCD(7763657864638523008, 306878184260935929) == 1);
+ MOZ_ASSERT(mozilla::GCD(7776062097319502113, 7551650059636008893) == 1);
+ MOZ_ASSERT(mozilla::GCD(9158681410218029314, 5401644381866109508) == 2);
+ MOZ_ASSERT(mozilla::GCD(428865066965126615, 345306139889243757) == 1);
+ MOZ_ASSERT(mozilla::GCD(1334408785926182232, 736025095410140597) == 1);
+ MOZ_ASSERT(mozilla::GCD(9129011607893106326, 4818080883860535758) == 2);
+ MOZ_ASSERT(mozilla::GCD(5968300398911311896, 2550670869539540947) == 1);
+ MOZ_ASSERT(mozilla::GCD(5030190181362172874, 3861860193070954804) == 2);
+ MOZ_ASSERT(mozilla::GCD(5449912203994605772, 395450435226244945) == 1);
+ MOZ_ASSERT(mozilla::GCD(3510149608312823296, 1122015596295686144) == 512);
+ MOZ_ASSERT(mozilla::GCD(8822408923914428398, 3005499570530356734) == 2);
+ MOZ_ASSERT(mozilla::GCD(1894251920744324374, 29251650223056432) == 2);
+ MOZ_ASSERT(mozilla::GCD(1643262375132697825, 133049278064101269) == 1);
+ MOZ_ASSERT(mozilla::GCD(5979771268022611030, 5021008984454830630) == 10);
+ MOZ_ASSERT(mozilla::GCD(8551631013482492569, 3214028471848344275) == 1);
+ MOZ_ASSERT(mozilla::GCD(1374240599294724199, 1106817149419837791) == 1);
+ MOZ_ASSERT(mozilla::GCD(7877493197090616258, 3627451313613172281) == 3);
+ MOZ_ASSERT(mozilla::GCD(7323120572203017429, 5958183356236253053) == 1);
+ MOZ_ASSERT(mozilla::GCD(7356702947943126364, 1234023498733740170) == 2);
+ MOZ_ASSERT(mozilla::GCD(3533663535984312691, 1287666490057924782) == 1);
+ MOZ_ASSERT(mozilla::GCD(8249625410612436788, 1692674983510387167) == 1);
+ MOZ_ASSERT(mozilla::GCD(6590544882911640025, 6518468963976945930) == 5);
+ MOZ_ASSERT(mozilla::GCD(1161703442901270391, 72640111759506406) == 1);
+ MOZ_ASSERT(mozilla::GCD(3648054318401558456, 286110734809583843) == 1);
+ MOZ_ASSERT(mozilla::GCD(7445158880116265073, 4921289272987608741) == 3);
+ MOZ_ASSERT(mozilla::GCD(8052135113655284875, 6319225376882653323) == 1);
+ MOZ_ASSERT(mozilla::GCD(1272523803145322419, 669368693174176828) == 1);
+ MOZ_ASSERT(mozilla::GCD(762600464449954636, 258101161586809942) == 2);
+ MOZ_ASSERT(mozilla::GCD(8711570456095175409, 3217102356729157526) == 1);
+ MOZ_ASSERT(mozilla::GCD(8596472485422071677, 6590296624757765441) == 1);
+ MOZ_ASSERT(mozilla::GCD(8830210169177656300, 4853400012200083924) == 4);
+ MOZ_ASSERT(mozilla::GCD(2241405940749418043, 1414859858059940275) == 1);
+ MOZ_ASSERT(mozilla::GCD(6645372226653882826, 1089866326575332751) == 1);
+ MOZ_ASSERT(mozilla::GCD(4972052091595687646, 3420503469411720440) == 2);
+ MOZ_ASSERT(mozilla::GCD(8796611232338780872, 8344997795629414169) == 1);
+ MOZ_ASSERT(mozilla::GCD(4109837086789844244, 2749395249398063222) == 2);
+ MOZ_ASSERT(mozilla::GCD(7099065868279436275, 3485530390566515044) == 1);
+ MOZ_ASSERT(mozilla::GCD(1041731907675308955, 561481363772326233) == 9);
+ MOZ_ASSERT(mozilla::GCD(5882271298652803063, 5189002859026699540) == 1);
+ MOZ_ASSERT(mozilla::GCD(835073783923421192, 56853706366082462) == 2);
+ MOZ_ASSERT(mozilla::GCD(2514946180207195049, 1934146334993787393) == 1);
+ MOZ_ASSERT(mozilla::GCD(8975439209128912747, 1377234541321015082) == 1);
+ MOZ_ASSERT(mozilla::GCD(7039355952603350033, 6501349986472883135) == 1);
+ MOZ_ASSERT(mozilla::GCD(3747474677542899887, 2583298074596991574) == 1);
+ MOZ_ASSERT(mozilla::GCD(8176323250144977780, 4706420973964948943) == 1);
+ MOZ_ASSERT(mozilla::GCD(8748260715055109420, 7094433080013425893) == 1);
+ MOZ_ASSERT(mozilla::GCD(2192085035443314042, 1964458338792492837) == 3);
+ MOZ_ASSERT(mozilla::GCD(4387059045133366080, 1521989527531982075) == 5);
+ MOZ_ASSERT(mozilla::GCD(5735277355594712161, 1564786041102368131) == 1);
+ MOZ_ASSERT(mozilla::GCD(3898210686025675418, 1252531932064281967) == 7);
+ MOZ_ASSERT(mozilla::GCD(1886253648955280570, 235795900409586307) == 7);
+ MOZ_ASSERT(mozilla::GCD(862214669576776425, 90702464427080315) == 5);
+ MOZ_ASSERT(mozilla::GCD(2831206027654482398, 2543050780384667441) == 1);
+ MOZ_ASSERT(mozilla::GCD(3561377609788845927, 2837335262531584639) == 1);
+ MOZ_ASSERT(mozilla::GCD(1973347825404473626, 634138253455209313) == 1);
+ MOZ_ASSERT(mozilla::GCD(6447708134022060248, 4346890077474767787) == 19);
+ MOZ_ASSERT(mozilla::GCD(1690365172062143048, 678324119874104971) == 1);
+ MOZ_ASSERT(mozilla::GCD(2900650911116509049, 818833306053988358) == 1);
+ MOZ_ASSERT(mozilla::GCD(4126258648185074937, 2190040072639642009) == 1);
+ MOZ_ASSERT(mozilla::GCD(7310083765892765377, 3615506256861011852) == 1);
+ MOZ_ASSERT(mozilla::GCD(1482494462925181129, 568665115985247457) == 1);
+ MOZ_ASSERT(mozilla::GCD(2675477464881771327, 1476381757716745502) == 1);
+ MOZ_ASSERT(mozilla::GCD(6437060864565620566, 266707802567839796) == 2);
+ MOZ_ASSERT(mozilla::GCD(3800292251587454230, 245022706279648741) == 1);
+ MOZ_ASSERT(mozilla::GCD(3549515343757259493, 1328377263505490456) == 1);
+ MOZ_ASSERT(mozilla::GCD(8324574140787708570, 393444007055415700) == 10);
+ MOZ_ASSERT(mozilla::GCD(4373054321374923750, 1031193918836627100) == 150);
+ MOZ_ASSERT(mozilla::GCD(1370218692062991327, 682070501541164452) == 1);
+ MOZ_ASSERT(mozilla::GCD(4728813669404513421, 2346998232227619529) == 1);
+ MOZ_ASSERT(mozilla::GCD(320864023853706984, 50178854177191437) == 3);
+ MOZ_ASSERT(mozilla::GCD(5424710852893793602, 4237974770221703674) == 2);
+ MOZ_ASSERT(mozilla::GCD(5167582806125634015, 3538730725111557853) == 79);
+ MOZ_ASSERT(mozilla::GCD(7197930858946883500, 6668556859540800605) == 5);
+ MOZ_ASSERT(mozilla::GCD(2900089593575477549, 2554913303396097824) == 1);
+ MOZ_ASSERT(mozilla::GCD(1397576820519717048, 847997331257829237) == 3);
+ MOZ_ASSERT(mozilla::GCD(3939714364354053162, 1374067007308181723) == 1);
+ MOZ_ASSERT(mozilla::GCD(1065626084531260890, 664198963621954813) == 317);
+ MOZ_ASSERT(mozilla::GCD(5912876357514418196, 5112700044139286313) == 1);
+ MOZ_ASSERT(mozilla::GCD(2654316726913809362, 588030922713986903) == 1);
+
+ // unsigned 64-bit integers
+ MOZ_ASSERT(mozilla::GCD(16747832015348854198u, 10986175599217457242u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(14011882763672869646u, 1150181481133900726u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(6605029198216299492u, 2540177763690679863u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(8723446333453359635u, 5501999887069319528u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2056609692029140361u, 1456692183174011231u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3979920159703007405u, 2102351633956912159u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(9463892761763926474u, 5727651032816755587u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(15074653294321365395u, 7500084005319994862u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(7596876989397200146u, 2100623677138635163u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(15788975435035111366u, 13949507094186899135u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(11511089994271140687u, 11202842908571961185u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(5238481506779057035u, 1275096406977139452u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(10319988989820236521u, 6004256112028859859u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(15363016657999062582u, 13709656670722381934u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(1212882338768103987u, 400304873392680016u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(14516701884936382582u, 9474965125574306885u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4684990176797036518u, 2826010316418750908u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(1257550743165743081u, 501524040422212694u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2684107647237574540u, 1059404913392538915u) == 5u);
+ MOZ_ASSERT(mozilla::GCD(9075798209725656040u, 8460431147770771484u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(8849414266308239550u, 2100344973594953676u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(18235452615524492166u, 6948238589518088517u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(15050298436941428700u, 1467533438133155187u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(8834598722016252963u, 4311275747815972852u) == 17u);
+ MOZ_ASSERT(mozilla::GCD(9356558625132137133u, 2037947968328350721u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3849613153563955590u, 742698742609310596u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(14456988562990139501u, 10112205238651656021u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(12307508681986233124u, 9812326358082292497u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1542509761845906606u, 753342053499303952u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(3002452874498902380u, 1551203246991573851u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(6995746439795805457u, 1188069610619158471u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2746395460341933223u, 2567350813567392270u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(13780256804547757349u, 3248441336598733689u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(11585262422698980788u, 9223319679416307971u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(12061506913736835258u, 4388981418731026638u) == 54u);
+ MOZ_ASSERT(mozilla::GCD(7926097431519628264u, 6609465824726553267u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4869073093357623730u, 127092341961569309u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2415749375652736599u, 1225333195065764619u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(12396258519293261927u, 7854932518032305093u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(8482841866529133449u, 8041279973223483861u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(6256232276718808317u, 218093546248209886u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(8708964372422992556u, 5925839455605803265u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(7079489553626522083u, 2723660727447617723u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(6456428365552053201u, 1199403261032183111u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3346567208089938575u, 2383119761029013459u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(11371634586699820652u, 1314783250642191861u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(17865943339510318926u, 6852058968402585010u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(6184068614737379672u, 3615164034002231440u) == 8u);
+ MOZ_ASSERT(mozilla::GCD(4188759555626894588u, 756597961380253895u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(9834711092513827417u, 3337572906055372223u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(15971004526745900665u, 8185256010881285296u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(15018742812984668959u, 529070670894924960u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1067863751656464299u, 905318428655384382u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(13862829046112265837u, 6101005940549725663u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(5042641015440071021u, 3851032995323622058u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(12302889786666538640u, 10776548976024201292u) == 76u);
+ MOZ_ASSERT(mozilla::GCD(13722399417473040071u, 9411461429949802122u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(7320504128957551347u, 54052915134765261u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(15757615267691124901u, 6960991167654285257u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(5748033181727727936u, 303811493931685833u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(13393585076101458038u, 11704741982068090192u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(1305962146520003941u, 900947650687182151u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(10210329619324275486u, 9165444096209531122u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(12287397750298100333u, 4589303685754232593u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(13074046732385479094u, 9410427502131685240u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(10769225306727183116u, 3766083633148275570u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(16097129444752648454u, 1689032025737433449u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(12569058547490329992u, 3311470626838389990u) == 18u);
+ MOZ_ASSERT(mozilla::GCD(6800922789750937338u, 1401809431753492506u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(7640775166765881526u, 330467034911649653u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(7713745971481011689u, 2881741428874316968u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3447718804232188171u, 2048968371582835027u) == 17u);
+ MOZ_ASSERT(mozilla::GCD(5048117340512952935u, 2723523492436699844u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(10307361968692211723u, 428905266774914488u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1319115090575683914u, 1262779939989801116u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(12690110976610715926u, 1527151730024909348u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(12963032302522784237u, 8894543024067386192u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4719664701853305298u, 328290838903591497u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(6046363361224867225u, 2463351775539510194u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(467063656725960574u, 62796777888499328u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(16390445286228133923u, 3793827091023779027u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3118497337756941652u, 2860811741849353064u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(17480668716240157222u, 6736393718990377613u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3008091962262081749u, 2764474578829797968u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(10443605258088065132u, 1118236736154633837u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(8681282777233478597u, 2520450074320754822u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2428054799146631800u, 2304419668216461210u) == 10u);
+ MOZ_ASSERT(mozilla::GCD(11986346113373252908u, 5868466983065345812u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(566070446598076689u, 226910043938150340u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(20286446051392853u, 2253005103754547u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(9478145873341733534u, 1361277916695374175u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2194077616952029858u, 1880982148321238243u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(5067528875217388843u, 1007391120419508106u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(14964775244731205772u, 8476706085421248933u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(15864657026011160414u, 11542748143033682677u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(6819186727513097073u, 3374817819083626717u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(14864653919493481829u, 1475678482546800916u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(674964986925038761u, 500070581922501698u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(5286379749864372936u, 1077542296477907313u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(506827427986892036u, 177356571976309469u) == 1u);
+
+  // signed 32-bit integers
+ MOZ_ASSERT(mozilla::GCD(2082847559, 1075502059) == 1);
+ MOZ_ASSERT(mozilla::GCD(1516817880, 1427978452) == 4);
+ MOZ_ASSERT(mozilla::GCD(1912103032, 865754441) == 1);
+ MOZ_ASSERT(mozilla::GCD(1907998028, 1578360455) == 1);
+ MOZ_ASSERT(mozilla::GCD(2082786344, 1864664012) == 4);
+ MOZ_ASSERT(mozilla::GCD(2060961011, 1928455778) == 1);
+ MOZ_ASSERT(mozilla::GCD(970664659, 63074065) == 1);
+ MOZ_ASSERT(mozilla::GCD(55960901, 36955491) == 1);
+ MOZ_ASSERT(mozilla::GCD(1136602528, 339758054) == 2);
+ MOZ_ASSERT(mozilla::GCD(2040420582, 1355439044) == 2);
+ MOZ_ASSERT(mozilla::GCD(1295522905, 736231412) == 1);
+ MOZ_ASSERT(mozilla::GCD(778941225, 674482877) == 1);
+ MOZ_ASSERT(mozilla::GCD(291862772, 262751987) == 1);
+ MOZ_ASSERT(mozilla::GCD(233275018, 60278627) == 1);
+ MOZ_ASSERT(mozilla::GCD(701740307, 432255046) == 1);
+ MOZ_ASSERT(mozilla::GCD(582766531, 457298210) == 1);
+ MOZ_ASSERT(mozilla::GCD(196369046, 15577226) == 2);
+ MOZ_ASSERT(mozilla::GCD(1342156837, 2790339) == 1);
+ MOZ_ASSERT(mozilla::GCD(502348102, 151073265) == 1);
+ MOZ_ASSERT(mozilla::GCD(836867611, 797891653) == 1);
+ MOZ_ASSERT(mozilla::GCD(859055751, 525520896) == 3);
+ MOZ_ASSERT(mozilla::GCD(701234220, 683730404) == 4);
+ MOZ_ASSERT(mozilla::GCD(2102253469, 1046820362) == 1);
+ MOZ_ASSERT(mozilla::GCD(1712691453, 34616585) == 1);
+ MOZ_ASSERT(mozilla::GCD(1074235876, 683609889) == 1);
+ MOZ_ASSERT(mozilla::GCD(535965177, 182306069) == 11);
+ MOZ_ASSERT(mozilla::GCD(1437763442, 180698008) == 2);
+ MOZ_ASSERT(mozilla::GCD(2005641602, 175306737) == 1);
+ MOZ_ASSERT(mozilla::GCD(803294953, 565920364) == 1);
+ MOZ_ASSERT(mozilla::GCD(2135931435, 220153322) == 1);
+ MOZ_ASSERT(mozilla::GCD(1002010726, 619364124) == 2);
+ MOZ_ASSERT(mozilla::GCD(1841159587, 577256747) == 1);
+ MOZ_ASSERT(mozilla::GCD(2117547620, 896973794) == 2);
+ MOZ_ASSERT(mozilla::GCD(2004836234, 157238204) == 2);
+ MOZ_ASSERT(mozilla::GCD(952368407, 625062194) == 1);
+ MOZ_ASSERT(mozilla::GCD(671144794, 357719289) == 1);
+ MOZ_ASSERT(mozilla::GCD(1369585680, 279330845) == 5);
+ MOZ_ASSERT(mozilla::GCD(389855496, 230820785) == 1);
+ MOZ_ASSERT(mozilla::GCD(2101505071, 572728762) == 1);
+ MOZ_ASSERT(mozilla::GCD(1657802296, 667524476) == 4);
+ MOZ_ASSERT(mozilla::GCD(1007298072, 598682608) == 8);
+ MOZ_ASSERT(mozilla::GCD(1499193816, 44129206) == 2);
+ MOZ_ASSERT(mozilla::GCD(1355799723, 1163556923) == 1);
+ MOZ_ASSERT(mozilla::GCD(346410469, 294136125) == 1);
+ MOZ_ASSERT(mozilla::GCD(240297386, 239749630) == 2);
+ MOZ_ASSERT(mozilla::GCD(1595986655, 706220030) == 5);
+ MOZ_ASSERT(mozilla::GCD(265850446, 117414954) == 2);
+ MOZ_ASSERT(mozilla::GCD(1594478812, 559606261) == 1);
+ MOZ_ASSERT(mozilla::GCD(1098933117, 145267674) == 3);
+ MOZ_ASSERT(mozilla::GCD(37749195, 34174284) == 3);
+ MOZ_ASSERT(mozilla::GCD(173141528, 158277345) == 1);
+ MOZ_ASSERT(mozilla::GCD(1523316779, 1507242666) == 1);
+ MOZ_ASSERT(mozilla::GCD(1574321272, 213222586) == 2);
+ MOZ_ASSERT(mozilla::GCD(186241582, 58675779) == 1);
+ MOZ_ASSERT(mozilla::GCD(1351024876, 1256961567) == 1);
+ MOZ_ASSERT(mozilla::GCD(2060871503, 1626844669) == 1);
+ MOZ_ASSERT(mozilla::GCD(794617235, 606782933) == 1);
+ MOZ_ASSERT(mozilla::GCD(620853401, 550785717) == 1);
+ MOZ_ASSERT(mozilla::GCD(978990617, 684228903) == 1);
+ MOZ_ASSERT(mozilla::GCD(185414372, 160958435) == 11);
+ MOZ_ASSERT(mozilla::GCD(13886275, 10781501) == 1);
+ MOZ_ASSERT(mozilla::GCD(316445410, 72994145) == 5);
+ MOZ_ASSERT(mozilla::GCD(260685833, 66561321) == 1);
+ MOZ_ASSERT(mozilla::GCD(656788852, 619471100) == 4);
+ MOZ_ASSERT(mozilla::GCD(409924450, 323144710) == 10);
+ MOZ_ASSERT(mozilla::GCD(1696374689, 155122424) == 1);
+ MOZ_ASSERT(mozilla::GCD(1720449495, 1332196090) == 5);
+ MOZ_ASSERT(mozilla::GCD(102504868, 95625294) == 2);
+ MOZ_ASSERT(mozilla::GCD(959039064, 266180243) == 1);
+ MOZ_ASSERT(mozilla::GCD(771762738, 99126507) == 3);
+ MOZ_ASSERT(mozilla::GCD(1666721205, 164347293) == 3);
+ MOZ_ASSERT(mozilla::GCD(1145868726, 1013299840) == 2);
+ MOZ_ASSERT(mozilla::GCD(123667035, 6968726) == 1);
+ MOZ_ASSERT(mozilla::GCD(856285310, 669026117) == 1);
+ MOZ_ASSERT(mozilla::GCD(1748843942, 376021862) == 2);
+ MOZ_ASSERT(mozilla::GCD(1364381942, 1316920424) == 2);
+ MOZ_ASSERT(mozilla::GCD(376501104, 233350000) == 16);
+ MOZ_ASSERT(mozilla::GCD(1516376773, 554534905) == 1);
+ MOZ_ASSERT(mozilla::GCD(1355209533, 371401397) == 1);
+ MOZ_ASSERT(mozilla::GCD(488029245, 453641230) == 5);
+ MOZ_ASSERT(mozilla::GCD(2086782535, 1965901533) == 1);
+ MOZ_ASSERT(mozilla::GCD(1701843138, 197489892) == 6);
+ MOZ_ASSERT(mozilla::GCD(1857287302, 756127018) == 2);
+ MOZ_ASSERT(mozilla::GCD(1806613582, 963087217) == 1);
+ MOZ_ASSERT(mozilla::GCD(1350708388, 1013432485) == 1);
+ MOZ_ASSERT(mozilla::GCD(742201232, 486590366) == 2);
+ MOZ_ASSERT(mozilla::GCD(47378255, 18524009) == 1);
+ MOZ_ASSERT(mozilla::GCD(750926792, 282203477) == 1);
+ MOZ_ASSERT(mozilla::GCD(1242468272, 1225593358) == 2);
+ MOZ_ASSERT(mozilla::GCD(1937337947, 1233008310) == 1);
+ MOZ_ASSERT(mozilla::GCD(600511783, 563234297) == 1);
+ MOZ_ASSERT(mozilla::GCD(1583895113, 1400349394) == 1);
+ MOZ_ASSERT(mozilla::GCD(361950446, 20294144) == 26);
+ MOZ_ASSERT(mozilla::GCD(712527923, 351368901) == 1);
+ MOZ_ASSERT(mozilla::GCD(221252886, 13768150) == 2);
+ MOZ_ASSERT(mozilla::GCD(1217530242, 184772639) == 1);
+ MOZ_ASSERT(mozilla::GCD(1145522580, 92958612) == 12);
+ MOZ_ASSERT(mozilla::GCD(1765854048, 1073605551) == 3);
+ MOZ_ASSERT(mozilla::GCD(1179258112, 1148756377) == 1);
+ MOZ_ASSERT(mozilla::GCD(211982661, 145365362) == 1);
+
+  // unsigned 32-bit integers
+ MOZ_ASSERT(mozilla::GCD(2346624228u, 854636854u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(257647411u, 113262213u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(532130107u, 181815062u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(188329196u, 21767880u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(965417460u, 433449910u) == 10u);
+ MOZ_ASSERT(mozilla::GCD(4285939108u, 782087256u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(3176833937u, 905249796u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1596497177u, 1259467765u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(296928708u, 137867254u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(810260571u, 278688539u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2319673546u, 6698908u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(335032855u, 304923748u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1520046075u, 30861208u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3370242674u, 2513781509u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2380615411u, 41999289u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2999947090u, 619047913u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(463491935u, 219826435u) == 5u);
+ MOZ_ASSERT(mozilla::GCD(256795166u, 3240595u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3794760062u, 542176354u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(2347135107u, 532837578u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(215263644u, 82185110u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(3242470340u, 1014909501u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1935066897u, 1646318370u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(2528019825u, 2199478105u) == 5u);
+ MOZ_ASSERT(mozilla::GCD(814340701u, 505422837u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2422005621u, 1270490106u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(2196878780u, 2125974315u) == 5u);
+ MOZ_ASSERT(mozilla::GCD(3243580525u, 3222120645u) == 5u);
+ MOZ_ASSERT(mozilla::GCD(592838u, 333273u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(957856834u, 660922287u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2650657380u, 2507896759u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(35861051u, 25878355u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1907977010u, 514369620u) == 10u);
+ MOZ_ASSERT(mozilla::GCD(1850153182u, 1133466079u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2404132308u, 942620249u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4120768767u, 794728522u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3115077311u, 437206010u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3653354572u, 3501340268u) == 4u);
+ MOZ_ASSERT(mozilla::GCD(3700775106u, 1237309608u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(3838425682u, 2767520531u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(812123689u, 691153768u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3201500844u, 1530832674u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(802121923u, 753535009u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(575392026u, 450096822u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(1074039450u, 724299558u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(3785968159u, 230568577u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(80611731u, 30537579u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(3717744094u, 3192172824u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(3481208739u, 3389567399u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1126134290u, 760589919u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2452072599u, 1235840929u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4172574373u, 664346996u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4280275945u, 1940565231u) == 11u);
+ MOZ_ASSERT(mozilla::GCD(1138803378u, 919205598u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(3871971423u, 3071143517u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1889403334u, 261936800u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(1233462464u, 462090021u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(267801361u, 177041892u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1586528261u, 1146114428u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2209381020u, 1616518545u) == 15u);
+ MOZ_ASSERT(mozilla::GCD(2493819993u, 110364986u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(105420984u, 83814372u) == 12u);
+ MOZ_ASSERT(mozilla::GCD(3093899047u, 917349662u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3716325890u, 1554865432u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(692565714u, 265467690u) == 18u);
+ MOZ_ASSERT(mozilla::GCD(659720171u, 250624014u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1890623148u, 1632453222u) == 6u);
+ MOZ_ASSERT(mozilla::GCD(3557986303u, 752931252u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(237903157u, 177153319u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4133928804u, 3898800943u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1783300920u, 196251347u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2035190407u, 866039372u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3893680107u, 3211053018u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(4293646715u, 2698207329u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1409442959u, 151043902u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1823328305u, 375231671u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2574512647u, 1902834298u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2533783127u, 1232079823u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2622446878u, 193328426u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(4099571222u, 3439224331u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2355797345u, 430435034u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2654318392u, 2069135952u) == 8u);
+ MOZ_ASSERT(mozilla::GCD(1671976410u, 1100794671u) == 3u);
+ MOZ_ASSERT(mozilla::GCD(328877177u, 236038245u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2373247523u, 1198763899u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(3230550971u, 203517406u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2274958703u, 353643804u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1048415366u, 740416576u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(2768590397u, 843179468u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2839858158u, 1019946790u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(4116867766u, 52672530u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(3433787325u, 2398189631u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(2636022376u, 2289412838u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(2904900253u, 2748915828u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(4041240379u, 605321815u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1730010566u, 92436785u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1362635513u, 757365378u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(1327133482u, 940350094u) == 2u);
+ MOZ_ASSERT(mozilla::GCD(3515019959u, 810874750u) == 1u);
+ MOZ_ASSERT(mozilla::GCD(82871503u, 43900000u) == 1u);
+
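+  // Minimal sanity check with small coprime operands.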
+ MOZ_ASSERT(mozilla::GCD(3u, 7u) == 1u);
+}
+
+int main() {
+ TestIsPowerOfTwo();
+ TestClamp();
+ TestGCD();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestMaybe.cpp b/mfbt/tests/TestMaybe.cpp
new file mode 100644
index 0000000000..2c56b85b6d
--- /dev/null
+++ b/mfbt/tests/TestMaybe.cpp
@@ -0,0 +1,1473 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+using mozilla::SomeRef;
+using mozilla::ToMaybe;
+using mozilla::ToMaybeRef;
+
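+// Runs a test function and then verifies that every object constructed
+// during the test was destroyed, e.g. RUN_TEST(TestBasicFeatures).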
+#define RUN_TEST(t) \
+ do { \
+ bool cond = (t()); \
+ if (!cond) return 1; \
+ cond = AllDestructorsWereCalled(); \
+ MOZ_ASSERT(cond, "Failed to destroy all objects during test: " #t); \
+ if (!cond) return 1; \
+ } while (false)
+
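+// Records which special member function most recently touched a test value,
+// so tests can assert exactly how Maybe<T> constructed or assigned it.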
+enum Status {
+ eWasDefaultConstructed,
+ eWasConstructed,
+ eWasCopyConstructed,
+ eWasMoveConstructed,
+ eWasConstMoveConstructed,
+ eWasAssigned,
+ eWasCopyAssigned,
+ eWasMoveAssigned,
+ eWasCopiedFrom,
+ eWasMovedFrom,
+ eWasConstMovedFrom,
+};
+
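+// Count of currently-live test objects; RUN_TEST checks that it returns to
+// zero after each test.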
+static size_t sUndestroyedObjects = 0;
+
+static bool AllDestructorsWereCalled() { return sUndestroyedObjects == 0; }
+
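+// A copyable and movable value type that records its construction and
+// assignment history and participates in the live-object count.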
+struct BasicValue {
+ BasicValue() : mStatus(eWasDefaultConstructed), mTag(0) {
+ ++sUndestroyedObjects;
+ }
+
+ explicit BasicValue(int aTag) : mStatus(eWasConstructed), mTag(aTag) {
+ ++sUndestroyedObjects;
+ }
+
+ BasicValue(const BasicValue& aOther)
+ : mStatus(eWasCopyConstructed), mTag(aOther.mTag) {
+ ++sUndestroyedObjects;
+ }
+
+ BasicValue(BasicValue&& aOther)
+ : mStatus(eWasMoveConstructed), mTag(aOther.mTag) {
+ ++sUndestroyedObjects;
+ aOther.mStatus = eWasMovedFrom;
+ aOther.mTag = 0;
+ }
+
+ BasicValue(const BasicValue&& aOther)
+ : mStatus(eWasConstMoveConstructed), mTag(aOther.mTag) {
+ ++sUndestroyedObjects;
+ aOther.mStatus = eWasConstMovedFrom;
+ }
+
+ ~BasicValue() { --sUndestroyedObjects; }
+
+ BasicValue& operator=(const BasicValue& aOther) {
+ mStatus = eWasCopyAssigned;
+ mTag = aOther.mTag;
+ return *this;
+ }
+
+ BasicValue& operator=(BasicValue&& aOther) {
+ mStatus = eWasMoveAssigned;
+ mTag = aOther.mTag;
+ aOther.mStatus = eWasMovedFrom;
+ aOther.mTag = 0;
+ return *this;
+ }
+
+ bool operator==(const BasicValue& aOther) const {
+ return mTag == aOther.mTag;
+ }
+
+ bool operator<(const BasicValue& aOther) const { return mTag < aOther.mTag; }
+
+ Status GetStatus() const { return mStatus; }
+ void SetTag(int aValue) { mTag = aValue; }
+ int GetTag() const { return mTag; }
+
+ private:
+ mutable Status mStatus;
+ int mTag;
+};
+
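+// A move-only type: copy construction and copy assignment are deleted.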
+struct UncopyableValue {
+ UncopyableValue() : mStatus(eWasDefaultConstructed) { ++sUndestroyedObjects; }
+
+ UncopyableValue(UncopyableValue&& aOther) : mStatus(eWasMoveConstructed) {
+ ++sUndestroyedObjects;
+ aOther.mStatus = eWasMovedFrom;
+ }
+
+ ~UncopyableValue() { --sUndestroyedObjects; }
+
+ UncopyableValue& operator=(UncopyableValue&& aOther) {
+ mStatus = eWasMoveAssigned;
+ aOther.mStatus = eWasMovedFrom;
+ return *this;
+ }
+
+ Status GetStatus() { return mStatus; }
+
+ private:
+ UncopyableValue(const UncopyableValue& aOther) = delete;
+ UncopyableValue& operator=(const UncopyableValue& aOther) = delete;
+
+ Status mStatus;
+};
+
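+// A copy-only type: move construction and move assignment are deleted.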
+struct UnmovableValue {
+ UnmovableValue() : mStatus(eWasDefaultConstructed) { ++sUndestroyedObjects; }
+
+ UnmovableValue(const UnmovableValue& aOther) : mStatus(eWasCopyConstructed) {
+ ++sUndestroyedObjects;
+ }
+
+ ~UnmovableValue() { --sUndestroyedObjects; }
+
+ UnmovableValue& operator=(const UnmovableValue& aOther) {
+ mStatus = eWasCopyAssigned;
+ return *this;
+ }
+
+ Status GetStatus() { return mStatus; }
+
+ UnmovableValue(UnmovableValue&& aOther) = delete;
+ UnmovableValue& operator=(UnmovableValue&& aOther) = delete;
+
+ private:
+ Status mStatus;
+};
+
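+// A type that can be neither copied nor moved; a Maybe of this type can only
+// be populated in place, via emplace() or std::in_place.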
+struct UncopyableUnmovableValue {
+ UncopyableUnmovableValue() : mStatus(eWasDefaultConstructed) {
+ ++sUndestroyedObjects;
+ }
+
+ explicit UncopyableUnmovableValue(int) : mStatus(eWasConstructed) {
+ ++sUndestroyedObjects;
+ }
+
+ ~UncopyableUnmovableValue() { --sUndestroyedObjects; }
+
+ Status GetStatus() const { return mStatus; }
+
+ private:
+ UncopyableUnmovableValue(const UncopyableUnmovableValue& aOther) = delete;
+ UncopyableUnmovableValue& operator=(const UncopyableUnmovableValue& aOther) =
+ delete;
+ UncopyableUnmovableValue(UncopyableUnmovableValue&& aOther) = delete;
+ UncopyableUnmovableValue& operator=(UncopyableUnmovableValue&& aOther) =
+ delete;
+
+ Status mStatus;
+};
+
+static_assert(std::is_trivially_destructible_v<Maybe<int>>);
+static_assert(std::is_trivially_copy_constructible_v<Maybe<int>>);
+static_assert(std::is_trivially_copy_assignable_v<Maybe<int>>);
+
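+// Compile-time checks that the core accessors (value, valueOr, valueOrFrom,
+// apply, map) are usable in constant expressions.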
+static_assert(42 == Some(42).value());
+static_assert(42 == Some(42).valueOr(43));
+static_assert(42 == Maybe<int>{}.valueOr(42));
+static_assert(42 == Some(42).valueOrFrom([] { return 43; }));
+static_assert(42 == Maybe<int>{}.valueOrFrom([] { return 42; }));
+static_assert(Some(43) == [] {
+ auto val = Some(42);
+ val.apply([](int& val) { val += 1; });
+ return val;
+}());
+static_assert(Some(43) == Some(42).map([](int val) { return val + 1; }));
+static_assert(Maybe<int>(std::in_place, 43) ==
+ Maybe<int>(std::in_place, 42).map([](int val) {
+ return val + 1;
+ }));
+
+struct TriviallyDestructible {
+ TriviallyDestructible() { // not trivially constructible
+ }
+};
+
+static_assert(std::is_trivially_destructible_v<Maybe<TriviallyDestructible>>);
+
+struct UncopyableValueLiteralType {
+ explicit constexpr UncopyableValueLiteralType(int aValue) : mValue{aValue} {}
+
+ UncopyableValueLiteralType(UncopyableValueLiteralType&&) = default;
+ UncopyableValueLiteralType& operator=(UncopyableValueLiteralType&&) = default;
+
+ int mValue;
+};
+
+static_assert(
+ std::is_trivially_destructible_v<Maybe<UncopyableValueLiteralType>>);
+static_assert(!std::is_copy_constructible_v<Maybe<UncopyableValueLiteralType>>);
+static_assert(!std::is_copy_assignable_v<Maybe<UncopyableValueLiteralType>>);
+static_assert(std::is_move_constructible_v<Maybe<UncopyableValueLiteralType>>);
+static_assert(std::is_move_assignable_v<Maybe<UncopyableValueLiteralType>>);
+
+constexpr Maybe<UncopyableValueLiteralType> someUncopyable =
+ Some(UncopyableValueLiteralType{42});
+static_assert(someUncopyable.isSome());
+static_assert(42 == someUncopyable->mValue);
+
+constexpr Maybe<UncopyableValueLiteralType> someUncopyableAssigned = [] {
+ auto res = Maybe<UncopyableValueLiteralType>{};
+ res = Some(UncopyableValueLiteralType{42});
+ return res;
+}();
+static_assert(someUncopyableAssigned.isSome());
+static_assert(42 == someUncopyableAssigned->mValue);
+
+static bool TestBasicFeatures() {
+ // Check that a Maybe<T> is initialized to Nothing.
+ Maybe<BasicValue> mayValue;
+ static_assert(std::is_same_v<BasicValue, decltype(mayValue)::ValueType>,
+ "Should have BasicValue ValueType");
+ MOZ_RELEASE_ASSERT(!mayValue);
+ MOZ_RELEASE_ASSERT(!mayValue.isSome());
+ MOZ_RELEASE_ASSERT(mayValue.isNothing());
+
+ // Check that emplace() default constructs and the accessors work.
+ mayValue.emplace();
+ MOZ_RELEASE_ASSERT(mayValue);
+ MOZ_RELEASE_ASSERT(mayValue.isSome());
+ MOZ_RELEASE_ASSERT(!mayValue.isNothing());
+ MOZ_RELEASE_ASSERT(*mayValue == BasicValue());
+ static_assert(std::is_same_v<BasicValue&, decltype(*mayValue)>,
+ "operator*() should return a BasicValue&");
+ MOZ_RELEASE_ASSERT(mayValue.value() == BasicValue());
+ static_assert(std::is_same_v<BasicValue, decltype(mayValue.value())>,
+ "value() should return a BasicValue");
+ MOZ_RELEASE_ASSERT(mayValue.ref() == BasicValue());
+ static_assert(std::is_same_v<BasicValue&, decltype(mayValue.ref())>,
+ "ref() should return a BasicValue&");
+ MOZ_RELEASE_ASSERT(mayValue.ptr() != nullptr);
+ static_assert(std::is_same_v<BasicValue*, decltype(mayValue.ptr())>,
+ "ptr() should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasDefaultConstructed);
+
+ // Check that reset() works.
+ mayValue.reset();
+ MOZ_RELEASE_ASSERT(!mayValue);
+ MOZ_RELEASE_ASSERT(!mayValue.isSome());
+ MOZ_RELEASE_ASSERT(mayValue.isNothing());
+
+ // Check that emplace(T1) calls the correct constructor.
+ mayValue.emplace(1);
+ MOZ_RELEASE_ASSERT(mayValue);
+ MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasConstructed);
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1);
+ mayValue.reset();
+ MOZ_RELEASE_ASSERT(!mayValue);
+
+ {
+ // Check that Maybe(std::in_place, T1) calls the correct constructor.
+ const auto mayValueConstructed = Maybe<BasicValue>(std::in_place, 1);
+ MOZ_RELEASE_ASSERT(mayValueConstructed);
+ MOZ_RELEASE_ASSERT(mayValueConstructed->GetStatus() == eWasConstructed);
+ MOZ_RELEASE_ASSERT(mayValueConstructed->GetTag() == 1);
+ }
+
+ // Check that Some() and Nothing() work.
+ mayValue = Some(BasicValue(2));
+ MOZ_RELEASE_ASSERT(mayValue);
+ MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 2);
+ mayValue = Nothing();
+ MOZ_RELEASE_ASSERT(!mayValue);
+
+ // Check that the accessors work through a const ref.
+ mayValue.emplace();
+ const Maybe<BasicValue>& mayValueCRef = mayValue;
+ MOZ_RELEASE_ASSERT(mayValueCRef);
+ MOZ_RELEASE_ASSERT(mayValueCRef.isSome());
+ MOZ_RELEASE_ASSERT(!mayValueCRef.isNothing());
+ MOZ_RELEASE_ASSERT(*mayValueCRef == BasicValue());
+ static_assert(std::is_same_v<const BasicValue&, decltype(*mayValueCRef)>,
+                "operator*() should return a const BasicValue&");
+ MOZ_RELEASE_ASSERT(mayValueCRef.value() == BasicValue());
+ static_assert(std::is_same_v<BasicValue, decltype(mayValueCRef.value())>,
+ "value() should return a BasicValue");
+ MOZ_RELEASE_ASSERT(mayValueCRef.ref() == BasicValue());
+ static_assert(std::is_same_v<const BasicValue&, decltype(mayValueCRef.ref())>,
+ "ref() should return a const BasicValue&");
+ MOZ_RELEASE_ASSERT(mayValueCRef.ptr() != nullptr);
+ static_assert(std::is_same_v<const BasicValue*, decltype(mayValueCRef.ptr())>,
+ "ptr() should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(mayValueCRef->GetStatus() == eWasDefaultConstructed);
+ mayValue.reset();
+
+ // Check that we can create and reference Maybe<const Type>.
+ Maybe<const BasicValue> mayCValue1 = Some(BasicValue(5));
+ MOZ_RELEASE_ASSERT(mayCValue1);
+ MOZ_RELEASE_ASSERT(mayCValue1.isSome());
+ MOZ_RELEASE_ASSERT(*mayCValue1 == BasicValue(5));
+ const Maybe<const BasicValue>& mayCValue1Ref = mayCValue1;
+ MOZ_RELEASE_ASSERT(mayCValue1Ref == mayCValue1);
+ MOZ_RELEASE_ASSERT(*mayCValue1Ref == BasicValue(5));
+ Maybe<const BasicValue> mayCValue2;
+ mayCValue2.emplace(6);
+ MOZ_RELEASE_ASSERT(mayCValue2);
+ MOZ_RELEASE_ASSERT(mayCValue2.isSome());
+ MOZ_RELEASE_ASSERT(*mayCValue2 == BasicValue(6));
+
+ // Check that accessors work through rvalue-references.
+ MOZ_RELEASE_ASSERT(Some(BasicValue()));
+ MOZ_RELEASE_ASSERT(Some(BasicValue()).isSome());
+ MOZ_RELEASE_ASSERT(!Some(BasicValue()).isNothing());
+ MOZ_RELEASE_ASSERT(*Some(BasicValue()) == BasicValue());
+ static_assert(std::is_same_v<BasicValue&&, decltype(*Some(BasicValue()))>,
+ "operator*() should return a BasicValue&&");
+ MOZ_RELEASE_ASSERT(Some(BasicValue()).value() == BasicValue());
+ static_assert(
+ std::is_same_v<BasicValue, decltype(Some(BasicValue()).value())>,
+ "value() should return a BasicValue");
+ MOZ_RELEASE_ASSERT(Some(BasicValue()).ref() == BasicValue());
+ static_assert(
+ std::is_same_v<BasicValue&&, decltype(Some(BasicValue()).ref())>,
+ "ref() should return a BasicValue&&");
+ MOZ_RELEASE_ASSERT(Some(BasicValue()).ptr() != nullptr);
+ static_assert(std::is_same_v<BasicValue*, decltype(Some(BasicValue()).ptr())>,
+ "ptr() should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(Some(BasicValue())->GetStatus() == eWasMoveConstructed);
+
+ // Check that accessors work through const-rvalue-references.
+ auto MakeConstMaybe = []() -> const Maybe<BasicValue> {
+ return Some(BasicValue());
+ };
+ MOZ_RELEASE_ASSERT(MakeConstMaybe());
+ MOZ_RELEASE_ASSERT(MakeConstMaybe().isSome());
+ MOZ_RELEASE_ASSERT(!MakeConstMaybe().isNothing());
+ MOZ_RELEASE_ASSERT(*MakeConstMaybe() == BasicValue());
+ static_assert(std::is_same_v<const BasicValue&&, decltype(*MakeConstMaybe())>,
+ "operator*() should return a const BasicValue&&");
+ MOZ_RELEASE_ASSERT(MakeConstMaybe().value() == BasicValue());
+ static_assert(std::is_same_v<BasicValue, decltype(MakeConstMaybe().value())>,
+ "value() should return a BasicValue");
+ MOZ_RELEASE_ASSERT(MakeConstMaybe().ref() == BasicValue());
+ static_assert(
+ std::is_same_v<const BasicValue&&, decltype(MakeConstMaybe().ref())>,
+ "ref() should return a const BasicValue&&");
+ MOZ_RELEASE_ASSERT(MakeConstMaybe().ptr() != nullptr);
+ static_assert(
+ std::is_same_v<const BasicValue*, decltype(MakeConstMaybe().ptr())>,
+ "ptr() should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(MakeConstMaybe()->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(BasicValue(*MakeConstMaybe()).GetStatus() ==
+ eWasConstMoveConstructed);
+
+  // Check that take() works.
+ mayValue = Some(BasicValue(6));
+ Maybe taken = mayValue.take();
+ MOZ_RELEASE_ASSERT(taken->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(taken == Some(BasicValue(6)));
+ MOZ_RELEASE_ASSERT(!mayValue.isSome());
+ MOZ_RELEASE_ASSERT(mayValue.take() == Nothing());
+
+  // Check that extract() works.
+ mayValue = Some(BasicValue(7));
+ BasicValue extracted = mayValue.extract();
+ MOZ_RELEASE_ASSERT(extracted.GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(extracted == BasicValue(7));
+ MOZ_RELEASE_ASSERT(!mayValue.isSome());
+
+ return true;
+}
+
+template <typename T>
+static void TestCopyMaybe() {
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ Maybe<T> dstCopyConstructed = src;
+
+ MOZ_RELEASE_ASSERT(2 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstCopyConstructed->GetStatus() == eWasCopyConstructed);
+ }
+
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ Maybe<T> dstCopyAssigned;
+ dstCopyAssigned = src;
+
+ MOZ_RELEASE_ASSERT(2 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstCopyAssigned->GetStatus() == eWasCopyConstructed);
+ }
+}
+
+template <typename T>
+static void TestMoveMaybe() {
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ Maybe<T> dstMoveConstructed = std::move(src);
+
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstMoveConstructed->GetStatus() == eWasMoveConstructed);
+ }
+
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ Maybe<T> dstMoveAssigned;
+ dstMoveAssigned = std::move(src);
+
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstMoveAssigned->GetStatus() == eWasMoveConstructed);
+ }
+
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ Maybe<T> dstMoveConstructed = src.take();
+
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstMoveConstructed->GetStatus() == eWasMoveConstructed);
+ }
+
+ {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ Maybe<T> src = Some(T());
+ T dstMoveConstructed = src.extract();
+
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(dstMoveConstructed.GetStatus() == eWasMoveConstructed);
+ }
+}
+
+static bool TestCopyAndMove() {
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ {
+ // Check that we get moves when possible for types that can support both
+ // moves and copies.
+ {
+ Maybe<BasicValue> mayBasicValue = Some(BasicValue(1));
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 1);
+ mayBasicValue = Some(BasicValue(2));
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveAssigned);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 2);
+ mayBasicValue.reset();
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+ mayBasicValue.emplace(BasicValue(3));
+ MOZ_RELEASE_ASSERT(1 == sUndestroyedObjects);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 3);
+
+ // Check that we get copies when moves aren't possible.
+ Maybe<BasicValue> mayBasicValue2 = Some(*mayBasicValue);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 3);
+ mayBasicValue->SetTag(4);
+ mayBasicValue2 = mayBasicValue;
+ // This test should work again when we fix bug 1052940.
+ // MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyAssigned);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 4);
+ mayBasicValue->SetTag(5);
+ mayBasicValue2.reset();
+ mayBasicValue2.emplace(*mayBasicValue);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 5);
+
+ // Check that std::move() works. (Another sanity check for move support.)
+ Maybe<BasicValue> mayBasicValue3 = Some(std::move(*mayBasicValue));
+ MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue3->GetTag() == 5);
+ MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMovedFrom);
+ mayBasicValue2->SetTag(6);
+ mayBasicValue3 = Some(std::move(*mayBasicValue2));
+ MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMoveAssigned);
+ MOZ_RELEASE_ASSERT(mayBasicValue3->GetTag() == 6);
+ MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasMovedFrom);
+ Maybe<BasicValue> mayBasicValue4;
+ mayBasicValue4.emplace(std::move(*mayBasicValue3));
+ MOZ_RELEASE_ASSERT(mayBasicValue4->GetStatus() == eWasMoveConstructed);
+ MOZ_RELEASE_ASSERT(mayBasicValue4->GetTag() == 6);
+ MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMovedFrom);
+ }
+
+ TestCopyMaybe<BasicValue>();
+ TestMoveMaybe<BasicValue>();
+ }
+
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ {
+ // Check that we always get copies for types that don't support moves.
+ {
+ Maybe<UnmovableValue> mayUnmovableValue = Some(UnmovableValue());
+ MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyConstructed);
+ mayUnmovableValue = Some(UnmovableValue());
+ MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyAssigned);
+ mayUnmovableValue.reset();
+ mayUnmovableValue.emplace(UnmovableValue());
+ MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyConstructed);
+ }
+
+ TestCopyMaybe<UnmovableValue>();
+
+ static_assert(std::is_copy_constructible_v<Maybe<UnmovableValue>>);
+ static_assert(std::is_copy_assignable_v<Maybe<UnmovableValue>>);
+ // XXX Why do these static_asserts not hold?
+ // static_assert(!std::is_move_constructible_v<Maybe<UnmovableValue>>);
+ // static_assert(!std::is_move_assignable_v<Maybe<UnmovableValue>>);
+ }
+
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+ {
+ // Check that types that only support moves, but not copies, work.
+ {
+ Maybe<UncopyableValue> mayUncopyableValue = Some(UncopyableValue());
+ MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() ==
+ eWasMoveConstructed);
+ mayUncopyableValue = Some(UncopyableValue());
+ MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() == eWasMoveAssigned);
+ mayUncopyableValue.reset();
+ mayUncopyableValue.emplace(UncopyableValue());
+ MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() ==
+ eWasMoveConstructed);
+ mayUncopyableValue = Nothing();
+ }
+
+    TestMoveMaybe<UncopyableValue>();
+
+ static_assert(!std::is_copy_constructible_v<Maybe<UncopyableValue>>);
+ static_assert(!std::is_copy_assignable_v<Maybe<UncopyableValue>>);
+ static_assert(std::is_move_constructible_v<Maybe<UncopyableValue>>);
+ static_assert(std::is_move_assignable_v<Maybe<UncopyableValue>>);
+ }
+
+ MOZ_RELEASE_ASSERT(0 == sUndestroyedObjects);
+
+  {  // Check that types that support neither moves nor copies work.
+ {
+ const auto mayUncopyableUnmovableValueConstructed =
+ Maybe<UncopyableUnmovableValue>{std::in_place};
+ MOZ_RELEASE_ASSERT(mayUncopyableUnmovableValueConstructed->GetStatus() ==
+ eWasDefaultConstructed);
+ }
+
+ Maybe<UncopyableUnmovableValue> mayUncopyableUnmovableValue;
+ mayUncopyableUnmovableValue.emplace();
+ MOZ_RELEASE_ASSERT(mayUncopyableUnmovableValue->GetStatus() ==
+ eWasDefaultConstructed);
+ mayUncopyableUnmovableValue.reset();
+ mayUncopyableUnmovableValue.emplace(0);
+ MOZ_RELEASE_ASSERT(mayUncopyableUnmovableValue->GetStatus() ==
+ eWasConstructed);
+ mayUncopyableUnmovableValue = Nothing();
+
+ static_assert(
+ !std::is_copy_constructible_v<Maybe<UncopyableUnmovableValue>>);
+ static_assert(!std::is_copy_assignable_v<Maybe<UncopyableUnmovableValue>>);
+ static_assert(
+ !std::is_move_constructible_v<Maybe<UncopyableUnmovableValue>>);
+ static_assert(!std::is_move_assignable_v<Maybe<UncopyableUnmovableValue>>);
+ }
+
+ {
+ // Test copy and move with a trivially copyable and trivially destructible
+ // type.
+ {
+ constexpr Maybe<int> src = Some(42);
+ constexpr Maybe<int> dstCopyConstructed = src;
+
+ static_assert(src.isSome());
+ static_assert(dstCopyConstructed.isSome());
+ static_assert(42 == *src);
+ static_assert(42 == *dstCopyConstructed);
+ static_assert(42 == dstCopyConstructed.value());
+ }
+
+ {
+ const Maybe<int> src = Some(42);
+ Maybe<int> dstCopyAssigned;
+ dstCopyAssigned = src;
+
+ MOZ_RELEASE_ASSERT(src.isSome());
+ MOZ_RELEASE_ASSERT(dstCopyAssigned.isSome());
+ MOZ_RELEASE_ASSERT(42 == *src);
+ MOZ_RELEASE_ASSERT(42 == *dstCopyAssigned);
+ }
+
+ {
+ Maybe<int> src = Some(42);
+ const Maybe<int> dstMoveConstructed = std::move(src);
+
+ MOZ_RELEASE_ASSERT(!src.isSome());
+ MOZ_RELEASE_ASSERT(dstMoveConstructed.isSome());
+ MOZ_RELEASE_ASSERT(42 == *dstMoveConstructed);
+ }
+
+ {
+ Maybe<int> src = Some(42);
+ Maybe<int> dstMoveAssigned;
+ dstMoveAssigned = std::move(src);
+
+ MOZ_RELEASE_ASSERT(!src.isSome());
+ MOZ_RELEASE_ASSERT(dstMoveAssigned.isSome());
+ MOZ_RELEASE_ASSERT(42 == *dstMoveAssigned);
+ }
+ }
+
+ return true;
+}
+
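+// Helpers for TestFunctionalAccessors: fallback producers that return a
+// BasicValue by value, by reference, and by pointer.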
+static BasicValue* sStaticBasicValue = nullptr;
+
+static BasicValue MakeBasicValue() { return BasicValue(9); }
+
+static BasicValue& MakeBasicValueRef() { return *sStaticBasicValue; }
+
+static BasicValue* MakeBasicValuePtr() { return sStaticBasicValue; }
+
+static bool TestFunctionalAccessors() {
+ BasicValue value(9);
+ sStaticBasicValue = new BasicValue(9);
+
+ // Check that the 'some' case of functional accessors works.
+ Maybe<BasicValue> someValue = Some(BasicValue(3));
+ MOZ_RELEASE_ASSERT(someValue.valueOr(value) == BasicValue(3));
+ static_assert(std::is_same_v<BasicValue, decltype(someValue.valueOr(value))>,
+ "valueOr should return a BasicValue");
+ MOZ_RELEASE_ASSERT(someValue.valueOrFrom(&MakeBasicValue) == BasicValue(3));
+ static_assert(
+ std::is_same_v<BasicValue,
+ decltype(someValue.valueOrFrom(&MakeBasicValue))>,
+ "valueOrFrom should return a BasicValue");
+ MOZ_RELEASE_ASSERT(someValue.ptrOr(&value) != &value);
+ static_assert(std::is_same_v<BasicValue*, decltype(someValue.ptrOr(&value))>,
+ "ptrOr should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(*someValue.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(3));
+ static_assert(
+ std::is_same_v<BasicValue*,
+ decltype(someValue.ptrOrFrom(&MakeBasicValuePtr))>,
+ "ptrOrFrom should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(someValue.refOr(value) == BasicValue(3));
+ static_assert(std::is_same_v<BasicValue&, decltype(someValue.refOr(value))>,
+ "refOr should return a BasicValue&");
+ MOZ_RELEASE_ASSERT(someValue.refOrFrom(&MakeBasicValueRef) == BasicValue(3));
+ static_assert(
+ std::is_same_v<BasicValue&,
+ decltype(someValue.refOrFrom(&MakeBasicValueRef))>,
+ "refOrFrom should return a BasicValue&");
+
+ // Check that the 'some' case works through a const reference.
+ const Maybe<BasicValue>& someValueCRef = someValue;
+ MOZ_RELEASE_ASSERT(someValueCRef.valueOr(value) == BasicValue(3));
+ static_assert(
+ std::is_same_v<BasicValue, decltype(someValueCRef.valueOr(value))>,
+ "valueOr should return a BasicValue");
+ MOZ_RELEASE_ASSERT(someValueCRef.valueOrFrom(&MakeBasicValue) ==
+ BasicValue(3));
+ static_assert(
+ std::is_same_v<BasicValue,
+ decltype(someValueCRef.valueOrFrom(&MakeBasicValue))>,
+ "valueOrFrom should return a BasicValue");
+ MOZ_RELEASE_ASSERT(someValueCRef.ptrOr(&value) != &value);
+ static_assert(
+ std::is_same_v<const BasicValue*, decltype(someValueCRef.ptrOr(&value))>,
+ "ptrOr should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(*someValueCRef.ptrOrFrom(&MakeBasicValuePtr) ==
+ BasicValue(3));
+ static_assert(
+ std::is_same_v<const BasicValue*,
+ decltype(someValueCRef.ptrOrFrom(&MakeBasicValuePtr))>,
+ "ptrOrFrom should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(someValueCRef.refOr(value) == BasicValue(3));
+ static_assert(
+ std::is_same_v<const BasicValue&, decltype(someValueCRef.refOr(value))>,
+ "refOr should return a const BasicValue&");
+ MOZ_RELEASE_ASSERT(someValueCRef.refOrFrom(&MakeBasicValueRef) ==
+ BasicValue(3));
+ static_assert(
+ std::is_same_v<const BasicValue&,
+ decltype(someValueCRef.refOrFrom(&MakeBasicValueRef))>,
+ "refOrFrom should return a const BasicValue&");
+
+ // Check that the 'none' case of functional accessors works.
+ Maybe<BasicValue> noneValue;
+ MOZ_RELEASE_ASSERT(noneValue.valueOr(value) == BasicValue(9));
+ static_assert(std::is_same_v<BasicValue, decltype(noneValue.valueOr(value))>,
+ "valueOr should return a BasicValue");
+ MOZ_RELEASE_ASSERT(noneValue.valueOrFrom(&MakeBasicValue) == BasicValue(9));
+ static_assert(
+ std::is_same_v<BasicValue,
+ decltype(noneValue.valueOrFrom(&MakeBasicValue))>,
+ "valueOrFrom should return a BasicValue");
+ MOZ_RELEASE_ASSERT(noneValue.ptrOr(&value) == &value);
+ static_assert(std::is_same_v<BasicValue*, decltype(noneValue.ptrOr(&value))>,
+ "ptrOr should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(*noneValue.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(9));
+ static_assert(
+ std::is_same_v<BasicValue*,
+ decltype(noneValue.ptrOrFrom(&MakeBasicValuePtr))>,
+ "ptrOrFrom should return a BasicValue*");
+ MOZ_RELEASE_ASSERT(noneValue.refOr(value) == BasicValue(9));
+ static_assert(std::is_same_v<BasicValue&, decltype(noneValue.refOr(value))>,
+ "refOr should return a BasicValue&");
+ MOZ_RELEASE_ASSERT(noneValue.refOrFrom(&MakeBasicValueRef) == BasicValue(9));
+ static_assert(
+ std::is_same_v<BasicValue&,
+ decltype(noneValue.refOrFrom(&MakeBasicValueRef))>,
+ "refOrFrom should return a BasicValue&");
+
+ // Check that the 'none' case works through a const reference.
+ const Maybe<BasicValue>& noneValueCRef = noneValue;
+ MOZ_RELEASE_ASSERT(noneValueCRef.valueOr(value) == BasicValue(9));
+ static_assert(
+ std::is_same_v<BasicValue, decltype(noneValueCRef.valueOr(value))>,
+ "valueOr should return a BasicValue");
+ MOZ_RELEASE_ASSERT(noneValueCRef.valueOrFrom(&MakeBasicValue) ==
+ BasicValue(9));
+ static_assert(
+ std::is_same_v<BasicValue,
+ decltype(noneValueCRef.valueOrFrom(&MakeBasicValue))>,
+ "valueOrFrom should return a BasicValue");
+ MOZ_RELEASE_ASSERT(noneValueCRef.ptrOr(&value) == &value);
+ static_assert(
+ std::is_same_v<const BasicValue*, decltype(noneValueCRef.ptrOr(&value))>,
+ "ptrOr should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(*noneValueCRef.ptrOrFrom(&MakeBasicValuePtr) ==
+ BasicValue(9));
+ static_assert(
+ std::is_same_v<const BasicValue*,
+ decltype(noneValueCRef.ptrOrFrom(&MakeBasicValuePtr))>,
+ "ptrOrFrom should return a const BasicValue*");
+ MOZ_RELEASE_ASSERT(noneValueCRef.refOr(value) == BasicValue(9));
+ static_assert(
+ std::is_same_v<const BasicValue&, decltype(noneValueCRef.refOr(value))>,
+ "refOr should return a const BasicValue&");
+ MOZ_RELEASE_ASSERT(noneValueCRef.refOrFrom(&MakeBasicValueRef) ==
+ BasicValue(9));
+ static_assert(
+ std::is_same_v<const BasicValue&,
+ decltype(noneValueCRef.refOrFrom(&MakeBasicValueRef))>,
+ "refOrFrom should return a const BasicValue&");
+
+ // Clean up so the undestroyed objects count stays accurate.
+ delete sStaticBasicValue;
+ sStaticBasicValue = nullptr;
+
+ return true;
+}
+
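+// Helpers for TestApply: IncrementTag and AccessValue record through
+// gFunctionWasApplied whether Maybe::apply() actually invoked them.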
+static bool gFunctionWasApplied = false;
+
+static void IncrementTag(BasicValue& aValue) {
+ gFunctionWasApplied = true;
+ aValue.SetTag(aValue.GetTag() + 1);
+}
+
+static void AccessValue(const BasicValue&) { gFunctionWasApplied = true; }
+
+struct IncrementTagFunctor {
+ IncrementTagFunctor() : mBy(1) {}
+
+ void operator()(BasicValue& aValue) {
+ aValue.SetTag(aValue.GetTag() + mBy.GetTag());
+ }
+
+ BasicValue mBy;
+};
+
+static bool TestApply() {
+ // Check that apply handles the 'Nothing' case.
+ gFunctionWasApplied = false;
+ Maybe<BasicValue> mayValue;
+ mayValue.apply(&IncrementTag);
+ mayValue.apply(&AccessValue);
+ MOZ_RELEASE_ASSERT(!gFunctionWasApplied);
+
+ // Check that apply handles the 'Some' case.
+ mayValue = Some(BasicValue(1));
+ mayValue.apply(&IncrementTag);
+ MOZ_RELEASE_ASSERT(gFunctionWasApplied);
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 2);
+ gFunctionWasApplied = false;
+ mayValue.apply(&AccessValue);
+ MOZ_RELEASE_ASSERT(gFunctionWasApplied);
+
+ // Check that apply works with a const reference.
+ const Maybe<BasicValue>& mayValueCRef = mayValue;
+ gFunctionWasApplied = false;
+ mayValueCRef.apply(&AccessValue);
+ MOZ_RELEASE_ASSERT(gFunctionWasApplied);
+
+ // Check that apply works with functors.
+ IncrementTagFunctor tagIncrementer;
+ MOZ_RELEASE_ASSERT(tagIncrementer.mBy.GetStatus() == eWasConstructed);
+ mayValue = Some(BasicValue(1));
+ mayValue.apply(tagIncrementer);
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 2);
+ MOZ_RELEASE_ASSERT(tagIncrementer.mBy.GetStatus() == eWasConstructed);
+
+ // Check that apply works with lambda expressions.
+ int32_t two = 2;
+ gFunctionWasApplied = false;
+ mayValue = Some(BasicValue(2));
+ mayValue.apply([&](BasicValue& aVal) { aVal.SetTag(aVal.GetTag() * two); });
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 4);
+ mayValue.apply([=](BasicValue& aVal) { aVal.SetTag(aVal.GetTag() * two); });
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 8);
+ mayValueCRef.apply(
+ [&](const BasicValue& aVal) { gFunctionWasApplied = true; });
+ MOZ_RELEASE_ASSERT(gFunctionWasApplied == true);
+
+ return true;
+}
+
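+// Helpers for TestMap: free functions and a stateful functor for map() to
+// invoke.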
+static int TimesTwo(const BasicValue& aValue) { return aValue.GetTag() * 2; }
+
+static int TimesTwoAndResetOriginal(BasicValue& aValue) {
+ int tag = aValue.GetTag();
+ aValue.SetTag(1);
+ return tag * 2;
+}
+
+struct MultiplyTagFunctor {
+ MultiplyTagFunctor() : mBy(2) {}
+
+ int operator()(BasicValue& aValue) { return aValue.GetTag() * mBy.GetTag(); }
+
+ BasicValue mBy;
+};
+
+static bool TestMap() {
+ // Check that map handles the 'Nothing' case.
+ Maybe<BasicValue> mayValue;
+ MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Nothing());
+ static_assert(std::is_same_v<Maybe<int>, decltype(mayValue.map(&TimesTwo))>,
+ "map(TimesTwo) should return a Maybe<int>");
+ MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Nothing());
+
+ // Check that map handles the 'Some' case.
+ mayValue = Some(BasicValue(2));
+ MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Some(4));
+ MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Some(4));
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1);
+ mayValue = Some(BasicValue(2));
+
+ // Check that map works with a const reference.
+ mayValue->SetTag(2);
+ const Maybe<BasicValue>& mayValueCRef = mayValue;
+ MOZ_RELEASE_ASSERT(mayValueCRef.map(&TimesTwo) == Some(4));
+ static_assert(
+ std::is_same_v<Maybe<int>, decltype(mayValueCRef.map(&TimesTwo))>,
+ "map(TimesTwo) should return a Maybe<int>");
+
+ // Check that map works with functors.
+ MultiplyTagFunctor tagMultiplier;
+ MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed);
+ MOZ_RELEASE_ASSERT(mayValue.map(tagMultiplier) == Some(4));
+ MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed);
+
+ // Check that map works with lambda expressions.
+ int two = 2;
+ mayValue = Some(BasicValue(2));
+ Maybe<int> mappedValue =
+ mayValue.map([&](const BasicValue& aVal) { return aVal.GetTag() * two; });
+ MOZ_RELEASE_ASSERT(mappedValue == Some(4));
+ mappedValue =
+ mayValue.map([=](const BasicValue& aVal) { return aVal.GetTag() * two; });
+ MOZ_RELEASE_ASSERT(mappedValue == Some(4));
+ mappedValue = mayValueCRef.map(
+ [&](const BasicValue& aVal) { return aVal.GetTag() * two; });
+ MOZ_RELEASE_ASSERT(mappedValue == Some(4));
+
+ // Check that function object qualifiers are preserved when invoked.
+ struct F {
+ std::integral_constant<int, 1> operator()(int) & { return {}; }
+ std::integral_constant<int, 2> operator()(int) const& { return {}; }
+ std::integral_constant<int, 3> operator()(int) && { return {}; }
+ std::integral_constant<int, 4> operator()(int) const&& { return {}; }
+ };
+ Maybe<int> mi = Some(0);
+ const Maybe<int> cmi = Some(0);
+ F f;
+ static_assert(std::is_same<decltype(mi.map(f)),
+ Maybe<std::integral_constant<int, 1>>>::value,
+ "Maybe.map(&)");
+ MOZ_RELEASE_ASSERT(mi.map(f).value()() == 1);
+ static_assert(std::is_same<decltype(cmi.map(f)),
+ Maybe<std::integral_constant<int, 1>>>::value,
+ "const Maybe.map(&)");
+ MOZ_RELEASE_ASSERT(cmi.map(f).value()() == 1);
+ const F cf;
+ static_assert(std::is_same<decltype(mi.map(cf)),
+ Maybe<std::integral_constant<int, 2>>>::value,
+ "Maybe.map(const &)");
+ MOZ_RELEASE_ASSERT(mi.map(cf).value() == 2);
+ static_assert(std::is_same<decltype(cmi.map(cf)),
+ Maybe<std::integral_constant<int, 2>>>::value,
+ "const Maybe.map(const &)");
+ MOZ_RELEASE_ASSERT(cmi.map(cf).value() == 2);
+ static_assert(std::is_same<decltype(mi.map(F{})),
+ Maybe<std::integral_constant<int, 3>>>::value,
+ "Maybe.map(&&)");
+ MOZ_RELEASE_ASSERT(mi.map(F{}).value() == 3);
+ static_assert(std::is_same<decltype(cmi.map(F{})),
+ Maybe<std::integral_constant<int, 3>>>::value,
+ "const Maybe.map(&&)");
+ MOZ_RELEASE_ASSERT(cmi.map(F{}).value() == 3);
+ using CF = const F;
+ static_assert(std::is_same<decltype(mi.map(CF{})),
+ Maybe<std::integral_constant<int, 4>>>::value,
+ "Maybe.map(const &&)");
+ MOZ_RELEASE_ASSERT(mi.map(CF{}).value() == 4);
+ static_assert(std::is_same<decltype(cmi.map(CF{})),
+ Maybe<std::integral_constant<int, 4>>>::value,
+ "const Maybe.map(const &&)");
+ MOZ_RELEASE_ASSERT(cmi.map(CF{}).value() == 4);
+
+ return true;
+}
+
+static bool TestToMaybe() {
+ BasicValue value(1);
+ BasicValue* nullPointer = nullptr;
+
+ // Check that a non-null pointer translates into a Some value.
+ Maybe<BasicValue> mayValue = ToMaybe(&value);
+ static_assert(std::is_same_v<Maybe<BasicValue>, decltype(ToMaybe(&value))>,
+ "ToMaybe should return a Maybe<BasicValue>");
+ MOZ_RELEASE_ASSERT(mayValue.isSome());
+ MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1);
+ MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasCopyConstructed);
+ MOZ_RELEASE_ASSERT(value.GetStatus() != eWasMovedFrom);
+
+ // Check that a null pointer translates into a Nothing value.
+ mayValue = ToMaybe(nullPointer);
+ static_assert(
+ std::is_same_v<Maybe<BasicValue>, decltype(ToMaybe(nullPointer))>,
+ "ToMaybe should return a Maybe<BasicValue>");
+ MOZ_RELEASE_ASSERT(mayValue.isNothing());
+
+ return true;
+}
+
+static bool TestComparisonOperators() {
+ Maybe<BasicValue> nothingValue = Nothing();
+ Maybe<BasicValue> anotherNothingValue = Nothing();
+ Maybe<BasicValue> oneValue = Some(BasicValue(1));
+ Maybe<BasicValue> anotherOneValue = Some(BasicValue(1));
+ Maybe<BasicValue> twoValue = Some(BasicValue(2));
+
+ // Check equality.
+ MOZ_RELEASE_ASSERT(nothingValue == anotherNothingValue);
+ MOZ_RELEASE_ASSERT(oneValue == anotherOneValue);
+
+ // Check inequality.
+ MOZ_RELEASE_ASSERT(nothingValue != oneValue);
+ MOZ_RELEASE_ASSERT(oneValue != nothingValue);
+ MOZ_RELEASE_ASSERT(oneValue != twoValue);
+
+ // Check '<'.
+ MOZ_RELEASE_ASSERT(nothingValue < oneValue);
+ MOZ_RELEASE_ASSERT(oneValue < twoValue);
+
+ // Check '<='.
+ MOZ_RELEASE_ASSERT(nothingValue <= anotherNothingValue);
+ MOZ_RELEASE_ASSERT(nothingValue <= oneValue);
+ MOZ_RELEASE_ASSERT(oneValue <= oneValue);
+ MOZ_RELEASE_ASSERT(oneValue <= twoValue);
+
+ // Check '>'.
+ MOZ_RELEASE_ASSERT(oneValue > nothingValue);
+ MOZ_RELEASE_ASSERT(twoValue > oneValue);
+
+ // Check '>='.
+ MOZ_RELEASE_ASSERT(nothingValue >= anotherNothingValue);
+ MOZ_RELEASE_ASSERT(oneValue >= nothingValue);
+ MOZ_RELEASE_ASSERT(oneValue >= oneValue);
+ MOZ_RELEASE_ASSERT(twoValue >= oneValue);
+
+ return true;
+}
+
+// Check that Maybe<> can wrap a superclass that happens to also be a concrete
+// class (i.e. that the compiler doesn't warn when we invoke the superclass's
+// destructor explicitly in |reset()|).
+class MySuperClass {
+ virtual void VirtualMethod() { /* do nothing */
+ }
+};
+
+class MyDerivedClass : public MySuperClass {
+ void VirtualMethod() override { /* do nothing */
+ }
+};
+
+static bool TestVirtualFunction() {
+ Maybe<MySuperClass> super;
+ super.emplace();
+ super.reset();
+
+ Maybe<MyDerivedClass> derived;
+ derived.emplace();
+ derived.reset();
+
+ // If this compiles successfully, we've passed.
+ return true;
+}
+
+static Maybe<int*> ReturnSomeNullptr() { return Some(nullptr); }
+
+struct D {
+ explicit D(const Maybe<int*>&) {}
+};
+
+static bool TestSomeNullptrConversion() {
+ Maybe<int*> m1 = Some(nullptr);
+ MOZ_RELEASE_ASSERT(m1.isSome());
+ MOZ_RELEASE_ASSERT(m1);
+ MOZ_RELEASE_ASSERT(!*m1);
+
+ auto m2 = ReturnSomeNullptr();
+ MOZ_RELEASE_ASSERT(m2.isSome());
+ MOZ_RELEASE_ASSERT(m2);
+ MOZ_RELEASE_ASSERT(!*m2);
+
+ Maybe<decltype(nullptr)> m3 = Some(nullptr);
+ MOZ_RELEASE_ASSERT(m3.isSome());
+ MOZ_RELEASE_ASSERT(m3);
+ MOZ_RELEASE_ASSERT(*m3 == nullptr);
+
+ D d(Some(nullptr));
+
+ return true;
+}
+
+struct Base {};
+struct Derived : Base {};
+
+static Maybe<Base*> ReturnDerivedPointer() {
+ Derived* d = nullptr;
+ return Some(d);
+}
+
+struct ExplicitConstructorBasePointer {
+ explicit ExplicitConstructorBasePointer(const Maybe<Base*>&) {}
+};
+
+static bool TestSomePointerConversion() {
+ Base base;
+ Derived derived;
+
+ Maybe<Base*> m1 = Some(&derived);
+ MOZ_RELEASE_ASSERT(m1.isSome());
+ MOZ_RELEASE_ASSERT(m1);
+ MOZ_RELEASE_ASSERT(*m1 == &derived);
+
+ auto m2 = ReturnDerivedPointer();
+ MOZ_RELEASE_ASSERT(m2.isSome());
+ MOZ_RELEASE_ASSERT(m2);
+ MOZ_RELEASE_ASSERT(*m2 == nullptr);
+
+ Maybe<Base*> m3 = Some(&base);
+ MOZ_RELEASE_ASSERT(m3.isSome());
+ MOZ_RELEASE_ASSERT(m3);
+ MOZ_RELEASE_ASSERT(*m3 == &base);
+
+ auto s1 = Some(&derived);
+ Maybe<Base*> c1(s1);
+ MOZ_RELEASE_ASSERT(c1.isSome());
+ MOZ_RELEASE_ASSERT(c1);
+ MOZ_RELEASE_ASSERT(*c1 == &derived);
+
+ ExplicitConstructorBasePointer ecbp(Some(&derived));
+
+ return true;
+}
+
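+// Conversion fixtures for TestTypeConversion: SourceType1 converts through
+// DestType's converting constructors and assignment operators, SourceType2
+// through its own conversion operators; DestType's mStatus records which
+// path was taken.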
+struct SourceType1 {
+ int mTag;
+
+ operator int() const { return mTag; }
+};
+struct DestType {
+ int mTag;
+ Status mStatus;
+
+ DestType() : mTag(0), mStatus(eWasDefaultConstructed) {}
+
+ MOZ_IMPLICIT DestType(int aTag) : mTag(aTag), mStatus(eWasConstructed) {}
+
+ MOZ_IMPLICIT DestType(SourceType1&& aSrc)
+ : mTag(aSrc.mTag), mStatus(eWasMoveConstructed) {}
+
+ MOZ_IMPLICIT DestType(const SourceType1& aSrc)
+ : mTag(aSrc.mTag), mStatus(eWasCopyConstructed) {}
+
+ DestType& operator=(int aTag) {
+ mTag = aTag;
+ mStatus = eWasAssigned;
+ return *this;
+ }
+
+ DestType& operator=(SourceType1&& aSrc) {
+ mTag = aSrc.mTag;
+ mStatus = eWasMoveAssigned;
+ return *this;
+ }
+
+ DestType& operator=(const SourceType1& aSrc) {
+ mTag = aSrc.mTag;
+ mStatus = eWasCopyAssigned;
+ return *this;
+ }
+};
+struct SourceType2 {
+ int mTag;
+
+ operator DestType() const& {
+ DestType result;
+ result.mTag = mTag;
+ result.mStatus = eWasCopiedFrom;
+ return result;
+ }
+
+ operator DestType() && {
+ DestType result;
+ result.mTag = mTag;
+ result.mStatus = eWasMovedFrom;
+ return result;
+ }
+};
+
+static bool TestTypeConversion() {
+ {
+ Maybe<SourceType1> src = Some(SourceType1{1});
+ Maybe<DestType> dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasCopyConstructed);
+
+ src = Some(SourceType1{2});
+ dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasCopyAssigned);
+ }
+
+ {
+ Maybe<SourceType1> src = Some(SourceType1{1});
+ Maybe<DestType> dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasMoveConstructed);
+
+ src = Some(SourceType1{2});
+ dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasMoveAssigned);
+ }
+
+ {
+ Maybe<SourceType2> src = Some(SourceType2{1});
+ Maybe<DestType> dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasCopiedFrom);
+
+ src = Some(SourceType2{2});
+ dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasCopiedFrom);
+ }
+
+ {
+ Maybe<SourceType2> src = Some(SourceType2{1});
+ Maybe<DestType> dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasMovedFrom);
+
+ src = Some(SourceType2{2});
+ dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasMovedFrom);
+ }
+
+ {
+ Maybe<int> src = Some(1);
+ Maybe<DestType> dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && *src == 1);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasConstructed);
+
+ src = Some(2);
+ dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && *src == 2);
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasAssigned);
+ }
+
+ {
+ Maybe<int> src = Some(1);
+ Maybe<DestType> dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasConstructed);
+
+ src = Some(2);
+ dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && dest->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest->mStatus == eWasAssigned);
+ }
+
+ {
+ Maybe<SourceType1> src = Some(SourceType1{1});
+ Maybe<int> dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 1);
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 1);
+
+ src = Some(SourceType1{2});
+ dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && src->mTag == 2);
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 2);
+ }
+
+ {
+ Maybe<SourceType1> src = Some(SourceType1{1});
+ Maybe<int> dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 1);
+
+ src = Some(SourceType1{2});
+ dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 2);
+ }
+
+ {
+ Maybe<size_t> src = Some(1);
+ Maybe<char16_t> dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && *src == 1);
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 1);
+
+ src = Some(2);
+ dest = src;
+ MOZ_RELEASE_ASSERT(src.isSome() && *src == 2);
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 2);
+ }
+
+ {
+ Maybe<size_t> src = Some(1);
+ Maybe<char16_t> dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 1);
+
+ src = Some(2);
+ dest = std::move(src);
+ MOZ_RELEASE_ASSERT(src.isNothing());
+ MOZ_RELEASE_ASSERT(dest.isSome() && *dest == 2);
+ }
+
+ return true;
+}
+
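+// Maybe<T&> stores an optional non-owning reference; SomeRef wraps an
+// existing reference and ToMaybeRef converts a possibly-null pointer.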
+static bool TestReference() {
+ static_assert(std::is_trivially_destructible_v<Maybe<int&>>);
+ static_assert(std::is_trivially_copy_constructible_v<Maybe<int&>>);
+ static_assert(std::is_trivially_copy_assignable_v<Maybe<int&>>);
+
+ static_assert(Maybe<int&>{}.isNothing());
+ static_assert(Maybe<int&>{Nothing{}}.isNothing());
+
+ {
+ Maybe<int&> defaultConstructed;
+
+ MOZ_RELEASE_ASSERT(defaultConstructed.isNothing());
+ MOZ_RELEASE_ASSERT(!defaultConstructed.isSome());
+ MOZ_RELEASE_ASSERT(!defaultConstructed);
+ }
+
+ {
+ Maybe<int&> nothing = Nothing();
+
+ MOZ_RELEASE_ASSERT(nothing.isNothing());
+ MOZ_RELEASE_ASSERT(!nothing.isSome());
+ MOZ_RELEASE_ASSERT(!nothing);
+ }
+
+ {
+ int foo = 42, bar = 42;
+ Maybe<int&> some = SomeRef(foo);
+
+ MOZ_RELEASE_ASSERT(!some.isNothing());
+ MOZ_RELEASE_ASSERT(some.isSome());
+ MOZ_RELEASE_ASSERT(some);
+ MOZ_RELEASE_ASSERT(&some.ref() == &foo);
+
+ MOZ_RELEASE_ASSERT(some.refEquals(foo));
+ MOZ_RELEASE_ASSERT(some.refEquals(SomeRef(foo)));
+ MOZ_RELEASE_ASSERT(!some.refEquals(Nothing()));
+ MOZ_RELEASE_ASSERT(!some.refEquals(bar));
+ MOZ_RELEASE_ASSERT(!some.refEquals(SomeRef(bar)));
+
+ some.ref()++;
+ MOZ_RELEASE_ASSERT(43 == foo);
+
+ (*some)++;
+ MOZ_RELEASE_ASSERT(44 == foo);
+ }
+
+ {
+ int foo = 42, bar = 42;
+ Maybe<int&> some;
+ some.emplace(foo);
+
+ MOZ_RELEASE_ASSERT(!some.isNothing());
+ MOZ_RELEASE_ASSERT(some.isSome());
+ MOZ_RELEASE_ASSERT(some);
+ MOZ_RELEASE_ASSERT(&some.ref() == &foo);
+
+ MOZ_RELEASE_ASSERT(some.refEquals(foo));
+ MOZ_RELEASE_ASSERT(some.refEquals(SomeRef(foo)));
+ MOZ_RELEASE_ASSERT(!some.refEquals(Nothing()));
+ MOZ_RELEASE_ASSERT(!some.refEquals(bar));
+ MOZ_RELEASE_ASSERT(!some.refEquals(SomeRef(bar)));
+
+ some.ref()++;
+ MOZ_RELEASE_ASSERT(43 == foo);
+ }
+
+ {
+ Maybe<int&> defaultConstructed;
+ defaultConstructed.reset();
+
+ MOZ_RELEASE_ASSERT(defaultConstructed.isNothing());
+ MOZ_RELEASE_ASSERT(!defaultConstructed.isSome());
+ MOZ_RELEASE_ASSERT(!defaultConstructed);
+ }
+
+ {
+ int foo = 42;
+ Maybe<int&> some = SomeRef(foo);
+ some.reset();
+
+ MOZ_RELEASE_ASSERT(some.isNothing());
+ MOZ_RELEASE_ASSERT(!some.isSome());
+ MOZ_RELEASE_ASSERT(!some);
+ }
+
+ {
+ int foo = 42;
+ Maybe<int&> some = SomeRef(foo);
+
+ auto& applied = some.apply([](int& ref) { ref++; });
+
+ MOZ_RELEASE_ASSERT(&some == &applied);
+ MOZ_RELEASE_ASSERT(43 == foo);
+ }
+
+ {
+ Maybe<int&> nothing;
+
+ auto& applied = nothing.apply([](int& ref) { ref++; });
+
+ MOZ_RELEASE_ASSERT(&nothing == &applied);
+ }
+
+ {
+ int foo = 42;
+ Maybe<int&> some = SomeRef(foo);
+
+ auto mapped = some.map([](int& ref) { return &ref; });
+ static_assert(std::is_same_v<decltype(mapped), Maybe<int*>>);
+
+ MOZ_RELEASE_ASSERT(&foo == *mapped);
+ }
+
+ {
+ Maybe<int&> nothing;
+
+ auto mapped = nothing.map([](int& ref) { return &ref; });
+
+ MOZ_RELEASE_ASSERT(mapped.isNothing());
+ MOZ_RELEASE_ASSERT(!mapped.isSome());
+ MOZ_RELEASE_ASSERT(!mapped);
+ }
+
+ {
+ int foo = 42;
+ auto someRef = ToMaybeRef(&foo);
+
+ static_assert(std::is_same_v<decltype(someRef), Maybe<int&>>);
+
+ MOZ_RELEASE_ASSERT(someRef.isSome());
+ MOZ_RELEASE_ASSERT(&foo == &someRef.ref());
+ }
+
+ {
+ int* fooPtr = nullptr;
+ auto someRef = ToMaybeRef(fooPtr);
+
+ static_assert(std::is_same_v<decltype(someRef), Maybe<int&>>);
+
+ MOZ_RELEASE_ASSERT(someRef.isNothing());
+ }
+
+ return true;
+}
+
+// These are quasi-implementation details, but we assert them here to prevent
+// backsliding to earlier times when Maybe<T> for smaller T took up more space
+// than T's alignment required.
+
+static_assert(sizeof(Maybe<char>) == 2 * sizeof(char),
+              "Maybe<char> shouldn't bloat at all");
+static_assert(sizeof(Maybe<bool>) <= 2 * sizeof(bool),
+ "Maybe<bool> shouldn't bloat");
+static_assert(sizeof(Maybe<int>) <= 2 * sizeof(int),
+ "Maybe<int> shouldn't bloat");
+static_assert(sizeof(Maybe<long>) <= 2 * sizeof(long),
+ "Maybe<long> shouldn't bloat");
+static_assert(sizeof(Maybe<double>) <= 2 * sizeof(double),
+ "Maybe<double> shouldn't bloat");
+static_assert(sizeof(Maybe<int&>) == sizeof(int*));
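+
+// (A rough note on why these bounds hold, assuming the usual mfbt layout:
+// Maybe<T> stores a T-sized slot plus a bool flag padded out to alignof(T),
+// hence the 2 * sizeof(T) ceiling, while Maybe<T&> stores a single pointer
+// with nullptr meaning Nothing, so it needs no extra flag byte.)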
+
+int main() {
+ RUN_TEST(TestBasicFeatures);
+ RUN_TEST(TestCopyAndMove);
+ RUN_TEST(TestFunctionalAccessors);
+ RUN_TEST(TestApply);
+ RUN_TEST(TestMap);
+ RUN_TEST(TestToMaybe);
+ RUN_TEST(TestComparisonOperators);
+ RUN_TEST(TestVirtualFunction);
+ RUN_TEST(TestSomeNullptrConversion);
+ RUN_TEST(TestSomePointerConversion);
+ RUN_TEST(TestTypeConversion);
+ RUN_TEST(TestReference);
+
+ return 0;
+}
diff --git a/mfbt/tests/TestNonDereferenceable.cpp b/mfbt/tests/TestNonDereferenceable.cpp
new file mode 100644
index 0000000000..2f8f7c1dd1
--- /dev/null
+++ b/mfbt/tests/TestNonDereferenceable.cpp
@@ -0,0 +1,171 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/NonDereferenceable.h"
+
+using mozilla::NonDereferenceable;
+
+#define CHECK MOZ_RELEASE_ASSERT
+
+void TestNonDereferenceableSimple() {
+ // Default construction.
+ NonDereferenceable<int> nd0;
+ CHECK(!nd0);
+ CHECK(!nd0.value());
+
+ int i = 1;
+ int i2 = 2;
+
+ // Construction with pointer.
+ NonDereferenceable<int> nd1(&i);
+ CHECK(!!nd1);
+ CHECK(nd1.value() == reinterpret_cast<uintptr_t>(&i));
+
+ // Assignment with pointer.
+ nd1 = &i2;
+ CHECK(nd1.value() == reinterpret_cast<uintptr_t>(&i2));
+
+ // Copy-construction.
+ NonDereferenceable<int> nd2(nd1);
+ CHECK(nd2.value() == reinterpret_cast<uintptr_t>(&i2));
+
+ // Copy-assignment.
+ nd2 = nd0;
+ CHECK(!nd2.value());
+
+ // Move-construction.
+ NonDereferenceable<int> nd3{NonDereferenceable<int>(&i)};
+ CHECK(nd3.value() == reinterpret_cast<uintptr_t>(&i));
+
+ // Move-assignment.
+ nd3 = std::move(nd1);
+ CHECK(nd3.value() == reinterpret_cast<uintptr_t>(&i2));
+ // Note: Not testing nd1's value because we don't want to assume what state
+ // it is left in after move. But at least it should be reusable:
+ nd1 = &i;
+ CHECK(nd1.value() == reinterpret_cast<uintptr_t>(&i));
+}
+
+void TestNonDereferenceableHierarchy() {
+ struct Base1 {
+ // Member variable, to make sure Base1 is not empty.
+ int x1;
+ };
+ struct Base2 {
+ int x2;
+ };
+ struct Derived : Base1, Base2 {};
+
+ Derived d;
+
+ // Construct NonDereferenceable from raw pointer.
+ NonDereferenceable<Derived> ndd = NonDereferenceable<Derived>(&d);
+ CHECK(ndd);
+ CHECK(ndd.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Cast Derived to Base1.
+ NonDereferenceable<Base1> ndb1 = ndd;
+ CHECK(ndb1);
+ CHECK(ndb1.value() == reinterpret_cast<uintptr_t>(static_cast<Base1*>(&d)));
+
+ // Cast Base1 back to Derived.
+ NonDereferenceable<Derived> nddb1 = ndb1;
+ CHECK(nddb1.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Cast Derived to Base2.
+ NonDereferenceable<Base2> ndb2 = ndd;
+ CHECK(ndb2);
+ CHECK(ndb2.value() == reinterpret_cast<uintptr_t>(static_cast<Base2*>(&d)));
+ // Sanity check that Base2 should be offset from the start of Derived.
+ CHECK(ndb2.value() != ndd.value());
+
+ // Cast Base2 back to Derived.
+ NonDereferenceable<Derived> nddb2 = ndb2;
+ CHECK(nddb2.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Note that it's not possible to jump between bases, as they're not obviously
+ // related, i.e.: `NonDereferenceable<Base2> ndb22 = ndb1;` doesn't compile.
+ // However it's possible to explicitly navigate through the derived object:
+ NonDereferenceable<Base2> ndb22 = NonDereferenceable<Derived>(ndb1);
+ CHECK(ndb22.value() == reinterpret_cast<uintptr_t>(static_cast<Base2*>(&d)));
+
+ // Handling nullptr; should stay nullptr even for offset bases.
+ ndd = nullptr;
+ CHECK(!ndd);
+ CHECK(!ndd.value());
+ ndb1 = ndd;
+ CHECK(!ndb1);
+ CHECK(!ndb1.value());
+ ndb2 = ndd;
+ CHECK(!ndb2);
+ CHECK(!ndb2.value());
+ nddb2 = ndb2;
+ CHECK(!nddb2);
+ CHECK(!nddb2.value());
+}
+
+template <typename T, size_t Index>
+struct CRTPBase {
+ // Convert `this` from `CRTPBase*` to `T*` while construction is still in
+ // progress; normally UBSan -fsanitize=vptr would catch this, but using
+ // NonDereferenceable should keep UBSan happy.
+ CRTPBase() : mDerived(this) {}
+ NonDereferenceable<T> mDerived;
+};
+
+void TestNonDereferenceableCRTP() {
+ struct Derived : CRTPBase<Derived, 1>, CRTPBase<Derived, 2> {};
+ using Base1 = Derived::CRTPBase<Derived, 1>;
+ using Base2 = Derived::CRTPBase<Derived, 2>;
+
+ Derived d;
+ // Verify that base constructors have correctly captured the address of the
+ // (at the time still incomplete) derived object.
+ CHECK(d.Base1::mDerived.value() == reinterpret_cast<uintptr_t>(&d));
+ CHECK(d.Base2::mDerived.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Construct NonDereferenceable from raw pointer.
+ NonDereferenceable<Derived> ndd = NonDereferenceable<Derived>(&d);
+ CHECK(ndd);
+ CHECK(ndd.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Cast Derived to Base1.
+ NonDereferenceable<Base1> ndb1 = ndd;
+ CHECK(ndb1);
+ CHECK(ndb1.value() == reinterpret_cast<uintptr_t>(static_cast<Base1*>(&d)));
+
+ // Cast Base1 back to Derived.
+ NonDereferenceable<Derived> nddb1 = ndb1;
+ CHECK(nddb1.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Cast Derived to Base2.
+ NonDereferenceable<Base2> ndb2 = ndd;
+ CHECK(ndb2);
+ CHECK(ndb2.value() == reinterpret_cast<uintptr_t>(static_cast<Base2*>(&d)));
+ // Sanity check that Base2 should be offset from the start of Derived.
+ CHECK(ndb2.value() != ndd.value());
+
+ // Cast Base2 back to Derived.
+ NonDereferenceable<Derived> nddb2 = ndb2;
+ CHECK(nddb2.value() == reinterpret_cast<uintptr_t>(&d));
+
+ // Note that it's not possible to jump between bases, as they're not obviously
+ // related, i.e.: `NonDereferenceable<Base2> ndb22 = ndb1;` doesn't compile.
+ // However it's possible to explicitly navigate through the derived object:
+ NonDereferenceable<Base2> ndb22 = NonDereferenceable<Derived>(ndb1);
+ CHECK(ndb22.value() == reinterpret_cast<uintptr_t>(static_cast<Base2*>(&d)));
+}
+
+int main() {
+ TestNonDereferenceableSimple();
+ TestNonDereferenceableHierarchy();
+ TestNonDereferenceableCRTP();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestNotNull.cpp b/mfbt/tests/TestNotNull.cpp
new file mode 100644
index 0000000000..b9a62ea4d9
--- /dev/null
+++ b/mfbt/tests/TestNotNull.cpp
@@ -0,0 +1,386 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <type_traits>
+
+#include "mozilla/NotNull.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+
+using mozilla::MakeNotNull;
+using mozilla::NotNull;
+using mozilla::UniquePtr;
+using mozilla::WrapNotNull;
+
+#define CHECK MOZ_RELEASE_ASSERT
+
+class Blah {
+ public:
+ Blah() : mX(0) {}
+  void blah() {}
+ int mX;
+};
+
+// A simple smart pointer that implicitly converts to and from T*.
+template <typename T>
+class MyPtr {
+ T* mRawPtr;
+
+ public:
+ MyPtr() : mRawPtr(nullptr) {}
+ MOZ_IMPLICIT MyPtr(T* aRawPtr) : mRawPtr(aRawPtr) {}
+
+ T* get() const { return mRawPtr; }
+ operator T*() const { return get(); }
+
+ T* operator->() const { return get(); }
+};
+
+// A simple class that works with RefPtr. It keeps track of the maximum
+// refcount value for testing purposes.
+class MyRefType {
+ int mExpectedMaxRefCnt;
+ int mMaxRefCnt;
+ int mRefCnt;
+
+ public:
+ explicit MyRefType(int aExpectedMaxRefCnt)
+ : mExpectedMaxRefCnt(aExpectedMaxRefCnt), mMaxRefCnt(0), mRefCnt(0) {}
+
+ ~MyRefType() { CHECK(mMaxRefCnt == mExpectedMaxRefCnt); }
+
+ uint32_t AddRef() {
+ mRefCnt++;
+ if (mRefCnt > mMaxRefCnt) {
+ mMaxRefCnt = mRefCnt;
+ }
+ return mRefCnt;
+ }
+
+ uint32_t Release() {
+ CHECK(mRefCnt > 0);
+ mRefCnt--;
+ if (mRefCnt == 0) {
+ delete this;
+ return 0;
+ }
+ return mRefCnt;
+ }
+};
+
+void f_i(int* aPtr) {}
+void f_my(MyPtr<int> aPtr) {}
+
+void f_nni(NotNull<int*> aPtr) {}
+void f_nnmy(NotNull<MyPtr<int>> aPtr) {}
+
+void TestNotNullWithMyPtr() {
+ int i4 = 4;
+ int i5 = 5;
+
+ MyPtr<int> my4 = &i4;
+ MyPtr<int> my5 = &i5;
+
+ NotNull<int*> nni4 = WrapNotNull(&i4);
+ NotNull<int*> nni5 = WrapNotNull(&i5);
+ NotNull<MyPtr<int>> nnmy4 = WrapNotNull(my4);
+
+ // WrapNotNull(nullptr); // no wrapping from nullptr
+ // WrapNotNull(0); // no wrapping from zero
+
+ // NotNull<int*> construction combinations
+ // NotNull<int*> nni4a; // no default
+ // NotNull<int*> nni4a(nullptr); // no nullptr
+ // NotNull<int*> nni4a(0); // no zero
+ // NotNull<int*> nni4a(&i4); // no int*
+ // NotNull<int*> nni4a(my4); // no MyPtr<int>
+ NotNull<int*> nni4b(WrapNotNull(&i4)); // WrapNotNull(int*)
+ NotNull<int*> nni4c(WrapNotNull(my4)); // WrapNotNull(MyPtr<int>)
+ NotNull<int*> nni4d(nni4); // NotNull<int*>
+ NotNull<int*> nni4e(nnmy4); // NotNull<MyPtr<int>>
+ CHECK(*nni4b == 4);
+ CHECK(*nni4c == 4);
+ CHECK(*nni4d == 4);
+ CHECK(*nni4e == 4);
+
+ // NotNull<MyPtr<int>> construction combinations
+ // NotNull<MyPtr<int>> nnmy4a; // no default
+ // NotNull<MyPtr<int>> nnmy4a(nullptr); // no nullptr
+ // NotNull<MyPtr<int>> nnmy4a(0); // no zero
+ // NotNull<MyPtr<int>> nnmy4a(&i4); // no int*
+ // NotNull<MyPtr<int>> nnmy4a(my4); // no MyPtr<int>
+ NotNull<MyPtr<int>> nnmy4b(WrapNotNull(&i4)); // WrapNotNull(int*)
+ NotNull<MyPtr<int>> nnmy4c(WrapNotNull(my4)); // WrapNotNull(MyPtr<int>)
+ NotNull<MyPtr<int>> nnmy4d(nni4); // NotNull<int*>
+ NotNull<MyPtr<int>> nnmy4e(nnmy4); // NotNull<MyPtr<int>>
+ CHECK(*nnmy4b == 4);
+ CHECK(*nnmy4c == 4);
+ CHECK(*nnmy4d == 4);
+ CHECK(*nnmy4e == 4);
+
+ // NotNull<int*> assignment combinations
+ // nni4b = nullptr; // no nullptr
+ // nni4b = 0; // no zero
+ // nni4a = &i4; // no int*
+ // nni4a = my4; // no MyPtr<int>
+ nni4b = WrapNotNull(&i4); // WrapNotNull(int*)
+ nni4c = WrapNotNull(my4); // WrapNotNull(MyPtr<int>)
+ nni4d = nni4; // NotNull<int*>
+ nni4e = nnmy4; // NotNull<MyPtr<int>>
+ CHECK(*nni4b == 4);
+ CHECK(*nni4c == 4);
+ CHECK(*nni4d == 4);
+ CHECK(*nni4e == 4);
+
+ // NotNull<MyPtr<int>> assignment combinations
+ // nnmy4a = nullptr; // no nullptr
+ // nnmy4a = 0; // no zero
+ // nnmy4a = &i4; // no int*
+ // nnmy4a = my4; // no MyPtr<int>
+ nnmy4b = WrapNotNull(&i4); // WrapNotNull(int*)
+ nnmy4c = WrapNotNull(my4); // WrapNotNull(MyPtr<int>)
+ nnmy4d = nni4; // NotNull<int*>
+ nnmy4e = nnmy4; // NotNull<MyPtr<int>>
+ CHECK(*nnmy4b == 4);
+ CHECK(*nnmy4c == 4);
+ CHECK(*nnmy4d == 4);
+ CHECK(*nnmy4e == 4);
+
+ NotNull<MyPtr<int>> nnmy5 = WrapNotNull(&i5);
+ CHECK(*nnmy5 == 5);
+ CHECK(nnmy5 == &i5); // NotNull<MyPtr<int>> == int*
+ CHECK(nnmy5 == my5); // NotNull<MyPtr<int>> == MyPtr<int>
+ CHECK(nnmy5 == nni5); // NotNull<MyPtr<int>> == NotNull<int*>
+ CHECK(nnmy5 == nnmy5); // NotNull<MyPtr<int>> == NotNull<MyPtr<int>>
+ CHECK(&i5 == nnmy5); // int* == NotNull<MyPtr<int>>
+ CHECK(my5 == nnmy5); // MyPtr<int> == NotNull<MyPtr<int>>
+ CHECK(nni5 == nnmy5); // NotNull<int*> == NotNull<MyPtr<int>>
+ CHECK(nnmy5 == nnmy5); // NotNull<MyPtr<int>> == NotNull<MyPtr<int>>
+ // CHECK(nni5 == nullptr); // no comparisons with nullptr
+ // CHECK(nullptr == nni5); // no comparisons with nullptr
+ // CHECK(nni5 == 0); // no comparisons with zero
+ // CHECK(0 == nni5); // no comparisons with zero
+
+ CHECK(*nnmy5 == 5);
+ CHECK(nnmy5 != &i4); // NotNull<MyPtr<int>> != int*
+ CHECK(nnmy5 != my4); // NotNull<MyPtr<int>> != MyPtr<int>
+ CHECK(nnmy5 != nni4); // NotNull<MyPtr<int>> != NotNull<int*>
+ CHECK(nnmy5 != nnmy4); // NotNull<MyPtr<int>> != NotNull<MyPtr<int>>
+ CHECK(&i4 != nnmy5); // int* != NotNull<MyPtr<int>>
+ CHECK(my4 != nnmy5); // MyPtr<int> != NotNull<MyPtr<int>>
+ CHECK(nni4 != nnmy5); // NotNull<int*> != NotNull<MyPtr<int>>
+ CHECK(nnmy4 != nnmy5); // NotNull<MyPtr<int>> != NotNull<MyPtr<int>>
+ // CHECK(nni4 != nullptr); // no comparisons with nullptr
+ // CHECK(nullptr != nni4); // no comparisons with nullptr
+ // CHECK(nni4 != 0); // no comparisons with zero
+ // CHECK(0 != nni4); // no comparisons with zero
+
+ // int* parameter
+ f_i(&i4); // identity int* --> int*
+ f_i(my4); // implicit MyPtr<int> --> int*
+ f_i(my4.get()); // explicit MyPtr<int> --> int*
+ f_i(nni4); // implicit NotNull<int*> --> int*
+ f_i(nni4.get()); // explicit NotNull<int*> --> int*
+ // f_i(nnmy4); // no implicit NotNull<MyPtr<int>> --> int*
+ f_i(nnmy4.get()); // explicit NotNull<MyPtr<int>> --> int*
+ f_i(nnmy4.get().get()); // doubly-explicit NotNull<MyPtr<int>> --> int*
+
+ // MyPtr<int> parameter
+ f_my(&i4); // implicit int* --> MyPtr<int>
+ f_my(my4); // identity MyPtr<int> --> MyPtr<int>
+ f_my(my4.get()); // explicit MyPtr<int> --> MyPtr<int>
+ // f_my(nni4); // no implicit NotNull<int*> --> MyPtr<int>
+ f_my(nni4.get()); // explicit NotNull<int*> --> MyPtr<int>
+ f_my(nnmy4); // implicit NotNull<MyPtr<int>> --> MyPtr<int>
+ f_my(nnmy4.get()); // explicit NotNull<MyPtr<int>> --> MyPtr<int>
+ f_my(
+ nnmy4.get().get()); // doubly-explicit NotNull<MyPtr<int>> --> MyPtr<int>
+
+ // NotNull<int*> parameter
+ f_nni(nni4); // identity NotNull<int*> --> NotNull<int*>
+ f_nni(nnmy4); // implicit NotNull<MyPtr<int>> --> NotNull<int*>
+
+ // NotNull<MyPtr<int>> parameter
+ f_nnmy(nni4); // implicit NotNull<int*> --> NotNull<MyPtr<int>>
+ f_nnmy(nnmy4); // identity NotNull<MyPtr<int>> --> NotNull<MyPtr<int>>
+
+ // CHECK(nni4); // disallow boolean conversion / unary expression usage
+ // CHECK(nnmy4); // ditto
+
+ // '->' dereferencing.
+ Blah blah;
+ MyPtr<Blah> myblah = &blah;
+ NotNull<Blah*> nnblah = WrapNotNull(&blah);
+ NotNull<MyPtr<Blah>> nnmyblah = WrapNotNull(myblah);
+  (&blah)->blah();   // Blah*
+  myblah->blah();    // MyPtr<Blah>
+  nnblah->blah();    // NotNull<Blah*>
+  nnmyblah->blah();  // NotNull<MyPtr<Blah>>
+
+ (&blah)->mX = 1;
+ CHECK((&blah)->mX == 1);
+ myblah->mX = 2;
+ CHECK(myblah->mX == 2);
+ nnblah->mX = 3;
+ CHECK(nnblah->mX == 3);
+ nnmyblah->mX = 4;
+ CHECK(nnmyblah->mX == 4);
+
+ // '*' dereferencing (lvalues and rvalues)
+ *(&i4) = 7; // int*
+ CHECK(*(&i4) == 7);
+ *my4 = 6; // MyPtr<int>
+ CHECK(*my4 == 6);
+ *nni4 = 5; // NotNull<int*>
+ CHECK(*nni4 == 5);
+ *nnmy4 = 4; // NotNull<MyPtr<int>>
+ CHECK(*nnmy4 == 4);
+
+ // Non-null arrays.
+ static const int N = 20;
+ int a[N];
+ NotNull<int*> nna = WrapNotNull(a);
+ for (int i = 0; i < N; i++) {
+ nna[i] = i;
+ }
+ for (int i = 0; i < N; i++) {
+ nna[i] *= 2;
+ }
+ for (int i = 0; i < N; i++) {
+ CHECK(nna[i] == i * 2);
+ }
+}
+
+void f_ref(NotNull<MyRefType*> aR) { NotNull<RefPtr<MyRefType>> r = aR; }
+
+void TestNotNullWithRefPtr() {
+ // This MyRefType object will have a maximum refcount of 5.
+ NotNull<RefPtr<MyRefType>> r1 = WrapNotNull(new MyRefType(5));
+
+ // At this point the refcount is 1.
+
+ NotNull<RefPtr<MyRefType>> r2 = r1;
+
+ // At this point the refcount is 2.
+
+ NotNull<MyRefType*> r3 = r2;
+ (void)r3;
+
+ // At this point the refcount is still 2.
+
+ RefPtr<MyRefType> r4 = r2;
+ mozilla::Unused << r4;
+
+ // At this point the refcount is 3.
+
+ RefPtr<MyRefType> r5 = r3.get();
+ mozilla::Unused << r5;
+
+ // At this point the refcount is 4.
+
+ // No change to the refcount occurs because of the argument passing. Within
+ // f_ref() the refcount temporarily hits 5, due to the local RefPtr.
+ f_ref(r2);
+
+ // At this point the refcount is 4.
+
+ NotNull<RefPtr<MyRefType>> r6 = std::move(r2);
+ mozilla::Unused << r6;
+
+ CHECK(r2.get());
+ CHECK(r6.get());
+
+ // At this point the refcount is 5 again, since NotNull is not movable.
+
+ // At function's end all RefPtrs are destroyed and the refcount drops to 0
+ // and the MyRefType is destroyed.
+}
+
+// Create a derived object and store its base pointer.
+struct Base {
+ virtual ~Base() = default;
+ virtual bool IsDerived() const { return false; }
+};
+struct Derived : Base {
+ bool IsDerived() const override { return true; }
+};
+
+void TestMakeNotNull() {
+ // Raw pointer.
+ auto nni = MakeNotNull<int*>(11);
+ static_assert(std::is_same_v<NotNull<int*>, decltype(nni)>,
+ "MakeNotNull<int*> should return NotNull<int*>");
+ CHECK(*nni == 11);
+ delete nni;
+
+ // Raw pointer to const.
+ auto nnci = MakeNotNull<const int*>(12);
+ static_assert(std::is_same_v<NotNull<const int*>, decltype(nnci)>,
+ "MakeNotNull<const int*> should return NotNull<const int*>");
+ CHECK(*nnci == 12);
+ delete nnci;
+
+ auto nnd = MakeNotNull<Derived*>();
+ static_assert(std::is_same_v<NotNull<Derived*>, decltype(nnd)>,
+ "MakeNotNull<Derived*> should return NotNull<Derived*>");
+ CHECK(nnd->IsDerived());
+ delete nnd;
+ NotNull<Base*> nnb = MakeNotNull<Derived*>();
+ static_assert(std::is_same_v<NotNull<Base*>, decltype(nnb)>,
+ "MakeNotNull<Derived*> should be assignable to NotNull<Base*>");
+ // Check that we have really built a Derived object.
+ CHECK(nnb->IsDerived());
+ delete nnb;
+
+ // Allow smart pointers.
+ auto nnmi = MakeNotNull<MyPtr<int>>(23);
+ static_assert(std::is_same_v<NotNull<MyPtr<int>>, decltype(nnmi)>,
+ "MakeNotNull<MyPtr<int>> should return NotNull<MyPtr<int>>");
+ CHECK(*nnmi == 23);
+ delete nnmi.get().get();
+
+ auto nnui = MakeNotNull<UniquePtr<int>>(24);
+ static_assert(
+ std::is_same_v<NotNull<UniquePtr<int>>, decltype(nnui)>,
+ "MakeNotNull<UniquePtr<int>> should return NotNull<UniquePtr<int>>");
+ CHECK(*nnui == 24);
+
+ // Expect only 1 RefCnt (from construction).
+ auto nnr = MakeNotNull<RefPtr<MyRefType>>(1);
+ static_assert(std::is_same_v<NotNull<RefPtr<MyRefType>>, decltype(nnr)>,
+ "MakeNotNull<RefPtr<MyRefType>> should return "
+ "NotNull<RefPtr<MyRefType>>");
+ mozilla::Unused << nnr;
+}
+
+mozilla::MovingNotNull<UniquePtr<int>> CreateNotNullUniquePtr() {
+ return mozilla::WrapMovingNotNull(mozilla::MakeUnique<int>(42));
+}
+
+void TestMovingNotNull() {
+ UniquePtr<int> x1 = CreateNotNullUniquePtr();
+ CHECK(x1);
+ CHECK(42 == *x1);
+
+ NotNull<UniquePtr<int>> x2 = CreateNotNullUniquePtr();
+ CHECK(42 == *x2);
+
+ NotNull<UniquePtr<Base>> x3 =
+ mozilla::WrapMovingNotNull(mozilla::MakeUnique<Derived>());
+
+ // Must not compile:
+ // auto y = CreateNotNullUniquePtr();
+}
+
+int main() {
+ TestNotNullWithMyPtr();
+ TestNotNullWithRefPtr();
+ TestMakeNotNull();
+ TestMovingNotNull();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestPoisonArea.cpp b/mfbt/tests/TestPoisonArea.cpp
new file mode 100644
index 0000000000..9df3929834
--- /dev/null
+++ b/mfbt/tests/TestPoisonArea.cpp
@@ -0,0 +1,530 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+/* Code in this file needs to be kept in sync with code in nsPresArena.cpp.
+ *
+ * We want to use a fixed address for frame poisoning so that it is readily
+ * identifiable in crash dumps. Whether such an address is available
+ * without any special setup depends on the system configuration.
+ *
+ * All current 64-bit CPUs (with the possible exception of PowerPC64)
+ * reserve the vast majority of the virtual address space for future
+ * hardware extensions; valid addresses must be below some break point
+ * between 2**48 and 2**54, depending on exactly which chip you have. Some
+ * chips (notably amd64) also allow the use of the *highest* 2**48 -- 2**54
+ * addresses. Thus, if user space pointers are 64 bits wide, we can just
+ * use an address outside this range, and no more is required. To
+ * accommodate the chips that allow very high addresses to be valid, the
+ * value chosen is close to 2**63 (that is, in the middle of the space).
+ *
+ * In most cases, a purely 32-bit operating system must reserve some
+ * fraction of the address space for its own use. Contemporary 32-bit OSes
+ * tend to take the high gigabyte or so (0xC000_0000 on up). If we can
+ * prove that high addresses are reserved to the kernel, we can use an
+ * address in that region. Unfortunately, not all 32-bit OSes do this;
+ * OSX 10.4 might not, and it is unclear what mobile OSes are like
+ * (some 32-bit CPUs make it very easy for the kernel to exist in its own
+ * private address space).
+ *
+ * Furthermore, when a 32-bit user space process is running on a 64-bit
+ * kernel, the operating system has no need to reserve any of the space that
+ * the process can see, and generally does not do so. This is the scenario
+ * of greatest concern, since it covers all contemporary OSX iterations
+ * (10.5+) as well as Windows Vista and 7 on newer amd64 hardware. Linux on
+ * amd64 is generally run as a pure 64-bit environment, but its 32-bit
+ * compatibility mode also has this property.
+ *
+ * Thus, when user space pointers are 32 bits wide, we need to validate
+ * our chosen address, and possibly *make* it a good poison address by
+ * allocating a page around it and marking it inaccessible. The algorithm
+ * for this is:
+ *
+ * 1. Attempt to make the page surrounding the poison address a reserved,
+ * inaccessible memory region using OS primitives. On Windows, this is
+ * done with VirtualAlloc(MEM_RESERVE); on Unix, mmap(PROT_NONE).
+ *
+ * 2. If mmap/VirtualAlloc failed, there are two possible reasons: either
+ * the region is reserved to the kernel and no further action is
+ * required, or there is already usable memory in this area and we have
+ * to pick a different address. The tricky part is knowing which case
+ * we have, without attempting to access the region. On Windows, we
+ * rely on GetSystemInfo()'s reported upper and lower bounds of the
+ * application memory area. On Unix, there is nothing devoted to the
+ * purpose, but seeing if madvise() fails is close enough (it *might*
+ * disrupt someone else's use of the memory region, but not by as much
+ * as anything else available).
+ *
+ * Be aware of these gotchas:
+ *
+ * 1. We cannot use mmap() with MAP_FIXED. MAP_FIXED is defined to
+ * _replace_ any existing mapping in the region, if necessary to satisfy
+ * the request. Obviously, as we are blindly attempting to acquire a
+ * page at a constant address, we must not do this, lest we overwrite
+ * someone else's allocation.
+ *
+ * 2. For the same reason, we cannot blindly use mprotect() if mmap() fails.
+ *
+ * 3. madvise() may fail when applied to a 'magic' memory region provided as
+ * a kernel/user interface. Fortunately, the only such case I know about
+ * is the "vsyscall" area (not to be confused with the "vdso" area) for
+ * *64*-bit processes on Linux - and we don't even run this code for
+ * 64-bit processes.
+ *
+ * 4. VirtualQuery() does not produce any useful information if
+ * applied to kernel memory - in fact, it doesn't write its output
+ * at all. Thus, it is not used here.
+ */
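+
+/* As a rough summary (the authoritative logic is ReservePoisonArea() below),
+ * the 32-bit validation walks this decision tree:
+ *
+ *   got = ReserveRegion(candidate, inaccessible);
+ *   if (got == candidate)        use candidate;  // we own the page now
+ *   else if (ProbeRegion(...))   use candidate;  // kernel-reserved region
+ *   else if (got != MAP_FAILED)  use got;        // OS-chosen fallback page
+ *   else                         try ReserveRegion(0, ...), else give up.
+ */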
+
+// MAP_ANON(YMOUS) is not in any standard. Add defines as necessary.
+#define _GNU_SOURCE 1
+#define _DARWIN_C_SOURCE 1
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <sys/types.h>
+# include <unistd.h>
+# include <sys/wait.h>
+
+# include <sys/mman.h>
+# ifndef MAP_ANON
+# ifdef MAP_ANONYMOUS
+# define MAP_ANON MAP_ANONYMOUS
+# else
+# error "Don't know how to get anonymous memory"
+# endif
+# endif
+#endif
+
+#define SIZxPTR ((int)(sizeof(uintptr_t) * 2))
+
+/* This program assumes that a whole number of return instructions fit into
+ * 32 bits, and that 32-bit alignment is sufficient for a branch destination.
+ * For architectures where this is not true, fiddling with RETURN_INSTR_TYPE
+ * can be enough.
+ */
+
+#if defined __i386__ || defined __x86_64__ || defined __i386 || \
+ defined __x86_64 || defined _M_IX86 || defined _M_AMD64
+# define RETURN_INSTR 0xC3C3C3C3 /* ret; ret; ret; ret */
+
+#elif defined __arm__ || defined _M_ARM
+# define RETURN_INSTR 0xE12FFF1E /* bx lr */
+
+// PPC has its own style of CPU-id #defines. There is no Windows for
+// PPC as far as I know, so no _M_ variant.
+#elif defined _ARCH_PPC || defined _ARCH_PWR || defined _ARCH_PWR2
+# define RETURN_INSTR 0x4E800020 /* blr */
+
+#elif defined __m68k__
+# define RETURN_INSTR 0x4E754E75 /* rts; rts */
+
+#elif defined __riscv
+# define RETURN_INSTR 0x80828082 /* ret; ret */
+
+#elif defined __sparc || defined __sparcv9
+# define RETURN_INSTR 0x81c3e008 /* retl */
+
+#elif defined __alpha
+# define RETURN_INSTR 0x6bfa8001 /* ret */
+
+#elif defined __hppa
+# define RETURN_INSTR 0xe840c002 /* bv,n r0(rp) */
+
+#elif defined __mips
+# define RETURN_INSTR 0x03e00008 /* jr ra */
+
+# ifdef __MIPSEL
+/* On mipsel, jr ra needs to be followed by a nop.
+ 0x03e00008 as a 64 bits integer just does that */
+# define RETURN_INSTR_TYPE uint64_t
+# endif
+
+#elif defined __s390__
+# define RETURN_INSTR 0x07fe0000 /* br %r14 */
+
+#elif defined __sh__
+# define RETURN_INSTR 0x0b000b00 /* rts; rts */
+
+#elif defined __aarch64__ || defined _M_ARM64
+# define RETURN_INSTR 0xd65f03c0 /* ret */
+
+#elif defined __loongarch64
+# define RETURN_INSTR 0x4c000020 /* jirl zero, ra, 0 */
+
+#elif defined __ia64
+struct ia64_instr {
+ uint32_t mI[4];
+};
+static const ia64_instr _return_instr = {
+ {0x00000011, 0x00000001, 0x80000200, 0x00840008}}; /* br.ret.sptk.many b0 */
+
+# define RETURN_INSTR _return_instr
+# define RETURN_INSTR_TYPE ia64_instr
+
+#else
+# error "Need return instruction for this architecture"
+#endif
+
+#ifndef RETURN_INSTR_TYPE
+# define RETURN_INSTR_TYPE uint32_t
+#endif
+
+// Miscellaneous Windows/Unix portability gumph
+
+#ifdef _WIN32
+// Uses of this function deliberately leak the string.
+static LPSTR StrW32Error(DWORD aErrcode) {
+ LPSTR errmsg;
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, aErrcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&errmsg, 0, nullptr);
+
+ // FormatMessage puts an unwanted newline at the end of the string
+ size_t n = strlen(errmsg) - 1;
+ while (errmsg[n] == '\r' || errmsg[n] == '\n') {
+ n--;
+ }
+ errmsg[n + 1] = '\0';
+ return errmsg;
+}
+# define LastErrMsg() (StrW32Error(GetLastError()))
+
+// Because we use VirtualAlloc in MEM_RESERVE mode, the "page size" we want
+// is the allocation granularity.
+static SYSTEM_INFO sInfo_;
+
+static inline uint32_t PageSize() { return sInfo_.dwAllocationGranularity; }
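+
+// (Note: the allocation granularity is typically 64 KiB on Windows, larger
+// than the 4 KiB hardware page, so the reservations and probes below all
+// operate on these coarser granules.)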
+
+static void* ReserveRegion(uintptr_t aRequest, bool aAccessible) {
+ return VirtualAlloc((void*)aRequest, PageSize(),
+ aAccessible ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE,
+ aAccessible ? PAGE_EXECUTE_READWRITE : PAGE_NOACCESS);
+}
+
+static void ReleaseRegion(void* aPage) {
+ VirtualFree(aPage, PageSize(), MEM_RELEASE);
+}
+
+static bool ProbeRegion(uintptr_t aPage) {
+ return aPage >= (uintptr_t)sInfo_.lpMaximumApplicationAddress &&
+ aPage + PageSize() >= (uintptr_t)sInfo_.lpMaximumApplicationAddress;
+}
+
+static bool MakeRegionExecutable(void*) { return false; }
+
+# undef MAP_FAILED
+# define MAP_FAILED 0
+
+#else // Unix
+
+# define LastErrMsg() (strerror(errno))
+
+static unsigned long gUnixPageSize;
+
+static inline unsigned long PageSize() { return gUnixPageSize; }
+
+static void* ReserveRegion(uintptr_t aRequest, bool aAccessible) {
+ return mmap(reinterpret_cast<void*>(aRequest), PageSize(),
+ aAccessible ? PROT_READ | PROT_WRITE : PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+}
+
+static void ReleaseRegion(void* aPage) { munmap(aPage, PageSize()); }
+
+static bool ProbeRegion(uintptr_t aPage) {
+# ifdef XP_SOLARIS
+ return !!posix_madvise(reinterpret_cast<void*>(aPage), PageSize(),
+ POSIX_MADV_NORMAL);
+# else
+ return !!madvise(reinterpret_cast<void*>(aPage), PageSize(), MADV_NORMAL);
+# endif
+}
+
+static int MakeRegionExecutable(void* aPage) {
+ return mprotect((caddr_t)aPage, PageSize(),
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+}
+
+#endif
+
+static uintptr_t ReservePoisonArea() {
+ if (sizeof(uintptr_t) == 8) {
+ // Use the hardware-inaccessible region.
+ // We have to avoid 64-bit constants and shifts by 32 bits, since this
+ // code is compiled in 32-bit mode, although it is never executed there.
+ uintptr_t result =
+ (((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu)) &
+ ~uintptr_t(PageSize() - 1));
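+    // A quick check of the arithmetic, assuming 4 KiB pages:
+    // (0x7FFFFFFF << 31) << 1 == 2**63 - 2**32 == 0x7FFFFFFF00000000, so the
+    // masked result is 0x7FFFFFFFF0DEA000, just below 2**63 as described in
+    // the comment at the top of this file.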
+ printf("INFO | poison area assumed at 0x%.*" PRIxPTR "\n", SIZxPTR, result);
+ return result;
+ }
+
+ // First see if we can allocate the preferred poison address from the OS.
+ uintptr_t candidate = (0xF0DEAFFF & ~(PageSize() - 1));
+ void* result = ReserveRegion(candidate, false);
+ if (result == reinterpret_cast<void*>(candidate)) {
+ // success - inaccessible page allocated
+ printf("INFO | poison area allocated at 0x%.*" PRIxPTR
+ " (preferred addr)\n",
+ SIZxPTR, reinterpret_cast<uintptr_t>(result));
+ return candidate;
+ }
+
+ // That didn't work, so see if the preferred address is within a range
+  // of permanently inaccessible memory.
+ if (ProbeRegion(candidate)) {
+ // success - selected page cannot be usable memory
+ if (result != MAP_FAILED) {
+ ReleaseRegion(result);
+ }
+ printf("INFO | poison area assumed at 0x%.*" PRIxPTR " (preferred addr)\n",
+ SIZxPTR, candidate);
+ return candidate;
+ }
+
+ // The preferred address is already in use. Did the OS give us a
+ // consolation prize?
+ if (result != MAP_FAILED) {
+ uintptr_t ures = reinterpret_cast<uintptr_t>(result);
+ printf("INFO | poison area allocated at 0x%.*" PRIxPTR
+ " (consolation prize)\n",
+ SIZxPTR, ures);
+ return ures;
+ }
+
+ // It didn't, so try to allocate again, without any constraint on
+ // the address.
+ result = ReserveRegion(0, false);
+ if (result != MAP_FAILED) {
+ uintptr_t ures = reinterpret_cast<uintptr_t>(result);
+ printf("INFO | poison area allocated at 0x%.*" PRIxPTR " (fallback)\n",
+ SIZxPTR, ures);
+ return ures;
+ }
+
+ printf("ERROR | no usable poison area found\n");
+ return 0;
+}
+
+/* The "positive control" area confirms that we can allocate a page with the
+ * proper characteristics.
+ */
+static uintptr_t ReservePositiveControl() {
+ void* result = ReserveRegion(0, false);
+ if (result == MAP_FAILED) {
+ printf("ERROR | allocating positive control | %s\n", LastErrMsg());
+ return 0;
+ }
+ printf("INFO | positive control allocated at 0x%.*" PRIxPTR "\n", SIZxPTR,
+ (uintptr_t)result);
+ return (uintptr_t)result;
+}
+
+/* The "negative control" area confirms that our probe logic does detect a
+ * page that is readable, writable, or executable.
+ */
+static uintptr_t ReserveNegativeControl() {
+ void* result = ReserveRegion(0, true);
+ if (result == MAP_FAILED) {
+ printf("ERROR | allocating negative control | %s\n", LastErrMsg());
+ return 0;
+ }
+
+ // Fill the page with return instructions.
+ RETURN_INSTR_TYPE* p = reinterpret_cast<RETURN_INSTR_TYPE*>(result);
+ RETURN_INSTR_TYPE* limit = reinterpret_cast<RETURN_INSTR_TYPE*>(
+ reinterpret_cast<char*>(result) + PageSize());
+ while (p < limit) {
+ *p++ = RETURN_INSTR;
+ }
+
+ // Now mark it executable as well as readable and writable.
+ // (mmap(PROT_EXEC) may fail when applied to anonymous memory.)
+
+ if (MakeRegionExecutable(result)) {
+ printf("ERROR | making negative control executable | %s\n", LastErrMsg());
+ return 0;
+ }
+
+ printf("INFO | negative control allocated at 0x%.*" PRIxPTR "\n", SIZxPTR,
+ (uintptr_t)result);
+ return (uintptr_t)result;
+}
+
+#ifndef _WIN32
+static void JumpTo(uintptr_t aOpaddr) {
+# ifdef __ia64
+ struct func_call {
+ uintptr_t mFunc;
+ uintptr_t mGp;
+ } call = {
+ aOpaddr,
+ };
+ ((void (*)()) & call)();
+# else
+ ((void (*)())aOpaddr)();
+# endif
+}
+#endif
+
+/* Test each page. */
+static bool TestPage(const char* aPageLabel, uintptr_t aPageAddr,
+ int aShouldSucceed) {
+ const char* oplabel;
+ uintptr_t opaddr;
+
+ bool failed = false;
+ for (unsigned int test = 0; test < 3; test++) {
+ switch (test) {
+ // The execute test must be done before the write test, because the
+ // write test will clobber memory at the target address.
+ case 0:
+ oplabel = "reading";
+ opaddr = aPageAddr + PageSize() / 2 - 1;
+ break;
+ case 1:
+ oplabel = "executing";
+ opaddr = aPageAddr + PageSize() / 2;
+ break;
+ case 2:
+ oplabel = "writing";
+ opaddr = aPageAddr + PageSize() / 2 - 1;
+ break;
+ default:
+ abort();
+ }
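+
+    // (Note: the read and write probes poke the byte just below the middle of
+    // the page, while the execute probe jumps to the middle itself, which
+    // lines up with the RETURN_INSTR_TYPE slots ReserveNegativeControl fills.)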
+
+#ifdef _WIN32
+ bool badptr = true;
+ MEMORY_BASIC_INFORMATION mbi = {};
+
+ if (VirtualQuery((LPCVOID)opaddr, &mbi, sizeof(mbi)) &&
+ mbi.State == MEM_COMMIT) {
+ switch (test) {
+ case 0: // read
+ badptr = !(mbi.Protect & (PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE |
+ PAGE_READONLY | PAGE_READWRITE));
+ break;
+ case 1: // execute
+ badptr =
+ !(mbi.Protect & (PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE));
+ break;
+ case 2: // write
+ badptr = !(mbi.Protect & (PAGE_READWRITE | PAGE_EXECUTE_READWRITE));
+ break;
+ default:
+ abort();
+ }
+ }
+
+ if (badptr) {
+ if (aShouldSucceed) {
+ printf("TEST-UNEXPECTED-FAIL | %s %s\n", oplabel, aPageLabel);
+ failed = true;
+ } else {
+ printf("TEST-PASS | %s %s\n", oplabel, aPageLabel);
+ }
+ } else {
+ // if control reaches this point the probe succeeded
+ if (aShouldSucceed) {
+ printf("TEST-PASS | %s %s\n", oplabel, aPageLabel);
+ } else {
+ printf("TEST-UNEXPECTED-FAIL | %s %s\n", oplabel, aPageLabel);
+ failed = true;
+ }
+ }
+#else
+ pid_t pid = fork();
+ if (pid == -1) {
+ printf("ERROR | %s %s | fork=%s\n", oplabel, aPageLabel, LastErrMsg());
+ exit(2);
+ } else if (pid == 0) {
+ volatile unsigned char scratch;
+ switch (test) {
+ case 0:
+ scratch = *(volatile unsigned char*)opaddr;
+ break;
+ case 1:
+ JumpTo(opaddr);
+ break;
+ case 2:
+ *(volatile unsigned char*)opaddr = 0;
+ break;
+ default:
+ abort();
+ }
+ (void)scratch;
+ _exit(0);
+ } else {
+ int status;
+ if (waitpid(pid, &status, 0) != pid) {
+ printf("ERROR | %s %s | wait=%s\n", oplabel, aPageLabel, LastErrMsg());
+ exit(2);
+ }
+
+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
+ if (aShouldSucceed) {
+ printf("TEST-PASS | %s %s\n", oplabel, aPageLabel);
+ } else {
+ printf("TEST-UNEXPECTED-FAIL | %s %s | unexpected successful exit\n",
+ oplabel, aPageLabel);
+ failed = true;
+ }
+ } else if (WIFEXITED(status)) {
+ printf("ERROR | %s %s | unexpected exit code %d\n", oplabel, aPageLabel,
+ WEXITSTATUS(status));
+ exit(2);
+ } else if (WIFSIGNALED(status)) {
+ if (aShouldSucceed) {
+ printf("TEST-UNEXPECTED-FAIL | %s %s | unexpected signal %d\n",
+ oplabel, aPageLabel, WTERMSIG(status));
+ failed = true;
+ } else {
+ printf("TEST-PASS | %s %s | signal %d (as expected)\n", oplabel,
+ aPageLabel, WTERMSIG(status));
+ }
+ } else {
+ printf("ERROR | %s %s | unexpected exit status %d\n", oplabel,
+ aPageLabel, status);
+ exit(2);
+ }
+ }
+#endif
+ }
+ return failed;
+}
+
+int main() {
+#ifdef _WIN32
+ GetSystemInfo(&sInfo_);
+#else
+ gUnixPageSize = sysconf(_SC_PAGESIZE);
+#endif
+
+ uintptr_t ncontrol = ReserveNegativeControl();
+ uintptr_t pcontrol = ReservePositiveControl();
+ uintptr_t poison = ReservePoisonArea();
+
+ if (!ncontrol || !pcontrol || !poison) {
+ return 2;
+ }
+
+ bool failed = false;
+ failed |= TestPage("negative control", ncontrol, 1);
+ failed |= TestPage("positive control", pcontrol, 0);
+ failed |= TestPage("poison area", poison, 0);
+
+ return failed ? 1 : 0;
+}
diff --git a/mfbt/tests/TestRandomNum.cpp b/mfbt/tests/TestRandomNum.cpp
new file mode 100644
index 0000000000..f53c42fc83
--- /dev/null
+++ b/mfbt/tests/TestRandomNum.cpp
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RandomNum.h"
+#include <vector>
+
+/*
+ * We're going to check that random number generation is sane on a basic
+ * level: that is, we want to check that the function returns success
+ * and doesn't just keep returning the same number.
+ *
+ * Note that there are many more tests that could be done, but to really test
+ * a PRNG we'd probably need to generate a large set of random numbers and
+ * perform statistical analysis on them. Maybe that's worth doing eventually?
+ *
+ * For now we should be fine just performing a dumb test of generating 5
+ * numbers and making sure they're all unique. In theory, it is possible for
+ * this test to report a false negative, but with 5 numbers the probability
+ * is less than one-in-a-trillion.
+ *
+ */
+#define NUM_RANDOMS_TO_GENERATE 5
+
+using mozilla::Maybe;
+using mozilla::RandomUint64;
+
+static uint64_t getRandomUint64OrDie() {
+ Maybe<uint64_t> maybeRandomNum = RandomUint64();
+
+ MOZ_RELEASE_ASSERT(maybeRandomNum.isSome());
+
+ return maybeRandomNum.value();
+}
+
+static void TestRandomUint64() {
+ // The allocator uses RandomNum.h too, but its initialization path allocates
+ // memory. While the allocator itself handles the situation, we can't, so
+  // we make sure to use an allocation before getting a random number ourselves.
+ std::vector<uint64_t> randomsList;
+ randomsList.reserve(NUM_RANDOMS_TO_GENERATE);
+
+ for (uint8_t i = 0; i < NUM_RANDOMS_TO_GENERATE; ++i) {
+ uint64_t randomNum = getRandomUint64OrDie();
+
+ for (uint64_t num : randomsList) {
+ MOZ_RELEASE_ASSERT(randomNum != num);
+ }
+
+ randomsList.push_back(randomNum);
+ }
+}
+
+int main() {
+ TestRandomUint64();
+ return 0;
+}
diff --git a/mfbt/tests/TestRange.cpp b/mfbt/tests/TestRange.cpp
new file mode 100644
index 0000000000..a3bc134896
--- /dev/null
+++ b/mfbt/tests/TestRange.cpp
@@ -0,0 +1,29 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Range.h"
+
+#include <type_traits>
+
+using mozilla::Range;
+
+static_assert(std::is_convertible_v<Range<int>, Range<const int>>,
+ "Range should convert into const");
+static_assert(!std::is_convertible_v<Range<const int>, Range<int>>,
+ "Range should not drop const in conversion");
+
+void test_RangeToBoolConversionShouldCompile() {
+ auto dummy = bool{Range<int>{}};
+ (void)dummy;
+}
+
+void test_RangeT_To_RangeConstT_ShouldCompile() {
+ auto dummy = Range<const int>{Range<int>{}};
+ (void)dummy;
+}
+
+// We need a proper program so we have someplace to hang the static_asserts.
+int main() { return 0; }
diff --git a/mfbt/tests/TestRefPtr.cpp b/mfbt/tests/TestRefPtr.cpp
new file mode 100644
index 0000000000..972e284c44
--- /dev/null
+++ b/mfbt/tests/TestRefPtr.cpp
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/RefCounted.h"
+
+#include <type_traits>
+
+using mozilla::RefCounted;
+
+class Foo : public RefCounted<Foo> {
+ public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(Foo)
+
+ Foo() : mDead(false) {}
+
+ static int sNumDestroyed;
+
+ ~Foo() {
+ MOZ_ASSERT(!mDead);
+ mDead = true;
+ sNumDestroyed++;
+ }
+
+ private:
+ bool mDead;
+};
+int Foo::sNumDestroyed;
+
+struct Bar : public Foo {};
+
+already_AddRefed<Foo> NewFoo() {
+ RefPtr<Foo> f(new Foo());
+ return f.forget();
+}
+
+already_AddRefed<Foo> NewBar() {
+ RefPtr<Bar> bar = new Bar();
+ return bar.forget();
+}
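+
+// (Note: forget() transfers the existing strong reference into the returned
+// already_AddRefed, so handing the pointer across these helpers does not add
+// an AddRef/Release pair.)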
+
+void GetNewFoo(Foo** aFoo) {
+ *aFoo = new Bar();
+ // Kids, don't try this at home
+ (*aFoo)->AddRef();
+}
+
+void GetNewFoo(RefPtr<Foo>* aFoo) { *aFoo = new Bar(); }
+
+already_AddRefed<Foo> GetNullFoo() { return 0; }
+
+int main() {
+ MOZ_RELEASE_ASSERT(0 == Foo::sNumDestroyed);
+ {
+ RefPtr<Foo> f = new Foo();
+ MOZ_RELEASE_ASSERT(f->refCount() == 1);
+ }
+ MOZ_RELEASE_ASSERT(1 == Foo::sNumDestroyed);
+
+ {
+ RefPtr f1 = NewFoo();
+ static_assert(std::is_same_v<decltype(f1), RefPtr<Foo>>);
+ RefPtr f2(NewFoo());
+ static_assert(std::is_same_v<decltype(f2), RefPtr<Foo>>);
+ MOZ_RELEASE_ASSERT(1 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(3 == Foo::sNumDestroyed);
+
+ {
+ RefPtr<Foo> b = NewBar();
+ MOZ_RELEASE_ASSERT(3 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(4 == Foo::sNumDestroyed);
+
+ {
+ RefPtr<Foo> f1;
+ {
+ f1 = new Foo();
+ RefPtr<Foo> f2(f1);
+ RefPtr<Foo> f3 = f2;
+ MOZ_RELEASE_ASSERT(4 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(4 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(5 == Foo::sNumDestroyed);
+
+ {
+ {
+ RefPtr<Foo> f = new Foo();
+ RefPtr<Foo> g = f.forget();
+ }
+ MOZ_RELEASE_ASSERT(6 == Foo::sNumDestroyed);
+ }
+
+ {
+ RefPtr<Foo> f = new Foo();
+ GetNewFoo(getter_AddRefs(f));
+ MOZ_RELEASE_ASSERT(7 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(8 == Foo::sNumDestroyed);
+
+ {
+ RefPtr<Foo> f = new Foo();
+ GetNewFoo(&f);
+ MOZ_RELEASE_ASSERT(9 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(10 == Foo::sNumDestroyed);
+
+ { RefPtr<Foo> f1 = new Bar(); }
+ MOZ_RELEASE_ASSERT(11 == Foo::sNumDestroyed);
+
+ {
+ RefPtr f = GetNullFoo();
+ static_assert(std::is_same_v<decltype(f), RefPtr<Foo>>);
+ MOZ_RELEASE_ASSERT(11 == Foo::sNumDestroyed);
+ }
+ MOZ_RELEASE_ASSERT(11 == Foo::sNumDestroyed);
+
+ {
+ bool condition = true;
+ const auto f =
+ condition ? mozilla::MakeRefPtr<Bar>() : mozilla::MakeRefPtr<Foo>();
+
+ MOZ_RELEASE_ASSERT(f);
+ }
+
+ return 0;
+}
diff --git a/mfbt/tests/TestResult.cpp b/mfbt/tests/TestResult.cpp
new file mode 100644
index 0000000000..a2e10640c5
--- /dev/null
+++ b/mfbt/tests/TestResult.cpp
@@ -0,0 +1,870 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdint.h>
+#include <string.h>
+#include "mozilla/ResultVariant.h"
+#include "mozilla/Try.h"
+#include "mozilla/UniquePtr.h"
+
+using mozilla::Err;
+using mozilla::GenericErrorResult;
+using mozilla::Ok;
+using mozilla::Result;
+using mozilla::UniquePtr;
+
+#define MOZ_STATIC_AND_RELEASE_ASSERT(expr) \
+ static_assert(expr); \
+ MOZ_RELEASE_ASSERT(expr)
+
+enum struct TestUnusedZeroEnum : int16_t { Ok = 0, NotOk = 1 };
+
+namespace mozilla::detail {
+template <>
+struct UnusedZero<TestUnusedZeroEnum> : UnusedZeroEnum<TestUnusedZeroEnum> {};
+} // namespace mozilla::detail
+
+struct Failed {};
+
+namespace mozilla::detail {
+template <>
+struct UnusedZero<Failed> {
+ using StorageType = uintptr_t;
+
+ static constexpr bool value = true;
+ static constexpr StorageType nullValue = 0;
+ static constexpr StorageType GetDefaultValue() { return 2; }
+
+ static constexpr void AssertValid(StorageType aValue) {}
+ static constexpr Failed Inspect(const StorageType& aValue) {
+ return Failed{};
+ }
+ static constexpr Failed Unwrap(StorageType aValue) { return Failed{}; }
+ static constexpr StorageType Store(Failed aValue) {
+ return GetDefaultValue();
+ }
+};
+
+} // namespace mozilla::detail
+
+// V is trivially default-constructible, and E has UnusedZero<E>::value == true;
+// this should select the NullIsOk strategy, both for a word-sized V (uintptr_t)
+// and for an empty V (Ok).
+static_assert(mozilla::detail::SelectResultImpl<uintptr_t, Failed>::value ==
+ mozilla::detail::PackingStrategy::NullIsOk);
+static_assert(
+ mozilla::detail::SelectResultImpl<Ok, TestUnusedZeroEnum>::value ==
+ mozilla::detail::PackingStrategy::NullIsOk);
+static_assert(mozilla::detail::SelectResultImpl<Ok, Failed>::value ==
+ mozilla::detail::PackingStrategy::LowBitTagIsError);
+
+static_assert(std::is_trivially_destructible_v<Result<uintptr_t, Failed>>);
+static_assert(std::is_trivially_destructible_v<Result<Ok, TestUnusedZeroEnum>>);
+static_assert(std::is_trivially_destructible_v<Result<Ok, Failed>>);
+
+static_assert(
+ sizeof(Result<bool, TestUnusedZeroEnum>) <= sizeof(uintptr_t),
+ "Result with bool value type should not be larger than pointer-sized");
+static_assert(sizeof(Result<Ok, Failed>) == sizeof(uint8_t),
+ "Result with empty value type should be size 1");
+static_assert(sizeof(Result<int*, Failed>) == sizeof(uintptr_t),
+ "Result with two aligned pointer types should be pointer-sized");
+static_assert(
+ sizeof(Result<char*, Failed*>) > sizeof(char*),
+ "Result with unaligned success type `char*` must not be pointer-sized");
+static_assert(
+ sizeof(Result<int*, char*>) > sizeof(char*),
+ "Result with unaligned error type `char*` must not be pointer-sized");
+
+enum Foo8 : uint8_t {};
+enum Foo16 : uint16_t {};
+enum Foo32 : uint32_t {};
+static_assert(sizeof(Result<Ok, Foo8>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+static_assert(sizeof(Result<Ok, Foo16>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+static_assert(sizeof(Foo32) >= sizeof(uintptr_t) ||
+ sizeof(Result<Ok, Foo32>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+
+static_assert(sizeof(Result<Foo16, Foo8>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+static_assert(sizeof(Result<Foo8, Foo16>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+static_assert(sizeof(Foo32) >= sizeof(uintptr_t) ||
+ sizeof(Result<Foo32, Foo16>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
+static_assert(sizeof(Foo32) >= sizeof(uintptr_t) ||
+ sizeof(Result<Foo16, Foo32>) <= sizeof(uintptr_t),
+ "Result with small types should be pointer-sized");
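+
+// (A rough note on the packing these checks rely on: Result tries to fit
+// small value/error pairs into a single word. NullIsOk reuses a zero error
+// representation to mean success, and LowBitTagIsError steals the low bit of
+// a suitably aligned word to tag the error; the SelectResultImpl assertions
+// above pin down which strategy is selected.)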
+
+#if __cplusplus < 202002L
+static_assert(std::is_literal_type_v<Result<int*, Failed>>);
+static_assert(std::is_literal_type_v<Result<Ok, Failed>>);
+static_assert(std::is_literal_type_v<Result<Ok, Foo8>>);
+static_assert(std::is_literal_type_v<Result<Foo8, Foo16>>);
+static_assert(!std::is_literal_type_v<Result<Ok, UniquePtr<int>>>);
+#endif
+
+static constexpr GenericErrorResult<Failed> Fail() { return Err(Failed{}); }
+
+static constexpr GenericErrorResult<TestUnusedZeroEnum>
+FailTestUnusedZeroEnum() {
+ return Err(TestUnusedZeroEnum::NotOk);
+}
+
+static constexpr Result<Ok, Failed> Task1(bool pass) {
+ if (!pass) {
+ return Fail(); // implicit conversion from GenericErrorResult to Result
+ }
+ return Ok();
+}
+
+static constexpr Result<Ok, TestUnusedZeroEnum> Task1UnusedZeroEnumErr(
+ bool pass) {
+ if (!pass) {
+ return FailTestUnusedZeroEnum(); // implicit conversion from
+ // GenericErrorResult to Result
+ }
+ return Ok();
+}
+
+static constexpr Result<int, Failed> Task2(bool pass, int value) {
+ MOZ_TRY(
+ Task1(pass)); // converts one type of result to another in the error case
+ return value; // implicit conversion from T to Result<T, E>
+}
+
+static constexpr Result<int, TestUnusedZeroEnum> Task2UnusedZeroEnumErr(
+ bool pass, int value) {
+ MOZ_TRY(Task1UnusedZeroEnumErr(
+ pass)); // converts one type of result to another in the error case
+ return value; // implicit conversion from T to Result<T, E>
+}
+
+static Result<int, Failed> Task3(bool pass1, bool pass2, int value) {
+ int x, y;
+ MOZ_TRY_VAR(x, Task2(pass1, value));
+ MOZ_TRY_VAR(y, Task2(pass2, value));
+ return x + y;
+}
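+
+// (A rough sketch of what the macros above expand to, assuming the usual mfbt
+// definitions: MOZ_TRY(expr) evaluates expr and, on error, returns
+// expr.propagateErr() from the enclosing function; MOZ_TRY_VAR(lhs, expr)
+// does the same but assigns the unwrapped success value to lhs.)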
+
+static void BasicTests() {
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task1(true).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(!Task1(true).isErr());
+ MOZ_STATIC_AND_RELEASE_ASSERT(!Task1(false).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task1(false).isErr());
+
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task1UnusedZeroEnumErr(true).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(!Task1UnusedZeroEnumErr(true).isErr());
+ MOZ_STATIC_AND_RELEASE_ASSERT(!Task1UnusedZeroEnumErr(false).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task1UnusedZeroEnumErr(false).isErr());
+ MOZ_STATIC_AND_RELEASE_ASSERT(TestUnusedZeroEnum::NotOk ==
+ Task1UnusedZeroEnumErr(false).inspectErr());
+ MOZ_STATIC_AND_RELEASE_ASSERT(TestUnusedZeroEnum::NotOk ==
+ Task1UnusedZeroEnumErr(false).unwrapErr());
+
+ // MOZ_TRY works.
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2(true, 3).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2(true, 3).unwrap() == 3);
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2(true, 3).unwrapOr(6) == 3);
+ MOZ_RELEASE_ASSERT(Task2(false, 3).isErr());
+ MOZ_RELEASE_ASSERT(Task2(false, 3).unwrapOr(6) == 6);
+
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2UnusedZeroEnumErr(true, 3).isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2UnusedZeroEnumErr(true, 3).unwrap() == 3);
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2UnusedZeroEnumErr(true, 3).unwrapOr(6) ==
+ 3);
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2UnusedZeroEnumErr(false, 3).isErr());
+ MOZ_STATIC_AND_RELEASE_ASSERT(Task2UnusedZeroEnumErr(false, 3).unwrapOr(6) ==
+ 6);
+
+ // MOZ_TRY_VAR works.
+ MOZ_RELEASE_ASSERT(Task3(true, true, 3).isOk());
+ MOZ_RELEASE_ASSERT(Task3(true, true, 3).unwrap() == 6);
+ MOZ_RELEASE_ASSERT(Task3(true, false, 3).isErr());
+ MOZ_RELEASE_ASSERT(Task3(false, true, 3).isErr());
+ MOZ_RELEASE_ASSERT(Task3(false, true, 3).unwrapOr(6) == 6);
+
+ // Lvalues should work too.
+ {
+ constexpr Result<Ok, Failed> res1 = Task1(true);
+ MOZ_STATIC_AND_RELEASE_ASSERT(res1.isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(!res1.isErr());
+
+ constexpr Result<Ok, Failed> res2 = Task1(false);
+ MOZ_STATIC_AND_RELEASE_ASSERT(!res2.isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(res2.isErr());
+ }
+
+ {
+ Result<int, Failed> res = Task2(true, 3);
+ MOZ_RELEASE_ASSERT(res.isOk());
+ MOZ_RELEASE_ASSERT(res.unwrap() == 3);
+
+ res = Task2(false, 4);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ }
+
+ // Some tests for pointer tagging.
+ {
+ int i = 123;
+
+ Result<int*, Failed> res = &i;
+ static_assert(sizeof(res) == sizeof(uintptr_t),
+ "should use pointer tagging to fit in a word");
+
+ MOZ_RELEASE_ASSERT(res.isOk());
+ MOZ_RELEASE_ASSERT(*res.unwrap() == 123);
+
+ res = Err(Failed());
+ MOZ_RELEASE_ASSERT(res.isErr());
+ }
+}
+
+struct NonCopyableNonMovable {
+ explicit constexpr NonCopyableNonMovable(uint32_t aValue) : mValue(aValue) {}
+
+ NonCopyableNonMovable(const NonCopyableNonMovable&) = delete;
+ NonCopyableNonMovable(NonCopyableNonMovable&&) = delete;
+ NonCopyableNonMovable& operator=(const NonCopyableNonMovable&) = delete;
+ NonCopyableNonMovable& operator=(NonCopyableNonMovable&&) = delete;
+
+ uint32_t mValue;
+};
+
+static void InPlaceConstructionTests() {
+ {
+ // PackingStrategy == NullIsOk
+ static_assert(mozilla::detail::SelectResultImpl<NonCopyableNonMovable,
+ Failed>::value ==
+ mozilla::detail::PackingStrategy::NullIsOk);
+ constexpr Result<NonCopyableNonMovable, Failed> result{std::in_place, 42u};
+ MOZ_STATIC_AND_RELEASE_ASSERT(42 == result.inspect().mValue);
+ }
+
+ {
+ // PackingStrategy == Variant
+ static_assert(
+ mozilla::detail::SelectResultImpl<NonCopyableNonMovable, int>::value ==
+ mozilla::detail::PackingStrategy::Variant);
+ const Result<NonCopyableNonMovable, int> result{std::in_place, 42};
+ MOZ_RELEASE_ASSERT(42 == result.inspect().mValue);
+ }
+}
+
+/* * */
+
+struct Snafu : Failed {};
+
+static Result<Ok, Snafu*> Explode() {
+ static Snafu snafu;
+ return Err(&snafu);
+}
+
+static Result<Ok, Failed*> ErrorGeneralization() {
+ MOZ_TRY(Explode()); // change error type from Snafu* to more general Failed*
+ return Ok();
+}
+
+static void TypeConversionTests() {
+ MOZ_RELEASE_ASSERT(ErrorGeneralization().isErr());
+
+ {
+ const Result<Ok, Failed*> res = Explode();
+ MOZ_RELEASE_ASSERT(res.isErr());
+ }
+
+ {
+ const Result<Ok, Failed*> res = Result<Ok, Snafu*>{Ok{}};
+ MOZ_RELEASE_ASSERT(res.isOk());
+ }
+}
+
+static void EmptyValueTest() {
+ struct Fine {};
+ mozilla::Result<Fine, Failed> res((Fine()));
+ res.unwrap();
+ MOZ_RELEASE_ASSERT(res.isOk());
+ static_assert(sizeof(res) == sizeof(uint8_t),
+ "Result with empty value and error types should be size 1");
+}
+
+static void MapTest() {
+ struct MyError {
+ int x;
+
+ explicit MyError(int y) : x(y) {}
+ };
+
+ // Mapping over success values, to the same success type.
+ {
+ Result<int, MyError> res(5);
+ bool invoked = false;
+ auto res2 = res.map([&invoked](int x) {
+ MOZ_RELEASE_ASSERT(x == 5);
+ invoked = true;
+ return 6;
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 6);
+ }
+
+ // Mapping over success values, to a different success type.
+ {
+ Result<int, MyError> res(5);
+ bool invoked = false;
+ auto res2 = res.map([&invoked](int x) {
+ MOZ_RELEASE_ASSERT(x == 5);
+ invoked = true;
+ return "hello";
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(strcmp(res2.unwrap(), "hello") == 0);
+ }
+
+ // Mapping over success values (constexpr).
+ {
+ constexpr uint64_t kValue = 42u;
+ constexpr auto res2a = Result<int32_t, Failed>{5}.map([](int32_t x) {
+ MOZ_RELEASE_ASSERT(x == 5);
+ return kValue;
+ });
+ MOZ_STATIC_AND_RELEASE_ASSERT(res2a.isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(kValue == res2a.inspect());
+ }
+
+ // Mapping over error values.
+ {
+ MyError err(1);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ Result<char, MyError> res2 = res.map([](int x) {
+ MOZ_RELEASE_ASSERT(false);
+ return 'a';
+ });
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(res2.unwrapErr().x == err.x);
+ }
+
+ // Function pointers instead of lambdas as the mapping function.
+ {
+ Result<const char*, MyError> res("hello");
+ auto res2 = res.map(strlen);
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 5);
+ }
+}
+
+static void MapErrTest() {
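+  // mapErr() applies a function to the error value and passes successes
+  // through unchanged.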
+ struct MyError {
+ int x;
+
+ explicit MyError(int y) : x(y) {}
+ };
+
+ struct MyError2 {
+ int a;
+
+ explicit MyError2(int b) : a(b) {}
+ };
+
+ // Mapping over error values, to the same error type.
+ {
+ MyError err(1);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 = res.mapErr([&invoked](const auto err) {
+ MOZ_RELEASE_ASSERT(err.x == 1);
+ invoked = true;
+ return MyError(2);
+ });
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrapErr().x == 2);
+ }
+
+ // Mapping over error values, to a different error type.
+ {
+ MyError err(1);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 = res.mapErr([&invoked](const auto err) {
+ MOZ_RELEASE_ASSERT(err.x == 1);
+ invoked = true;
+ return MyError2(2);
+ });
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrapErr().a == 2);
+ }
+
+ // Mapping over success values.
+ {
+ Result<int, MyError> res(5);
+ auto res2 = res.mapErr([](const auto err) {
+ MOZ_RELEASE_ASSERT(false);
+ return MyError(1);
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 5);
+ }
+
+ // Function pointers instead of lambdas as the mapping function.
+ {
+ Result<Ok, const char*> res("hello");
+ auto res2 = res.mapErr(strlen);
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(res2.unwrapErr() == 5);
+ }
+}
+
+static Result<Ok, size_t> strlen_ResultWrapper(const char* aValue) {
+ return Err(strlen(aValue));
+}
+
+static void OrElseTest() {
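+  // orElse() runs its handler only on errors; the handler can recover by
+  // returning a success or produce a new error, possibly of another type.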
+ struct MyError {
+ int x;
+
+ explicit constexpr MyError(int y) : x(y) {}
+ };
+
+ struct MyError2 {
+ int a;
+
+ explicit constexpr MyError2(int b) : a(b) {}
+ };
+
+ // `orElse`ing over error values, to Result<V, E> (the same error type) error
+ // variant.
+ {
+ MyError err(1);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 = res.orElse([&invoked](const auto err) -> Result<char, MyError> {
+ MOZ_RELEASE_ASSERT(err.x == 1);
+ invoked = true;
+ if (err.x != 42) {
+ return Err(MyError(2));
+ }
+ return 'a';
+ });
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrapErr().x == 2);
+ }
+
+ // `orElse`ing over error values, to Result<V, E> (the same error type)
+ // success variant.
+ {
+ MyError err(42);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 = res.orElse([&invoked](const auto err) -> Result<char, MyError> {
+ MOZ_RELEASE_ASSERT(err.x == 42);
+ invoked = true;
+ if (err.x != 42) {
+ return Err(MyError(2));
+ }
+ return 'a';
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 'a');
+ }
+
+ // `orElse`ing over error values, to Result<V, E2> (a different error type)
+ // error variant.
+ {
+ MyError err(1);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 =
+ res.orElse([&invoked](const auto err) -> Result<char, MyError2> {
+ MOZ_RELEASE_ASSERT(err.x == 1);
+ invoked = true;
+ if (err.x != 42) {
+ return Err(MyError2(2));
+ }
+ return 'a';
+ });
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrapErr().a == 2);
+ }
+
+ // `orElse`ing over error values, to Result<V, E2> (a different error type)
+ // success variant.
+ {
+ MyError err(42);
+ Result<char, MyError> res(err);
+ MOZ_RELEASE_ASSERT(res.isErr());
+ bool invoked = false;
+ auto res2 =
+ res.orElse([&invoked](const auto err) -> Result<char, MyError2> {
+ MOZ_RELEASE_ASSERT(err.x == 42);
+ invoked = true;
+ if (err.x != 42) {
+ return Err(MyError2(2));
+ }
+ return 'a';
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(invoked);
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 'a');
+ }
+
+ // `orElse`ing over success values.
+ {
+ Result<int, MyError> res(5);
+ auto res2 = res.orElse([](const auto err) -> Result<int, MyError> {
+ MOZ_RELEASE_ASSERT(false);
+ return Err(MyError(1));
+ });
+ MOZ_RELEASE_ASSERT(res2.isOk());
+ MOZ_RELEASE_ASSERT(res2.unwrap() == 5);
+ }
+
+ // Function pointers instead of lambdas as the `orElse`ing function.
+ {
+ Result<Ok, const char*> res("hello");
+ auto res2 = res.orElse(strlen_ResultWrapper);
+ MOZ_RELEASE_ASSERT(res2.isErr());
+ MOZ_RELEASE_ASSERT(res2.unwrapErr() == 5);
+ }
+}
+
+static void AndThenTest() {
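+  // andThen() chains a further fallible computation onto a success; errors
+  // short-circuit past it.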
+ // `andThen`ing over success results.
+ {
+ Result<int, const char*> r1(10);
+ Result<int, const char*> r2 =
+ r1.andThen([](int x) { return Result<int, const char*>(x + 1); });
+ MOZ_RELEASE_ASSERT(r2.isOk());
+ MOZ_RELEASE_ASSERT(r2.unwrap() == 11);
+ }
+
+ // `andThen`ing over success results (constexpr).
+ {
+ constexpr Result<int, Failed> r2a = Result<int, Failed>{10}.andThen(
+ [](int x) { return Result<int, Failed>(x + 1); });
+ MOZ_STATIC_AND_RELEASE_ASSERT(r2a.isOk());
+ MOZ_STATIC_AND_RELEASE_ASSERT(r2a.inspect() == 11);
+ }
+
+ // `andThen`ing over error results.
+ {
+ Result<int, const char*> r3("error");
+ Result<int, const char*> r4 = r3.andThen([](int x) {
+ MOZ_RELEASE_ASSERT(false);
+ return Result<int, const char*>(1);
+ });
+ MOZ_RELEASE_ASSERT(r4.isErr());
+ MOZ_RELEASE_ASSERT(r3.unwrapErr() == r4.unwrapErr());
+ }
+
+ // andThen with a function accepting an rvalue
+ {
+ Result<int, const char*> r1(10);
+ Result<int, const char*> r2 =
+ r1.andThen([](int&& x) { return Result<int, const char*>(x + 1); });
+ MOZ_RELEASE_ASSERT(r2.isOk());
+ MOZ_RELEASE_ASSERT(r2.unwrap() == 11);
+ }
+
+ // `andThen`ing over error results (constexpr).
+ {
+ constexpr Result<int, Failed> r4a =
+ Result<int, Failed>{Failed{}}.andThen([](int x) {
+ MOZ_RELEASE_ASSERT(false);
+ return Result<int, Failed>(1);
+ });
+ MOZ_STATIC_AND_RELEASE_ASSERT(r4a.isErr());
+ }
+}
+
+using UniqueResult = Result<UniquePtr<int>, const char*>;
+
+static UniqueResult UniqueTask() { return mozilla::MakeUnique<int>(3); }
+static UniqueResult UniqueTaskError() { return Err("bad"); }
+
+using UniqueErrorResult = Result<int, UniquePtr<int>>;
+static UniqueErrorResult UniqueError() {
+ return Err(mozilla::MakeUnique<int>(4));
+}
+
+static Result<Ok, UniquePtr<int>> TryUniqueErrorResult() {
+ MOZ_TRY(UniqueError());
+ return Ok();
+}
+
+static void UniquePtrTest() {
+ {
+ auto result = UniqueTask();
+ MOZ_RELEASE_ASSERT(result.isOk());
+ auto ptr = result.unwrap();
+ MOZ_RELEASE_ASSERT(ptr);
+ MOZ_RELEASE_ASSERT(*ptr == 3);
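+    // unwrap() moves the value out of the Result; a second unwrap yields the
+    // moved-from (null) UniquePtr.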
+ auto moved = result.unwrap();
+ MOZ_RELEASE_ASSERT(!moved);
+ }
+
+ {
+ auto err = UniqueTaskError();
+ MOZ_RELEASE_ASSERT(err.isErr());
+ auto ptr = err.unwrapOr(mozilla::MakeUnique<int>(4));
+ MOZ_RELEASE_ASSERT(ptr);
+ MOZ_RELEASE_ASSERT(*ptr == 4);
+ }
+
+ {
+ auto result = UniqueTaskError();
+ result = UniqueResult(mozilla::MakeUnique<int>(6));
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() && *result.inspect() == 6);
+ }
+
+ {
+ auto result = UniqueError();
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(result.inspectErr());
+ MOZ_RELEASE_ASSERT(*result.inspectErr() == 4);
+ auto err = result.unwrapErr();
+ MOZ_RELEASE_ASSERT(!result.inspectErr());
+ MOZ_RELEASE_ASSERT(err);
+ MOZ_RELEASE_ASSERT(*err == 4);
+
+ result = UniqueErrorResult(0);
+ MOZ_RELEASE_ASSERT(result.isOk() && result.unwrap() == 0);
+ }
+
+ {
+ auto result = TryUniqueErrorResult();
+ MOZ_RELEASE_ASSERT(result.isErr());
+ auto err = result.unwrapErr();
+ MOZ_RELEASE_ASSERT(err && *err == 4);
+ MOZ_RELEASE_ASSERT(!result.inspectErr());
+ }
+}
+
+struct ZeroIsUnusedStructForPointer {
+ int x = 1;
+};
+enum class ZeroIsUnusedEnum1 : uint8_t {
+ V1 = 1,
+ V2 = 2,
+};
+enum class ZeroIsUnusedEnum2 : uint16_t {
+ V1 = 1,
+ V2 = 2,
+};
+enum class ZeroIsUnusedEnum4 : uint32_t {
+ V1 = 1,
+ V2 = 2,
+};
+enum class ZeroIsUnusedEnum8 : uint64_t {
+ V1 = 1,
+ V2 = 2,
+};
+struct EmptyErrorStruct {};
+
+template <>
+struct mozilla::detail::UnusedZero<ZeroIsUnusedStructForPointer*> {
+ static const bool value = true;
+};
+template <>
+struct mozilla::detail::UnusedZero<ZeroIsUnusedEnum1> {
+ static const bool value = true;
+};
+template <>
+struct mozilla::detail::UnusedZero<ZeroIsUnusedEnum2> {
+ static const bool value = true;
+};
+template <>
+struct mozilla::detail::UnusedZero<ZeroIsUnusedEnum4> {
+ static const bool value = true;
+};
+template <>
+struct mozilla::detail::UnusedZero<ZeroIsUnusedEnum8> {
+ static const bool value = true;
+};
+
+static void ZeroIsEmptyErrorTest() {
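+  // With UnusedZero<V> specialized, the Result should collapse to the size of
+  // V alone: the all-zero bit pattern, unused by any valid V, represents the
+  // error state, as the reinterpret_casts below observe.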
+ {
+ ZeroIsUnusedStructForPointer s;
+
+ using V = ZeroIsUnusedStructForPointer*;
+
+ mozilla::Result<V, EmptyErrorStruct> result(&s);
+ MOZ_RELEASE_ASSERT(sizeof(result) == sizeof(V));
+
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() == &s);
+ }
+
+ {
+ using V = ZeroIsUnusedStructForPointer*;
+
+ mozilla::Result<V, EmptyErrorStruct> result(Err(EmptyErrorStruct{}));
+
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(*reinterpret_cast<V*>(&result) == nullptr);
+ }
+
+ {
+ ZeroIsUnusedEnum1 e = ZeroIsUnusedEnum1::V1;
+
+ using V = ZeroIsUnusedEnum1;
+
+ mozilla::Result<V, EmptyErrorStruct> result(e);
+ MOZ_RELEASE_ASSERT(sizeof(result) == sizeof(V));
+
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() == e);
+ }
+
+ {
+ using V = ZeroIsUnusedEnum1;
+
+ mozilla::Result<V, EmptyErrorStruct> result(Err(EmptyErrorStruct()));
+
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(*reinterpret_cast<uint8_t*>(&result) == 0);
+ }
+
+ {
+ ZeroIsUnusedEnum2 e = ZeroIsUnusedEnum2::V1;
+
+ using V = ZeroIsUnusedEnum2;
+
+ mozilla::Result<V, EmptyErrorStruct> result(e);
+ MOZ_RELEASE_ASSERT(sizeof(result) == sizeof(V));
+
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() == e);
+ }
+
+ {
+ using V = ZeroIsUnusedEnum2;
+
+ mozilla::Result<V, EmptyErrorStruct> result(Err(EmptyErrorStruct()));
+
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(*reinterpret_cast<uint16_t*>(&result) == 0);
+ }
+
+ {
+ ZeroIsUnusedEnum4 e = ZeroIsUnusedEnum4::V1;
+
+ using V = ZeroIsUnusedEnum4;
+
+ mozilla::Result<V, EmptyErrorStruct> result(e);
+ MOZ_RELEASE_ASSERT(sizeof(result) == sizeof(V));
+
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() == e);
+ }
+
+ {
+ using V = ZeroIsUnusedEnum4;
+
+ mozilla::Result<V, EmptyErrorStruct> result(Err(EmptyErrorStruct()));
+
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(*reinterpret_cast<uint32_t*>(&result) == 0);
+ }
+
+ {
+ ZeroIsUnusedEnum8 e = ZeroIsUnusedEnum8::V1;
+
+ using V = ZeroIsUnusedEnum8;
+
+ mozilla::Result<V, EmptyErrorStruct> result(e);
+ MOZ_RELEASE_ASSERT(sizeof(result) == sizeof(V));
+
+ MOZ_RELEASE_ASSERT(result.isOk());
+ MOZ_RELEASE_ASSERT(result.inspect() == e);
+ }
+
+ {
+ using V = ZeroIsUnusedEnum8;
+
+ mozilla::Result<V, EmptyErrorStruct> result(Err(EmptyErrorStruct()));
+
+ MOZ_RELEASE_ASSERT(result.isErr());
+ MOZ_RELEASE_ASSERT(*reinterpret_cast<uint64_t*>(&result) == 0);
+ }
+}
+
+class Foo {};
+
+class C1 {};
+class C2 : public C1 {};
+
+class E1 {};
+class E2 : public E1 {};
+
+void UpcastTest() {
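+  // Moving a Result implicitly upcasts pointer value and error types to
+  // their base classes, as each case below exercises.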
+ {
+ C2 c2;
+
+ mozilla::Result<C2*, Failed> result(&c2);
+ mozilla::Result<C1*, Failed> copied(std::move(result));
+
+ MOZ_RELEASE_ASSERT(copied.inspect() == &c2);
+ }
+
+ {
+ E2 e2;
+
+ mozilla::Result<Foo, E2*> result(Err(&e2));
+ mozilla::Result<Foo, E1*> copied(std::move(result));
+
+ MOZ_RELEASE_ASSERT(copied.inspectErr() == &e2);
+ }
+
+ {
+ C2 c2;
+
+ mozilla::Result<C2*, E2*> result(&c2);
+ mozilla::Result<C1*, E1*> copied(std::move(result));
+
+ MOZ_RELEASE_ASSERT(copied.inspect() == &c2);
+ }
+
+ {
+ E2 e2;
+
+ mozilla::Result<C2*, E2*> result(Err(&e2));
+ mozilla::Result<C1*, E1*> copied(std::move(result));
+
+ MOZ_RELEASE_ASSERT(copied.inspectErr() == &e2);
+ }
+}
+
+/* * */
+
+int main() {
+ BasicTests();
+ InPlaceConstructionTests();
+ TypeConversionTests();
+ EmptyValueTest();
+ MapTest();
+ MapErrTest();
+ OrElseTest();
+ AndThenTest();
+ UniquePtrTest();
+ ZeroIsEmptyErrorTest();
+ UpcastTest();
+ return 0;
+}
diff --git a/mfbt/tests/TestRollingMean.cpp b/mfbt/tests/TestRollingMean.cpp
new file mode 100644
index 0000000000..001d827c4f
--- /dev/null
+++ b/mfbt/tests/TestRollingMean.cpp
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/RollingMean.h"
+
+using mozilla::RollingMean;
+
+class MyClass {
+ public:
+ uint32_t mValue;
+
+ explicit MyClass(uint32_t aValue = 0) : mValue(aValue) {}
+
+ bool operator==(const MyClass& aOther) const {
+ return mValue == aOther.mValue;
+ }
+
+ MyClass operator+(const MyClass& aOther) const {
+ return MyClass(mValue + aOther.mValue);
+ }
+
+ MyClass operator-(const MyClass& aOther) const {
+ return MyClass(mValue - aOther.mValue);
+ }
+
+ MyClass operator/(uint32_t aDiv) const { return MyClass(mValue / aDiv); }
+};
+
+class RollingMeanSuite {
+ public:
+ RollingMeanSuite() = default;
+
+ void runTests() {
+ testZero();
+ testClear();
+ testRolling();
+ testClass();
+ testMove();
+ }
+
+ private:
+ void testZero() {
+ RollingMean<uint32_t, uint64_t> mean(3);
+ MOZ_RELEASE_ASSERT(mean.empty());
+ }
+
+ void testClear() {
+ RollingMean<uint32_t, uint64_t> mean(3);
+
+ mean.insert(4);
+ MOZ_RELEASE_ASSERT(mean.mean() == 4);
+
+ mean.clear();
+ MOZ_RELEASE_ASSERT(mean.empty());
+
+ mean.insert(3);
+ MOZ_RELEASE_ASSERT(mean.mean() == 3);
+ }
+
+ void testRolling() {
+ RollingMean<uint32_t, uint64_t> mean(3);
+
+ mean.insert(10);
+ MOZ_RELEASE_ASSERT(mean.mean() == 10);
+
+ mean.insert(20);
+ MOZ_RELEASE_ASSERT(mean.mean() == 15);
+
+ mean.insert(35);
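+    // (10 + 20 + 35) / 3 truncates to 21 under integer division.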
+ MOZ_RELEASE_ASSERT(mean.mean() == 21);
+
+ mean.insert(5);
+ MOZ_RELEASE_ASSERT(mean.mean() == 20);
+
+ mean.insert(10);
+ MOZ_RELEASE_ASSERT(mean.mean() == 16);
+ }
+
+ void testClass() {
+ RollingMean<MyClass, MyClass> mean(3);
+
+ mean.insert(MyClass(4));
+ MOZ_RELEASE_ASSERT(mean.mean() == MyClass(4));
+
+ mean.clear();
+ MOZ_RELEASE_ASSERT(mean.empty());
+ }
+
+ void testMove() {
+ RollingMean<uint32_t, uint64_t> mean(3);
+ mean = RollingMean<uint32_t, uint64_t>(4);
+ MOZ_RELEASE_ASSERT(mean.maxValues() == 4);
+
+ mean.insert(10);
+ MOZ_RELEASE_ASSERT(mean.mean() == 10);
+
+ mean = RollingMean<uint32_t, uint64_t>(3);
+ mean.insert(30);
+ mean.insert(40);
+ mean.insert(50);
+ mean.insert(60);
+ MOZ_RELEASE_ASSERT(mean.mean() == 50);
+ }
+};
+
+int main() {
+ RollingMeanSuite suite;
+ suite.runTests();
+ return 0;
+}
diff --git a/mfbt/tests/TestSHA1.cpp b/mfbt/tests/TestSHA1.cpp
new file mode 100644
index 0000000000..9bc9d2a0b7
--- /dev/null
+++ b/mfbt/tests/TestSHA1.cpp
@@ -0,0 +1,204 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/SHA1.h"
+
+using mozilla::SHA1Sum;
+
+static unsigned int gTestV[1024] = {
+ 0x048edc1a, 0x4345588c, 0x0ef03cbf, 0x1d6438f5, 0x094e0a1e, 0x68535f60,
+ 0x14e8c927, 0x60190043, 0x5d640ab7, 0x73dc7c62, 0x364223f9, 0x47320292,
+ 0x3924cae0, 0x5f6b26d3, 0x5efa04ef, 0x7aab361e, 0x2773b1aa, 0x1631b07d,
+ 0x385b5dd1, 0x26c809b0, 0x28ad3a9f, 0x0315292a, 0x1a544e67, 0x1e79dcb9,
+ 0x787683e8, 0x3a591c75, 0x1dd338c7, 0x01c539e5, 0x1c15b23e, 0x0697c25c,
+ 0x4df5fd45, 0x672aa324, 0x39f74e6e, 0x269cdd5f, 0x087b6fce, 0x293509db,
+ 0x0aef54a9, 0x210c4cc5, 0x29d6dc4a, 0x16320825, 0x3ab7b181, 0x56d6fd25,
+ 0x6837fda2, 0x3e7994c2, 0x37f77529, 0x48c85472, 0x424fd84d, 0x00aba7fa,
+ 0x6d8475de, 0x354634a7, 0x0c73bb49, 0x0a335de6, 0x0a9ea542, 0x5ffb31f1,
+ 0x00a6a3f2, 0x76b14a03, 0x1e436a37, 0x173b766a, 0x33cf3ca0, 0x34eb0f1a,
+ 0x4ca073ee, 0x27591fe6, 0x5eaf3356, 0x10c24493, 0x1bad88b6, 0x676f2309,
+ 0x7f5e2d91, 0x74bd4c83, 0x66549b43, 0x52ffdf24, 0x2dfa0a83, 0x7c3e1cbf,
+ 0x1edf87fc, 0x1f6fa930, 0x7c29bc74, 0x374bcd2f, 0x5b43de94, 0x0d09a3a6,
+ 0x7437ecb0, 0x635117f8, 0x2aa78f65, 0x2c788958, 0x098cb9f3, 0x13ed5b3f,
+ 0x41b7c7ba, 0x696b2d88, 0x42e20d63, 0x69585b1d, 0x4a9b027c, 0x0c761cba,
+ 0x563bdbc4, 0x3bde2f5b, 0x0bab9730, 0x7740104c, 0x11641702, 0x26f03c32,
+ 0x011a87c6, 0x2c5e4e6c, 0x46c34200, 0x6a167e84, 0x34205728, 0x0e8a6152,
+ 0x0014604b, 0x6793bacd, 0x442bca9c, 0x6f2018ce, 0x4313e07e, 0x77f2c69c,
+ 0x62621441, 0x47bf6358, 0x59c45e04, 0x16ba3426, 0x6ac0c19d, 0x20218c6b,
+ 0x510b4ddc, 0x585f6c9d, 0x1ed02b0c, 0x366bf0a9, 0x131c7f59, 0x0ebcd320,
+ 0x00ca858a, 0x5efbcb77, 0x2a7a1859, 0x64bb5afd, 0x76258886, 0x6505c895,
+ 0x602cfa32, 0x17040942, 0x783df744, 0x3838e0ae, 0x6a021e39, 0x4c8c9c5a,
+ 0x4a5e96b6, 0x10f4477d, 0x247fda4f, 0x4c390400, 0x0cbe048c, 0x7b547d26,
+ 0x1e2e6897, 0x4ba7e01b, 0x5cfea1bb, 0x39a2d199, 0x45aee64a, 0x12615500,
+ 0x0151615f, 0x1a9f5d33, 0x4542ed44, 0x101357eb, 0x35a16b1f, 0x3420b3e1,
+ 0x6442bac7, 0x1c0f2a8c, 0x68d642f1, 0x45744fc4, 0x048e60cb, 0x5f217f44,
+ 0x6cc7d151, 0x27f41984, 0x2d01eb09, 0x2bb15aea, 0x6dda49f8, 0x590dd6bc,
+ 0x280cc20b, 0x7e2592b5, 0x043642f0, 0x292b5d29, 0x2e0a9b69, 0x41162471,
+ 0x1e55db6b, 0x648b96fe, 0x05f8f9d1, 0x4a9d4cbb, 0x38517039, 0x2b0f8917,
+ 0x4d1e67bb, 0x713e0974, 0x64fdf214, 0x11223963, 0x2bd09d24, 0x19924092,
+ 0x4b4a70f0, 0x1ece6b03, 0x1780c9c1, 0x09b4c3ac, 0x58ac7e73, 0x5c9a4747,
+ 0x321f943b, 0x41167667, 0x3a19cf8c, 0x53f4144d, 0x03a498de, 0x6fb4b742,
+ 0x54d793cb, 0x7ee164e2, 0x501af74c, 0x43201e7f, 0x0ad581be, 0x497f046a,
+ 0x3b1d2a9f, 0x53b88eb0, 0x2c3a26c5, 0x5ae970ba, 0x7d7ee4ff, 0x471366c5,
+ 0x46119703, 0x3bfc2e58, 0x456d6c4f, 0x4b6bb181, 0x45d7c872, 0x0d023221,
+ 0x021176d1, 0x4195ad44, 0x4621ec90, 0x3ae68279, 0x57952f71, 0x1796080c,
+ 0x228077bb, 0x5e2b7fee, 0x3d71dd88, 0x4a651849, 0x7f1c8081, 0x04c333fc,
+ 0x1f99bff6, 0x11b7754c, 0x740be324, 0x069bf2e2, 0x0802f3e0, 0x371cf30e,
+ 0x1d44dda5, 0x6033b9e5, 0x5639a9b0, 0x6526bfff, 0x14d7d9b7, 0x4182b6a7,
+ 0x01a5fa76, 0x7aa5e581, 0x762465e6, 0x386b3a2e, 0x495a3ab0, 0x04421b2e,
+ 0x46e04591, 0x472af458, 0x6a007dd3, 0x2e8be484, 0x18660abe, 0x7969af82,
+ 0x5a242a83, 0x581b5f72, 0x5f0eff6d, 0x38aea98c, 0x2acb5853, 0x6d650b35,
+ 0x10b750d7, 0x18fdcd14, 0x09b4816c, 0x3ceef016, 0x6957153c, 0x27cf39fb,
+ 0x60e3495d, 0x381e1da6, 0x4b5be02d, 0x14b6f309, 0x6380c589, 0x1a31f436,
+ 0x4b5e50c1, 0x493ac048, 0x314baad1, 0x71e24ab7, 0x718af49c, 0x022f4658,
+ 0x1a419d5b, 0x1854610d, 0x2ec4e99a, 0x7096ce50, 0x5467ba00, 0x404aab4c,
+ 0x1a5ab015, 0x217580f7, 0x2d50071e, 0x71a9f437, 0x27f758b5, 0x11cd8b3f,
+ 0x63b089c9, 0x53c860c1, 0x2fa6b7d7, 0x61e54771, 0x5c0ba6b9, 0x3138f796,
+ 0x5c7359cd, 0x4c2c5654, 0x549d581c, 0x3129ebf7, 0x4958a248, 0x1a460541,
+ 0x68e64964, 0x597c0609, 0x57afcbab, 0x2f1c6479, 0x57a0ad5c, 0x5936938f,
+ 0x536a5cbe, 0x29aacf0b, 0x43eca70d, 0x6e7a3e4e, 0x563c1e3b, 0x32f23909,
+ 0x12faa42d, 0x28b0bbde, 0x797e2842, 0x1b827bdf, 0x0df96a6e, 0x542ef7f4,
+ 0x6226d368, 0x01cb4258, 0x77bcba08, 0x7e6dc041, 0x0571eda3, 0x0fdf5065,
+ 0x5c9b9f7a, 0x2b496dd6, 0x02d3b40b, 0x3a5752db, 0x4843a293, 0x6fdc9c3f,
+ 0x42963996, 0x39c9e4eb, 0x01db58ad, 0x7e79381c, 0x5bb207bb, 0x2df5de51,
+ 0x1549ec82, 0x64f01e70, 0x536eb0d0, 0x10fa6e03, 0x5b7f9a20, 0x2d8b625d,
+ 0x397410c7, 0x7778284e, 0x1ab75170, 0x254f304e, 0x395ba877, 0x0c2e2815,
+ 0x5c723dec, 0x63b91327, 0x7c5954b5, 0x67dd69a3, 0x21d220c7, 0x5a287fcd,
+ 0x0d0b9c59, 0x22444c9f, 0x6305cb43, 0x12f717cc, 0x77c11945, 0x0e79bda8,
+ 0x6e014391, 0x441d0179, 0x5e17dd2f, 0x53e57a5c, 0x692f4b9a, 0x76c1e94b,
+ 0x5a872d81, 0x044f7e7e, 0x0970844f, 0x25e34e73, 0x57865d3c, 0x640771d2,
+ 0x12d410ed, 0x1424e079, 0x3e1c7fd7, 0x0e89295a, 0x48dcf262, 0x55a29550,
+ 0x0fd4d360, 0x7494d449, 0x41e6f260, 0x2230d4e7, 0x5ad1cd49, 0x7f8dd428,
+ 0x7722b48a, 0x7a14848d, 0x2a83335a, 0x548c0d9b, 0x24f5d43b, 0x33a417cb,
+ 0x3061e078, 0x1a1bc935, 0x5aedb5df, 0x6755f3e4, 0x795e4cdb, 0x64dfcd1c,
+ 0x6d5164fc, 0x34a3df0e, 0x2cc92142, 0x2569127d, 0x130f3d86, 0x43617cc2,
+ 0x25eaf1fa, 0x044ae792, 0x4b47ee17, 0x6879ea87, 0x7eb455fa, 0x54481e19,
+ 0x13bba2f0, 0x6da3fe79, 0x19c306ff, 0x42591e38, 0x2b0e205d, 0x60bd48bc,
+ 0x550aa0ce, 0x2296a6ef, 0x551eb052, 0x76df1b8e, 0x242a2d22, 0x0ada0b06,
+ 0x58b661ec, 0x490bec94, 0x20bd7c59, 0x760de8c3, 0x7a048ee8, 0x44ba6dcd,
+ 0x3816abd9, 0x47e8527e, 0x2194a188, 0x6967a480, 0x7f7e2083, 0x0ec455f3,
+ 0x78198eab, 0x3d710773, 0x05969198, 0x76ffcffe, 0x54be4797, 0x11105781,
+ 0x3a851719, 0x516284b8, 0x4295de1c, 0x3905be43, 0x6d4e7d6a, 0x0877796d,
+ 0x0b9e986a, 0x5e2b853f, 0x7e6c79cd, 0x4a44a54c, 0x1e28b9a2, 0x5b1e408e,
+ 0x6a1c8eac, 0x62a87929, 0x4f075dac, 0x5c030e8c, 0x3df73ce9, 0x321c3c69,
+ 0x2325cc45, 0x4eaf0759, 0x486a31fb, 0x12d04b94, 0x714e15d5, 0x420d1910,
+ 0x092dc45b, 0x0119beac, 0x68b2bfdb, 0x74863a17, 0x3c7ab8e5, 0x035bc2df,
+ 0x4e7a7965, 0x017f58d6, 0x6414074e, 0x3a1e64ae, 0x2d6725d8, 0x0f22f82a,
+ 0x0a0affa0, 0x4159f31e, 0x4002cb9d, 0x234e393f, 0x6028169f, 0x3b804078,
+ 0x0c16e2e1, 0x0e198020, 0x24b13c40, 0x1ceb2143, 0x38dd4246, 0x6f483590,
+ 0x69b20a6e, 0x105580b1, 0x5d60f184, 0x065d18eb, 0x09a28739, 0x70345728,
+ 0x595a5934, 0x14a78a43, 0x449f05c7, 0x6556fcfc, 0x260bc0b2, 0x3afb600e,
+ 0x1f47bb91, 0x145c14b6, 0x541832fe, 0x54f10f23, 0x3013650e, 0x6c0d32ba,
+ 0x4f202c8d, 0x66bcc661, 0x6131dc7f, 0x04828b25, 0x1737565d, 0x520e967f,
+ 0x16cf0438, 0x6f2bc19e, 0x553c3dda, 0x356906b0, 0x333916d5, 0x2887c195,
+ 0x11e7440b, 0x6354f182, 0x06b2f977, 0x6d2c9a5c, 0x2d02bfb7, 0x74fafcf6,
+ 0x2b955161, 0x74035c38, 0x6e9bc991, 0x09a3a5b9, 0x460f416a, 0x11afabfc,
+ 0x66e32d10, 0x4a56ac6e, 0x6448afa8, 0x680b0044, 0x05d0e296, 0x49569eac,
+ 0x0adb563b, 0x4a9da168, 0x4f857004, 0x0f234600, 0x6db386ec, 0x280b94bf,
+ 0x7cd258a5, 0x6165fd88, 0x3bf2aac9, 0x2cb47c44, 0x2381c2a4, 0x4fe42552,
+ 0x21d4c81e, 0x24baa9af, 0x365231cb, 0x11b7fc81, 0x419748fb, 0x38ff637e,
+ 0x065f3365, 0x21f1aba8, 0x2df41ace, 0x5cec1d95, 0x22c078a8, 0x7bb894fc,
+ 0x2d66fc53, 0x7ed82ccc, 0x4485c9d7, 0x1af210fc, 0x5d2faa09, 0x3b33412e,
+ 0x79d12ea8, 0x7bb8103b, 0x5cea1a7b, 0x2779db45, 0x1250ed5b, 0x0c4d8964,
+ 0x6c18e9f5, 0x501ddc60, 0x3de43ae4, 0x6c0e8577, 0x0adfb426, 0x7ec718f5,
+ 0x1991f387, 0x101ccb9c, 0x632360b4, 0x7d52ce4d, 0x0b58c91c, 0x1fa59d53,
+ 0x0b0b48b0, 0x297315d0, 0x7f3132ff, 0x323b85d1, 0x2f852141, 0x23e84bdc,
+ 0x3732cb25, 0x1274eb57, 0x21a882c3, 0x095288a9, 0x2120e253, 0x617799ce,
+ 0x5e4926b3, 0x52575363, 0x696722e0, 0x509c9117, 0x3b60f14f, 0x423310fa,
+ 0x4e694e80, 0x000a647e, 0x453e283a, 0x3f1d21ef, 0x527c91f0, 0x7ac2e88a,
+ 0x1ba3b840, 0x1c3f253a, 0x04c40280, 0x437dc361, 0x7247859c, 0x61e5b34c,
+ 0x20746a53, 0x58cfc2df, 0x79edf48e, 0x5b48e723, 0x7b08baac, 0x1d1035ea,
+ 0x023fc918, 0x2de0427c, 0x71540904, 0x4030e8f5, 0x2b0961f6, 0x4ec98ef0,
+ 0x781076ee, 0x0dac959b, 0x16f66214, 0x273411e5, 0x02334297, 0x3b568cd1,
+ 0x7cf4e8c0, 0x0f4c2c91, 0x2d8dd28e, 0x4a7b3fb0, 0x237969ae, 0x363d6cb6,
+ 0x75fee60a, 0x5825f4df, 0x29f79f9d, 0x22de4f33, 0x2309590e, 0x1977c2bd,
+ 0x67f7bebe, 0x452b8330, 0x5dc70832, 0x5cddbea4, 0x59091e0b, 0x4d287830,
+ 0x2bbc2ce6, 0x420ee023, 0x02d6e086, 0x228a7a14, 0x48207207, 0x1d5ccc5a,
+ 0x37d32cdc, 0x50dc6508, 0x0b795304, 0x5b9fd543, 0x2a3f2925, 0x72e71606,
+ 0x0dc8ba42, 0x3279a910, 0x6bd2c2e2, 0x775065d8, 0x547c59a6, 0x4b5374cf,
+ 0x0c45cd18, 0x532096d6, 0x351c9bd1, 0x107fdce0, 0x3ae69075, 0x5dddd5de,
+ 0x3bb0ba8b, 0x0b1a0019, 0x6c226525, 0x109e9002, 0x312191be, 0x16fa3de8,
+ 0x4a5197aa, 0x0931b2d2, 0x79ee6e1b, 0x657a142b, 0x6ab74d38, 0x77440cff,
+ 0x11e37956, 0x5c335799, 0x269d3be3, 0x18923cfd, 0x4dd71b00, 0x77c58014,
+ 0x07145324, 0x1678546a, 0x5dfd4f6a, 0x207f4e13, 0x6b0a98c0, 0x015bc2cf,
+ 0x1636d8fe, 0x7bc5f038, 0x183a0661, 0x573ec5f3, 0x54cf2255, 0x2fcc905c,
+ 0x71bb70b9, 0x2b122a89, 0x59f86e5b, 0x5528273d, 0x464cf857, 0x27efdeec,
+ 0x1d0bcfcc, 0x64d7837f, 0x1e7a659a, 0x02aa611c, 0x53969ad5, 0x0e83f59f,
+ 0x50a6d11b, 0x79513c59, 0x0e5c3c98, 0x2ed7bbcf, 0x117de9d9, 0x375ec696,
+ 0x19c830aa, 0x66950511, 0x2b6dbbaa, 0x5ca18c9b, 0x0a487514, 0x6f44a887,
+ 0x6921bc6e, 0x3ef8130b, 0x26f6cde3, 0x686d7605, 0x6583553a, 0x29bcf7cc,
+ 0x55d42201, 0x1c93497c, 0x64c53231, 0x32088f6e, 0x381c5770, 0x617574d8,
+ 0x09757952, 0x1a616eb0, 0x1140e8aa, 0x0ff66ffb, 0x32039001, 0x5a455e7c,
+ 0x0027b906, 0x21cf154c, 0x67d3527f, 0x56fd7602, 0x150f8b25, 0x2ae8e4c8,
+ 0x0bf10aec, 0x3d26a40f, 0x5c4c8ffc, 0x3c291322, 0x737fd02c, 0x4b506209,
+ 0x484ddaa4, 0x00b44669, 0x5974bdd1, 0x7d39d617, 0x12995404, 0x48f00bbe,
+ 0x44f7c59a, 0x23cb9292, 0x6476f20b, 0x034fbd59, 0x2893161c, 0x1dbae8c0,
+ 0x50348c2e, 0x797f0957, 0x685ddeaf, 0x36fb8a2e, 0x0fceb6f4, 0x10347ab4,
+ 0x72720bfc, 0x292a4304, 0x0cbf8a27, 0x3cea6db7, 0x4b0c6b15, 0x57e8e716,
+ 0x4e9c54cc, 0x4fc7f7ca, 0x49a6d3e2, 0x10fc2df3, 0x73db387e, 0x72cb89c3,
+ 0x71dba437, 0x4b14048c, 0x6e1af265, 0x1084b213, 0x3842107d, 0x6ecdc171,
+ 0x647919b2, 0x41a80841, 0x7b387c76, 0x46bc094b, 0x331b312a, 0x2f140cc4,
+ 0x355d0a11, 0x19390200, 0x69b05263, 0x582963fa, 0x44897e31, 0x66a473f0,
+ 0x0374f08d, 0x35879e45, 0x5e1dd7ef, 0x34d6a311, 0x6e4e18eb, 0x7b44734b,
+ 0x0e421333, 0x3da026d8, 0x5becbf4b, 0x56db4a1f, 0x1f2089bc, 0x28c733f2,
+ 0x04b0975d, 0x6156f224, 0x12d1f40f, 0x7f4d30f4, 0x2c0b9861, 0x769a083b,
+ 0x739544fb, 0x1dbd1067, 0x0e8cd717, 0x4c246fb2, 0x115eff39, 0x19e22f2a,
+ 0x4563ba61, 0x5d33a617, 0x54af83cf, 0x030bde73, 0x54b4736d, 0x0f01dfec,
+ 0x08869c01, 0x4e9e4d7b, 0x4739855a, 0x62d964a3, 0x26948fde, 0x30adf212,
+ 0x1f57b400, 0x3766c914, 0x1e7f9d1c, 0x33258b59, 0x522ab2c2, 0x3dc99798,
+ 0x15f53fe2, 0x05636669, 0x354b59c3, 0x1c37ebd4, 0x0bb7ebf9, 0x0e4e87f9,
+ 0x680d3124, 0x2770d549, 0x0c5e112e, 0x74aaa7ed, 0x06c0b550, 0x342b5922,
+ 0x4532ab5b, 0x4257dbee, 0x087f32a9, 0x45ada3e3, 0x7a854272, 0x061625f2,
+ 0x47c85a91, 0x25ad375d, 0x2809bd9d, 0x168b9348, 0x4381b0a3, 0x6f2dc6ca,
+ 0x122e54f6, 0x6c3228a6, 0x653c1652, 0x60b60584, 0x1d304b77, 0x4cc74c58,
+ 0x087e3dd5, 0x79bd540e, 0x79ab7a70, 0x26fcd1c9, 0x342abaaf, 0x644716b0,
+ 0x01f076cb, 0x73628937, 0x20b01ff8, 0x5832b80b, 0x2f77fc92, 0x4468d962,
+ 0x2bac2679, 0x7f850778, 0x47d2997c, 0x02690cb7, 0x7de54951, 0x54d80b14,
+ 0x5e0c6854, 0x313cc749, 0x622b86ba, 0x38dbf6d3, 0x045d3e52, 0x574f87fd,
+ 0x09f1b078, 0x31784f71, 0x4f01dd2f, 0x1874c9f9, 0x5837c7af, 0x2372f768,
+ 0x531bd1e8, 0x61816c0b, 0x4592995f, 0x156463c0, 0x250c5afe, 0x40c83178,
+ 0x4396f6b7, 0x29bdbec0, 0x43ea8ca5, 0x5c474696, 0x2c869192, 0x2ff2f51a,
+ 0x7c963fe5, 0x294319c1, 0x019fbe26, 0x72fa8e68, 0x245ca463, 0x4ca88208,
+ 0x72ac845a, 0x25307181, 0x2cdf88f7, 0x0adbfebd, 0x2eea465b, 0x52e4eee0,
+ 0x084daacd, 0x717ce67e, 0x594087c2, 0x2b8ee5c7, 0x4558f811, 0x76b65ba4,
+ 0x5de05e09, 0x3db76e27, 0x3c75110d, 0x04ca67e7, 0x51cd6d09, 0x7b4e9c3e,
+ 0x7cdda4d2, 0x674fb021, 0x7d372d2d, 0x13f7978b, 0x5fb106b1, 0x034377d1,
+ 0x2e5336f3, 0x099bb17d, 0x04e6755e, 0x34f73c1e, 0x004e0a0d, 0x7f2c32e2,
+ 0x1fc8f910, 0x67d0859d, 0x76462b25, 0x59fa9a17, 0x028e53ef, 0x3d6d5fdd,
+ 0x79a4671e, 0x5cbec506, 0x2c23ee6d, 0x628a2c1e, 0x4dae87bd, 0x07a189ea,
+ 0x3a414a96, 0x5915f622, 0x6bea011e, 0x412674cf, 0x07ecc314, 0x6a7dbce8,
+ 0x7e176f10, 0x68e60d47, 0x079ea970, 0x79f3b55c, 0x65a46098, 0x56155533,
+ 0x7e5d0272, 0x795bfad5, 0x094da770, 0x05ba427c, 0x152e430e, 0x187d8470,
+ 0x08e607bc, 0x45ce5ef9, 0x654231ae, 0x38d8cb48, 0x605632f8, 0x25cf8ee9,
+ 0x11497170, 0x171a3b00, 0x0f103d49, 0x24826483, 0x2848e187, 0x7498919b,
+ 0x1bb788cb, 0x791ad5c7, 0x5129330e, 0x016c4436, 0x430f05bf, 0x1f06b5cd,
+ 0x62df1378, 0x0423b9b4, 0x0341acaf, 0x3189543c, 0x7b96b2ea, 0x6c4865c3,
+ 0x4cc7adc3, 0x78a2bff6, 0x642db7c7, 0x70d02300, 0x7cd43ac0, 0x4f5fe414,
+ 0x333b52c2, 0x500d3c74, 0x65782c01, 0x3f72a2c5, 0x278f59d8, 0x493bf7f8,
+ 0x16bf51a0, 0x6cc70ced, 0x6ed15979, 0x1a77abae, 0x08cadbb7, 0x2f2e0bc0,
+ 0x236f5e8d, 0x1a4b4495, 0x360bd008, 0x32227d40};
+
+int main() {
+ SHA1Sum sum;
+ SHA1Sum::Hash hash;
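+  // Hash the raw in-memory bytes of gTestV (1024 32-bit words, i.e. 4096
+  // bytes) in a single update()/finish() pass.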
+ sum.update(reinterpret_cast<const uint8_t*>(gTestV), sizeof(gTestV));
+ sum.finish(hash);
+
+ static const uint8_t expected[20] = {0xc8, 0xf2, 0x09, 0x59, 0x4e, 0x64, 0x40,
+ 0xaa, 0x7b, 0xf7, 0xb8, 0xe0, 0xfa, 0x44,
+ 0xb2, 0x31, 0x95, 0xad, 0x94, 0x81};
+
+ static_assert(sizeof(expected) == sizeof(SHA1Sum::Hash),
+ "expected-data size should be the same as the actual hash "
+ "size");
+
+ for (size_t i = 0; i < SHA1Sum::kHashSize; i++) {
+ MOZ_RELEASE_ASSERT(hash[i] == expected[i]);
+ }
+
+ return 0;
+}
diff --git a/mfbt/tests/TestSIMD.cpp b/mfbt/tests/TestSIMD.cpp
new file mode 100644
index 0000000000..23dc8b0117
--- /dev/null
+++ b/mfbt/tests/TestSIMD.cpp
@@ -0,0 +1,631 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/SIMD.h"
+
+using mozilla::SIMD;
+
+void TestTinyString() {
+ const char* test = "012\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '0', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '0', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '1', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '1', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '2', 3) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '2', 3) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '\n', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '\n', 3) == nullptr);
+}
+
+void TestShortString() {
+ const char* test = "0123456789\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '0', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '0', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '1', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '1', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '2', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '2', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '3', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '3', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '4', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '4', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '5', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '5', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '6', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '6', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '7', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '7', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '8', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '8', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '9', 10) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '9', 10) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '\n', 10) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '\n', 10) == nullptr);
+}
+
+void TestMediumString() {
+ const char* test = "0123456789abcdef\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '0', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '0', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '1', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '1', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '2', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '2', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '3', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '3', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '4', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '4', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '5', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '5', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '6', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '6', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '7', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '7', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '8', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '8', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '9', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '9', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'a', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'a', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'b', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'b', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'c', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'c', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'd', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'd', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'e', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'e', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, 'f', 16) == test + 0xf);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, 'f', 16) == test + 0xf);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, '\n', 16) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test, '\n', 16) == nullptr);
+}
+
+void TestLongString() {
+  // NOTE: we go all the way up to 256 here to ensure that negative-valued
+  // chars are handled appropriately. There is no need to test this with
+  // char16_t, which is guaranteed to be unsigned.
+ const size_t count = 256;
+ char test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = static_cast<char>(i);
+ }
+
+ for (size_t i = 0; i < count - 1; ++i) {
+ MOZ_RELEASE_ASSERT(SIMD::memchr8(test, static_cast<char>(i), count - 1) ==
+ test + i);
+ MOZ_RELEASE_ASSERT(
+ SIMD::memchr8SSE2(test, static_cast<char>(i), count - 1) == test + i);
+ }
+  MOZ_RELEASE_ASSERT(
+      SIMD::memchr8(test, static_cast<char>(count - 1), count - 1) == nullptr);
+  MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(
+      test, static_cast<char>(count - 1), count - 1) == nullptr);
+}
+
+void TestGauntlet() {
+ const size_t count = 256;
+ char test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = static_cast<char>(i);
+ }
+
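+  // Exhaustively check every start k, end i, and needle j: a match at
+  // test + j is expected exactly when the needle lies inside the searched
+  // range, i.e. k <= j < i.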
+ for (size_t i = 0; i < count - 1; ++i) {
+ for (size_t j = 0; j < count - 1; ++j) {
+ for (size_t k = 0; k < count - 1; ++k) {
+ if (i >= k) {
+ const char* expected = nullptr;
+ if (j >= k && j < i) {
+ expected = test + j;
+ }
+ MOZ_RELEASE_ASSERT(
+ SIMD::memchr8(test + k, static_cast<char>(j), i - k) == expected);
+ MOZ_RELEASE_ASSERT(SIMD::memchr8SSE2(test + k, static_cast<char>(j),
+ i - k) == expected);
+ }
+ }
+ }
+ }
+}
+
+void TestTinyString16() {
+ const char16_t* test = u"012\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'0', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'0', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'1', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'1', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'2', 3) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'2', 3) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'\n', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'\n', 3) == nullptr);
+}
+
+void TestShortString16() {
+ const char16_t* test = u"0123456789\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'0', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'0', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'1', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'1', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'2', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'2', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'3', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'3', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'4', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'4', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'5', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'5', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'6', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'6', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'7', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'7', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'8', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'8', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'9', 10) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'9', 10) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'\n', 10) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'\n', 10) == nullptr);
+}
+
+void TestMediumString16() {
+ const char16_t* test = u"0123456789abcdef\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'0', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'0', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'1', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'1', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'2', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'2', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'3', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'3', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'4', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'4', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'5', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'5', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'6', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'6', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'7', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'7', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'8', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'8', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'9', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'9', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'a', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'a', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'b', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'b', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'c', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'c', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'd', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'd', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'e', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'e', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'f', 16) == test + 0xf);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'f', 16) == test + 0xf);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test, u'\n', 16) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, u'\n', 16) == nullptr);
+}
+
+void TestLongString16() {
+ const size_t count = 256;
+ char16_t test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = i;
+ }
+
+ for (size_t i = 0; i < count - 1; ++i) {
+ MOZ_RELEASE_ASSERT(
+ SIMD::memchr16(test, static_cast<char16_t>(i), count - 1) == test + i);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test, static_cast<char16_t>(i),
+ count - 1) == test + i);
+ }
+  MOZ_RELEASE_ASSERT(SIMD::memchr16(test, static_cast<char16_t>(count - 1),
+                                    count - 1) == nullptr);
+  MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(
+      test, static_cast<char16_t>(count - 1), count - 1) == nullptr);
+}
+
+void TestGauntlet16() {
+ const size_t count = 257;
+ char16_t test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = i;
+ }
+
+ for (size_t i = 0; i < count - 1; ++i) {
+ for (size_t j = 0; j < count - 1; ++j) {
+ for (size_t k = 0; k < count - 1; ++k) {
+ if (i >= k) {
+ const char16_t* expected = nullptr;
+ if (j >= k && j < i) {
+ expected = test + j;
+ }
+ MOZ_RELEASE_ASSERT(SIMD::memchr16(test + k, static_cast<char16_t>(j),
+ i - k) == expected);
+ MOZ_RELEASE_ASSERT(SIMD::memchr16SSE2(test + k,
+ static_cast<char16_t>(j),
+ i - k) == expected);
+ }
+ }
+ }
+ }
+}
+
+void TestTinyString64() {
+ const uint64_t test[4] = {0, 1, 2, 3};
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 0, 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 1, 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 2, 3) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 3, 3) == nullptr);
+}
+
+void TestShortString64() {
+ const uint64_t test[16] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 0, 15) == test + 0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 1, 15) == test + 1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 2, 15) == test + 2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 3, 15) == test + 3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 4, 15) == test + 4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 5, 15) == test + 5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 6, 15) == test + 6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 7, 15) == test + 7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 8, 15) == test + 8);
+  MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 9, 15) == test + 9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 10, 15) == test + 10);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 11, 15) == test + 11);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 12, 15) == test + 12);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 13, 15) == test + 13);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 14, 15) == test + 14);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 15, 15) == nullptr);
+}
+
+void TestMediumString64() {
+ const uint64_t test[32] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 0, 31) == test + 0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 1, 31) == test + 1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 2, 31) == test + 2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 3, 31) == test + 3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 4, 31) == test + 4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 5, 31) == test + 5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 6, 31) == test + 6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 7, 31) == test + 7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 8, 31) == test + 8);
+  MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 9, 31) == test + 9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 10, 31) == test + 10);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 11, 31) == test + 11);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 12, 31) == test + 12);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 13, 31) == test + 13);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 14, 31) == test + 14);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 15, 31) == test + 15);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 16, 31) == test + 16);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 17, 31) == test + 17);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 18, 31) == test + 18);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 19, 31) == test + 19);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 20, 31) == test + 20);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 21, 31) == test + 21);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 22, 31) == test + 22);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 23, 31) == test + 23);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 24, 31) == test + 24);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 25, 31) == test + 25);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 26, 31) == test + 26);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 27, 31) == test + 27);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 28, 31) == test + 28);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 29, 31) == test + 29);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 30, 31) == test + 30);
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, 31, 31) == nullptr);
+}
+
+void TestLongString64() {
+ const size_t count = 256;
+ uint64_t test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = i;
+ }
+
+ for (uint64_t i = 0; i < count - 1; ++i) {
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, i, count - 1) == test + i);
+ }
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test, count - 1, count - 1) == nullptr);
+}
+
+void TestGauntlet64() {
+ const size_t count = 257;
+ uint64_t test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = i;
+ }
+
+ for (uint64_t i = 0; i < count - 1; ++i) {
+ for (uint64_t j = 0; j < count - 1; ++j) {
+ for (uint64_t k = 0; k < count - 1; ++k) {
+ if (i >= k) {
+ const uint64_t* expected = nullptr;
+ if (j >= k && j < i) {
+ expected = test + j;
+ }
+ MOZ_RELEASE_ASSERT(SIMD::memchr64(test + k, j, i - k) == expected);
+ }
+ }
+ }
+ }
+}
+
+void TestTinyString2x8() {
+ const char* test = "012\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '0', '1', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '1', '2', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '2', '\n', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '0', '2', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '1', '\n', 3) == nullptr);
+}
+
+void TestShortString2x8() {
+ const char* test = "0123456789\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '0', '1', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '1', '2', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '2', '3', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '3', '4', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '4', '5', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '5', '6', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '6', '7', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '7', '8', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '8', '9', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '9', '\n', 10) == nullptr);
+}
+
+void TestMediumString2x8() {
+ const char* test = "0123456789abcdef\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '0', '1', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '1', '2', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '2', '3', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '3', '4', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '4', '5', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '5', '6', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '6', '7', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '7', '8', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '8', '9', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, '9', 'a', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'a', 'b', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'b', 'c', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'c', 'd', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'd', 'e', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'e', 'f', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, 'f', '\n', 16) == nullptr);
+}
+
+void TestLongString2x8() {
+ const size_t count = 256;
+ char test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = static_cast<char>(i);
+ }
+
+ for (size_t i = 0; i < count - 2; ++i) {
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, static_cast<char>(i),
+ static_cast<char>(i + 1),
+ count - 1) == test + i);
+ }
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test, static_cast<char>(count - 2),
+ static_cast<char>(count - 1),
+ count - 1) == nullptr);
+}
+
+void TestTinyString2x16() {
+ const char16_t* test = u"012\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'1', 3) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'1', u'2', 3) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'2', u'\n', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'2', 3) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'1', u'\n', 3) == nullptr);
+}
+
+void TestShortString2x16() {
+ const char16_t* test = u"0123456789\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'1', 10) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'1', u'2', 10) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'2', u'3', 10) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'3', u'4', 10) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'4', u'5', 10) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'5', u'6', 10) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'6', u'7', 10) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'7', u'8', 10) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'8', u'9', 10) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'9', u'\n', 10) == nullptr);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'2', 10) == nullptr);
+}
+
+void TestMediumString2x16() {
+ const char16_t* test = u"0123456789abcdef\n";
+
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'1', 16) == test + 0x0);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'1', u'2', 16) == test + 0x1);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'2', u'3', 16) == test + 0x2);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'3', u'4', 16) == test + 0x3);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'4', u'5', 16) == test + 0x4);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'5', u'6', 16) == test + 0x5);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'6', u'7', 16) == test + 0x6);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'7', u'8', 16) == test + 0x7);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'8', u'9', 16) == test + 0x8);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'9', u'a', 16) == test + 0x9);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'a', u'b', 16) == test + 0xa);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'b', u'c', 16) == test + 0xb);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'c', u'd', 16) == test + 0xc);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'd', u'e', 16) == test + 0xd);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'e', u'f', 16) == test + 0xe);
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'f', u'\n', 16) == nullptr);
+  MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, u'0', u'2', 16) == nullptr);
+}
+
+void TestLongString2x16() {
+ const size_t count = 257;
+ char16_t test[count];
+ for (size_t i = 0; i < count; ++i) {
+ test[i] = static_cast<char16_t>(i);
+ }
+
+ for (size_t i = 0; i < count - 2; ++i) {
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, static_cast<char16_t>(i),
+ static_cast<char16_t>(i + 1),
+ count - 1) == test + i);
+ }
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test, static_cast<char16_t>(count - 2),
+ static_cast<char16_t>(count - 1),
+ count - 1) == nullptr);
+}
+
+void TestGauntlet2x8() {
+ const size_t count = 256;
+ char test[count * 2];
+ // load in the evens
+ for (size_t i = 0; i < count / 2; ++i) {
+ test[i] = static_cast<char>(2 * i);
+ }
+ // load in the odds
+ for (size_t i = 0; i < count / 2; ++i) {
+ test[count / 2 + i] = static_cast<char>(2 * i + 1);
+ }
+ // load in evens and odds sequentially
+ for (size_t i = 0; i < count; ++i) {
+ test[count + i] = static_cast<char>(i);
+ }
+
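+  // The first half of the buffer interleaves evens then odds, so no two
+  // adjacent bytes there hold consecutive values and memchr2x8 must not match
+  // in it; only the sequential second half can produce the expected matches.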
+ for (size_t i = 0; i < count - 1; ++i) {
+ for (size_t j = 0; j < count - 2; ++j) {
+ for (size_t k = 0; k < count - 1; ++k) {
+ if (i > k + 1) {
+ const char* expected1 = nullptr;
+ const char* expected2 = nullptr;
+ if (i > j + 1) {
+ expected1 = test + j + count; // Add count to skip over odds/evens
+ if (j >= k) {
+ expected2 = test + j + count;
+ }
+ }
+ char a = static_cast<char>(j);
+ char b = static_cast<char>(j + 1);
+ // Make sure it doesn't pick up any in the alternating odd/even
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test + k, a, b, i - k + count) ==
+ expected1);
+ // Make sure we cover smaller inputs
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test + k + count, a, b, i - k) ==
+ expected2);
+ }
+ }
+ }
+ }
+}
+
+void TestGauntlet2x16() {
+ const size_t count = 1024;
+ char16_t test[count * 2];
+ // load in the evens
+ for (size_t i = 0; i < count / 2; ++i) {
+ test[i] = static_cast<char16_t>(2 * i);
+ }
+ // load in the odds
+ for (size_t i = 0; i < count / 2; ++i) {
+ test[count / 2 + i] = static_cast<char16_t>(2 * i + 1);
+ }
+ // load in evens and odds sequentially
+ for (size_t i = 0; i < count; ++i) {
+ test[count + i] = static_cast<char16_t>(i);
+ }
+
+ for (size_t i = 0; i < count - 1; ++i) {
+ for (size_t j = 0; j < count - 2; ++j) {
+ for (size_t k = 0; k < count - 1; ++k) {
+ if (i > k + 1) {
+ const char16_t* expected1 = nullptr;
+ const char16_t* expected2 = nullptr;
+ if (i > j + 1) {
+ expected1 = test + j + count; // Add count to skip over odds/evens
+ if (j >= k) {
+ expected2 = test + j + count;
+ }
+ }
+ char16_t a = static_cast<char16_t>(j);
+ char16_t b = static_cast<char16_t>(j + 1);
+ // Make sure it doesn't pick up any in the alternating odd/even
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test + k, a, b, i - k + count) ==
+ expected1);
+ // Make sure we cover smaller inputs
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test + k + count, a, b, i - k) ==
+ expected2);
+ }
+ }
+ }
+ }
+}
+
+void TestSpecialCases() {
+ // The following 4 asserts test the case where we do two overlapping checks,
+ // where the first one ends with our first search character, and the second
+ // one begins with our search character. Since they are overlapping, we want
+ // to ensure that the search function doesn't carry the match from the
+ // first check over to the second check.
+ const char* test1 = "x123456789abcdey";
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test1, 'y', 'x', 16) == nullptr);
+ const char* test2 = "1000000000000000200000000000000030b000000000000a40";
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x8(test2, 'a', 'b', 50) == nullptr);
+ const char16_t* test1wide = u"x123456y";
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test1wide, 'y', 'x', 8) == nullptr);
+ const char16_t* test2wide = u"100000002000000030b0000a40";
+ MOZ_RELEASE_ASSERT(SIMD::memchr2x16(test2wide, 'a', 'b', 26) == nullptr);
+}
+
+int main(void) {
+ TestTinyString();
+ TestShortString();
+ TestMediumString();
+ TestLongString();
+ TestGauntlet();
+
+ TestTinyString16();
+ TestShortString16();
+ TestMediumString16();
+ TestLongString16();
+ TestGauntlet16();
+
+ TestTinyString64();
+ TestShortString64();
+ TestMediumString64();
+ TestLongString64();
+ TestGauntlet64();
+
+ TestTinyString2x8();
+ TestShortString2x8();
+ TestMediumString2x8();
+ TestLongString2x8();
+
+ TestTinyString2x16();
+ TestShortString2x16();
+ TestMediumString2x16();
+ TestLongString2x16();
+
+ TestSpecialCases();
+
+ // These are too slow to run all the time, but they should be run when making
+ // meaningful changes just to be sure.
+ // TestGauntlet2x8();
+ // TestGauntlet2x16();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestSPSCQueue.cpp b/mfbt/tests/TestSPSCQueue.cpp
new file mode 100644
index 0000000000..e54d911b85
--- /dev/null
+++ b/mfbt/tests/TestSPSCQueue.cpp
@@ -0,0 +1,302 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SPSCQueue.h"
+#include "mozilla/PodOperations.h"
+#include <vector>
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <memory>
+#include <string>
+
+#ifdef _WIN32
+# include <windows.h>
+#endif
+
+using namespace mozilla;
+
+/* Generate a monotonically increasing sequence of numbers. */
+template <typename T>
+class SequenceGenerator {
+ public:
+ SequenceGenerator() = default;
+ void Get(T* aElements, size_t aCount) {
+ for (size_t i = 0; i < aCount; i++) {
+ aElements[i] = static_cast<T>(mIndex);
+ mIndex++;
+ }
+ }
+ void Rewind(size_t aCount) { mIndex -= aCount; }
+
+ private:
+ size_t mIndex = 0;
+};
+
+/* Checks that a sequence is monotonically increasing. */
+template <typename T>
+class SequenceVerifier {
+ public:
+ SequenceVerifier() = default;
+ void Check(T* aElements, size_t aCount) {
+ for (size_t i = 0; i < aCount; i++) {
+ if (aElements[i] != static_cast<T>(mIndex)) {
+ std::cerr << "Element " << i << " is different. Expected "
+ << static_cast<T>(mIndex) << ", got " << aElements[i] << "."
+ << std::endl;
+ MOZ_RELEASE_ASSERT(false);
+ }
+ mIndex++;
+ }
+ }
+
+ private:
+ size_t mIndex = 0;
+};
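+
+// A minimal sketch of how the generator and verifier pair up:
+//
+//   SequenceGenerator<int> gen;
+//   SequenceVerifier<int> check;
+//   int buf[4];
+//   gen.Get(buf, 4);      // buf == {0, 1, 2, 3}; gen's cursor is now 4
+//   check.Check(buf, 4);  // passes; check's cursor advances to 4 as well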
+
+const int BLOCK_SIZE = 127;
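+// (127 is presumably chosen because it is prime and never divides any of the
+// capacities tested in main(), so each enqueue/dequeue begins at a different
+// offset within the ring.)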
+
+template <typename T>
+void TestRing(int capacity) {
+ SPSCQueue<T> buf(capacity);
+ std::unique_ptr<T[]> seq(new T[capacity]);
+ SequenceGenerator<T> gen;
+ SequenceVerifier<T> checker;
+
+ int iterations = 1002;
+
+ while (iterations--) {
+ gen.Get(seq.get(), BLOCK_SIZE);
+ int rv = buf.Enqueue(seq.get(), BLOCK_SIZE);
+ MOZ_RELEASE_ASSERT(rv == BLOCK_SIZE);
+ PodZero(seq.get(), BLOCK_SIZE);
+ rv = buf.Dequeue(seq.get(), BLOCK_SIZE);
+ MOZ_RELEASE_ASSERT(rv == BLOCK_SIZE);
+ checker.Check(seq.get(), BLOCK_SIZE);
+ }
+}
+
+void Delay() {
+ // On Windows and x86 Android, the timer resolution is so bad that, even if
+ // we used `timeBeginPeriod(1)`, any nonzero sleep from the test's inner loops
+ // would make this program take far too long.
+#ifdef _WIN32
+ Sleep(0);
+#elif defined(ANDROID)
+ std::this_thread::sleep_for(std::chrono::microseconds(0));
+#else
+ std::this_thread::sleep_for(std::chrono::microseconds(10));
+#endif
+}
+
+template <typename T>
+void TestRingMultiThread(int capacity) {
+ SPSCQueue<T> buf(capacity);
+ SequenceVerifier<T> checker;
+ std::unique_ptr<T[]> outBuffer(new T[capacity]);
+
+ std::thread t([&buf, capacity] {
+ int iterations = 1002;
+ std::unique_ptr<T[]> inBuffer(new T[capacity]);
+ SequenceGenerator<T> gen;
+
+ while (iterations--) {
+ Delay();
+ gen.Get(inBuffer.get(), BLOCK_SIZE);
+ int rv = buf.Enqueue(inBuffer.get(), BLOCK_SIZE);
+ MOZ_RELEASE_ASSERT(rv <= BLOCK_SIZE);
+ if (rv != BLOCK_SIZE) {
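+        // The ring was full, so only `rv` of the BLOCK_SIZE elements were
+        // written; rewinding the generator by the unwritten count keeps the
+        // produced sequence gapless for the consumer's verifier.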
+ gen.Rewind(BLOCK_SIZE - rv);
+ }
+ }
+ });
+
+ int remaining = 1002;
+
+ while (remaining--) {
+ Delay();
+ int rv = buf.Dequeue(outBuffer.get(), BLOCK_SIZE);
+ MOZ_RELEASE_ASSERT(rv <= BLOCK_SIZE);
+ checker.Check(outBuffer.get(), rv);
+ }
+
+ t.join();
+}
+
+template <typename T>
+void BasicAPITest(T& ring) {
+ MOZ_RELEASE_ASSERT(ring.Capacity() == 128);
+
+ MOZ_RELEASE_ASSERT(ring.AvailableRead() == 0);
+ MOZ_RELEASE_ASSERT(ring.AvailableWrite() == 128);
+
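+  // 63 + 65 == 128: the two EnqueueDefault calls below together fill the
+  // ring to exactly its capacity, exercising both the partially-full and
+  // the completely-full boundary states before the dequeues drain it.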
+ int rv = ring.EnqueueDefault(63);
+
+ MOZ_RELEASE_ASSERT(rv == 63);
+ MOZ_RELEASE_ASSERT(ring.AvailableRead() == 63);
+ MOZ_RELEASE_ASSERT(ring.AvailableWrite() == 65);
+
+ rv = ring.EnqueueDefault(65);
+
+ MOZ_RELEASE_ASSERT(rv == 65);
+ MOZ_RELEASE_ASSERT(ring.AvailableRead() == 128);
+ MOZ_RELEASE_ASSERT(ring.AvailableWrite() == 0);
+
+ rv = ring.Dequeue(nullptr, 63);
+
+ MOZ_RELEASE_ASSERT(ring.AvailableRead() == 65);
+ MOZ_RELEASE_ASSERT(ring.AvailableWrite() == 63);
+
+ rv = ring.Dequeue(nullptr, 65);
+
+ MOZ_RELEASE_ASSERT(ring.AvailableRead() == 0);
+ MOZ_RELEASE_ASSERT(ring.AvailableWrite() == 128);
+}
+
+const size_t RING_BUFFER_SIZE = 128;
+const size_t ENQUEUE_SIZE = RING_BUFFER_SIZE / 2;
+
+void TestResetAPI() {
+ SPSCQueue<float> ring(RING_BUFFER_SIZE);
+ std::thread p([&ring] {
+ std::unique_ptr<float[]> inBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Enqueue(inBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+
+ p.join();
+
+ std::thread c([&ring] {
+ std::unique_ptr<float[]> outBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Dequeue(outBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+
+ c.join();
+
+  // Enqueue from a different thread. Because we reset the producer thread ID
+  // in the ring buffer first, this should work.
+ std::thread p2([&ring] {
+ ring.ResetProducerThreadId();
+ std::unique_ptr<float[]> inBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Enqueue(inBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+
+ p2.join();
+
+  // Dequeue from a different thread. Because we reset the consumer thread ID
+  // in the ring buffer first, this should work.
+ std::thread c2([&ring] {
+ ring.ResetConsumerThreadId();
+ std::unique_ptr<float[]> outBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Dequeue(outBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+
+ c2.join();
+
+ // Similarly, but do the Enqueues without a Dequeue in between, since a
+ // Dequeue could affect memory ordering.
+ std::thread p4;
+ std::thread p3([&] {
+ ring.ResetProducerThreadId();
+ std::unique_ptr<float[]> inBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Enqueue(inBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ p4 = std::thread([&ring] {
+ ring.ResetProducerThreadId();
+ std::unique_ptr<float[]> inBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Enqueue(inBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+ });
+
+ p3.join();
+ p4.join();
+
+ std::thread c4;
+ std::thread c3([&] {
+ ring.ResetConsumerThreadId();
+ std::unique_ptr<float[]> outBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Dequeue(outBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ c4 = std::thread([&ring] {
+ ring.ResetConsumerThreadId();
+ std::unique_ptr<float[]> outBuffer(new float[ENQUEUE_SIZE]);
+ int rv = ring.Dequeue(outBuffer.get(), ENQUEUE_SIZE);
+ MOZ_RELEASE_ASSERT(rv > 0);
+ });
+ });
+
+ c3.join();
+ c4.join();
+}
+
+void TestMove() {
+ const size_t ELEMENT_COUNT = 16;
+ struct Thing {
+ Thing() : mStr("") {}
+ explicit Thing(const std::string& aStr) : mStr(aStr) {}
+ Thing(Thing&& aOtherThing) {
+ mStr = std::move(aOtherThing.mStr);
+ // aOtherThing.mStr.clear();
+ }
+ Thing& operator=(Thing&& aOtherThing) {
+ mStr = std::move(aOtherThing.mStr);
+ return *this;
+ }
+ std::string mStr;
+ };
+
+ std::vector<Thing> vec_in;
+ std::vector<Thing> vec_out;
+
+ for (uint32_t i = 0; i < ELEMENT_COUNT; i++) {
+ vec_in.push_back(Thing(std::to_string(i)));
+ vec_out.push_back(Thing());
+ }
+
+ SPSCQueue<Thing> queue(ELEMENT_COUNT);
+
+ int rv = queue.Enqueue(&vec_in[0], ELEMENT_COUNT);
+ MOZ_RELEASE_ASSERT(rv == ELEMENT_COUNT);
+
+  // Check that we've moved the std::string into the queue. (This relies on
+  // the moved-from std::string being left empty, which mainstream standard
+  // library implementations do in practice.)
+ for (uint32_t i = 0; i < ELEMENT_COUNT; i++) {
+ MOZ_RELEASE_ASSERT(vec_in[i].mStr.empty());
+ }
+
+ rv = queue.Dequeue(&vec_out[0], ELEMENT_COUNT);
+ MOZ_RELEASE_ASSERT(rv == ELEMENT_COUNT);
+
+ for (uint32_t i = 0; i < ELEMENT_COUNT; i++) {
+ MOZ_RELEASE_ASSERT(std::stoul(vec_out[i].mStr) == i);
+ }
+}
+
+int main() {
+ const int minCapacity = 199;
+ const int maxCapacity = 1277;
+ const int capacityIncrement = 27;
+
+ SPSCQueue<float> q1(128);
+ BasicAPITest(q1);
+ SPSCQueue<char> q2(128);
+ BasicAPITest(q2);
+
+ for (uint32_t i = minCapacity; i < maxCapacity; i += capacityIncrement) {
+ TestRing<uint32_t>(i);
+ TestRingMultiThread<uint32_t>(i);
+ TestRing<float>(i);
+ TestRingMultiThread<float>(i);
+ }
+
+ TestResetAPI();
+ TestMove();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestSaturate.cpp b/mfbt/tests/TestSaturate.cpp
new file mode 100644
index 0000000000..500c9eed7f
--- /dev/null
+++ b/mfbt/tests/TestSaturate.cpp
@@ -0,0 +1,181 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <mozilla/Saturate.h>
+
+#include <mozilla/Assertions.h>
+
+#include <limits>
+
+using mozilla::detail::Saturate;
+
+#define A(a) MOZ_RELEASE_ASSERT(a, "Test \'" #a "\' failed.")
+
+static const unsigned long sNumOps = 32;
+
+template <typename T>
+static T StartValue() {
+ // Specialize |StartValue| for the given type.
+ A(false);
+}
+
+template <>
+int8_t StartValue<int8_t>() {
+ return 0;
+}
+
+template <>
+int16_t StartValue<int16_t>() {
+ return 0;
+}
+
+template <>
+int32_t StartValue<int32_t>() {
+ return 0;
+}
+
+template <>
+uint8_t StartValue<uint8_t>() {
+ // Picking a value near middle of uint8_t's range.
+ return static_cast<uint8_t>(std::numeric_limits<int8_t>::max());
+}
+
+template <>
+uint16_t StartValue<uint16_t>() {
+ // Picking a value near middle of uint16_t's range.
+  return static_cast<uint16_t>(std::numeric_limits<int16_t>::max());
+}
+
+template <>
+uint32_t StartValue<uint32_t>() {
+ // Picking a value near middle of uint32_t's range.
+  return static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
+}
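+
+// The unsigned specializations start near mid-range so that the +/- sNumOps
+// steps taken by the tests below never reach the type's bounds; TestAll()
+// asserts this precondition explicitly.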
+
+// Add
+//
+
+template <typename T>
+static void TestPrefixIncr() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A(++value == ++satValue);
+ }
+}
+
+template <typename T>
+static void TestPostfixIncr() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A(value++ == satValue++);
+ }
+}
+
+template <typename T>
+static void TestAdd() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A((value + i) == (satValue + i));
+ }
+}
+
+// Subtract
+//
+
+template <typename T>
+static void TestPrefixDecr() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A(--value == --satValue);
+ }
+}
+
+template <typename T>
+static void TestPostfixDecr() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A(value-- == satValue--);
+ }
+}
+
+template <typename T>
+static void TestSub() {
+ T value = StartValue<T>();
+ Saturate<T> satValue(value);
+
+ for (T i = 0; i < static_cast<T>(sNumOps); ++i) {
+ A((value - i) == (satValue - i));
+ }
+}
+
+// Corner cases near bounds
+//
+
+template <typename T>
+static void TestUpperBound() {
+ Saturate<T> satValue(std::numeric_limits<T>::max());
+
+ A(--satValue == (std::numeric_limits<T>::max() - 1));
+ A(++satValue == (std::numeric_limits<T>::max()));
+ A(++satValue == (std::numeric_limits<T>::max())); // don't overflow here
+ A(++satValue == (std::numeric_limits<T>::max())); // don't overflow here
+ A(--satValue == (std::numeric_limits<T>::max() - 1)); // back at (max - 1)
+ A(--satValue == (std::numeric_limits<T>::max() - 2));
+}
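+
+// For T = uint8_t, for instance, the sequence asserted above is
+// 255 -> 254 -> 255 -> 255 (clamped) -> 255 (clamped) -> 254 -> 253.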
+
+template <typename T>
+static void TestLowerBound() {
+ Saturate<T> satValue(std::numeric_limits<T>::min());
+
+ A(++satValue == (std::numeric_limits<T>::min() + 1));
+ A(--satValue == (std::numeric_limits<T>::min()));
+ A(--satValue == (std::numeric_limits<T>::min())); // don't overflow here
+ A(--satValue == (std::numeric_limits<T>::min())); // don't overflow here
+  A(++satValue == (std::numeric_limits<T>::min() + 1));  // back at (min + 1)
+ A(++satValue == (std::numeric_limits<T>::min() + 2));
+}
+
+// Framework
+//
+
+template <typename T>
+static void TestAll() {
+  // Assert that we don't accidentally hit the type's range limits in tests.
+ const T value = StartValue<T>();
+ A(std::numeric_limits<T>::min() + static_cast<T>(sNumOps) <= value);
+ A(std::numeric_limits<T>::max() - static_cast<T>(sNumOps) >= value);
+
+ TestPrefixIncr<T>();
+ TestPostfixIncr<T>();
+ TestAdd<T>();
+
+ TestPrefixDecr<T>();
+ TestPostfixDecr<T>();
+ TestSub<T>();
+
+ TestUpperBound<T>();
+ TestLowerBound<T>();
+}
+
+int main() {
+ TestAll<int8_t>();
+ TestAll<int16_t>();
+ TestAll<int32_t>();
+ TestAll<uint8_t>();
+ TestAll<uint16_t>();
+ TestAll<uint32_t>();
+ return 0;
+}
diff --git a/mfbt/tests/TestScopeExit.cpp b/mfbt/tests/TestScopeExit.cpp
new file mode 100644
index 0000000000..1c5eef68c2
--- /dev/null
+++ b/mfbt/tests/TestScopeExit.cpp
@@ -0,0 +1,55 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ScopeExit.h"
+
+using mozilla::MakeScopeExit;
+
+#define CHECK(c) \
+ do { \
+ bool cond = !!(c); \
+ MOZ_RELEASE_ASSERT(cond, "Failed assertion: " #c); \
+ if (!cond) { \
+ return false; \
+ } \
+ } while (false)
+
+static bool Test() {
+ int a = 1;
+ int b = 1;
+ int c = 1;
+
+ {
+ a++;
+ auto guardA = MakeScopeExit([&] { a--; });
+
+ b++;
+ auto guardB = MakeScopeExit([&] { b--; });
+
+ guardB.release();
+
+ c++;
+ auto guardC = MakeScopeExit([&] { c--; });
+
+ { auto guardC_ = std::move(guardC); }
+
+ CHECK(c == 1);
+ }
+
+ CHECK(a == 1);
+ CHECK(b == 2);
+ CHECK(c == 1);
+
+ return true;
+}
+
+int main() {
+ if (!Test()) {
+ return 1;
+ }
+ return 0;
+}
diff --git a/mfbt/tests/TestSegmentedVector.cpp b/mfbt/tests/TestSegmentedVector.cpp
new file mode 100644
index 0000000000..dd569ea7b6
--- /dev/null
+++ b/mfbt/tests/TestSegmentedVector.cpp
@@ -0,0 +1,388 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This is included first to ensure it doesn't implicitly depend on anything
+// else.
+#include "mozilla/SegmentedVector.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Assertions.h"
+
+using mozilla::SegmentedVector;
+
+// It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
+// but MFBT cannot use mozalloc.
+class InfallibleAllocPolicy {
+ public:
+ template <typename T>
+ T* pod_malloc(size_t aNumElems) {
+ if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+ MOZ_CRASH("TestSegmentedVector.cpp: overflow");
+ }
+ T* rv = static_cast<T*>(malloc(aNumElems * sizeof(T)));
+ if (!rv) {
+ MOZ_CRASH("TestSegmentedVector.cpp: out of memory");
+ }
+ return rv;
+ }
+
+ template <typename T>
+ void free_(T* aPtr, size_t aNumElems = 0) {
+ free(aPtr);
+ }
+};
+
+template <typename Vector>
+void CheckContents(Vector& vector, size_t expectedLength) {
+ MOZ_RELEASE_ASSERT(vector.Length() == expectedLength);
+ size_t n = 0;
+ for (auto iter = vector.Iter(); !iter.Done(); iter.Next()) {
+ MOZ_RELEASE_ASSERT(iter.Get() == int(n));
+ n++;
+ }
+ MOZ_RELEASE_ASSERT(n == expectedLength);
+}
+
+// We want to test Append(), which is fallible and marked with
+// [[nodiscard]]. But we're using an infallible alloc policy, and so
+// don't really need to check the result. Casting to |void| works with clang
+// but not GCC, so we instead use this dummy variable which works with both
+// compilers.
+static int gDummy;
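+
+// (With the C++17 [[nodiscard]] attribute, a cast to void does silence the
+// warning on current GCC as well; TestMoveAndSwap below uses
+// `(void)v.Append(i)` directly. The dummy variable is likely only needed
+// for compilers applying the older warn_unused_result semantics.)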
+
+// This tests basic segmented vector construction and iteration.
+void TestBasics() {
+ // A SegmentedVector with a POD element type.
+ typedef SegmentedVector<int, 1024, InfallibleAllocPolicy> MyVector;
+ MyVector v;
+ int i;
+
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+
+ // Add 100 elements, then check various things.
+ i = 0;
+ for (; i < 100; i++) {
+ gDummy = v.Append(std::move(i));
+ }
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ CheckContents(v, 100);
+
+ // Add another 900 elements, then re-check.
+ for (; i < 1000; i++) {
+ v.InfallibleAppend(std::move(i));
+ }
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ CheckContents(v, 1000);
+
+ // Pop off all of the elements.
+ MOZ_RELEASE_ASSERT(v.Length() == 1000);
+ for (int len = (int)v.Length(); len > 0; len--) {
+ MOZ_RELEASE_ASSERT(v.GetLast() == len - 1);
+ v.PopLast();
+ }
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+ MOZ_RELEASE_ASSERT(v.Length() == 0);
+
+ // Fill the vector up again to prepare for the clear.
+ for (i = 0; i < 1000; i++) {
+ v.InfallibleAppend(std::move(i));
+ }
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ MOZ_RELEASE_ASSERT(v.Length() == 1000);
+
+ v.Clear();
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+ MOZ_RELEASE_ASSERT(v.Length() == 0);
+
+ // Fill the vector up to verify PopLastN works.
+ for (i = 0; i < 1000; ++i) {
+ v.InfallibleAppend(std::move(i));
+ }
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ MOZ_RELEASE_ASSERT(v.Length() == 1000);
+
+ // Verify we pop the right amount of elements.
+ v.PopLastN(300);
+ MOZ_RELEASE_ASSERT(v.Length() == 700);
+
+ // Verify the contents are what we expect.
+ CheckContents(v, 700);
+}
+
+void TestMoveAndSwap() {
+ typedef SegmentedVector<int, 32, InfallibleAllocPolicy> MyVector;
+ MyVector v;
+
+ for (int i = 0; i < 100; i++) {
+ (void)v.Append(i);
+ }
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ CheckContents(v, 100);
+
+ // Test move constructor.
+ MyVector w(std::move(v));
+ CheckContents(w, 100);
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+
+ // Test move assignment.
+ v = std::move(w);
+ CheckContents(v, 100);
+ MOZ_RELEASE_ASSERT(w.IsEmpty());
+
+ // Test swap.
+ std::swap(v, w);
+ CheckContents(w, 100);
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+}
+
+static size_t gNumDefaultCtors;
+static size_t gNumExplicitCtors;
+static size_t gNumCopyCtors;
+static size_t gNumMoveCtors;
+static size_t gNumDtors;
+
+struct NonPOD {
+ NonPOD() { gNumDefaultCtors++; }
+ explicit NonPOD(int x) { gNumExplicitCtors++; }
+ NonPOD(NonPOD&) { gNumCopyCtors++; }
+ NonPOD(NonPOD&&) { gNumMoveCtors++; }
+ ~NonPOD() { gNumDtors++; }
+};
+
+// This tests how segmented vectors with non-POD elements construct and
+// destruct those elements.
+void TestConstructorsAndDestructors() {
+ size_t defaultCtorCalls = 0;
+ size_t explicitCtorCalls = 0;
+ size_t copyCtorCalls = 0;
+ size_t moveCtorCalls = 0;
+ size_t dtorCalls = 0;
+
+ {
+ static const size_t segmentSize = 64;
+
+ // A SegmentedVector with a non-POD element type.
+ NonPOD x(1); // explicit constructor called
+ explicitCtorCalls++;
+ SegmentedVector<NonPOD, segmentSize, InfallibleAllocPolicy> v;
+ // default constructor called 0 times
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+ gDummy = v.Append(x); // copy constructor called
+ copyCtorCalls++;
+ NonPOD y(1); // explicit constructor called
+ explicitCtorCalls++;
+ gDummy = v.Append(std::move(y)); // move constructor called
+ moveCtorCalls++;
+ NonPOD z(1); // explicit constructor called
+ explicitCtorCalls++;
+ v.InfallibleAppend(std::move(z)); // move constructor called
+ moveCtorCalls++;
+ v.PopLast(); // destructor called 1 time
+ dtorCalls++;
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ v.Clear(); // destructor called 2 times
+ dtorCalls += 2;
+
+ // Test that PopLastN() correctly calls the destructors of all the
+ // elements in the segments it destroys.
+ //
+ // We depend on the size of NonPOD when determining how many things
+ // to push onto the vector. It would be nicer to get this information
+ // from SegmentedVector itself...
+ static_assert(sizeof(NonPOD) == 1, "Fix length calculations!");
+
+ size_t nonFullLastSegmentSize = segmentSize - 1;
+ for (size_t i = 0; i < nonFullLastSegmentSize; ++i) {
+ gDummy = v.Append(x); // copy constructor called
+ copyCtorCalls++;
+ }
+ MOZ_RELEASE_ASSERT(gNumCopyCtors == copyCtorCalls);
+
+ // Pop some of the elements.
+ {
+ size_t partialPopAmount = 5;
+ MOZ_RELEASE_ASSERT(nonFullLastSegmentSize > partialPopAmount);
+ v.PopLastN(partialPopAmount); // destructor called partialPopAmount times
+ dtorCalls += partialPopAmount;
+ MOZ_RELEASE_ASSERT(v.Length() ==
+ nonFullLastSegmentSize - partialPopAmount);
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ }
+
+ // Pop a full segment.
+ {
+ size_t length = v.Length();
+ v.PopLastN(length);
+ dtorCalls += length;
+ // These two tests *are* semantically different given the underlying
+ // implementation; Length sums up the sizes of the internal segments,
+ // while IsEmpty looks at the sequence of internal segments.
+ MOZ_RELEASE_ASSERT(v.Length() == 0);
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ }
+
+ size_t multipleSegmentsSize = (segmentSize * 3) / 2;
+ for (size_t i = 0; i < multipleSegmentsSize; ++i) {
+ gDummy = v.Append(x); // copy constructor called
+ copyCtorCalls++;
+ }
+ MOZ_RELEASE_ASSERT(gNumCopyCtors == copyCtorCalls);
+
+ // Pop across segment boundaries.
+ {
+ v.PopLastN(segmentSize);
+ dtorCalls += segmentSize;
+ MOZ_RELEASE_ASSERT(v.Length() == (multipleSegmentsSize - segmentSize));
+ MOZ_RELEASE_ASSERT(!v.IsEmpty());
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ }
+
+ // Clear everything here to make calculations easier.
+ {
+ size_t length = v.Length();
+ v.Clear();
+ dtorCalls += length;
+ MOZ_RELEASE_ASSERT(v.IsEmpty());
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ }
+
+ MOZ_RELEASE_ASSERT(gNumDefaultCtors == defaultCtorCalls);
+ MOZ_RELEASE_ASSERT(gNumExplicitCtors == explicitCtorCalls);
+ MOZ_RELEASE_ASSERT(gNumCopyCtors == copyCtorCalls);
+ MOZ_RELEASE_ASSERT(gNumMoveCtors == moveCtorCalls);
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+ } // destructor called for x, y, z
+ dtorCalls += 3;
+ MOZ_RELEASE_ASSERT(gNumDtors == dtorCalls);
+}
+
+struct A {
+ int mX;
+ int mY;
+};
+struct B {
+ int mX;
+ char mY;
+ double mZ;
+};
+struct C {
+ A mA;
+ B mB;
+};
+struct D {
+ char mBuf[101];
+};
+struct E {};
+
+// This tests that we get the right segment capacities for specified segment
+// sizes, and that the elements are aligned appropriately.
+void TestSegmentCapacitiesAndAlignments() {
+ // When SegmentedVector's constructor is passed a size, it asserts that the
+ // vector's segment capacity results in a segment size equal to (or very
+ // close to) the passed size.
+ //
+ // Also, SegmentedVector has a static assertion that elements are
+ // appropriately aligned.
+ SegmentedVector<double, 512> v1(512);
+ SegmentedVector<A, 1024> v2(1024);
+ SegmentedVector<B, 999> v3(999);
+ SegmentedVector<C, 10> v4(10);
+ SegmentedVector<D, 1234> v5(1234);
+ SegmentedVector<E> v6(4096); // 4096 is the default segment size
+ SegmentedVector<mozilla::AlignedElem<16>, 100> v7(100);
+}
+
+void TestIterator() {
+ SegmentedVector<int, 4> v;
+
+ auto iter = v.Iter();
+ auto iterFromLast = v.IterFromLast();
+ MOZ_RELEASE_ASSERT(iter.Done());
+ MOZ_RELEASE_ASSERT(iterFromLast.Done());
+
+ gDummy = v.Append(1);
+ iter = v.Iter();
+ iterFromLast = v.IterFromLast();
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+
+ iter.Next();
+ MOZ_RELEASE_ASSERT(iter.Done());
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(iterFromLast.Done());
+
+ iter = v.Iter();
+ iterFromLast = v.IterFromLast();
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+
+ iter.Prev();
+ MOZ_RELEASE_ASSERT(iter.Done());
+ iterFromLast.Prev();
+ MOZ_RELEASE_ASSERT(iterFromLast.Done());
+
+ // Append enough entries to ensure we have at least two segments.
+ gDummy = v.Append(1);
+ gDummy = v.Append(1);
+ gDummy = v.Append(1);
+ gDummy = v.Append(1);
+
+ iter = v.Iter();
+ iterFromLast = v.IterFromLast();
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+
+ iter.Prev();
+ MOZ_RELEASE_ASSERT(iter.Done());
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(iterFromLast.Done());
+
+ iter = v.Iter();
+ iterFromLast = v.IterFromLast();
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+
+ iter.Next();
+ MOZ_RELEASE_ASSERT(!iter.Done());
+ iterFromLast.Prev();
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+
+ iter = v.Iter();
+ iterFromLast = v.IterFromLast();
+ int count = 0;
+ for (; !iter.Done() && !iterFromLast.Done();
+ iter.Next(), iterFromLast.Prev()) {
+ ++count;
+ }
+ MOZ_RELEASE_ASSERT(count == 5);
+
+ // Modify the vector while using the iterator.
+ iterFromLast = v.IterFromLast();
+ gDummy = v.Append(2);
+ gDummy = v.Append(3);
+ gDummy = v.Append(4);
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(!iterFromLast.Done());
+ MOZ_RELEASE_ASSERT(iterFromLast.Get() == 2);
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(iterFromLast.Get() == 3);
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(iterFromLast.Get() == 4);
+ iterFromLast.Next();
+ MOZ_RELEASE_ASSERT(iterFromLast.Done());
+}
+
+int main(void) {
+ TestBasics();
+ TestMoveAndSwap();
+ TestConstructorsAndDestructors();
+ TestSegmentCapacitiesAndAlignments();
+ TestIterator();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestSmallPointerArray.cpp b/mfbt/tests/TestSmallPointerArray.cpp
new file mode 100644
index 0000000000..163b2b1df8
--- /dev/null
+++ b/mfbt/tests/TestSmallPointerArray.cpp
@@ -0,0 +1,237 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SmallPointerArray.h"
+
+#define PTR1 (void*)0x4
+#define PTR2 (void*)0x5
+#define PTR3 (void*)0x6
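+
+// Arbitrary small non-null pointer values; the tests below only store and
+// compare them, never dereference them.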
+
+// We explicitly test sizes up to 3 here, as that is when SmallPointerArray<>
+// switches to the storage method used for larger arrays.
+void TestArrayManipulation() {
+ using namespace mozilla;
+ SmallPointerArray<void> testArray;
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 0);
+ MOZ_RELEASE_ASSERT(sizeof(testArray) == 2 * sizeof(void*));
+ MOZ_RELEASE_ASSERT(!testArray.Contains(PTR1));
+
+ testArray.AppendElement(PTR1);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.Contains(PTR1));
+
+ testArray.AppendElement(PTR2);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 2);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(1) == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.Contains(PTR2));
+
+ MOZ_RELEASE_ASSERT(testArray.RemoveElement(PTR1));
+ MOZ_RELEASE_ASSERT(!testArray.RemoveElement(PTR1));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR2);
+ MOZ_RELEASE_ASSERT(!testArray.Contains(PTR1));
+
+ testArray.AppendElement(PTR1);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 2);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR2);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(1) == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.Contains(PTR1));
+
+ testArray.AppendElement(PTR3);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 3);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR2);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(1) == PTR1);
+ MOZ_RELEASE_ASSERT(testArray[2] == PTR3);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(2) == PTR3);
+ MOZ_RELEASE_ASSERT(testArray.Contains(PTR3));
+
+ MOZ_RELEASE_ASSERT(testArray.RemoveElement(PTR1));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 2);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR2);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR3);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(1) == PTR3);
+
+ MOZ_RELEASE_ASSERT(testArray.RemoveElement(PTR2));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR3);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR3);
+
+ MOZ_RELEASE_ASSERT(testArray.RemoveElement(PTR3));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 0);
+
+ testArray.Clear();
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 0);
+
+ testArray.AppendElement(PTR1);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+
+ testArray.AppendElement(PTR2);
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 2);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(1) == PTR2);
+
+ MOZ_RELEASE_ASSERT(testArray.RemoveElement(PTR2));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+
+ MOZ_RELEASE_ASSERT(!testArray.RemoveElement(PTR3));
+
+ MOZ_RELEASE_ASSERT(testArray.Length() == 1);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray.ElementAt(0) == PTR1);
+}
+
+void TestRangeBasedLoops() {
+ using namespace mozilla;
+ SmallPointerArray<void> testArray;
+ void* verification[3];
+ uint32_t entries = 0;
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 0);
+
+ testArray.AppendElement(PTR1);
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 1);
+ MOZ_RELEASE_ASSERT(verification[0] == PTR1);
+
+ entries = 0;
+
+ testArray.AppendElement(PTR2);
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 2);
+ MOZ_RELEASE_ASSERT(verification[0] == PTR1);
+ MOZ_RELEASE_ASSERT(verification[1] == PTR2);
+
+ entries = 0;
+
+ testArray.RemoveElement(PTR1);
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 1);
+ MOZ_RELEASE_ASSERT(verification[0] == PTR2);
+
+ entries = 0;
+
+ testArray.AppendElement(PTR1);
+ testArray.AppendElement(PTR3);
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 3);
+ MOZ_RELEASE_ASSERT(verification[0] == PTR2);
+ MOZ_RELEASE_ASSERT(verification[1] == PTR1);
+ MOZ_RELEASE_ASSERT(verification[2] == PTR3);
+
+ entries = 0;
+
+ testArray.RemoveElement(PTR1);
+ testArray.RemoveElement(PTR2);
+ testArray.RemoveElement(PTR3);
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 0);
+
+ testArray.Clear();
+
+ for (void* test : testArray) {
+ verification[entries++] = test;
+ }
+
+ MOZ_RELEASE_ASSERT(entries == 0);
+}
+
+void TestMove() {
+ using namespace mozilla;
+
+ SmallPointerArray<void> testArray;
+ testArray.AppendElement(PTR1);
+ testArray.AppendElement(PTR2);
+
+ SmallPointerArray<void> moved = std::move(testArray);
+
+ MOZ_RELEASE_ASSERT(testArray.IsEmpty());
+ MOZ_RELEASE_ASSERT(moved.Length() == 2);
+ MOZ_RELEASE_ASSERT(moved[0] == PTR1);
+ MOZ_RELEASE_ASSERT(moved[1] == PTR2);
+
+ // Heap case.
+ moved.AppendElement(PTR3);
+
+ SmallPointerArray<void> another = std::move(moved);
+
+ MOZ_RELEASE_ASSERT(testArray.IsEmpty());
+ MOZ_RELEASE_ASSERT(moved.IsEmpty());
+ MOZ_RELEASE_ASSERT(another.Length() == 3);
+ MOZ_RELEASE_ASSERT(another[0] == PTR1);
+ MOZ_RELEASE_ASSERT(another[1] == PTR2);
+ MOZ_RELEASE_ASSERT(another[2] == PTR3);
+
+ // Move assignment.
+ testArray = std::move(another);
+
+ MOZ_RELEASE_ASSERT(moved.IsEmpty());
+ MOZ_RELEASE_ASSERT(another.IsEmpty());
+ MOZ_RELEASE_ASSERT(testArray.Length() == 3);
+ MOZ_RELEASE_ASSERT(testArray[0] == PTR1);
+ MOZ_RELEASE_ASSERT(testArray[1] == PTR2);
+ MOZ_RELEASE_ASSERT(testArray[2] == PTR3);
+}
+
+int main() {
+ TestArrayManipulation();
+ TestRangeBasedLoops();
+ TestMove();
+ return 0;
+}
diff --git a/mfbt/tests/TestSplayTree.cpp b/mfbt/tests/TestSplayTree.cpp
new file mode 100644
index 0000000000..8269664ce9
--- /dev/null
+++ b/mfbt/tests/TestSplayTree.cpp
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/SplayTree.h"
+#include "mozilla/Unused.h"
+
+using mozilla::SplayTree;
+using mozilla::SplayTreeNode;
+
+// The following array contains the values 0..999 in random order, as computed
+// with the following Python program:
+//
+// from random import shuffle
+// x = list(range(1000))
+// shuffle(x)
+// print(x)
+//
+static int gValues[] = {
+ 778, 999, 248, 795, 607, 177, 725, 33, 215, 565, 436, 821, 941, 802, 322,
+ 54, 151, 416, 531, 65, 818, 99, 340, 401, 274, 767, 278, 617, 425, 629,
+ 833, 878, 440, 984, 724, 519, 100, 369, 490, 131, 422, 169, 932, 476, 823,
+ 521, 390, 781, 747, 218, 376, 461, 717, 532, 471, 298, 720, 608, 334, 788,
+ 161, 500, 280, 963, 430, 484, 779, 572, 96, 333, 650, 158, 199, 137, 991,
+ 399, 882, 689, 358, 548, 196, 718, 211, 388, 133, 188, 321, 892, 25, 694,
+ 735, 886, 872, 785, 195, 275, 696, 975, 393, 619, 894, 18, 281, 191, 792,
+ 846, 861, 351, 542, 806, 570, 702, 931, 585, 444, 284, 217, 132, 251, 253,
+ 302, 808, 224, 37, 63, 863, 409, 49, 780, 790, 31, 638, 890, 186, 114,
+ 152, 949, 491, 207, 392, 170, 460, 794, 482, 877, 407, 263, 909, 249, 710,
+ 614, 51, 431, 915, 62, 332, 74, 495, 901, 23, 365, 752, 89, 660, 745,
+ 741, 547, 669, 449, 465, 605, 107, 774, 205, 852, 266, 247, 690, 835, 765,
+ 410, 140, 122, 400, 510, 664, 105, 935, 230, 134, 106, 959, 375, 884, 361,
+ 527, 715, 840, 272, 232, 102, 415, 903, 117, 313, 153, 463, 464, 876, 406,
+ 967, 713, 381, 836, 555, 190, 859, 172, 483, 61, 633, 294, 993, 72, 337,
+ 11, 896, 523, 101, 916, 244, 566, 706, 533, 439, 201, 222, 695, 739, 553,
+ 571, 289, 918, 209, 189, 357, 814, 670, 866, 910, 579, 246, 636, 750, 891,
+ 494, 758, 341, 626, 426, 772, 254, 682, 588, 104, 347, 184, 977, 126, 498,
+ 165, 955, 241, 516, 235, 497, 121, 123, 791, 844, 259, 995, 283, 602, 417,
+ 221, 308, 855, 429, 86, 345, 928, 44, 679, 796, 363, 402, 445, 492, 450,
+ 964, 749, 925, 847, 637, 982, 648, 635, 481, 564, 867, 940, 291, 159, 290,
+ 929, 59, 712, 986, 611, 954, 820, 103, 622, 316, 142, 204, 225, 678, 314,
+ 84, 578, 315, 141, 990, 880, 504, 969, 412, 746, 47, 517, 124, 848, 466,
+ 438, 674, 979, 782, 651, 181, 26, 435, 832, 386, 951, 229, 642, 655, 91,
+ 162, 921, 647, 113, 686, 56, 805, 763, 245, 581, 287, 998, 525, 641, 135,
+ 634, 237, 728, 112, 828, 228, 899, 1, 723, 16, 613, 144, 659, 97, 185,
+ 312, 292, 733, 624, 276, 387, 926, 339, 768, 960, 610, 807, 656, 851, 219,
+ 582, 709, 927, 514, 680, 870, 597, 536, 77, 164, 512, 149, 900, 85, 335,
+ 997, 8, 705, 777, 653, 815, 311, 701, 507, 202, 530, 827, 541, 958, 82,
+ 874, 55, 487, 383, 885, 684, 180, 829, 760, 109, 194, 540, 816, 906, 657,
+ 469, 446, 857, 907, 38, 600, 618, 797, 950, 822, 277, 842, 116, 513, 255,
+ 424, 643, 163, 372, 129, 67, 118, 754, 529, 917, 687, 473, 174, 538, 939,
+ 663, 775, 474, 242, 883, 20, 837, 293, 584, 943, 32, 176, 904, 14, 448,
+ 893, 888, 744, 171, 714, 454, 691, 261, 934, 606, 789, 825, 671, 397, 338,
+ 317, 612, 737, 130, 41, 923, 574, 136, 980, 850, 12, 729, 197, 403, 57,
+ 783, 360, 146, 75, 432, 447, 192, 799, 740, 267, 214, 250, 367, 853, 968,
+ 120, 736, 391, 881, 784, 665, 68, 398, 350, 839, 268, 697, 567, 428, 738,
+ 48, 182, 70, 220, 865, 418, 374, 148, 945, 353, 539, 589, 307, 427, 506,
+ 265, 558, 128, 46, 336, 299, 349, 309, 377, 304, 420, 30, 34, 875, 948,
+ 212, 394, 442, 719, 273, 269, 157, 502, 675, 751, 838, 897, 862, 831, 676,
+ 590, 811, 966, 854, 477, 15, 598, 573, 108, 98, 81, 408, 421, 296, 73,
+ 644, 456, 362, 666, 550, 331, 368, 193, 470, 203, 769, 342, 36, 604, 60,
+ 970, 748, 813, 522, 515, 90, 672, 243, 793, 947, 595, 632, 912, 475, 258,
+ 80, 873, 623, 524, 546, 262, 727, 216, 505, 330, 373, 58, 297, 609, 908,
+ 150, 206, 703, 755, 260, 511, 213, 198, 766, 898, 992, 488, 405, 974, 770,
+ 936, 743, 554, 0, 499, 976, 94, 160, 919, 434, 324, 156, 757, 830, 677,
+ 183, 630, 871, 640, 938, 518, 344, 366, 742, 552, 306, 535, 200, 652, 496,
+ 233, 419, 787, 318, 981, 371, 166, 143, 384, 88, 508, 698, 812, 559, 658,
+ 549, 208, 599, 621, 961, 668, 563, 93, 154, 587, 560, 389, 3, 210, 326,
+ 4, 924, 300, 2, 804, 914, 801, 753, 654, 27, 236, 19, 708, 451, 985,
+ 596, 478, 922, 240, 127, 994, 983, 385, 472, 40, 528, 288, 111, 543, 568,
+ 155, 625, 759, 937, 956, 545, 953, 962, 382, 479, 809, 557, 501, 354, 414,
+ 343, 378, 843, 379, 178, 556, 800, 803, 592, 627, 942, 576, 920, 704, 707,
+ 726, 223, 119, 404, 24, 879, 722, 868, 5, 238, 817, 520, 631, 946, 462,
+ 457, 295, 480, 957, 441, 145, 286, 303, 688, 17, 628, 493, 364, 226, 110,
+ 615, 69, 320, 534, 593, 721, 411, 285, 869, 952, 849, 139, 356, 346, 28,
+ 887, 810, 92, 798, 544, 458, 996, 692, 396, 667, 328, 173, 22, 773, 50,
+ 645, 987, 42, 685, 734, 700, 683, 601, 580, 639, 913, 323, 858, 179, 761,
+ 6, 841, 905, 234, 730, 29, 21, 575, 586, 902, 443, 826, 646, 257, 125,
+ 649, 53, 453, 252, 13, 87, 971, 227, 485, 168, 380, 711, 79, 732, 325,
+ 52, 468, 76, 551, 39, 395, 327, 973, 459, 45, 583, 989, 147, 455, 776,
+ 944, 569, 889, 256, 35, 175, 834, 756, 933, 860, 526, 845, 864, 764, 771,
+ 282, 9, 693, 352, 731, 7, 577, 264, 319, 138, 467, 819, 930, 231, 115,
+ 988, 978, 762, 486, 301, 616, 10, 78, 603, 452, 965, 279, 972, 413, 895,
+ 591, 662, 594, 348, 423, 489, 43, 699, 433, 509, 355, 270, 66, 83, 95,
+ 561, 661, 562, 329, 620, 370, 64, 187, 503, 716, 856, 310, 786, 167, 71,
+ 239, 359, 537, 437, 305, 673, 824, 911, 681, 271};
+
+struct SplayInt : SplayTreeNode<SplayInt> {
+ explicit SplayInt(int aValue) : mValue(aValue) {}
+
+ static int compare(const SplayInt& aOne, const SplayInt& aTwo) {
+ if (aOne.mValue < aTwo.mValue) {
+ return -1;
+ }
+ if (aOne.mValue > aTwo.mValue) {
+ return 1;
+ }
+ return 0;
+ }
+
+ int mValue;
+};
+
+struct SplayNoCopy : SplayTreeNode<SplayNoCopy> {
+ SplayNoCopy(const SplayNoCopy&) = delete;
+ SplayNoCopy(SplayNoCopy&&) = delete;
+
+ static int compare(const SplayNoCopy&, const SplayNoCopy&) { return 0; }
+};
+
+static SplayTree<SplayNoCopy, SplayNoCopy> testNoCopy;
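+
+// Merely instantiating a tree of a type that is neither copyable nor movable
+// verifies at compile time that SplayTree never copies or moves its
+// elements; testNoCopy exists only to force that instantiation.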
+
+int main() {
+ mozilla::Unused << testNoCopy;
+
+ SplayTree<SplayInt, SplayInt> tree;
+
+ MOZ_RELEASE_ASSERT(tree.empty());
+
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(0)));
+
+ static const int N = mozilla::ArrayLength(gValues);
+
+ // Insert the values, and check each one is findable just after insertion.
+ for (int i = 0; i < N; i++) {
+ tree.insert(new SplayInt(gValues[i]));
+ SplayInt* inserted = tree.find(SplayInt(gValues[i]));
+ MOZ_RELEASE_ASSERT(inserted);
+ MOZ_RELEASE_ASSERT(tree.findOrInsert(SplayInt(gValues[i])) == inserted);
+ tree.checkCoherency();
+ }
+
+ // Check they're all findable after all insertions.
+ for (int i = 0; i < N; i++) {
+ MOZ_RELEASE_ASSERT(tree.find(SplayInt(gValues[i])));
+ MOZ_RELEASE_ASSERT(tree.findOrInsert(SplayInt(gValues[i])));
+ tree.checkCoherency();
+ }
+
+ // Check that non-inserted values cannot be found.
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(-1)));
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(N)));
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(0x7fffffff)));
+
+ // Remove the values, and check each one is not findable just after removal.
+ for (int i = 0; i < N; i++) {
+ SplayInt* removed = tree.remove(SplayInt(gValues[i]));
+ MOZ_RELEASE_ASSERT(removed->mValue == gValues[i]);
+ MOZ_RELEASE_ASSERT(!tree.find(*removed));
+ delete removed;
+ tree.checkCoherency();
+ }
+
+ MOZ_RELEASE_ASSERT(tree.empty());
+
+ // Insert the values, and check each one is findable just after insertion.
+ for (int i = 0; i < N; i++) {
+ SplayInt* inserted = tree.findOrInsert(SplayInt(gValues[i]));
+ MOZ_RELEASE_ASSERT(tree.find(SplayInt(gValues[i])) == inserted);
+ MOZ_RELEASE_ASSERT(tree.findOrInsert(SplayInt(gValues[i])) == inserted);
+ tree.checkCoherency();
+ }
+
+ // Check they're all findable after all insertions.
+ for (int i = 0; i < N; i++) {
+ MOZ_RELEASE_ASSERT(tree.find(SplayInt(gValues[i])));
+ MOZ_RELEASE_ASSERT(tree.findOrInsert(SplayInt(gValues[i])));
+ tree.checkCoherency();
+ }
+
+ // Check that non-inserted values cannot be found.
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(-1)));
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(N)));
+ MOZ_RELEASE_ASSERT(!tree.find(SplayInt(0x7fffffff)));
+
+ // Remove the values, and check each one is not findable just after removal.
+ for (int i = 0; i < N; i++) {
+ SplayInt* removed = tree.remove(SplayInt(gValues[i]));
+ MOZ_RELEASE_ASSERT(removed->mValue == gValues[i]);
+ MOZ_RELEASE_ASSERT(!tree.find(*removed));
+ delete removed;
+ tree.checkCoherency();
+ }
+
+ MOZ_RELEASE_ASSERT(tree.empty());
+
+ // Reinsert the values, in reverse order to last time.
+ for (int i = 0; i < N; i++) {
+ tree.insert(new SplayInt(gValues[N - i - 1]));
+ tree.checkCoherency();
+ }
+
+ // Remove the minimum value repeatedly.
+ for (int i = 0; i < N; i++) {
+ SplayInt* removed = tree.removeMin();
+ MOZ_RELEASE_ASSERT(removed->mValue == i);
+ delete removed;
+ tree.checkCoherency();
+ }
+
+ MOZ_RELEASE_ASSERT(tree.empty());
+
+ return 0;
+}
diff --git a/mfbt/tests/TestTextUtils.cpp b/mfbt/tests/TestTextUtils.cpp
new file mode 100644
index 0000000000..93989019f7
--- /dev/null
+++ b/mfbt/tests/TestTextUtils.cpp
@@ -0,0 +1,1064 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/TextUtils.h"
+
+using mozilla::AsciiAlphanumericToNumber;
+using mozilla::IsAscii;
+using mozilla::IsAsciiAlpha;
+using mozilla::IsAsciiAlphanumeric;
+using mozilla::IsAsciiDigit;
+using mozilla::IsAsciiLowercaseAlpha;
+using mozilla::IsAsciiNullTerminated;
+using mozilla::IsAsciiUppercaseAlpha;
+
+static void TestIsAscii() {
+ // char
+
+ static_assert(!IsAscii(char(-1)), "char(-1) isn't ASCII");
+
+ static_assert(IsAscii('\0'), "nul is ASCII");
+
+ static_assert(IsAscii('A'), "'A' is ASCII");
+ static_assert(IsAscii('B'), "'B' is ASCII");
+ static_assert(IsAscii('M'), "'M' is ASCII");
+ static_assert(IsAscii('Y'), "'Y' is ASCII");
+ static_assert(IsAscii('Z'), "'Z' is ASCII");
+
+ static_assert(IsAscii('['), "'[' is ASCII");
+ static_assert(IsAscii('`'), "'`' is ASCII");
+
+ static_assert(IsAscii('a'), "'a' is ASCII");
+ static_assert(IsAscii('b'), "'b' is ASCII");
+ static_assert(IsAscii('m'), "'m' is ASCII");
+ static_assert(IsAscii('y'), "'y' is ASCII");
+ static_assert(IsAscii('z'), "'z' is ASCII");
+
+ static_assert(IsAscii('{'), "'{' is ASCII");
+
+ static_assert(IsAscii('5'), "'5' is ASCII");
+
+ static_assert(IsAscii('\x7F'), "'\\x7F' is ASCII");
+ static_assert(!IsAscii('\x80'), "'\\x80' isn't ASCII");
+
+ // char16_t
+
+ static_assert(!IsAscii(char16_t(-1)), "char16_t(-1) isn't ASCII");
+
+ static_assert(IsAscii(u'\0'), "nul is ASCII");
+
+ static_assert(IsAscii(u'A'), "u'A' is ASCII");
+ static_assert(IsAscii(u'B'), "u'B' is ASCII");
+ static_assert(IsAscii(u'M'), "u'M' is ASCII");
+ static_assert(IsAscii(u'Y'), "u'Y' is ASCII");
+ static_assert(IsAscii(u'Z'), "u'Z' is ASCII");
+
+ static_assert(IsAscii(u'['), "u'[' is ASCII");
+ static_assert(IsAscii(u'`'), "u'`' is ASCII");
+
+ static_assert(IsAscii(u'a'), "u'a' is ASCII");
+ static_assert(IsAscii(u'b'), "u'b' is ASCII");
+ static_assert(IsAscii(u'm'), "u'm' is ASCII");
+ static_assert(IsAscii(u'y'), "u'y' is ASCII");
+ static_assert(IsAscii(u'z'), "u'z' is ASCII");
+
+ static_assert(IsAscii(u'{'), "u'{' is ASCII");
+
+ static_assert(IsAscii(u'5'), "u'5' is ASCII");
+
+ static_assert(IsAscii(u'\x7F'), "u'\\x7F' is ASCII");
+ static_assert(!IsAscii(u'\x80'), "u'\\x80' isn't ASCII");
+
+ // char32_t
+
+ static_assert(!IsAscii(char32_t(-1)), "char32_t(-1) isn't ASCII");
+
+ static_assert(IsAscii(U'\0'), "nul is ASCII");
+
+ static_assert(IsAscii(U'A'), "U'A' is ASCII");
+ static_assert(IsAscii(U'B'), "U'B' is ASCII");
+ static_assert(IsAscii(U'M'), "U'M' is ASCII");
+ static_assert(IsAscii(U'Y'), "U'Y' is ASCII");
+ static_assert(IsAscii(U'Z'), "U'Z' is ASCII");
+
+ static_assert(IsAscii(U'['), "U'[' is ASCII");
+ static_assert(IsAscii(U'`'), "U'`' is ASCII");
+
+ static_assert(IsAscii(U'a'), "U'a' is ASCII");
+ static_assert(IsAscii(U'b'), "U'b' is ASCII");
+ static_assert(IsAscii(U'm'), "U'm' is ASCII");
+ static_assert(IsAscii(U'y'), "U'y' is ASCII");
+ static_assert(IsAscii(U'z'), "U'z' is ASCII");
+
+ static_assert(IsAscii(U'{'), "U'{' is ASCII");
+
+ static_assert(IsAscii(U'5'), "U'5' is ASCII");
+
+ static_assert(IsAscii(U'\x7F'), "U'\\x7F' is ASCII");
+ static_assert(!IsAscii(U'\x80'), "U'\\x80' isn't ASCII");
+}
+
+static void TestIsAsciiNullTerminated() {
+ // char
+
+  constexpr char allChar[] =
+      "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+      "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
+      "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
+      "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F"
+      "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F"
+      "\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F"
+      "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F"
+      "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F";
+
+ static_assert(IsAsciiNullTerminated(allChar), "allChar is ASCII");
+
+ constexpr char loBadChar[] = "\x80";
+
+ static_assert(!IsAsciiNullTerminated(loBadChar), "loBadChar isn't ASCII");
+
+ constexpr char hiBadChar[] = "\xFF";
+
+ static_assert(!IsAsciiNullTerminated(hiBadChar), "hiBadChar isn't ASCII");
+
+ // char16_t
+
+  constexpr char16_t allChar16[] =
+      u"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+      "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
+      "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
+      "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F"
+      "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F"
+      "\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F"
+      "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F"
+      "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F";
+
+ static_assert(IsAsciiNullTerminated(allChar16), "allChar16 is ASCII");
+
+ constexpr char16_t loBadChar16[] = u"\x80";
+
+ static_assert(!IsAsciiNullTerminated(loBadChar16), "loBadChar16 isn't ASCII");
+
+ constexpr char16_t hiBadChar16[] = u"\xFF";
+
+ static_assert(!IsAsciiNullTerminated(hiBadChar16), "hiBadChar16 isn't ASCII");
+
+ constexpr char16_t highestChar16[] = u"\uFFFF";
+
+ static_assert(!IsAsciiNullTerminated(highestChar16),
+ "highestChar16 isn't ASCII");
+
+ // char32_t
+
+  constexpr char32_t allChar32[] =
+      U"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+      "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
+      "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
+      "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F"
+      "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F"
+      "\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F"
+      "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F"
+      "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F";
+
+ static_assert(IsAsciiNullTerminated(allChar32), "allChar32 is ASCII");
+
+ constexpr char32_t loBadChar32[] = U"\x80";
+
+ static_assert(!IsAsciiNullTerminated(loBadChar32), "loBadChar32 isn't ASCII");
+
+ constexpr char32_t hiBadChar32[] = U"\xFF";
+
+ static_assert(!IsAsciiNullTerminated(hiBadChar32), "hiBadChar32 isn't ASCII");
+
+ constexpr char32_t highestChar32[] = {static_cast<char32_t>(-1), 0};
+
+ static_assert(!IsAsciiNullTerminated(highestChar32),
+ "highestChar32 isn't ASCII");
+}
+
+static void TestIsAsciiAlpha() {
+ // char
+
+ static_assert(!IsAsciiAlpha('@'), "'@' isn't ASCII alpha");
+ static_assert('@' == 0x40, "'@' has value 0x40");
+
+ static_assert('A' == 0x41, "'A' has value 0x41");
+ static_assert(IsAsciiAlpha('A'), "'A' is ASCII alpha");
+ static_assert(IsAsciiAlpha('B'), "'B' is ASCII alpha");
+ static_assert(IsAsciiAlpha('M'), "'M' is ASCII alpha");
+ static_assert(IsAsciiAlpha('Y'), "'Y' is ASCII alpha");
+ static_assert(IsAsciiAlpha('Z'), "'Z' is ASCII alpha");
+
+ static_assert('Z' == 0x5A, "'Z' has value 0x5A");
+ static_assert('[' == 0x5B, "'[' has value 0x5B");
+ static_assert(!IsAsciiAlpha('['), "'[' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha('`'), "'`' isn't ASCII alpha");
+ static_assert('`' == 0x60, "'`' has value 0x60");
+
+ static_assert('a' == 0x61, "'a' has value 0x61");
+ static_assert(IsAsciiAlpha('a'), "'a' is ASCII alpha");
+ static_assert(IsAsciiAlpha('b'), "'b' is ASCII alpha");
+ static_assert(IsAsciiAlpha('m'), "'m' is ASCII alpha");
+ static_assert(IsAsciiAlpha('y'), "'y' is ASCII alpha");
+ static_assert(IsAsciiAlpha('z'), "'z' is ASCII alpha");
+
+ static_assert('z' == 0x7A, "'z' has value 0x7A");
+ static_assert('{' == 0x7B, "'{' has value 0x7B");
+ static_assert(!IsAsciiAlpha('{'), "'{' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha('5'), "'5' isn't ASCII alpha");
+
+ // char16_t
+
+ static_assert(!IsAsciiAlpha(u'@'), "u'@' isn't ASCII alpha");
+ static_assert(u'@' == 0x40, "u'@' has value 0x40");
+
+ static_assert(u'A' == 0x41, "u'A' has value 0x41");
+ static_assert(IsAsciiAlpha(u'A'), "u'A' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'B'), "u'B' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'M'), "u'M' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'Y'), "u'Y' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'Z'), "u'Z' is ASCII alpha");
+
+ static_assert(u'Z' == 0x5A, "u'Z' has value 0x5A");
+ static_assert(u'[' == 0x5B, "u'[' has value 0x5B");
+ static_assert(!IsAsciiAlpha(u'['), "u'[' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha(u'`'), "u'`' isn't ASCII alpha");
+ static_assert(u'`' == 0x60, "u'`' has value 0x60");
+
+ static_assert(u'a' == 0x61, "u'a' has value 0x61");
+ static_assert(IsAsciiAlpha(u'a'), "u'a' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'b'), "u'b' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'm'), "u'm' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'y'), "u'y' is ASCII alpha");
+ static_assert(IsAsciiAlpha(u'z'), "u'z' is ASCII alpha");
+
+ static_assert(u'z' == 0x7A, "u'z' has value 0x7A");
+ static_assert(u'{' == 0x7B, "u'{' has value 0x7B");
+ static_assert(!IsAsciiAlpha(u'{'), "u'{' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha(u'5'), "u'5' isn't ASCII alpha");
+
+ // char32_t
+
+ static_assert(!IsAsciiAlpha(U'@'), "U'@' isn't ASCII alpha");
+ static_assert(U'@' == 0x40, "U'@' has value 0x40");
+
+ static_assert(U'A' == 0x41, "U'A' has value 0x41");
+ static_assert(IsAsciiAlpha(U'A'), "U'A' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'B'), "U'B' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'M'), "U'M' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'Y'), "U'Y' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'Z'), "U'Z' is ASCII alpha");
+
+ static_assert(U'Z' == 0x5A, "U'Z' has value 0x5A");
+ static_assert(U'[' == 0x5B, "U'[' has value 0x5B");
+ static_assert(!IsAsciiAlpha(U'['), "U'[' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha(U'`'), "U'`' isn't ASCII alpha");
+ static_assert(U'`' == 0x60, "U'`' has value 0x60");
+
+ static_assert(U'a' == 0x61, "U'a' has value 0x61");
+ static_assert(IsAsciiAlpha(U'a'), "U'a' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'b'), "U'b' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'm'), "U'm' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'y'), "U'y' is ASCII alpha");
+ static_assert(IsAsciiAlpha(U'z'), "U'z' is ASCII alpha");
+
+ static_assert(U'z' == 0x7A, "U'z' has value 0x7A");
+ static_assert(U'{' == 0x7B, "U'{' has value 0x7B");
+ static_assert(!IsAsciiAlpha(U'{'), "U'{' isn't ASCII alpha");
+
+ static_assert(!IsAsciiAlpha(U'5'), "U'5' isn't ASCII alpha");
+}
+
+static void TestIsAsciiUppercaseAlpha() {
+ // char
+
+ static_assert(!IsAsciiUppercaseAlpha('@'), "'@' isn't ASCII alpha uppercase");
+ static_assert('@' == 0x40, "'@' has value 0x40");
+
+ static_assert('A' == 0x41, "'A' has value 0x41");
+ static_assert(IsAsciiUppercaseAlpha('A'), "'A' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha('B'), "'B' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha('M'), "'M' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha('Y'), "'Y' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha('Z'), "'Z' is ASCII alpha uppercase");
+
+ static_assert('Z' == 0x5A, "'Z' has value 0x5A");
+ static_assert('[' == 0x5B, "'[' has value 0x5B");
+ static_assert(!IsAsciiUppercaseAlpha('['), "'[' isn't ASCII alpha uppercase");
+
+ static_assert(!IsAsciiUppercaseAlpha('`'), "'`' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha('a'), "'a' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha('b'), "'b' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha('m'), "'m' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha('y'), "'y' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha('z'), "'z' isn't ASCII alpha uppercase");
+ static_assert(!IsAsciiUppercaseAlpha('{'), "'{' isn't ASCII alpha uppercase");
+
+ // char16_t
+
+ static_assert(!IsAsciiUppercaseAlpha(u'@'),
+ "u'@' isn't ASCII alpha uppercase");
+ static_assert(u'@' == 0x40, "u'@' has value 0x40");
+
+ static_assert(u'A' == 0x41, "u'A' has value 0x41");
+ static_assert(IsAsciiUppercaseAlpha(u'A'), "u'A' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(u'B'), "u'B' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(u'M'), "u'M' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(u'Y'), "u'Y' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(u'Z'), "u'Z' is ASCII alpha uppercase");
+
+ static_assert(u'Z' == 0x5A, "u'Z' has value 0x5A");
+ static_assert(u'[' == 0x5B, "u'[' has value 0x5B");
+ static_assert(!IsAsciiUppercaseAlpha(u'['),
+ "u'[' isn't ASCII alpha uppercase");
+
+ static_assert(!IsAsciiUppercaseAlpha(u'`'),
+ "u'`' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(u'a'),
+                "u'a' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(u'b'),
+                "u'b' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(u'm'),
+                "u'm' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(u'y'),
+                "u'y' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(u'z'),
+                "u'z' isn't ASCII alpha uppercase");
+ static_assert(!IsAsciiUppercaseAlpha(u'{'),
+ "u'{' isn't ASCII alpha uppercase");
+
+ // char32_t
+
+ static_assert(!IsAsciiUppercaseAlpha(U'@'),
+ "U'@' isn't ASCII alpha uppercase");
+ static_assert(U'@' == 0x40, "U'@' has value 0x40");
+
+ static_assert(U'A' == 0x41, "U'A' has value 0x41");
+ static_assert(IsAsciiUppercaseAlpha(U'A'), "U'A' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(U'B'), "U'B' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(U'M'), "U'M' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(U'Y'), "U'Y' is ASCII alpha uppercase");
+ static_assert(IsAsciiUppercaseAlpha(U'Z'), "U'Z' is ASCII alpha uppercase");
+
+ static_assert(U'Z' == 0x5A, "U'Z' has value 0x5A");
+ static_assert(U'[' == 0x5B, "U'[' has value 0x5B");
+ static_assert(!IsAsciiUppercaseAlpha(U'['),
+ "U'[' isn't ASCII alpha uppercase");
+
+ static_assert(!IsAsciiUppercaseAlpha(U'`'),
+ "U'`' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(U'a'),
+                "U'a' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(U'b'),
+                "U'b' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(U'm'),
+                "U'm' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(U'y'),
+                "U'y' isn't ASCII alpha uppercase");
+  static_assert(!IsAsciiUppercaseAlpha(U'z'),
+                "U'z' isn't ASCII alpha uppercase");
+ static_assert(!IsAsciiUppercaseAlpha(U'{'),
+ "U'{' isn't ASCII alpha uppercase");
+}
+
+static void TestIsAsciiLowercaseAlpha() {
+ // char
+
+ static_assert(!IsAsciiLowercaseAlpha('`'), "'`' isn't ASCII alpha lowercase");
+ static_assert('`' == 0x60, "'`' has value 0x60");
+
+ static_assert('a' == 0x61, "'a' has value 0x61");
+ static_assert(IsAsciiLowercaseAlpha('a'), "'a' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha('b'), "'b' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha('m'), "'m' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha('y'), "'y' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha('z'), "'z' is ASCII alpha lowercase");
+
+ static_assert('z' == 0x7A, "'z' has value 0x7A");
+ static_assert('{' == 0x7B, "'{' has value 0x7B");
+ static_assert(!IsAsciiLowercaseAlpha('{'), "'{' isn't ASCII alpha lowercase");
+
+ static_assert(!IsAsciiLowercaseAlpha('@'), "'@' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('A'), "'A' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('B'), "'B' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('M'), "'M' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('Y'), "'Y' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('Z'), "'Z' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha('['), "'[' isn't ASCII alpha lowercase");
+
+ // char16_t
+
+ static_assert(!IsAsciiLowercaseAlpha(u'`'),
+ "u'`' isn't ASCII alpha lowercase");
+ static_assert(u'`' == 0x60, "u'`' has value 0x60");
+
+ static_assert(u'a' == 0x61, "u'a' has value 0x61");
+ static_assert(IsAsciiLowercaseAlpha(u'a'), "u'a' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(u'b'), "u'b' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(u'm'), "u'm' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(u'y'), "u'y' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(u'z'), "u'z' is ASCII alpha lowercase");
+
+ static_assert(u'z' == 0x7A, "u'z' has value 0x7A");
+ static_assert(u'{' == 0x7B, "u'{' has value 0x7B");
+ static_assert(!IsAsciiLowercaseAlpha(u'{'),
+ "u'{' isn't ASCII alpha lowercase");
+
+ static_assert(!IsAsciiLowercaseAlpha(u'@'),
+ "u'@' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'A'),
+ "u'A' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'B'),
+ "u'B' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'M'),
+ "u'M' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'Y'),
+ "u'Y' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'Z'),
+ "u'Z' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(u'['),
+ "u'[' isn't ASCII alpha lowercase");
+
+ // char32_t
+
+ static_assert(!IsAsciiLowercaseAlpha(U'`'),
+ "U'`' isn't ASCII alpha lowercase");
+ static_assert(U'`' == 0x60, "U'`' has value 0x60");
+
+ static_assert(U'a' == 0x61, "U'a' has value 0x61");
+ static_assert(IsAsciiLowercaseAlpha(U'a'), "U'a' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(U'b'), "U'b' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(U'm'), "U'm' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(U'y'), "U'y' is ASCII alpha lowercase");
+ static_assert(IsAsciiLowercaseAlpha(U'z'), "U'z' is ASCII alpha lowercase");
+
+ static_assert(U'z' == 0x7A, "U'z' has value 0x7A");
+ static_assert(U'{' == 0x7B, "U'{' has value 0x7B");
+ static_assert(!IsAsciiLowercaseAlpha(U'{'),
+ "U'{' isn't ASCII alpha lowercase");
+
+ static_assert(!IsAsciiLowercaseAlpha(U'@'),
+ "U'@' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'A'),
+ "U'A' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'B'),
+ "U'B' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'M'),
+ "U'M' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'Y'),
+ "U'Y' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'Z'),
+ "U'Z' isn't ASCII alpha lowercase");
+ static_assert(!IsAsciiLowercaseAlpha(U'['),
+ "U'[' isn't ASCII alpha lowercase");
+}
+
+static void TestIsAsciiAlphanumeric() {
+ // char
+
+ static_assert(!IsAsciiAlphanumeric('/'), "'/' isn't ASCII alphanumeric");
+ static_assert('/' == 0x2F, "'/' has value 0x2F");
+
+ static_assert('0' == 0x30, "'0' has value 0x30");
+ static_assert(IsAsciiAlphanumeric('0'), "'0' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('1'), "'1' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('5'), "'5' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('8'), "'8' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('9'), "'9' is ASCII alphanumeric");
+
+ static_assert('9' == 0x39, "'9' has value 0x39");
+ static_assert(':' == 0x3A, "':' has value 0x3A");
+ static_assert(!IsAsciiAlphanumeric(':'), "':' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric('@'), "'@' isn't ASCII alphanumeric");
+ static_assert('@' == 0x40, "'@' has value 0x40");
+
+ static_assert('A' == 0x41, "'A' has value 0x41");
+ static_assert(IsAsciiAlphanumeric('A'), "'A' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('B'), "'B' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('M'), "'M' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('Y'), "'Y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('Z'), "'Z' is ASCII alphanumeric");
+
+ static_assert('Z' == 0x5A, "'Z' has value 0x5A");
+ static_assert('[' == 0x5B, "'[' has value 0x5B");
+ static_assert(!IsAsciiAlphanumeric('['), "'[' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric('`'), "'`' isn't ASCII alphanumeric");
+ static_assert('`' == 0x60, "'`' has value 0x60");
+
+ static_assert('a' == 0x61, "'a' has value 0x61");
+ static_assert(IsAsciiAlphanumeric('a'), "'a' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('b'), "'b' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('m'), "'m' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('y'), "'y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric('z'), "'z' is ASCII alphanumeric");
+
+ static_assert('z' == 0x7A, "'z' has value 0x7A");
+ static_assert('{' == 0x7B, "'{' has value 0x7B");
+ static_assert(!IsAsciiAlphanumeric('{'), "'{' isn't ASCII alphanumeric");
+
+ // char16_t
+
+ static_assert(!IsAsciiAlphanumeric(u'/'), "u'/' isn't ASCII alphanumeric");
+ static_assert(u'/' == 0x2F, "u'/' has value 0x2F");
+
+ static_assert(u'0' == 0x30, "u'0' has value 0x30");
+ static_assert(IsAsciiAlphanumeric(u'0'), "u'0' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'1'), "u'1' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'5'), "u'5' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'8'), "u'8' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'9'), "u'9' is ASCII alphanumeric");
+
+ static_assert(u'9' == 0x39, "u'9' has value 0x39");
+ static_assert(u':' == 0x3A, "u':' has value 0x3A");
+ static_assert(!IsAsciiAlphanumeric(u':'), "u':' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric(u'@'), "u'@' isn't ASCII alphanumeric");
+ static_assert(u'@' == 0x40, "u'@' has value 0x40");
+
+ static_assert(u'A' == 0x41, "u'A' has value 0x41");
+ static_assert(IsAsciiAlphanumeric(u'A'), "u'A' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'B'), "u'B' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'M'), "u'M' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'Y'), "u'Y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'Z'), "u'Z' is ASCII alphanumeric");
+
+ static_assert(u'Z' == 0x5A, "u'Z' has value 0x5A");
+ static_assert(u'[' == 0x5B, "u'[' has value 0x5B");
+ static_assert(!IsAsciiAlphanumeric(u'['), "u'[' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric(u'`'), "u'`' isn't ASCII alphanumeric");
+ static_assert(u'`' == 0x60, "u'`' has value 0x60");
+
+ static_assert(u'a' == 0x61, "u'a' has value 0x61");
+ static_assert(IsAsciiAlphanumeric(u'a'), "u'a' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'b'), "u'b' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'm'), "u'm' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'y'), "u'y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(u'z'), "u'z' is ASCII alphanumeric");
+
+ static_assert(u'z' == 0x7A, "u'z' has value 0x7A");
+ static_assert(u'{' == 0x7B, "u'{' has value 0x7B");
+ static_assert(!IsAsciiAlphanumeric(u'{'), "u'{' isn't ASCII alphanumeric");
+
+ // char32_t
+
+ static_assert(!IsAsciiAlphanumeric(U'/'), "U'/' isn't ASCII alphanumeric");
+ static_assert(U'/' == 0x2F, "U'/' has value 0x2F");
+
+ static_assert(U'0' == 0x30, "U'0' has value 0x30");
+ static_assert(IsAsciiAlphanumeric(U'0'), "U'0' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'1'), "U'1' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'5'), "U'5' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'8'), "U'8' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'9'), "U'9' is ASCII alphanumeric");
+
+ static_assert(U'9' == 0x39, "U'9' has value 0x39");
+ static_assert(U':' == 0x3A, "U':' has value 0x3A");
+ static_assert(!IsAsciiAlphanumeric(U':'), "U':' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric(U'@'), "U'@' isn't ASCII alphanumeric");
+ static_assert(U'@' == 0x40, "U'@' has value 0x40");
+
+ static_assert(U'A' == 0x41, "U'A' has value 0x41");
+ static_assert(IsAsciiAlphanumeric(U'A'), "U'A' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'B'), "U'B' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'M'), "U'M' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'Y'), "U'Y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'Z'), "U'Z' is ASCII alphanumeric");
+
+ static_assert(U'Z' == 0x5A, "U'Z' has value 0x5A");
+ static_assert(U'[' == 0x5B, "U'[' has value 0x5B");
+ static_assert(!IsAsciiAlphanumeric(U'['), "U'[' isn't ASCII alphanumeric");
+
+ static_assert(!IsAsciiAlphanumeric(U'`'), "U'`' isn't ASCII alphanumeric");
+ static_assert(U'`' == 0x60, "U'`' has value 0x60");
+
+ static_assert(U'a' == 0x61, "U'a' has value 0x61");
+ static_assert(IsAsciiAlphanumeric(U'a'), "U'a' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'b'), "U'b' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'm'), "U'm' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'y'), "U'y' is ASCII alphanumeric");
+ static_assert(IsAsciiAlphanumeric(U'z'), "U'z' is ASCII alphanumeric");
+
+ static_assert(U'z' == 0x7A, "U'z' has value 0x7A");
+ static_assert(U'{' == 0x7B, "U'{' has value 0x7B");
+ static_assert(!IsAsciiAlphanumeric(U'{'), "U'{' isn't ASCII alphanumeric");
+}
+
+static void TestAsciiAlphanumericToNumber() {
+ // When AsciiAlphanumericToNumber becomes constexpr, make sure to convert all
+ // these to just static_assert.
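+ // For example, the first check below would then become simply:
+ //   static_assert(AsciiAlphanumericToNumber('0') == 0, "'0' converts to 0");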
+
+ // char
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('0') == 0, "'0' converts to 0");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('1') == 1, "'1' converts to 1");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('2') == 2, "'2' converts to 2");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('3') == 3, "'3' converts to 3");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('4') == 4, "'4' converts to 4");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('5') == 5, "'5' converts to 5");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('6') == 6, "'6' converts to 6");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('7') == 7, "'7' converts to 7");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('8') == 8, "'8' converts to 8");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('9') == 9, "'9' converts to 9");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('A') == 10,
+ "'A' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('B') == 11,
+ "'B' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('C') == 12,
+ "'C' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('D') == 13,
+ "'D' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('E') == 14,
+ "'E' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('F') == 15,
+ "'F' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('G') == 16,
+ "'G' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('H') == 17,
+ "'H' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('I') == 18,
+ "'I' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('J') == 19,
+ "'J' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('K') == 20,
+ "'K' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('L') == 21,
+ "'L' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('M') == 22,
+ "'M' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('N') == 23,
+ "'N' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('O') == 24,
+ "'O' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('P') == 25,
+ "'P' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('Q') == 26,
+ "'Q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('R') == 27,
+ "'R' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('S') == 28,
+ "'S' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('T') == 29,
+ "'T' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('U') == 30,
+ "'U' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('V') == 31,
+ "'V' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('W') == 32,
+ "'W' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('X') == 33,
+ "'X' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('Y') == 34,
+ "'Y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('Z') == 35,
+ "'Z' converts to 35");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('a') == 10,
+ "'a' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('b') == 11,
+ "'b' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('c') == 12,
+ "'c' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('d') == 13,
+ "'d' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('e') == 14,
+ "'e' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('f') == 15,
+ "'f' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('g') == 16,
+ "'g' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('h') == 17,
+ "'h' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('i') == 18,
+ "'i' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('j') == 19,
+ "'j' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('k') == 20,
+ "'k' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('l') == 21,
+ "'l' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('m') == 22,
+ "'m' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('n') == 23,
+ "'n' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('o') == 24,
+ "'o' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('p') == 25,
+ "'p' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('q') == 26,
+ "'q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('r') == 27,
+ "'r' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('s') == 28,
+ "'s' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('t') == 29,
+ "'t' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('u') == 30,
+ "'u' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('v') == 31,
+ "'v' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('w') == 32,
+ "'w' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('x') == 33,
+ "'x' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('y') == 34,
+ "'y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber('z') == 35,
+ "'z' converts to 35");
+
+ // char16_t
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'0') == 0,
+ "u'0' converts to 0");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'1') == 1,
+ "u'1' converts to 1");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'2') == 2,
+ "u'2' converts to 2");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'3') == 3,
+ "u'3' converts to 3");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'4') == 4,
+ "u'4' converts to 4");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'5') == 5,
+ "u'5' converts to 5");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'6') == 6,
+ "u'6' converts to 6");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'7') == 7,
+ "u'7' converts to 7");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'8') == 8,
+ "u'8' converts to 8");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'9') == 9,
+ "u'9' converts to 9");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'A') == 10,
+ "u'A' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'B') == 11,
+ "u'B' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'C') == 12,
+ "u'C' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'D') == 13,
+ "u'D' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'E') == 14,
+ "u'E' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'F') == 15,
+ "u'F' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'G') == 16,
+ "u'G' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'H') == 17,
+ "u'H' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'I') == 18,
+ "u'I' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'J') == 19,
+ "u'J' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'K') == 20,
+ "u'K' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'L') == 21,
+ "u'L' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'M') == 22,
+ "u'M' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'N') == 23,
+ "u'N' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'O') == 24,
+ "u'O' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'P') == 25,
+ "u'P' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'Q') == 26,
+ "u'Q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'R') == 27,
+ "u'R' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'S') == 28,
+ "u'S' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'T') == 29,
+ "u'T' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'U') == 30,
+ "u'U' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'V') == 31,
+ "u'V' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'W') == 32,
+ "u'W' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'X') == 33,
+ "u'X' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'Y') == 34,
+ "u'Y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'Z') == 35,
+ "u'Z' converts to 35");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'a') == 10,
+ "u'a' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'b') == 11,
+ "u'b' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'c') == 12,
+ "u'c' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'd') == 13,
+ "u'd' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'e') == 14,
+ "u'e' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'f') == 15,
+ "u'f' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'g') == 16,
+ "u'g' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'h') == 17,
+ "u'h' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'i') == 18,
+ "u'i' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'j') == 19,
+ "u'j' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'k') == 20,
+ "u'k' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'l') == 21,
+ "u'l' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'm') == 22,
+ "u'm' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'n') == 23,
+ "u'n' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'o') == 24,
+ "u'o' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'p') == 25,
+ "u'p' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'q') == 26,
+ "u'q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'r') == 27,
+ "u'r' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u's') == 28,
+ "u's' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u't') == 29,
+ "u't' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'u') == 30,
+ "u'u' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'v') == 31,
+ "u'v' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'w') == 32,
+ "u'w' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'x') == 33,
+ "u'x' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'y') == 34,
+ "u'y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(u'z') == 35,
+ "u'z' converts to 35");
+
+ // char32_t
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'0') == 0,
+ "U'0' converts to 0");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'1') == 1,
+ "U'1' converts to 1");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'2') == 2,
+ "U'2' converts to 2");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'3') == 3,
+ "U'3' converts to 3");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'4') == 4,
+ "U'4' converts to 4");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'5') == 5,
+ "U'5' converts to 5");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'6') == 6,
+ "U'6' converts to 6");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'7') == 7,
+ "U'7' converts to 7");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'8') == 8,
+ "U'8' converts to 8");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'9') == 9,
+ "U'9' converts to 9");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'A') == 10,
+ "U'A' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'B') == 11,
+ "U'B' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'C') == 12,
+ "U'C' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'D') == 13,
+ "U'D' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'E') == 14,
+ "U'E' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'F') == 15,
+ "U'F' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'G') == 16,
+ "U'G' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'H') == 17,
+ "U'H' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'I') == 18,
+ "U'I' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'J') == 19,
+ "U'J' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'K') == 20,
+ "U'K' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'L') == 21,
+ "U'L' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'M') == 22,
+ "U'M' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'N') == 23,
+ "U'N' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'O') == 24,
+ "U'O' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'P') == 25,
+ "U'P' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'Q') == 26,
+ "U'Q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'R') == 27,
+ "U'R' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'S') == 28,
+ "U'S' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'T') == 29,
+ "U'T' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'U') == 30,
+ "U'U' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'V') == 31,
+ "U'V' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'W') == 32,
+ "U'W' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'X') == 33,
+ "U'X' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'Y') == 34,
+ "U'Y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'Z') == 35,
+ "U'Z' converts to 35");
+
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'a') == 10,
+ "U'a' converts to 10");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'b') == 11,
+ "U'b' converts to 11");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'c') == 12,
+ "U'c' converts to 12");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'd') == 13,
+ "U'd' converts to 13");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'e') == 14,
+ "U'e' converts to 14");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'f') == 15,
+ "U'f' converts to 15");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'g') == 16,
+ "U'g' converts to 16");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'h') == 17,
+ "U'h' converts to 17");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'i') == 18,
+ "U'i' converts to 18");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'j') == 19,
+ "U'j' converts to 19");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'k') == 20,
+ "U'k' converts to 20");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'l') == 21,
+ "U'l' converts to 21");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'm') == 22,
+ "U'm' converts to 22");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'n') == 23,
+ "U'n' converts to 23");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'o') == 24,
+ "U'o' converts to 24");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'p') == 25,
+ "U'p' converts to 25");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'q') == 26,
+ "U'q' converts to 26");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'r') == 27,
+ "U'r' converts to 27");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U's') == 28,
+ "U's' converts to 28");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U't') == 29,
+ "U't' converts to 29");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'u') == 30,
+ "U'u' converts to 30");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'v') == 31,
+ "U'v' converts to 31");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'w') == 32,
+ "U'w' converts to 32");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'x') == 33,
+ "U'x' converts to 33");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'y') == 34,
+ "U'y' converts to 34");
+ MOZ_RELEASE_ASSERT(AsciiAlphanumericToNumber(U'z') == 35,
+ "U'z' converts to 35");
+}
+
+static void TestIsAsciiDigit() {
+ // char
+
+ static_assert(!IsAsciiDigit('/'), "'/' isn't an ASCII digit");
+ static_assert('/' == 0x2F, "'/' has value 0x2F");
+
+ static_assert('0' == 0x30, "'0' has value 0x30");
+ static_assert(IsAsciiDigit('0'), "'0' is an ASCII digit");
+ static_assert(IsAsciiDigit('1'), "'1' is an ASCII digit");
+ static_assert(IsAsciiDigit('5'), "'5' is an ASCII digit");
+ static_assert(IsAsciiDigit('8'), "'8' is an ASCII digit");
+ static_assert(IsAsciiDigit('9'), "'9' is an ASCII digit");
+
+ static_assert('9' == 0x39, "'9' has value 0x39");
+ static_assert(':' == 0x3A, "':' has value 0x3A");
+ static_assert(!IsAsciiDigit(':'), "':' isn't an ASCII digit");
+
+ static_assert(!IsAsciiDigit('@'), "'@' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('A'), "'A' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('B'), "'B' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('M'), "'M' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('Y'), "'Y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('Z'), "'Z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('['), "'[' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('`'), "'`' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('a'), "'a' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('b'), "'b' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('m'), "'m' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('y'), "'y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('z'), "'z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit('{'), "'{' isn't an ASCII digit");
+
+ // char16_t
+
+ static_assert(!IsAsciiDigit(u'/'), "u'/' isn't an ASCII digit");
+ static_assert(u'/' == 0x2F, "u'/' has value 0x2F");
+
+ static_assert(u'0' == 0x30, "u'0' has value 0x30");
+ static_assert(IsAsciiDigit(u'0'), "u'0' is an ASCII digit");
+ static_assert(IsAsciiDigit(u'1'), "u'1' is an ASCII digit");
+ static_assert(IsAsciiDigit(u'5'), "u'5' is an ASCII digit");
+ static_assert(IsAsciiDigit(u'8'), "u'8' is an ASCII digit");
+ static_assert(IsAsciiDigit(u'9'), "u'9' is an ASCII digit");
+
+ static_assert(u'9' == 0x39, "u'9' has value 0x39");
+ static_assert(u':' == 0x3A, "u':' has value 0x3A");
+ static_assert(!IsAsciiDigit(u':'), "u':' isn't an ASCII digit");
+
+ static_assert(!IsAsciiDigit(u'@'), "u'@' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'A'), "u'A' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'B'), "u'B' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'M'), "u'M' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'Y'), "u'Y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'Z'), "u'Z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'['), "u'[' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'`'), "u'`' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'a'), "u'a' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'b'), "u'b' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'm'), "u'm' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'y'), "u'y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'z'), "u'z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(u'{'), "u'{' isn't an ASCII digit");
+
+ // char32_t
+
+ static_assert(!IsAsciiDigit(U'/'), "U'/' isn't an ASCII digit");
+ static_assert(U'/' == 0x2F, "U'/' has value 0x2F");
+
+ static_assert(U'0' == 0x30, "U'0' has value 0x30");
+ static_assert(IsAsciiDigit(U'0'), "U'0' is an ASCII digit");
+ static_assert(IsAsciiDigit(U'1'), "U'1' is an ASCII digit");
+ static_assert(IsAsciiDigit(U'5'), "U'5' is an ASCII digit");
+ static_assert(IsAsciiDigit(U'8'), "U'8' is an ASCII digit");
+ static_assert(IsAsciiDigit(U'9'), "U'9' is an ASCII digit");
+
+ static_assert(U'9' == 0x39, "U'9' has value 0x39");
+ static_assert(U':' == 0x3A, "U':' has value 0x3A");
+ static_assert(!IsAsciiDigit(U':'), "U':' isn't an ASCII digit");
+
+ static_assert(!IsAsciiDigit(U'@'), "U'@' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'A'), "U'A' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'B'), "U'B' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'M'), "U'M' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'Y'), "U'Y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'Z'), "U'Z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'['), "U'[' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'`'), "U'`' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'a'), "U'a' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'b'), "U'b' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'm'), "U'm' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'y'), "U'y' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'z'), "U'z' isn't an ASCII digit");
+ static_assert(!IsAsciiDigit(U'{'), "U'{' isn't an ASCII digit");
+}
+
+int main() {
+ TestIsAscii();
+ TestIsAsciiNullTerminated();
+ TestIsAsciiAlpha();
+ TestIsAsciiUppercaseAlpha();
+ TestIsAsciiLowercaseAlpha();
+ TestIsAsciiAlphanumeric();
+ TestAsciiAlphanumericToNumber();
+ TestIsAsciiDigit();
+}
diff --git a/mfbt/tests/TestThreadSafeWeakPtr.cpp b/mfbt/tests/TestThreadSafeWeakPtr.cpp
new file mode 100644
index 0000000000..3670d15bc0
--- /dev/null
+++ b/mfbt/tests/TestThreadSafeWeakPtr.cpp
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/ThreadSafeWeakPtr.h"
+
+using mozilla::SupportsThreadSafeWeakPtr;
+using mozilla::ThreadSafeWeakPtr;
+
+// To have a class C support weak pointers, inherit from
+// SupportsThreadSafeWeakPtr<C>.
+class C : public SupportsThreadSafeWeakPtr<C> {
+ public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(C)
+
+ int mNum;
+
+ C() : mNum(0) {}
+
+ ~C() {
+ // Setting mNum in the destructor lets the tests below detect
+ // use-after-free.
+ mNum = 0xDEAD;
+ }
+
+ void act() {}
+};
+
+// Test that declaring a ThreadSafeWeakPtr pointing to an incomplete type
+// builds.
+class Incomplete;
+class D {
+ ThreadSafeWeakPtr<Incomplete> mMember;
+};
+
+int main() {
+ RefPtr<C> c1 = new C;
+ MOZ_RELEASE_ASSERT(c1->mNum == 0);
+
+ // Get weak pointers to c1. The first time a weak pointer is created, a
+ // reference-counted ThreadSafeWeakReference object is allocated that can
+ // live beyond the lifetime of 'c1' and that will be notified of 'c1's
+ // destruction.
+ ThreadSafeWeakPtr<C> w1(c1);
+ {
+ RefPtr<C> s1(w1);
+ // Test a weak pointer for validity before using it.
+ MOZ_RELEASE_ASSERT(s1);
+ MOZ_RELEASE_ASSERT(s1 == c1);
+ s1->mNum = 1;
+ s1->act();
+ }
+
+ // Test taking another ThreadSafeWeakPtr<C> to c1
+ ThreadSafeWeakPtr<C> w2(c1);
+ {
+ RefPtr<C> s2(w2);
+ MOZ_RELEASE_ASSERT(s2);
+ MOZ_RELEASE_ASSERT(s2 == c1);
+ MOZ_RELEASE_ASSERT(w1 == s2);
+ MOZ_RELEASE_ASSERT(s2->mNum == 1);
+ }
+
+ // Test that when a ThreadSafeWeakPtr is destroyed, it does not destroy the
+ // object that it points to, and it does not affect other ThreadSafeWeakPtrs
+ // pointing to the same object (e.g. it does not destroy the
+ // ThreadSafeWeakReference object).
+ {
+ ThreadSafeWeakPtr<C> w4local(c1);
+ MOZ_RELEASE_ASSERT(w4local == c1);
+ }
+ // Now w4local has gone out of scope. If that had destroyed c1, the
+ // following assertion would fail (see C::~C()).
+ MOZ_RELEASE_ASSERT(c1->mNum == 1);
+ // Check that w4local going out of scope hasn't affected other
+ // ThreadSafeWeakPtrs pointing to c1.
+ MOZ_RELEASE_ASSERT(w1 == c1);
+ MOZ_RELEASE_ASSERT(w2 == c1);
+
+ // Now construct another C object and test changing what object a
+ // ThreadSafeWeakPtr points to
+ RefPtr<C> c2 = new C;
+ c2->mNum = 2;
+ {
+ RefPtr<C> s2(w2);
+ MOZ_RELEASE_ASSERT(s2->mNum == 1); // w2 was pointing to c1
+ }
+ w2 = c2;
+ {
+ RefPtr<C> s2(w2);
+ MOZ_RELEASE_ASSERT(s2);
+ MOZ_RELEASE_ASSERT(s2 == c2);
+ MOZ_RELEASE_ASSERT(s2 != c1);
+ MOZ_RELEASE_ASSERT(w1 != s2);
+ MOZ_RELEASE_ASSERT(s2->mNum == 2);
+ }
+
+ // Destroying the underlying object clears weak pointers to it.
+ // It should not affect pointers that are not currently pointing to it.
+ c1 = nullptr;
+ {
+ RefPtr<C> s1(w1);
+ MOZ_RELEASE_ASSERT(
+ !s1, "Deleting an object should clear ThreadSafeWeakPtrs to it.");
+ MOZ_RELEASE_ASSERT(w1.IsDead(), "The weak pointer is now dead");
+ MOZ_RELEASE_ASSERT(!w1.IsNull(), "The weak pointer isn't null");
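+ // w1 is dead but not null: it still references the ThreadSafeWeakReference
+ // object, which outlives the C object it tracked (see above).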
+
+ RefPtr<C> s2(w2);
+ MOZ_RELEASE_ASSERT(s2,
+ "Deleting an object should not clear ThreadSafeWeakPtr "
+ "that are not pointing to it.");
+ MOZ_RELEASE_ASSERT(!w2.IsDead(), "The weak pointer isn't dead");
+ MOZ_RELEASE_ASSERT(!w2.IsNull(), "The weak pointer isn't null");
+ }
+
+ c2 = nullptr;
+ {
+ RefPtr<C> s2(w2);
+ MOZ_RELEASE_ASSERT(
+ !s2, "Deleting an object should clear ThreadSafeWeakPtrs to it.");
+ MOZ_RELEASE_ASSERT(w2.IsDead(), "The weak pointer is now dead");
+ MOZ_RELEASE_ASSERT(!w2.IsNull(), "The weak pointer isn't null");
+ }
+}
diff --git a/mfbt/tests/TestTypedEnum.cpp b/mfbt/tests/TestTypedEnum.cpp
new file mode 100644
index 0000000000..cddbb39e0b
--- /dev/null
+++ b/mfbt/tests/TestTypedEnum.cpp
@@ -0,0 +1,502 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/TypedEnumBits.h"
+
+#include <stdint.h>
+#include <type_traits>
+
+// A rough feature check for is_literal_type. Not very carefully checked.
+// Feel free to amend as needed. is_literal_type was removed in C++20.
+// We leave ANDROID out because it uses stlport, which doesn't have
+// std::is_literal_type.
+#if __cplusplus >= 201103L && __cplusplus < 202002L && !defined(ANDROID)
+# if defined(__clang__)
+/*
+ * Per Clang documentation, "Note that marketing version numbers should not
+ * be used to check for language features, as different vendors use different
+ * numbering schemes. Instead, use the feature checking macros."
+ */
+# ifndef __has_extension
+# define __has_extension \
+ __has_feature /* compatibility, for older versions of clang */
+# endif
+# if __has_extension(is_literal) && __has_include(<type_traits>)
+# define MOZ_HAVE_IS_LITERAL
+# endif
+# elif defined(__GNUC__) || defined(_MSC_VER)
+# define MOZ_HAVE_IS_LITERAL
+# endif
+#endif
+
+#if defined(MOZ_HAVE_IS_LITERAL)
+# include <type_traits>
+template <typename T>
+void RequireLiteralType() {
+ static_assert(std::is_literal_type<T>::value, "Expected a literal type");
+}
+#else // not MOZ_HAVE_IS_LITERAL
+template <typename T>
+void RequireLiteralType() {}
+#endif
+
+template <typename T>
+void RequireLiteralType(const T&) {
+ RequireLiteralType<T>();
+}
+
+enum class AutoEnum { A, B = -3, C };
+
+enum class CharEnum : char { A, B = 3, C };
+
+enum class AutoEnumBitField { A = 0x10, B = 0x20, C };
+
+enum class CharEnumBitField : char { A = 0x10, B, C = 0x40 };
+
+struct Nested {
+ enum class AutoEnum { A, B, C = -1 };
+
+ enum class CharEnum : char { A = 4, B, C = 1 };
+
+ enum class AutoEnumBitField { A, B = 0x20, C };
+
+ enum class CharEnumBitField : char { A = 1, B = 1, C = 1 };
+};
+
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(AutoEnumBitField)
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(CharEnumBitField)
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Nested::AutoEnumBitField)
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Nested::CharEnumBitField)
+
+#define MAKE_STANDARD_BITFIELD_FOR_TYPE(IntType) \
+ enum class BitFieldFor_##IntType : IntType { \
+ A = 1, \
+ B = 2, \
+ C = 4, \
+ }; \
+ MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(BitFieldFor_##IntType)
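+
+// For illustration, MAKE_STANDARD_BITFIELD_FOR_TYPE(int8_t) expands (modulo
+// formatting) to:
+//
+//   enum class BitFieldFor_int8_t : int8_t { A = 1, B = 2, C = 4 };
+//   MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(BitFieldFor_int8_t)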
+
+MAKE_STANDARD_BITFIELD_FOR_TYPE(int8_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(uint8_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(int16_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(uint16_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(int32_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(uint32_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(int64_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(uint64_t)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(char)
+typedef signed char signed_char;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(signed_char)
+typedef unsigned char unsigned_char;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(unsigned_char)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(short)
+typedef unsigned short unsigned_short;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(unsigned_short)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(int)
+typedef unsigned int unsigned_int;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(unsigned_int)
+MAKE_STANDARD_BITFIELD_FOR_TYPE(long)
+typedef unsigned long unsigned_long;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(unsigned_long)
+typedef long long long_long;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(long_long)
+typedef unsigned long long unsigned_long_long;
+MAKE_STANDARD_BITFIELD_FOR_TYPE(unsigned_long_long)
+
+#undef MAKE_STANDARD_BITFIELD_FOR_TYPE
+
+template <typename T>
+void TestNonConvertibilityForOneType() {
+ static_assert(!std::is_convertible_v<T, bool>, "should not be convertible");
+ static_assert(!std::is_convertible_v<T, int>, "should not be convertible");
+ static_assert(!std::is_convertible_v<T, uint64_t>,
+ "should not be convertible");
+
+ static_assert(!std::is_convertible_v<bool, T>, "should not be convertible");
+ static_assert(!std::is_convertible_v<int, T>, "should not be convertible");
+ static_assert(!std::is_convertible_v<uint64_t, T>,
+ "should not be convertible");
+}
+
+template <typename TypedEnum>
+void TestTypedEnumBasics() {
+ const TypedEnum a = TypedEnum::A;
+ int unused = int(a);
+ (void)unused;
+ RequireLiteralType(TypedEnum::A);
+ RequireLiteralType(a);
+ TestNonConvertibilityForOneType<TypedEnum>();
+}
+
+// Op wraps a bitwise binary operator, passed as a char template parameter,
+// and applies it to its arguments (aT1, aT2). For example,
+//
+// Op<'|'>(aT1, aT2)
+//
+// is the same as
+//
+// aT1 | aT2.
+//
+template <char o, typename T1, typename T2>
+auto Op(const T1& aT1, const T2& aT2)
+ -> decltype(aT1 | aT2) // See the static_asserts below --- the return type
+ // depends solely on the operands' types, not on the
+ // choice of operation.
+{
+ static_assert(std::is_same_v<decltype(aT1 | aT2), decltype(aT1 & aT2)>,
+ "binary ops should have the same result type");
+ static_assert(std::is_same_v<decltype(aT1 | aT2), decltype(aT1 ^ aT2)>,
+ "binary ops should have the same result type");
+
+ static_assert(o == '|' || o == '&' || o == '^',
+ "unexpected operator character");
+
+ return o == '|' ? aT1 | aT2 : o == '&' ? aT1 & aT2 : aT1 ^ aT2;
+}
+
+// OpAssign wraps a bitwise binary operator, passed as a char template
+// parameter, and applies the corresponding compound-assignment operator to its
+// arguments (aT1, aT2). For example,
+//
+// OpAssign<'|'>(aT1, aT2)
+//
+// is the same as
+//
+// aT1 |= aT2.
+//
+template <char o, typename T1, typename T2>
+T1& OpAssign(T1& aT1, const T2& aT2) {
+ static_assert(o == '|' || o == '&' || o == '^',
+ "unexpected operator character");
+
+ switch (o) {
+ case '|':
+ return aT1 |= aT2;
+ case '&':
+ return aT1 &= aT2;
+ case '^':
+ return aT1 ^= aT2;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Tests a single binary bitwise operator, using a single set of three operands.
+// The operations tested are:
+//
+// result = aT1 Op aT2;
+// result Op= aT3;
+//
+// Where Op is the operator specified by the char template parameter 'o' and
+// can be any of '|', '&', '^'.
+//
+// Note that the operands aT1, aT2, aT3 are intentionally passed with free
+// types (separate template parameters for each) because their type may
+// actually be different from TypedEnum:
+//
+// 1) Their type could be CastableTypedEnumResult<TypedEnum> if they are
+// the result of a bitwise operation themselves;
+// 2) In the non-C++11 legacy path, the type of enum values is also
+// different from TypedEnum.
+//
+template <typename TypedEnum, char o, typename T1, typename T2, typename T3>
+void TestBinOp(const T1& aT1, const T2& aT2, const T3& aT3) {
+ typedef typename mozilla::detail::UnsignedIntegerTypeForEnum<TypedEnum>::Type
+ UnsignedIntegerType;
+
+ // Part 1:
+ // Test the bitwise binary operator i.e.
+ // result = aT1 Op aT2;
+ auto result = Op<o>(aT1, aT2);
+
+ typedef decltype(result) ResultType;
+
+ RequireLiteralType<ResultType>();
+ TestNonConvertibilityForOneType<ResultType>();
+
+ UnsignedIntegerType unsignedIntegerResult =
+ Op<o>(UnsignedIntegerType(aT1), UnsignedIntegerType(aT2));
+
+ MOZ_RELEASE_ASSERT(unsignedIntegerResult == UnsignedIntegerType(result));
+ MOZ_RELEASE_ASSERT(TypedEnum(unsignedIntegerResult) == TypedEnum(result));
+ MOZ_RELEASE_ASSERT((!unsignedIntegerResult) == (!result));
+ MOZ_RELEASE_ASSERT((!!unsignedIntegerResult) == (!!result));
+ MOZ_RELEASE_ASSERT(bool(unsignedIntegerResult) == bool(result));
+
+ // Part 2:
+ // Test the compound-assignment operator, i.e.
+ // result Op= aT3;
+ TypedEnum newResult = result;
+ OpAssign<o>(newResult, aT3);
+ UnsignedIntegerType unsignedIntegerNewResult = unsignedIntegerResult;
+ OpAssign<o>(unsignedIntegerNewResult, UnsignedIntegerType(aT3));
+ MOZ_RELEASE_ASSERT(TypedEnum(unsignedIntegerNewResult) == newResult);
+
+ // Part 3:
+ // Test additional boolean operators that we unfortunately had to add to
+ // CastableTypedEnumResult at some point to please some compiler,
+ // even though bool convertibility should have been enough.
+ MOZ_RELEASE_ASSERT(result == TypedEnum(result));
+ MOZ_RELEASE_ASSERT(!(result != TypedEnum(result)));
+ MOZ_RELEASE_ASSERT((result && true) == bool(result));
+ MOZ_RELEASE_ASSERT((result && false) == false);
+ MOZ_RELEASE_ASSERT((true && result) == bool(result));
+ MOZ_RELEASE_ASSERT((false && result && false) == false);
+ MOZ_RELEASE_ASSERT((result || false) == bool(result));
+ MOZ_RELEASE_ASSERT((result || true) == true);
+ MOZ_RELEASE_ASSERT((false || result) == bool(result));
+ MOZ_RELEASE_ASSERT((true || result) == true);
+
+ // Part 4:
+ // Test short-circuit evaluation.
+ auto Explode = [] {
+ // This function should never be called. Return an arbitrary value.
+ MOZ_RELEASE_ASSERT(false);
+ return false;
+ };
+ if (result) {
+ MOZ_RELEASE_ASSERT(result || Explode());
+ MOZ_RELEASE_ASSERT(!(!result && Explode()));
+ } else {
+ MOZ_RELEASE_ASSERT(!(result && Explode()));
+ MOZ_RELEASE_ASSERT(!result || Explode());
+ }
+}
+
+// Similar to TestBinOp but testing the unary ~ operator.
+template <typename TypedEnum, typename T>
+void TestTilde(const T& aT) {
+ typedef typename mozilla::detail::UnsignedIntegerTypeForEnum<TypedEnum>::Type
+ UnsignedIntegerType;
+
+ auto result = ~aT;
+
+ typedef decltype(result) ResultType;
+
+ RequireLiteralType<ResultType>();
+ TestNonConvertibilityForOneType<ResultType>();
+
+ UnsignedIntegerType unsignedIntegerResult = ~(UnsignedIntegerType(aT));
+
+ MOZ_RELEASE_ASSERT(unsignedIntegerResult == UnsignedIntegerType(result));
+ MOZ_RELEASE_ASSERT(TypedEnum(unsignedIntegerResult) == TypedEnum(result));
+ MOZ_RELEASE_ASSERT((!unsignedIntegerResult) == (!result));
+ MOZ_RELEASE_ASSERT((!!unsignedIntegerResult) == (!!result));
+ MOZ_RELEASE_ASSERT(bool(unsignedIntegerResult) == bool(result));
+}
+
+// Helper dispatching a given triple of operands to all operator-specific
+// testing functions.
+template <typename TypedEnum, typename T1, typename T2, typename T3>
+void TestAllOpsForGivenOperands(const T1& aT1, const T2& aT2, const T3& aT3) {
+ TestBinOp<TypedEnum, '|'>(aT1, aT2, aT3);
+ TestBinOp<TypedEnum, '&'>(aT1, aT2, aT3);
+ TestBinOp<TypedEnum, '^'>(aT1, aT2, aT3);
+ TestTilde<TypedEnum>(aT1);
+}
+
+// Helper building various triples of operands using a given operator,
+// and testing all operators with them.
+template <typename TypedEnum, char o>
+void TestAllOpsForOperandsBuiltUsingGivenOp() {
+ // The type of enum values like TypedEnum::A may be different from
+ // TypedEnum. That is the case in the legacy non-C++11 path. We want to
+ // ensure good test coverage even when these two types are distinct.
+ // To that effect, we have both 'auto' typed variables, preserving the
+ // original type of enum values, and 'plain' typed variables, that
+ // are plain TypedEnum's.
+
+ const TypedEnum a_plain = TypedEnum::A;
+ const TypedEnum b_plain = TypedEnum::B;
+ const TypedEnum c_plain = TypedEnum::C;
+
+ auto a_auto = TypedEnum::A;
+ auto b_auto = TypedEnum::B;
+ auto c_auto = TypedEnum::C;
+
+ auto ab_plain = Op<o>(a_plain, b_plain);
+ auto bc_plain = Op<o>(b_plain, c_plain);
+ auto ab_auto = Op<o>(a_auto, b_auto);
+ auto bc_auto = Op<o>(b_auto, c_auto);
+
+ // On each row below, we pass a triple of operands. Keep in mind that this
+ // is going to be received as (aT1, aT2, aT3) and the actual tests performed
+ // will be of the form
+ //
+ // result = aT1 Op aT2;
+ // result Op= aT3;
+ //
+ // For this reason, we carefully ensure that the values of (aT1, aT2)
+ // systematically cover all types of such pairs; to limit complexity,
+ // we are not so careful with aT3, and we just try to pass aT3's
+ // that may lead to nontrivial bitwise operations.
+ TestAllOpsForGivenOperands<TypedEnum>(a_plain, b_plain, c_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(a_plain, bc_plain, b_auto);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_plain, c_plain, a_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_plain, bc_plain, a_auto);
+
+ TestAllOpsForGivenOperands<TypedEnum>(a_plain, b_auto, c_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(a_plain, bc_auto, b_auto);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_plain, c_auto, a_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_plain, bc_auto, a_auto);
+
+ TestAllOpsForGivenOperands<TypedEnum>(a_auto, b_plain, c_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(a_auto, bc_plain, b_auto);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_auto, c_plain, a_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_auto, bc_plain, a_auto);
+
+ TestAllOpsForGivenOperands<TypedEnum>(a_auto, b_auto, c_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(a_auto, bc_auto, b_auto);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_auto, c_auto, a_plain);
+ TestAllOpsForGivenOperands<TypedEnum>(ab_auto, bc_auto, a_auto);
+}
+
+// Tests all bitwise operations on a given TypedEnum bitfield.
+template <typename TypedEnum>
+void TestTypedEnumBitField() {
+ TestTypedEnumBasics<TypedEnum>();
+
+ TestAllOpsForOperandsBuiltUsingGivenOp<TypedEnum, '|'>();
+ TestAllOpsForOperandsBuiltUsingGivenOp<TypedEnum, '&'>();
+ TestAllOpsForOperandsBuiltUsingGivenOp<TypedEnum, '^'>();
+}
+
+// Checks that enum bitwise expressions have the same non-convertibility
+// properties as C++11 enum classes do, i.e. not implicitly convertible to
+// anything (though *explicitly* convertible).
+void TestNoConversionsBetweenUnrelatedTypes() {
+ // Two typed enum classes having the same underlying integer type, to ensure
+ // that we would catch bugs accidentally allowing conversions in that case.
+ typedef CharEnumBitField T1;
+ typedef Nested::CharEnumBitField T2;
+
+ static_assert(!std::is_convertible_v<T1, T2>, "should not be convertible");
+ static_assert(!std::is_convertible_v<T1, decltype(T2::A)>,
+ "should not be convertible");
+ static_assert(!std::is_convertible_v<T1, decltype(T2::A | T2::B)>,
+ "should not be convertible");
+
+ static_assert(!std::is_convertible_v<decltype(T1::A), T2>,
+ "should not be convertible");
+ static_assert(!std::is_convertible_v<decltype(T1::A), decltype(T2::A)>,
+ "should not be convertible");
+ static_assert(
+ !std::is_convertible_v<decltype(T1::A), decltype(T2::A | T2::B)>,
+ "should not be convertible");
+
+ static_assert(!std::is_convertible_v<decltype(T1::A | T1::B), T2>,
+ "should not be convertible");
+ static_assert(
+ !std::is_convertible_v<decltype(T1::A | T1::B), decltype(T2::A)>,
+ "should not be convertible");
+ static_assert(
+ !std::is_convertible_v<decltype(T1::A | T1::B), decltype(T2::A | T2::B)>,
+ "should not be convertible");
+}
+
+enum class Int8EnumWithHighBits : int8_t { A = 0x20, B = 0x40 };
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Int8EnumWithHighBits)
+
+enum class Uint8EnumWithHighBits : uint8_t { A = 0x40, B = 0x80 };
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Uint8EnumWithHighBits)
+
+enum class Int16EnumWithHighBits : int16_t { A = 0x2000, B = 0x4000 };
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Int16EnumWithHighBits)
+
+enum class Uint16EnumWithHighBits : uint16_t { A = 0x4000, B = 0x8000 };
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Uint16EnumWithHighBits)
+
+enum class Int32EnumWithHighBits : int32_t { A = 0x20000000, B = 0x40000000 };
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Int32EnumWithHighBits)
+
+enum class Uint32EnumWithHighBits : uint32_t {
+ A = 0x40000000u,
+ B = 0x80000000u
+};
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Uint32EnumWithHighBits)
+
+enum class Int64EnumWithHighBits : int64_t {
+ A = 0x2000000000000000ll,
+ B = 0x4000000000000000ll
+};
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Int64EnumWithHighBits)
+
+enum class Uint64EnumWithHighBits : uint64_t {
+ A = 0x4000000000000000ull,
+ B = 0x8000000000000000ull
+};
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Uint64EnumWithHighBits)
+
+// Checks that we don't accidentally truncate high bits by coercing to the wrong
+// integer type internally when implementing bitwise ops.
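+// For instance, if operator| coerced its operands through 'int', the high
+// bits of Uint64EnumWithHighBits::A and B would be lost, and the checks on
+// 'c = EnumType::A | EnumType::B' below would fail.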
+template <typename EnumType, typename IntType>
+void TestIsNotTruncated() {
+ EnumType a = EnumType::A;
+ EnumType b = EnumType::B;
+ MOZ_RELEASE_ASSERT(IntType(a));
+ MOZ_RELEASE_ASSERT(IntType(b));
+ MOZ_RELEASE_ASSERT(a | EnumType::B);
+ MOZ_RELEASE_ASSERT(a | b);
+ MOZ_RELEASE_ASSERT(EnumType::A | EnumType::B);
+ EnumType c = EnumType::A | EnumType::B;
+ MOZ_RELEASE_ASSERT(IntType(c));
+ MOZ_RELEASE_ASSERT(c & c);
+ MOZ_RELEASE_ASSERT(c | c);
+ MOZ_RELEASE_ASSERT(c == (EnumType::A | EnumType::B));
+ MOZ_RELEASE_ASSERT(a != (EnumType::A | EnumType::B));
+ MOZ_RELEASE_ASSERT(b != (EnumType::A | EnumType::B));
+ MOZ_RELEASE_ASSERT(c & EnumType::A);
+ MOZ_RELEASE_ASSERT(c & EnumType::B);
+ EnumType d = EnumType::A;
+ d |= EnumType::B;
+ MOZ_RELEASE_ASSERT(d == c);
+}
+
+int main() {
+ TestTypedEnumBasics<AutoEnum>();
+ TestTypedEnumBasics<CharEnum>();
+ TestTypedEnumBasics<Nested::AutoEnum>();
+ TestTypedEnumBasics<Nested::CharEnum>();
+
+ TestTypedEnumBitField<AutoEnumBitField>();
+ TestTypedEnumBitField<CharEnumBitField>();
+ TestTypedEnumBitField<Nested::AutoEnumBitField>();
+ TestTypedEnumBitField<Nested::CharEnumBitField>();
+
+ TestTypedEnumBitField<BitFieldFor_uint8_t>();
+ TestTypedEnumBitField<BitFieldFor_int8_t>();
+ TestTypedEnumBitField<BitFieldFor_uint16_t>();
+ TestTypedEnumBitField<BitFieldFor_int16_t>();
+ TestTypedEnumBitField<BitFieldFor_uint32_t>();
+ TestTypedEnumBitField<BitFieldFor_int32_t>();
+ TestTypedEnumBitField<BitFieldFor_uint64_t>();
+ TestTypedEnumBitField<BitFieldFor_int64_t>();
+ TestTypedEnumBitField<BitFieldFor_char>();
+ TestTypedEnumBitField<BitFieldFor_signed_char>();
+ TestTypedEnumBitField<BitFieldFor_unsigned_char>();
+ TestTypedEnumBitField<BitFieldFor_short>();
+ TestTypedEnumBitField<BitFieldFor_unsigned_short>();
+ TestTypedEnumBitField<BitFieldFor_int>();
+ TestTypedEnumBitField<BitFieldFor_unsigned_int>();
+ TestTypedEnumBitField<BitFieldFor_long>();
+ TestTypedEnumBitField<BitFieldFor_unsigned_long>();
+ TestTypedEnumBitField<BitFieldFor_long_long>();
+ TestTypedEnumBitField<BitFieldFor_unsigned_long_long>();
+
+ TestNoConversionsBetweenUnrelatedTypes();
+
+ TestIsNotTruncated<Int8EnumWithHighBits, int8_t>();
+ TestIsNotTruncated<Int16EnumWithHighBits, int16_t>();
+ TestIsNotTruncated<Int32EnumWithHighBits, int32_t>();
+ TestIsNotTruncated<Int64EnumWithHighBits, int64_t>();
+ TestIsNotTruncated<Uint8EnumWithHighBits, uint8_t>();
+ TestIsNotTruncated<Uint16EnumWithHighBits, uint16_t>();
+ TestIsNotTruncated<Uint32EnumWithHighBits, uint32_t>();
+ TestIsNotTruncated<Uint64EnumWithHighBits, uint64_t>();
+
+ return 0;
+}
diff --git a/mfbt/tests/TestUniquePtr.cpp b/mfbt/tests/TestUniquePtr.cpp
new file mode 100644
index 0000000000..03f9033fe5
--- /dev/null
+++ b/mfbt/tests/TestUniquePtr.cpp
@@ -0,0 +1,609 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stddef.h>
+
+#include <memory> // For unique_ptr
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/Vector.h"
+
+using mozilla::DefaultDelete;
+using mozilla::MakeUnique;
+using mozilla::UniqueFreePtr;
+using mozilla::UniquePtr;
+using mozilla::Vector;
+
+#define CHECK(c) \
+ do { \
+ bool cond = !!(c); \
+ MOZ_ASSERT(cond, "Failed assertion: " #c); \
+ if (!cond) { \
+ return false; \
+ } \
+ } while (false)
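+
+// Note: MOZ_ASSERT is fatal only in debug builds; in release builds CHECK
+// relies on the 'return false' so callers can propagate the failure.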
+
+typedef UniquePtr<int> NewInt;
+static_assert(sizeof(NewInt) == sizeof(int*), "stored most efficiently");
+
+static size_t gADestructorCalls = 0;
+
+struct A {
+ public:
+ A() : mX(0) {}
+ virtual ~A() { gADestructorCalls++; }
+
+ int mX;
+};
+
+static size_t gBDestructorCalls = 0;
+
+struct B : public A {
+ public:
+ B() : mY(1) {}
+ ~B() { gBDestructorCalls++; }
+
+ int mY;
+};
+
+typedef UniquePtr<A> UniqueA;
+typedef UniquePtr<B, UniqueA::DeleterType> UniqueB; // permit interconversion
+
+static_assert(sizeof(UniqueA) == sizeof(A*), "stored most efficiently");
+static_assert(sizeof(UniqueB) == sizeof(B*), "stored most efficiently");
+
+struct DeleterSubclass : UniqueA::DeleterType {};
+
+typedef UniquePtr<B, DeleterSubclass> UniqueC;
+static_assert(sizeof(UniqueC) == sizeof(B*), "stored most efficiently");
+
+static UniqueA ReturnUniqueA() { return UniqueA(new B); }
+
+static UniqueA ReturnLocalA() {
+ UniqueA a(new A);
+ return a;
+}
+
+static void TestDeleterType() {
+ // Make sure UniquePtr will use its deleter's pointer type if it defines one.
+ typedef int* Ptr;
+ struct Deleter {
+ typedef Ptr pointer;
+ Deleter() = default;
+ void operator()(int* p) { delete p; }
+ };
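+ // Without the 'pointer' typedef above, UniquePtr<Ptr, Deleter> would store
+ // a Ptr* (that is, an int**), and this construction wouldn't compile.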
+ UniquePtr<Ptr, Deleter> u(new int, Deleter());
+}
+
+static bool TestDefaultFreeGuts() {
+ static_assert(std::is_same_v<NewInt::DeleterType, DefaultDelete<int> >,
+ "weird deleter?");
+
+ NewInt n1(new int);
+ CHECK(n1);
+ CHECK(n1.get() != nullptr);
+
+ n1 = nullptr;
+ CHECK(!n1);
+ CHECK(n1.get() == nullptr);
+
+ int* p1 = new int;
+ n1.reset(p1);
+ CHECK(n1);
+ NewInt n2(std::move(n1));
+ CHECK(!n1);
+ CHECK(n1.get() == nullptr);
+ CHECK(n2.get() == p1);
+
+ std::swap(n1, n2);
+ CHECK(n1.get() == p1);
+ CHECK(n2.get() == nullptr);
+
+ n1.swap(n2);
+ CHECK(n1.get() == nullptr);
+ CHECK(n2.get() == p1);
+ delete n2.release();
+
+ CHECK(n1.get() == nullptr);
+ CHECK(n2 == nullptr);
+ CHECK(nullptr == n2);
+
+ int* p2 = new int;
+ int* p3 = new int;
+ n1.reset(p2);
+ n2.reset(p3);
+ CHECK(n1.get() == p2);
+ CHECK(n2.get() == p3);
+
+ n1.swap(n2);
+ CHECK(n2 != nullptr);
+ CHECK(nullptr != n2);
+ CHECK(n2.get() == p2);
+ CHECK(n1.get() == p3);
+
+ UniqueA a1;
+ CHECK(a1 == nullptr);
+ a1.reset(new A);
+ CHECK(gADestructorCalls == 0);
+ CHECK(a1->mX == 0);
+
+ B* bp1 = new B;
+ bp1->mX = 5;
+ CHECK(gBDestructorCalls == 0);
+ a1.reset(bp1);
+ CHECK(gADestructorCalls == 1);
+ CHECK(a1->mX == 5);
+ a1.reset(nullptr);
+ CHECK(gADestructorCalls == 2);
+ CHECK(gBDestructorCalls == 1);
+
+ B* bp2 = new B;
+ UniqueB b1(bp2);
+ UniqueA a2(nullptr);
+ a2 = std::move(b1);
+ CHECK(gADestructorCalls == 2);
+ CHECK(gBDestructorCalls == 1);
+
+ UniqueA a3(std::move(a2));
+ a3 = nullptr;
+ CHECK(gADestructorCalls == 3);
+ CHECK(gBDestructorCalls == 2);
+
+ B* bp3 = new B;
+ bp3->mX = 42;
+ UniqueB b2(bp3);
+ UniqueA a4(std::move(b2));
+ CHECK(b2.get() == nullptr);
+ CHECK((*a4).mX == 42);
+ CHECK(gADestructorCalls == 3);
+ CHECK(gBDestructorCalls == 2);
+
+ UniqueA a5(new A);
+ UniqueB b3(new B);
+ a5 = std::move(b3);
+ CHECK(gADestructorCalls == 4);
+ CHECK(gBDestructorCalls == 2);
+
+ ReturnUniqueA();
+ CHECK(gADestructorCalls == 5);
+ CHECK(gBDestructorCalls == 3);
+
+ ReturnLocalA();
+ CHECK(gADestructorCalls == 6);
+ CHECK(gBDestructorCalls == 3);
+
+ UniqueA a6(ReturnLocalA());
+ a6 = nullptr;
+ CHECK(gADestructorCalls == 7);
+ CHECK(gBDestructorCalls == 3);
+
+ UniqueC c1(new B);
+ UniqueA a7(new B);
+ a7 = std::move(c1);
+ CHECK(gADestructorCalls == 8);
+ CHECK(gBDestructorCalls == 4);
+
+ c1.reset(new B);
+
+ UniqueA a8(std::move(c1));
+ CHECK(gADestructorCalls == 8);
+ CHECK(gBDestructorCalls == 4);
+
+ // These smart pointers still own B resources.
+ CHECK(a4);
+ CHECK(a5);
+ CHECK(a7);
+ CHECK(a8);
+ return true;
+}
+
+static bool TestDefaultFree() {
+ CHECK(TestDefaultFreeGuts());
+ CHECK(gADestructorCalls == 12);
+ CHECK(gBDestructorCalls == 8);
+ return true;
+}
+
+static size_t FreeClassCounter = 0;
+
+struct FreeClass {
+ public:
+ FreeClass() = default;
+
+ void operator()(int* aPtr) {
+ FreeClassCounter++;
+ delete aPtr;
+ }
+};
+
+typedef UniquePtr<int, FreeClass> NewIntCustom;
+static_assert(sizeof(NewIntCustom) == sizeof(int*), "stored most efficiently");
+
+static bool TestFreeClass() {
+ CHECK(FreeClassCounter == 0);
+ {
+ NewIntCustom n1(new int);
+ CHECK(FreeClassCounter == 0);
+ }
+ CHECK(FreeClassCounter == 1);
+
+ NewIntCustom n2;
+ {
+ NewIntCustom n3(new int);
+ CHECK(FreeClassCounter == 1);
+ n2 = std::move(n3);
+ }
+ CHECK(FreeClassCounter == 1);
+ n2 = nullptr;
+ CHECK(FreeClassCounter == 2);
+
+ n2.reset(nullptr);
+ CHECK(FreeClassCounter == 2);
+ n2.reset(new int);
+ n2.reset();
+ CHECK(FreeClassCounter == 3);
+
+ NewIntCustom n4(new int, FreeClass());
+ CHECK(FreeClassCounter == 3);
+ n4.reset(new int);
+ CHECK(FreeClassCounter == 4);
+ n4.reset();
+ CHECK(FreeClassCounter == 5);
+
+ FreeClass f;
+ NewIntCustom n5(new int, f);
+ CHECK(FreeClassCounter == 5);
+ int* p = n5.release();
+ CHECK(FreeClassCounter == 5);
+ delete p;
+
+ return true;
+}
+
+typedef UniquePtr<int, DefaultDelete<int>&> IntDeleterRef;
+typedef UniquePtr<A, DefaultDelete<A>&> ADeleterRef;
+typedef UniquePtr<B, DefaultDelete<A>&> BDeleterRef;
+
+static_assert(sizeof(IntDeleterRef) > sizeof(int*),
+ "has to be heavier than an int* to store the reference");
+static_assert(sizeof(ADeleterRef) > sizeof(A*),
+ "has to be heavier than an A* to store the reference");
+static_assert(sizeof(BDeleterRef) > sizeof(B*),
+ "has to be heavier than a B* to store the reference");
+
+static bool TestReferenceDeleterGuts() {
+ DefaultDelete<int> delInt;
+ IntDeleterRef id1(new int, delInt);
+
+ IntDeleterRef id2(std::move(id1));
+ CHECK(id1 == nullptr);
+ CHECK(nullptr != id2);
+ CHECK(&id1.get_deleter() == &id2.get_deleter());
+
+ IntDeleterRef id3(std::move(id2));
+
+ DefaultDelete<A> delA;
+ ADeleterRef a1(new A, delA);
+ a1.reset(nullptr);
+ a1.reset(new B);
+ a1 = nullptr;
+
+ BDeleterRef b1(new B, delA);
+ a1 = std::move(b1);
+
+ BDeleterRef b2(new B, delA);
+
+ ADeleterRef a2(std::move(b2));
+
+ return true;
+}
+
+static bool TestReferenceDeleter() {
+ gADestructorCalls = 0;
+ gBDestructorCalls = 0;
+
+ CHECK(TestReferenceDeleterGuts());
+
+ CHECK(gADestructorCalls == 4);
+ CHECK(gBDestructorCalls == 3);
+
+ gADestructorCalls = 0;
+ gBDestructorCalls = 0;
+ return true;
+}
+
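+// A deleter that is a reference to a function: a UniquePtr using it must
+// carry the reference along with the owned pointer, so it is larger than a
+// raw pointer.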
+typedef void (&FreeSignature)(void*);
+
+static size_t DeleteIntFunctionCallCount = 0;
+
+static void DeleteIntFunction(void* aPtr) {
+ DeleteIntFunctionCallCount++;
+ delete static_cast<int*>(aPtr);
+}
+
+static void SetMallocedInt(UniquePtr<int, FreeSignature>& aPtr, int aI) {
+ int* newPtr = static_cast<int*>(malloc(sizeof(int)));
+ *newPtr = aI;
+ aPtr.reset(newPtr);
+}
+
+static UniquePtr<int, FreeSignature> MallocedInt(int aI) {
+ UniquePtr<int, FreeSignature> ptr(static_cast<int*>(malloc(sizeof(int))),
+ free);
+ *ptr = aI;
+ return ptr;
+}
+static bool TestFunctionReferenceDeleter() {
+ // Look for allocator mismatches and leaks to verify these bits
+ UniquePtr<int, FreeSignature> i1(MallocedInt(17));
+ CHECK(*i1 == 17);
+
+ SetMallocedInt(i1, 42);
+ CHECK(*i1 == 42);
+
+ // These bits use a custom deleter so we can instrument deletion.
+ {
+ UniquePtr<int, FreeSignature> i2 =
+        UniquePtr<int, FreeSignature>(new int(42), DeleteIntFunction);
+ CHECK(DeleteIntFunctionCallCount == 0);
+
+    i2.reset(new int(76));
+ CHECK(DeleteIntFunctionCallCount == 1);
+ }
+
+ CHECK(DeleteIntFunctionCallCount == 2);
+
+ return true;
+}
+
+template <typename T>
+struct AppendNullptrTwice {
+ AppendNullptrTwice() = default;
+
+ bool operator()(Vector<T>& vec) {
+ CHECK(vec.append(nullptr));
+ CHECK(vec.append(nullptr));
+ return true;
+ }
+};
+
+static size_t AAfter;
+static size_t BAfter;
+
+static bool TestVectorGuts() {
+ Vector<UniqueA> vec;
+ CHECK(vec.append(new B));
+ CHECK(vec.append(new A));
+ CHECK(AppendNullptrTwice<UniqueA>()(vec));
+ CHECK(vec.append(new B));
+
+ size_t initialLength = vec.length();
+
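+  // Append elements until the Vector reallocates (observable as a change in
+  // begin()), forcing it to move the owned UniquePtrs into new storage.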
+ UniqueA* begin = vec.begin();
+ bool appendA = true;
+ do {
+ CHECK(appendA ? vec.append(new A) : vec.append(new B));
+ appendA = !appendA;
+ } while (begin == vec.begin());
+
+ size_t numAppended = vec.length() - initialLength;
+
+ BAfter = numAppended / 2;
+ AAfter = numAppended - numAppended / 2;
+
+ CHECK(gADestructorCalls == 0);
+ CHECK(gBDestructorCalls == 0);
+ return true;
+}
+
+static bool TestVector() {
+ gADestructorCalls = 0;
+ gBDestructorCalls = 0;
+
+ CHECK(TestVectorGuts());
+
+ CHECK(gADestructorCalls == 3 + AAfter + BAfter);
+ CHECK(gBDestructorCalls == 2 + BAfter);
+ return true;
+}
+
+typedef UniquePtr<int[]> IntArray;
+static_assert(sizeof(IntArray) == sizeof(int*), "stored most efficiently");
+
+static bool TestArray() {
+ static_assert(std::is_same_v<IntArray::DeleterType, DefaultDelete<int[]> >,
+ "weird deleter?");
+
+ IntArray n1(new int[5]);
+ CHECK(n1);
+ CHECK(n1.get() != nullptr);
+
+ n1 = nullptr;
+ CHECK(!n1);
+ CHECK(n1.get() == nullptr);
+
+ int* p1 = new int[42];
+ n1.reset(p1);
+ CHECK(n1);
+ IntArray n2(std::move(n1));
+ CHECK(!n1);
+ CHECK(n1.get() == nullptr);
+ CHECK(n2.get() == p1);
+
+ std::swap(n1, n2);
+ CHECK(n1.get() == p1);
+ CHECK(n2.get() == nullptr);
+
+ n1.swap(n2);
+ CHECK(n1.get() == nullptr);
+ CHECK(n2.get() == p1);
+ delete[] n2.release();
+
+ CHECK(n1.get() == nullptr);
+ CHECK(n2.get() == nullptr);
+
+ int* p2 = new int[7];
+ int* p3 = new int[42];
+ n1.reset(p2);
+ n2.reset(p3);
+ CHECK(n1.get() == p2);
+ CHECK(n2.get() == p3);
+
+ n1.swap(n2);
+ CHECK(n2.get() == p2);
+ CHECK(n1.get() == p3);
+
+ n1 = std::move(n2);
+ CHECK(n1.get() == p2);
+ n1 = std::move(n2);
+ CHECK(n1.get() == nullptr);
+
+ UniquePtr<A[]> a1(new A[17]);
+ static_assert(sizeof(a1) == sizeof(A*), "stored most efficiently");
+
+ UniquePtr<A[]> a2(new A[5], DefaultDelete<A[]>());
+ a2.reset(nullptr);
+ a2.reset(new A[17]);
+ a2 = nullptr;
+
+ UniquePtr<A[]> a3(nullptr);
+ a3.reset(new A[7]);
+
+ return true;
+}
+
+struct Q {
+ Q() = default;
+ Q(const Q&) = default;
+
+ Q(Q&, char) {}
+
+ template <typename T>
+ Q(Q, T&&, int) {}
+
+ Q(int, long, double, void*) {}
+};
+
+static int randomInt() { return 4; }
+
+static bool TestMakeUnique() {
+ UniquePtr<int> a1(MakeUnique<int>());
+ UniquePtr<long> a2(MakeUnique<long>(4));
+
+ // no args, easy
+ UniquePtr<Q> q0(MakeUnique<Q>());
+
+ // temporary bound to const lval ref
+ UniquePtr<Q> q1(MakeUnique<Q>(Q()));
+
+ // passing through a non-const lval ref
+ UniquePtr<Q> q2(MakeUnique<Q>(*q1, 'c'));
+
+ // pass by copying, forward a temporary, pass by value
+ UniquePtr<Q> q3(MakeUnique<Q>(Q(), UniquePtr<int>(), randomInt()));
+
+ // various type mismatching to test "fuzzy" forwarding
+ UniquePtr<Q> q4(MakeUnique<Q>('s', 66LL, 3.141592654, &q3));
+
+ UniquePtr<char[]> c1(MakeUnique<char[]>(5));
+
+ return true;
+}
+
+static bool TestVoid() {
+ // UniquePtr<void> supports all operations except operator*() and
+ // operator->().
+ UniqueFreePtr<void> p1(malloc(1));
+ UniqueFreePtr<void> p2;
+
+ auto x = p1.get();
+ CHECK(x != nullptr);
+ CHECK((std::is_same_v<decltype(x), void*>));
+
+ p2.reset(p1.release());
+ CHECK(p1.get() == nullptr);
+ CHECK(p2.get() != nullptr);
+
+ p1 = std::move(p2);
+ CHECK(p1);
+ CHECK(!p2);
+
+ p1.swap(p2);
+ CHECK(!p1);
+ CHECK(p2);
+
+ p2 = nullptr;
+ CHECK(!p2);
+
+ return true;
+}
+
+static bool TestTempPtrToSetter() {
+ static int sFooRefcount = 0;
+ struct Foo {
+ Foo() { sFooRefcount += 1; }
+
+ ~Foo() { sFooRefcount -= 1; }
+ };
+
+ const auto AllocByOutvar = [](Foo** out) -> bool {
+ *out = new Foo;
+ return true;
+ };
+
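+  // TempPtrToSetter adapts a smart pointer to a T** out-parameter API: the
+  // raw pointer written through it is adopted by the smart pointer once the
+  // returned temporary goes away.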
+ {
+ UniquePtr<Foo> f;
+ (void)AllocByOutvar(mozilla::TempPtrToSetter(&f));
+ CHECK(sFooRefcount == 1);
+ }
+ CHECK(sFooRefcount == 0);
+
+ {
+ std::unique_ptr<Foo> f;
+ (void)AllocByOutvar(mozilla::TempPtrToSetter(&f));
+ CHECK(sFooRefcount == 1);
+ }
+ CHECK(sFooRefcount == 0);
+
+ return true;
+}
+
+int main() {
+ TestDeleterType();
+
+ if (!TestDefaultFree()) {
+ return 1;
+ }
+ if (!TestFreeClass()) {
+ return 1;
+ }
+ if (!TestReferenceDeleter()) {
+ return 1;
+ }
+ if (!TestFunctionReferenceDeleter()) {
+ return 1;
+ }
+ if (!TestVector()) {
+ return 1;
+ }
+ if (!TestArray()) {
+ return 1;
+ }
+ if (!TestMakeUnique()) {
+ return 1;
+ }
+ if (!TestVoid()) {
+ return 1;
+ }
+ if (!TestTempPtrToSetter()) {
+ return 1;
+ }
+ return 0;
+}
diff --git a/mfbt/tests/TestUtf8.cpp b/mfbt/tests/TestUtf8.cpp
new file mode 100644
index 0000000000..b3ff9e9ee8
--- /dev/null
+++ b/mfbt/tests/TestUtf8.cpp
@@ -0,0 +1,758 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#define MOZ_PRETEND_NO_JSRUST 1
+
+#include "mozilla/Utf8.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/EnumSet.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/Span.h"
+
+using mozilla::ArrayLength;
+using mozilla::AsChars;
+using mozilla::DecodeOneUtf8CodePoint;
+using mozilla::EnumSet;
+using mozilla::IntegerRange;
+using mozilla::IsAscii;
+using mozilla::IsUtf8;
+using mozilla::Span;
+using mozilla::Utf8Unit;
+
+// Disable the C++ 2a warning. See bug #1509926
+#if defined(__clang__) && (__clang_major__ >= 6)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wc++2a-compat"
+#endif
+
+static void TestUtf8Unit() {
+ Utf8Unit c('A');
+ MOZ_RELEASE_ASSERT(c.toChar() == 'A');
+ MOZ_RELEASE_ASSERT(c == Utf8Unit('A'));
+ MOZ_RELEASE_ASSERT(c != Utf8Unit('B'));
+ MOZ_RELEASE_ASSERT(c.toUint8() == 0x41);
+
+ unsigned char asUnsigned = 'A';
+ MOZ_RELEASE_ASSERT(c.toUnsignedChar() == asUnsigned);
+ MOZ_RELEASE_ASSERT(Utf8Unit('B').toUnsignedChar() != asUnsigned);
+
+ Utf8Unit first('@');
+ Utf8Unit second('#');
+
+ MOZ_RELEASE_ASSERT(first != second);
+
+ first = second;
+ MOZ_RELEASE_ASSERT(first == second);
+}
+
+template <typename Char>
+struct ToUtf8Units {
+ public:
+ explicit ToUtf8Units(const Char* aStart, const Char* aEnd)
+ : lead(Utf8Unit(aStart[0])), iter(aStart + 1), end(aEnd) {
+ MOZ_RELEASE_ASSERT(!IsAscii(aStart[0]));
+ }
+
+ const Utf8Unit lead;
+ const Char* iter;
+ const Char* const end;
+};
+
+class AssertIfCalled {
+ public:
+ template <typename... Args>
+ void operator()(Args&&... aArgs) {
+ MOZ_RELEASE_ASSERT(false, "AssertIfCalled instance was called");
+ }
+};
+
+// NOTE: For simplicity in treating |aCharN| identically regardless of whether
+// it's a string literal or a more-generalized array, we require |aCharN| be
+// null-terminated.
+
+template <typename Char, size_t N>
+static void ExpectValidCodePoint(const Char (&aCharN)[N],
+ char32_t aExpectedCodePoint) {
+ MOZ_RELEASE_ASSERT(aCharN[N - 1] == 0,
+ "array must be null-terminated for |aCharN + N - 1| to "
+ "compute the value of |aIter| as altered by "
+ "DecodeOneUtf8CodePoint");
+
+ ToUtf8Units<Char> simpleUnit(aCharN, aCharN + N - 1);
+ auto simple =
+ DecodeOneUtf8CodePoint(simpleUnit.lead, &simpleUnit.iter, simpleUnit.end);
+ MOZ_RELEASE_ASSERT(simple.isSome());
+ MOZ_RELEASE_ASSERT(*simple == aExpectedCodePoint);
+ MOZ_RELEASE_ASSERT(simpleUnit.iter == simpleUnit.end);
+
+ ToUtf8Units<Char> complexUnit(aCharN, aCharN + N - 1);
+ auto complex = DecodeOneUtf8CodePoint(
+ complexUnit.lead, &complexUnit.iter, complexUnit.end, AssertIfCalled(),
+ AssertIfCalled(), AssertIfCalled(), AssertIfCalled(), AssertIfCalled());
+ MOZ_RELEASE_ASSERT(complex.isSome());
+ MOZ_RELEASE_ASSERT(*complex == aExpectedCodePoint);
+ MOZ_RELEASE_ASSERT(complexUnit.iter == complexUnit.end);
+}
+
+enum class InvalidUtf8Reason {
+ BadLeadUnit,
+ NotEnoughUnits,
+ BadTrailingUnit,
+ BadCodePoint,
+ NotShortestForm,
+};
+
+template <typename Char, size_t N>
+static void ExpectInvalidCodePointHelper(const Char (&aCharN)[N],
+ InvalidUtf8Reason aExpectedReason,
+ uint8_t aExpectedUnitsAvailable,
+ uint8_t aExpectedUnitsNeeded,
+ char32_t aExpectedBadCodePoint,
+ uint8_t aExpectedUnitsObserved) {
+ MOZ_RELEASE_ASSERT(aCharN[N - 1] == 0,
+ "array must be null-terminated for |aCharN + N - 1| to "
+ "compute the value of |aIter| as altered by "
+ "DecodeOneUtf8CodePoint");
+
+ ToUtf8Units<Char> simpleUnit(aCharN, aCharN + N - 1);
+ auto simple =
+ DecodeOneUtf8CodePoint(simpleUnit.lead, &simpleUnit.iter, simpleUnit.end);
+ MOZ_RELEASE_ASSERT(simple.isNothing());
+ MOZ_RELEASE_ASSERT(static_cast<const void*>(simpleUnit.iter) == aCharN);
+
+ EnumSet<InvalidUtf8Reason> reasons;
+ uint8_t unitsAvailable;
+ uint8_t unitsNeeded;
+ char32_t badCodePoint;
+ uint8_t unitsObserved;
+
+ ToUtf8Units<Char> complexUnit(aCharN, aCharN + N - 1);
+ auto complex = DecodeOneUtf8CodePoint(
+ complexUnit.lead, &complexUnit.iter, complexUnit.end,
+ [&reasons]() { reasons += InvalidUtf8Reason::BadLeadUnit; },
+ [&reasons, &unitsAvailable, &unitsNeeded](uint8_t aUnitsAvailable,
+ uint8_t aUnitsNeeded) {
+ reasons += InvalidUtf8Reason::NotEnoughUnits;
+ unitsAvailable = aUnitsAvailable;
+ unitsNeeded = aUnitsNeeded;
+ },
+ [&reasons, &unitsObserved](uint8_t aUnitsObserved) {
+ reasons += InvalidUtf8Reason::BadTrailingUnit;
+ unitsObserved = aUnitsObserved;
+ },
+ [&reasons, &badCodePoint, &unitsObserved](char32_t aBadCodePoint,
+ uint8_t aUnitsObserved) {
+ reasons += InvalidUtf8Reason::BadCodePoint;
+ badCodePoint = aBadCodePoint;
+ unitsObserved = aUnitsObserved;
+ },
+ [&reasons, &badCodePoint, &unitsObserved](char32_t aBadCodePoint,
+ uint8_t aUnitsObserved) {
+ reasons += InvalidUtf8Reason::NotShortestForm;
+ badCodePoint = aBadCodePoint;
+ unitsObserved = aUnitsObserved;
+ });
+ MOZ_RELEASE_ASSERT(complex.isNothing());
+ MOZ_RELEASE_ASSERT(static_cast<const void*>(complexUnit.iter) == aCharN);
+
+ bool alreadyIterated = false;
+ for (InvalidUtf8Reason reason : reasons) {
+ MOZ_RELEASE_ASSERT(!alreadyIterated);
+ alreadyIterated = true;
+
+ switch (reason) {
+ case InvalidUtf8Reason::BadLeadUnit:
+ break;
+
+ case InvalidUtf8Reason::NotEnoughUnits:
+ MOZ_RELEASE_ASSERT(unitsAvailable == aExpectedUnitsAvailable);
+ MOZ_RELEASE_ASSERT(unitsNeeded == aExpectedUnitsNeeded);
+ break;
+
+ case InvalidUtf8Reason::BadTrailingUnit:
+ MOZ_RELEASE_ASSERT(unitsObserved == aExpectedUnitsObserved);
+ break;
+
+ case InvalidUtf8Reason::BadCodePoint:
+ MOZ_RELEASE_ASSERT(badCodePoint == aExpectedBadCodePoint);
+ MOZ_RELEASE_ASSERT(unitsObserved == aExpectedUnitsObserved);
+ break;
+
+ case InvalidUtf8Reason::NotShortestForm:
+ MOZ_RELEASE_ASSERT(badCodePoint == aExpectedBadCodePoint);
+ MOZ_RELEASE_ASSERT(unitsObserved == aExpectedUnitsObserved);
+ break;
+ }
+ }
+}
+
+// NOTE: For simplicity in treating |aCharN| identically regardless of whether
+// it's a string literal or a more-generalized array, we require |aCharN| be
+// null-terminated in all these functions.
+
+template <typename Char, size_t N>
+static void ExpectBadLeadUnit(const Char (&aCharN)[N]) {
+ ExpectInvalidCodePointHelper(aCharN, InvalidUtf8Reason::BadLeadUnit, 0xFF,
+ 0xFF, 0xFFFFFFFF, 0xFF);
+}
+
+template <typename Char, size_t N>
+static void ExpectNotEnoughUnits(const Char (&aCharN)[N],
+ uint8_t aExpectedUnitsAvailable,
+ uint8_t aExpectedUnitsNeeded) {
+ ExpectInvalidCodePointHelper(aCharN, InvalidUtf8Reason::NotEnoughUnits,
+ aExpectedUnitsAvailable, aExpectedUnitsNeeded,
+ 0xFFFFFFFF, 0xFF);
+}
+
+template <typename Char, size_t N>
+static void ExpectBadTrailingUnit(const Char (&aCharN)[N],
+ uint8_t aExpectedUnitsObserved) {
+ ExpectInvalidCodePointHelper(aCharN, InvalidUtf8Reason::BadTrailingUnit, 0xFF,
+ 0xFF, 0xFFFFFFFF, aExpectedUnitsObserved);
+}
+
+template <typename Char, size_t N>
+static void ExpectNotShortestForm(const Char (&aCharN)[N],
+ char32_t aExpectedBadCodePoint,
+ uint8_t aExpectedUnitsObserved) {
+ ExpectInvalidCodePointHelper(aCharN, InvalidUtf8Reason::NotShortestForm, 0xFF,
+ 0xFF, aExpectedBadCodePoint,
+ aExpectedUnitsObserved);
+}
+
+template <typename Char, size_t N>
+static void ExpectBadCodePoint(const Char (&aCharN)[N],
+ char32_t aExpectedBadCodePoint,
+ uint8_t aExpectedUnitsObserved) {
+ ExpectInvalidCodePointHelper(aCharN, InvalidUtf8Reason::BadCodePoint, 0xFF,
+ 0xFF, aExpectedBadCodePoint,
+ aExpectedUnitsObserved);
+}
+
+static void TestIsUtf8() {
+ // Note we include the U+0000 NULL in this one -- and that's fine.
+ static const char asciiBytes[] = u8"How about a nice game of chess?";
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(asciiBytes, ArrayLength(asciiBytes))));
+
+ static const char endNonAsciiBytes[] = u8"Life is like a 🌯";
+ MOZ_RELEASE_ASSERT(
+ IsUtf8(Span(endNonAsciiBytes, ArrayLength(endNonAsciiBytes) - 1)));
+
+ static const unsigned char badLeading[] = {0x80};
+ MOZ_RELEASE_ASSERT(
+ !IsUtf8(AsChars(Span(badLeading, ArrayLength(badLeading)))));
+
+ // Byte-counts
+
+ // 1
+ static const char oneBytes[] = u8"A"; // U+0041 LATIN CAPITAL LETTER A
+ constexpr size_t oneBytesLen = ArrayLength(oneBytes);
+ static_assert(oneBytesLen == 2, "U+0041 plus nul");
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(oneBytes, oneBytesLen)));
+
+ // 2
+ static const char twoBytes[] = u8"؆"; // U+0606 ARABIC-INDIC CUBE ROOT
+ constexpr size_t twoBytesLen = ArrayLength(twoBytes);
+ static_assert(twoBytesLen == 3, "U+0606 in two bytes plus nul");
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(twoBytes, twoBytesLen)));
+
+ ExpectValidCodePoint(twoBytes, 0x0606);
+
+ // 3
+ static const char threeBytes[] = u8"᨞"; // U+1A1E BUGINESE PALLAWA
+ constexpr size_t threeBytesLen = ArrayLength(threeBytes);
+ static_assert(threeBytesLen == 4, "U+1A1E in three bytes plus nul");
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(threeBytes, threeBytesLen)));
+
+ ExpectValidCodePoint(threeBytes, 0x1A1E);
+
+ // 4
+ static const char fourBytes[] =
+ u8"🁡"; // U+1F061 DOMINO TILE HORIZONTAL-06-06
+ constexpr size_t fourBytesLen = ArrayLength(fourBytes);
+ static_assert(fourBytesLen == 5, "U+1F061 in four bytes plus nul");
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(fourBytes, fourBytesLen)));
+
+ ExpectValidCodePoint(fourBytes, 0x1F061);
+
+ // Max code point
+ static const char maxCodePoint[] = u8"􏿿"; // U+10FFFF
+ constexpr size_t maxCodePointLen = ArrayLength(maxCodePoint);
+ static_assert(maxCodePointLen == 5, "U+10FFFF in four bytes plus nul");
+ MOZ_RELEASE_ASSERT(IsUtf8(Span(maxCodePoint, maxCodePointLen)));
+
+ ExpectValidCodePoint(maxCodePoint, 0x10FFFF);
+
+ // One past max code point
+ static const unsigned char onePastMaxCodePoint[] = {0xF4, 0x90, 0x80, 0x80,
+ 0x0};
+ constexpr size_t onePastMaxCodePointLen = ArrayLength(onePastMaxCodePoint);
+ MOZ_RELEASE_ASSERT(
+ !IsUtf8(AsChars(Span(onePastMaxCodePoint, onePastMaxCodePointLen))));
+
+ ExpectBadCodePoint(onePastMaxCodePoint, 0x110000, 4);
+
+ // Surrogate-related testing
+
+ // (Note that the various code unit sequences here are null-terminated to
+ // simplify life for ExpectValidCodePoint, which presumes null termination.)
+
+ static const unsigned char justBeforeSurrogates[] = {0xED, 0x9F, 0xBF, 0x0};
+ constexpr size_t justBeforeSurrogatesLen =
+ ArrayLength(justBeforeSurrogates) - 1;
+ MOZ_RELEASE_ASSERT(
+ IsUtf8(AsChars(Span(justBeforeSurrogates, justBeforeSurrogatesLen))));
+
+ ExpectValidCodePoint(justBeforeSurrogates, 0xD7FF);
+
+ static const unsigned char leastSurrogate[] = {0xED, 0xA0, 0x80, 0x0};
+ constexpr size_t leastSurrogateLen = ArrayLength(leastSurrogate) - 1;
+ MOZ_RELEASE_ASSERT(!IsUtf8(AsChars(Span(leastSurrogate, leastSurrogateLen))));
+
+ ExpectBadCodePoint(leastSurrogate, 0xD800, 3);
+
+ static const unsigned char arbitraryHighSurrogate[] = {0xED, 0xA2, 0x87, 0x0};
+ constexpr size_t arbitraryHighSurrogateLen =
+ ArrayLength(arbitraryHighSurrogate) - 1;
+ MOZ_RELEASE_ASSERT(!IsUtf8(
+ AsChars(Span(arbitraryHighSurrogate, arbitraryHighSurrogateLen))));
+
+ ExpectBadCodePoint(arbitraryHighSurrogate, 0xD887, 3);
+
+ static const unsigned char arbitraryLowSurrogate[] = {0xED, 0xB7, 0xAF, 0x0};
+ constexpr size_t arbitraryLowSurrogateLen =
+ ArrayLength(arbitraryLowSurrogate) - 1;
+ MOZ_RELEASE_ASSERT(
+ !IsUtf8(AsChars(Span(arbitraryLowSurrogate, arbitraryLowSurrogateLen))));
+
+ ExpectBadCodePoint(arbitraryLowSurrogate, 0xDDEF, 3);
+
+ static const unsigned char greatestSurrogate[] = {0xED, 0xBF, 0xBF, 0x0};
+ constexpr size_t greatestSurrogateLen = ArrayLength(greatestSurrogate) - 1;
+ MOZ_RELEASE_ASSERT(
+ !IsUtf8(AsChars(Span(greatestSurrogate, greatestSurrogateLen))));
+
+ ExpectBadCodePoint(greatestSurrogate, 0xDFFF, 3);
+
+ static const unsigned char justAfterSurrogates[] = {0xEE, 0x80, 0x80, 0x0};
+ constexpr size_t justAfterSurrogatesLen =
+ ArrayLength(justAfterSurrogates) - 1;
+ MOZ_RELEASE_ASSERT(
+ IsUtf8(AsChars(Span(justAfterSurrogates, justAfterSurrogatesLen))));
+
+ ExpectValidCodePoint(justAfterSurrogates, 0xE000);
+}
+
+static void TestDecodeOneValidUtf8CodePoint() {
+ // NOTE: DecodeOneUtf8CodePoint decodes only *non*-ASCII code points that
+ // consist of multiple code units, so there are no ASCII tests below.
+
+ // Length two.
+
+ ExpectValidCodePoint(u8"€", 0x80); // <control>
+ ExpectValidCodePoint(u8"©", 0xA9); // COPYRIGHT SIGN
+ ExpectValidCodePoint(u8"¶", 0xB6); // PILCROW SIGN
+ ExpectValidCodePoint(u8"¾", 0xBE); // VULGAR FRACTION THREE QUARTERS
+ ExpectValidCodePoint(u8"÷", 0xF7); // DIVISION SIGN
+ ExpectValidCodePoint(u8"ÿ", 0xFF); // LATIN SMALL LETTER Y WITH DIAERESIS
+ ExpectValidCodePoint(u8"Ā", 0x100); // LATIN CAPITAL LETTER A WITH MACRON
+ ExpectValidCodePoint(u8"IJ", 0x132); // LATIN CAPITAL LETTER LIGATURE IJ
+ ExpectValidCodePoint(u8"ͼ", 0x37C); // GREEK SMALL DOTTED LUNATE SIGMA SYMBOL
+ ExpectValidCodePoint(u8"Ӝ",
+                       0x4DC);  // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
+ ExpectValidCodePoint(u8"۩", 0x6E9); // ARABIC PLACE OF SAJDAH
+ ExpectValidCodePoint(u8"߿", 0x7FF); // <not assigned>
+
+ // Length three.
+
+ ExpectValidCodePoint(u8"ࠀ", 0x800); // SAMARITAN LETTER ALAF
+ ExpectValidCodePoint(u8"ࡁ", 0x841); // MANDAIC LETTER AB
+ ExpectValidCodePoint(u8"ࣿ", 0x8FF); // ARABIC MARK SIDEWAYS NOON GHUNNA
+ ExpectValidCodePoint(u8"ஆ", 0xB86); // TAMIL LETTER AA
+ ExpectValidCodePoint(u8"༃",
+ 0xF03); // TIBETAN MARK GTER YIG MGO -UM GTER TSHEG MA
+ ExpectValidCodePoint(
+ u8"࿉",
+ 0xFC9); // TIBETAN SYMBOL NOR BU (but on my system it really looks like
+ // SOFT-SERVE ICE CREAM FROM ABOVE THE PLANE if you ask me)
+ ExpectValidCodePoint(u8"ဪ", 0x102A); // MYANMAR LETTER AU
+ ExpectValidCodePoint(u8"ᚏ", 0x168F); // OGHAM LETTER RUIS
+ ExpectValidCodePoint("\xE2\x80\xA8", 0x2028); // (the hated) LINE SEPARATOR
+ ExpectValidCodePoint("\xE2\x80\xA9",
+ 0x2029); // (the hated) PARAGRAPH SEPARATOR
+ ExpectValidCodePoint(u8"☬", 0x262C); // ADI SHAKTI
+ ExpectValidCodePoint(u8"㊮", 0x32AE); // CIRCLED IDEOGRAPH RESOURCE
+ ExpectValidCodePoint(u8"㏖", 0x33D6); // SQUARE MOL
+ ExpectValidCodePoint(u8"ꔄ", 0xA504); // VAI SYLLABLE WEEN
+ ExpectValidCodePoint(u8"ퟕ", 0xD7D5); // HANGUL JONGSEONG RIEUL-SSANGKIYEOK
+ ExpectValidCodePoint(u8"퟿", 0xD7FF); // <not assigned>
+ ExpectValidCodePoint(u8"", 0xE000); // <Private Use>
+ ExpectValidCodePoint(u8"鱗", 0xF9F2); // CJK COMPATIBILITY IDEOGRAPH-F9F
+  ExpectValidCodePoint(
+      u8"﷽", 0xFDFD);  // ARABIC LIGATURE BISMILLAH AR-RAHMAN AR-RAHEEM
+ ExpectValidCodePoint(u8"￿", 0xFFFF); // <not assigned>
+
+ // Length four.
+ ExpectValidCodePoint(u8"𐀀", 0x10000); // LINEAR B SYLLABLE B008 A
+ ExpectValidCodePoint(u8"𔑀", 0x14440); // ANATOLIAN HIEROGLYPH A058
+ ExpectValidCodePoint(u8"𝛗", 0x1D6D7); // MATHEMATICAL BOLD SMALL PHI
+ ExpectValidCodePoint(u8"💩", 0x1F4A9); // PILE OF POO
+ ExpectValidCodePoint(u8"🔫", 0x1F52B); // PISTOL
+ ExpectValidCodePoint(u8"🥌", 0x1F94C); // CURLING STONE
+ ExpectValidCodePoint(u8"🥏", 0x1F94F); // FLYING DISC
+ ExpectValidCodePoint(u8"𠍆", 0x20346); // CJK UNIFIED IDEOGRAPH-20346
+ ExpectValidCodePoint(u8"𡠺", 0x2183A); // CJK UNIFIED IDEOGRAPH-2183A
+ ExpectValidCodePoint(u8"񁟶", 0x417F6); // <not assigned>
+ ExpectValidCodePoint(u8"񾠶", 0x7E836); // <not assigned>
+ ExpectValidCodePoint(u8"󾽧", 0xFEF67); // <Plane 15 Private Use>
+ ExpectValidCodePoint(u8"􏿿", 0x10FFFF); //
+}
+
+static void TestDecodeBadLeadUnit() {
+ // These tests are actually exhaustive.
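+  //
+  // Invalid lead units are 0b1000'0000..0b1011'1111 (valid only as trailing
+  // units) and 0b1111'1000..0b1111'1111 (which would begin sequences longer
+  // than UTF-8's four-unit maximum).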
+
+ unsigned char badLead[] = {'\0', '\0'};
+
+ for (uint8_t lead : IntegerRange(0b1000'0000, 0b1100'0000)) {
+ badLead[0] = lead;
+ ExpectBadLeadUnit(badLead);
+ }
+
+ {
+ uint8_t lead = 0b1111'1000;
+ do {
+ badLead[0] = lead;
+ ExpectBadLeadUnit(badLead);
+ if (lead == 0b1111'1111) {
+ break;
+ }
+
+ lead++;
+ } while (true);
+ }
+}
+
+static void TestTooFewOrBadTrailingUnits() {
+ // Lead unit indicates a two-byte code point.
+
+ char truncatedTwo[] = {'\0', '\0'};
+ char badTrailTwo[] = {'\0', '\0', '\0'};
+
+ for (uint8_t lead : IntegerRange(0b1100'0000, 0b1110'0000)) {
+ truncatedTwo[0] = lead;
+ ExpectNotEnoughUnits(truncatedTwo, 1, 2);
+
+ badTrailTwo[0] = lead;
+ for (uint8_t trail : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailTwo[1] = trail;
+ ExpectBadTrailingUnit(badTrailTwo, 2);
+ }
+
+    // |IntegerRange| excludes its upper bound, so loop with a wider type to
+    // also cover the maximum trailing unit 0b1111'1111.
+    for (uint32_t trail = 0b1100'0000; trail <= 0b1111'1111; trail++) {
+      badTrailTwo[1] = static_cast<char>(trail);
+      ExpectBadTrailingUnit(badTrailTwo, 2);
+    }
+ }
+
+ // Lead unit indicates a three-byte code point.
+
+ char truncatedThreeOne[] = {'\0', '\0'};
+ char truncatedThreeTwo[] = {'\0', '\0', '\0'};
+ unsigned char badTrailThree[] = {'\0', '\0', '\0', '\0'};
+
+ for (uint8_t lead : IntegerRange(0b1110'0000, 0b1111'0000)) {
+ truncatedThreeOne[0] = lead;
+ ExpectNotEnoughUnits(truncatedThreeOne, 1, 3);
+
+ truncatedThreeTwo[0] = lead;
+ ExpectNotEnoughUnits(truncatedThreeTwo, 2, 3);
+
+ badTrailThree[0] = lead;
+ badTrailThree[2] = 0b1011'1111; // make valid to test overreads
+ for (uint8_t mid : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailThree[1] = mid;
+ ExpectBadTrailingUnit(badTrailThree, 2);
+ }
+ {
+ uint8_t mid = 0b1100'0000;
+ do {
+ badTrailThree[1] = mid;
+ ExpectBadTrailingUnit(badTrailThree, 2);
+ if (mid == 0b1111'1111) {
+ break;
+ }
+
+ mid++;
+ } while (true);
+ }
+
+ badTrailThree[1] = 0b1011'1111;
+ for (uint8_t last : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailThree[2] = last;
+ ExpectBadTrailingUnit(badTrailThree, 3);
+ }
+ {
+ uint8_t last = 0b1100'0000;
+ do {
+ badTrailThree[2] = last;
+ ExpectBadTrailingUnit(badTrailThree, 3);
+ if (last == 0b1111'1111) {
+ break;
+ }
+
+ last++;
+ } while (true);
+ }
+ }
+
+ // Lead unit indicates a four-byte code point.
+
+ char truncatedFourOne[] = {'\0', '\0'};
+ char truncatedFourTwo[] = {'\0', '\0', '\0'};
+ char truncatedFourThree[] = {'\0', '\0', '\0', '\0'};
+
+ unsigned char badTrailFour[] = {'\0', '\0', '\0', '\0', '\0'};
+
+ for (uint8_t lead : IntegerRange(0b1111'0000, 0b1111'1000)) {
+ truncatedFourOne[0] = lead;
+ ExpectNotEnoughUnits(truncatedFourOne, 1, 4);
+
+ truncatedFourTwo[0] = lead;
+ ExpectNotEnoughUnits(truncatedFourTwo, 2, 4);
+
+ truncatedFourThree[0] = lead;
+ ExpectNotEnoughUnits(truncatedFourThree, 3, 4);
+
+ badTrailFour[0] = lead;
+ badTrailFour[2] = badTrailFour[3] = 0b1011'1111; // test for overreads
+ for (uint8_t second : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailFour[1] = second;
+ ExpectBadTrailingUnit(badTrailFour, 2);
+ }
+ {
+ uint8_t second = 0b1100'0000;
+ do {
+ badTrailFour[1] = second;
+ ExpectBadTrailingUnit(badTrailFour, 2);
+ if (second == 0b1111'1111) {
+ break;
+ }
+
+ second++;
+ } while (true);
+ }
+
+ badTrailFour[1] = badTrailFour[3] = 0b1011'1111; // test for overreads
+ for (uint8_t third : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailFour[2] = third;
+ ExpectBadTrailingUnit(badTrailFour, 3);
+ }
+ {
+ uint8_t third = 0b1100'0000;
+ do {
+ badTrailFour[2] = third;
+ ExpectBadTrailingUnit(badTrailFour, 3);
+ if (third == 0b1111'1111) {
+ break;
+ }
+
+ third++;
+ } while (true);
+ }
+
+ badTrailFour[2] = 0b1011'1111;
+ for (uint8_t fourth : IntegerRange(0b0000'0000, 0b1000'0000)) {
+ badTrailFour[3] = fourth;
+ ExpectBadTrailingUnit(badTrailFour, 4);
+ }
+ {
+ uint8_t fourth = 0b1100'0000;
+ do {
+ badTrailFour[3] = fourth;
+ ExpectBadTrailingUnit(badTrailFour, 4);
+ if (fourth == 0b1111'1111) {
+ break;
+ }
+
+ fourth++;
+ } while (true);
+ }
+ }
+}
+
+static void TestBadSurrogate() {
+ // These tests are actually exhaustive.
+
+ ExpectValidCodePoint("\xED\x9F\xBF", 0xD7FF); // last before surrogates
+ ExpectValidCodePoint("\xEE\x80\x80", 0xE000); // first after surrogates
+
+ // First invalid surrogate encoding is { 0xED, 0xA0, 0x80 }. Last invalid
+ // surrogate encoding is { 0xED, 0xBF, 0xBF }.
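+  //
+  // For example, U+D800 is 0b1101'1000'0000'0000; filling the three-byte
+  // pattern 0b1110'xxxx 0b10xx'xxxx 0b10xx'xxxx gives 0b1110'1101
+  // 0b1010'0000 0b1000'0000, i.e. { 0xED, 0xA0, 0x80 }.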
+
+ char badSurrogate[] = {'\xED', '\0', '\0', '\0'};
+
+ for (char32_t c = 0xD800; c < 0xE000; c++) {
+ badSurrogate[1] = 0b1000'0000 ^ ((c & 0b1111'1100'0000) >> 6);
+ badSurrogate[2] = 0b1000'0000 ^ ((c & 0b0000'0011'1111));
+
+ ExpectBadCodePoint(badSurrogate, c, 3);
+ }
+}
+
+static void TestBadTooBig() {
+ // These tests are actually exhaustive.
+
+ ExpectValidCodePoint("\xF4\x8F\xBF\xBF", 0x10'FFFF); // last code point
+
+ // Four-byte code points are
+ //
+ // 0b1111'0xxx 0b10xx'xxxx 0b10xx'xxxx 0b10xx'xxxx
+ //
+  // with 3 + 6 + 6 + 6 == 21 unconstrained bits, so the structurally
+  // representable limit (exclusive) is 2**21 == 2097152.
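+  //
+  // For example, the first too-big value U+110000 is
+  // 0b1'0001'0000'0000'0000'0000, which fills that pattern as 0b1111'0100
+  // 0b1001'0000 0b1000'0000 0b1000'0000, i.e. { 0xF4, 0x90, 0x80, 0x80 }.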
+
+ char tooLargeCodePoint[] = {'\0', '\0', '\0', '\0', '\0'};
+
+ for (char32_t c = 0x11'0000; c < (1 << 21); c++) {
+ tooLargeCodePoint[0] =
+ 0b1111'0000 ^ ((c & 0b1'1100'0000'0000'0000'0000) >> 18);
+ tooLargeCodePoint[1] =
+ 0b1000'0000 ^ ((c & 0b0'0011'1111'0000'0000'0000) >> 12);
+ tooLargeCodePoint[2] =
+ 0b1000'0000 ^ ((c & 0b0'0000'0000'1111'1100'0000) >> 6);
+ tooLargeCodePoint[3] = 0b1000'0000 ^ ((c & 0b0'0000'0000'0000'0011'1111));
+
+ ExpectBadCodePoint(tooLargeCodePoint, c, 4);
+ }
+}
+
+static void TestBadCodePoint() {
+ TestBadSurrogate();
+ TestBadTooBig();
+}
+
+static void TestNotShortestForm() {
+ {
+ // One-byte in two-byte.
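+    // (For example, U+0041 'A' becomes the overlong pair { 0xC1, 0x81 }.)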
+
+ char oneInTwo[] = {'\0', '\0', '\0'};
+
+ for (char32_t c = '\0'; c < 0x80; c++) {
+ oneInTwo[0] = 0b1100'0000 ^ ((c & 0b0111'1100'0000) >> 6);
+ oneInTwo[1] = 0b1000'0000 ^ ((c & 0b0000'0011'1111));
+
+ ExpectNotShortestForm(oneInTwo, c, 2);
+ }
+
+ // One-byte in three-byte.
+
+ char oneInThree[] = {'\0', '\0', '\0', '\0'};
+
+ for (char32_t c = '\0'; c < 0x80; c++) {
+ oneInThree[0] = 0b1110'0000 ^ ((c & 0b1111'0000'0000'0000) >> 12);
+ oneInThree[1] = 0b1000'0000 ^ ((c & 0b0000'1111'1100'0000) >> 6);
+ oneInThree[2] = 0b1000'0000 ^ ((c & 0b0000'0000'0011'1111));
+
+ ExpectNotShortestForm(oneInThree, c, 3);
+ }
+
+ // One-byte in four-byte.
+
+ char oneInFour[] = {'\0', '\0', '\0', '\0', '\0'};
+
+ for (char32_t c = '\0'; c < 0x80; c++) {
+ oneInFour[0] = 0b1111'0000 ^ ((c & 0b1'1100'0000'0000'0000'0000) >> 18);
+ oneInFour[1] = 0b1000'0000 ^ ((c & 0b0'0011'1111'0000'0000'0000) >> 12);
+ oneInFour[2] = 0b1000'0000 ^ ((c & 0b0'0000'0000'1111'1100'0000) >> 6);
+ oneInFour[3] = 0b1000'0000 ^ ((c & 0b0'0000'0000'0000'0011'1111));
+
+ ExpectNotShortestForm(oneInFour, c, 4);
+ }
+ }
+
+ {
+ // Two-byte in three-byte.
+
+ char twoInThree[] = {'\0', '\0', '\0', '\0'};
+
+ for (char32_t c = 0x80; c < 0x800; c++) {
+ twoInThree[0] = 0b1110'0000 ^ ((c & 0b1111'0000'0000'0000) >> 12);
+ twoInThree[1] = 0b1000'0000 ^ ((c & 0b0000'1111'1100'0000) >> 6);
+ twoInThree[2] = 0b1000'0000 ^ ((c & 0b0000'0000'0011'1111));
+
+ ExpectNotShortestForm(twoInThree, c, 3);
+ }
+
+ // Two-byte in four-byte.
+
+ char twoInFour[] = {'\0', '\0', '\0', '\0', '\0'};
+
+ for (char32_t c = 0x80; c < 0x800; c++) {
+ twoInFour[0] = 0b1111'0000 ^ ((c & 0b1'1100'0000'0000'0000'0000) >> 18);
+ twoInFour[1] = 0b1000'0000 ^ ((c & 0b0'0011'1111'0000'0000'0000) >> 12);
+ twoInFour[2] = 0b1000'0000 ^ ((c & 0b0'0000'0000'1111'1100'0000) >> 6);
+ twoInFour[3] = 0b1000'0000 ^ ((c & 0b0'0000'0000'0000'0011'1111));
+
+ ExpectNotShortestForm(twoInFour, c, 4);
+ }
+ }
+
+ {
+ // Three-byte in four-byte.
+
+ char threeInFour[] = {'\0', '\0', '\0', '\0', '\0'};
+
+ for (char32_t c = 0x800; c < 0x1'0000; c++) {
+ threeInFour[0] = 0b1111'0000 ^ ((c & 0b1'1100'0000'0000'0000'0000) >> 18);
+ threeInFour[1] = 0b1000'0000 ^ ((c & 0b0'0011'1111'0000'0000'0000) >> 12);
+ threeInFour[2] = 0b1000'0000 ^ ((c & 0b0'0000'0000'1111'1100'0000) >> 6);
+ threeInFour[3] = 0b1000'0000 ^ ((c & 0b0'0000'0000'0000'0011'1111));
+
+ ExpectNotShortestForm(threeInFour, c, 4);
+ }
+ }
+}
+
+static void TestDecodeOneInvalidUtf8CodePoint() {
+ TestDecodeBadLeadUnit();
+ TestTooFewOrBadTrailingUnits();
+ TestBadCodePoint();
+ TestNotShortestForm();
+}
+
+static void TestDecodeOneUtf8CodePoint() {
+ TestDecodeOneValidUtf8CodePoint();
+ TestDecodeOneInvalidUtf8CodePoint();
+}
+
+int main() {
+ TestUtf8Unit();
+ TestIsUtf8();
+ TestDecodeOneUtf8CodePoint();
+ return 0;
+}
+
+#if defined(__clang__) && (__clang_major__ >= 6)
+# pragma clang diagnostic pop
+#endif
diff --git a/mfbt/tests/TestVariant.cpp b/mfbt/tests/TestVariant.cpp
new file mode 100644
index 0000000000..552be723b8
--- /dev/null
+++ b/mfbt/tests/TestVariant.cpp
@@ -0,0 +1,1161 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <type_traits>
+
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Variant.h"
+
+#include <tuple>
+
+using mozilla::MakeUnique;
+using mozilla::UniquePtr;
+using mozilla::Variant;
+
+struct Destroyer {
+ static int destroyedCount;
+ ~Destroyer() { destroyedCount++; }
+};
+
+int Destroyer::destroyedCount = 0;
+
+static void testDetails() {
+ printf("testDetails\n");
+
+ using mozilla::detail::Nth;
+
+ // Test Nth with a list of 1 item.
+ static_assert(std::is_same_v<typename Nth<0, int>::Type, int>,
+ "Nth<0, int>::Type should be int");
+
+ // Test Nth with a list of more than 1 item.
+ static_assert(std::is_same_v<typename Nth<0, int, char>::Type, int>,
+ "Nth<0, int, char>::Type should be int");
+ static_assert(std::is_same_v<typename Nth<1, int, char>::Type, char>,
+ "Nth<1, int, char>::Type should be char");
+
+ using mozilla::detail::SelectVariantType;
+
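+  // SelectVariantType<T, Variants...>::Type is the variant alternative that a
+  // T argument would select, and ::count is how many alternatives could match.
+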
+ // SelectVariantType for zero items (shouldn't happen, but `count` should
+ // still work ok.)
+ static_assert(SelectVariantType<int, char>::count == 0,
+ "SelectVariantType<int, char>::count should be 0");
+
+ // SelectVariantType for 1 type, for all combinations from/to T, const T,
+ // const T&, T&&
+ // - type to type
+ static_assert(std::is_same_v<typename SelectVariantType<int, int>::Type, int>,
+ "SelectVariantType<int, int>::Type should be int");
+ static_assert(SelectVariantType<int, int>::count == 1,
+ "SelectVariantType<int, int>::count should be 1");
+
+ // - type to const type
+ static_assert(std::is_same_v<typename SelectVariantType<int, const int>::Type,
+ const int>,
+ "SelectVariantType<int, const int>::Type should be const int");
+ static_assert(SelectVariantType<int, const int>::count == 1,
+ "SelectVariantType<int, const int>::count should be 1");
+
+ // - type to const type&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, const int&>::Type,
+ const int&>,
+ "SelectVariantType<int, const int&>::Type should be const int&");
+ static_assert(SelectVariantType<int, const int&>::count == 1,
+ "SelectVariantType<int, const int&>::count should be 1");
+
+ // - type to type&&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int&&>::Type, int&&>,
+ "SelectVariantType<int, int&&>::Type should be int&&");
+ static_assert(SelectVariantType<int, int&&>::count == 1,
+ "SelectVariantType<int, int&&>::count should be 1");
+
+ // - const type to type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int, int>::Type, int>,
+ "SelectVariantType<const int, int>::Type should be int");
+ static_assert(SelectVariantType<const int, int>::count == 1,
+ "SelectVariantType<const int, int>::count should be 1");
+
+ // - const type to const type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int, const int>::Type,
+ const int>,
+ "SelectVariantType<const int, const int>::Type should be const int");
+ static_assert(SelectVariantType<const int, const int>::count == 1,
+ "SelectVariantType<const int, const int>::count should be 1");
+
+ // - const type to const type&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int, const int&>::Type,
+ const int&>,
+ "SelectVariantType<const int, const int&>::Type should be const int&");
+ static_assert(SelectVariantType<const int, const int&>::count == 1,
+ "SelectVariantType<const int, const int&>::count should be 1");
+
+ // - const type to type&&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int, int&&>::Type, int&&>,
+ "SelectVariantType<const int, int&&>::Type should be int&&");
+ static_assert(SelectVariantType<const int, int&&>::count == 1,
+ "SelectVariantType<const int, int&&>::count should be 1");
+
+ // - const type& to type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int&, int>::Type, int>,
+ "SelectVariantType<const int&, int>::Type should be int");
+ static_assert(SelectVariantType<const int&, int>::count == 1,
+ "SelectVariantType<const int&, int>::count should be 1");
+
+ // - const type& to const type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int&, const int>::Type,
+ const int>,
+ "SelectVariantType<const int&, const int>::Type should be const int");
+ static_assert(SelectVariantType<const int&, const int>::count == 1,
+ "SelectVariantType<const int&, const int>::count should be 1");
+
+ // - const type& to const type&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int&, const int&>::Type,
+ const int&>,
+ "SelectVariantType<const int&, const int&>::Type should be const int&");
+ static_assert(SelectVariantType<const int&, const int&>::count == 1,
+ "SelectVariantType<const int&, const int&>::count should be 1");
+
+ // - const type& to type&&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<const int&, int&&>::Type,
+ int&&>,
+ "SelectVariantType<const int&, int&&>::Type should be int&&");
+ static_assert(SelectVariantType<const int&, int&&>::count == 1,
+ "SelectVariantType<const int&, int&&>::count should be 1");
+
+ // - type&& to type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int&&, int>::Type, int>,
+ "SelectVariantType<int&&, int>::Type should be int");
+ static_assert(SelectVariantType<int&&, int>::count == 1,
+ "SelectVariantType<int&&, int>::count should be 1");
+
+ // - type&& to const type
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int&&, const int>::Type,
+ const int>,
+ "SelectVariantType<int&&, const int>::Type should be const int");
+ static_assert(SelectVariantType<int&&, const int>::count == 1,
+ "SelectVariantType<int&&, const int>::count should be 1");
+
+ // - type&& to const type&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int&&, const int&>::Type,
+ const int&>,
+ "SelectVariantType<int&&, const int&>::Type should be const int&");
+ static_assert(SelectVariantType<int&&, const int&>::count == 1,
+ "SelectVariantType<int&&, const int&>::count should be 1");
+
+ // - type&& to type&&
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int&&, int&&>::Type, int&&>,
+ "SelectVariantType<int&&, int&&>::Type should be int&&");
+ static_assert(SelectVariantType<int&&, int&&>::count == 1,
+ "SelectVariantType<int&&, int&&>::count should be 1");
+
+ // SelectVariantType for two different types.
+ // (Don't test all combinations, trust that the above tests are sufficient.)
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int, char>::Type, int>,
+ "SelectVariantType<int, int, char>::Type should be int");
+ static_assert(SelectVariantType<int, int, char>::count == 1,
+ "SelectVariantType<int, int, char>::count should be 1");
+ static_assert(
+ std::is_same_v<typename SelectVariantType<char, int, char>::Type, char>,
+ "SelectVariantType<char, int, char>::Type should be char");
+ static_assert(SelectVariantType<char, int, char>::count == 1,
+ "SelectVariantType<char, int, char>::count should be 1");
+
+ // SelectVariantType for two identical types.
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int, int>::Type, int>,
+ "SelectVariantType<int, int, int>::Type should be int");
+ static_assert(SelectVariantType<int, int, int>::count == 2,
+ "SelectVariantType<int, int, int>::count should be 2");
+
+ // SelectVariantType for two identical types, with others around.
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, char, int, int>::Type,
+ int>,
+ "SelectVariantType<int, char, int, int>::Type should be int");
+ static_assert(SelectVariantType<int, char, int, int>::count == 2,
+ "SelectVariantType<int, char, int, int>::count should be 2");
+
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int, char, int>::Type,
+ int>,
+ "SelectVariantType<int, int, char, int>::Type should be int");
+ static_assert(SelectVariantType<int, int, char, int>::count == 2,
+ "SelectVariantType<int, int, char, int>::count should be 2");
+
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int, int, char>::Type,
+ int>,
+ "SelectVariantType<int, int, int, char>::Type should be int");
+ static_assert(SelectVariantType<int, int, int, char>::count == 2,
+ "SelectVariantType<int, int, int, char>::count should be 2");
+
+ static_assert(
+ std::is_same_v<
+ typename SelectVariantType<int, char, int, char, int, char>::Type,
+ int>,
+ "SelectVariantType<int, char, int, char, int, char>::Type should be int");
+ static_assert(
+ SelectVariantType<int, char, int, char, int, char>::count == 2,
+ "SelectVariantType<int, char, int, char, int, char>::count should be 2");
+
+ // SelectVariantType for two identically-selectable types (first one wins!).
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, int, const int>::Type,
+ int>,
+ "SelectVariantType<int, int, const int>::Type should be int");
+ static_assert(SelectVariantType<int, int, const int>::count == 2,
+ "SelectVariantType<int, int, const int>::count should be 2");
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, const int, int>::Type,
+ const int>,
+ "SelectVariantType<int, const int, int>::Type should be const int");
+ static_assert(SelectVariantType<int, const int, int>::count == 2,
+ "SelectVariantType<int, const int, int>::count should be 2");
+ static_assert(
+ std::is_same_v<typename SelectVariantType<int, const int, int&&>::Type,
+ const int>,
+ "SelectVariantType<int, const int, int&&>::Type should be const int");
+ static_assert(SelectVariantType<int, const int, int&&>::count == 2,
+ "SelectVariantType<int, const int, int&&>::count should be 2");
+}
+
+static void testSimple() {
+ printf("testSimple\n");
+ using V = Variant<uint32_t, uint64_t>;
+
+ // Non-const lvalue.
+ V v(uint64_t(1));
+ MOZ_RELEASE_ASSERT(v.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!v.is<uint32_t>());
+ MOZ_RELEASE_ASSERT(v.as<uint64_t>() == 1);
+
+ MOZ_RELEASE_ASSERT(v.is<1>());
+ MOZ_RELEASE_ASSERT(!v.is<0>());
+ static_assert(std::is_same_v<decltype(v.as<1>()), uint64_t&>,
+ "v.as<1>() should return a uint64_t&");
+ MOZ_RELEASE_ASSERT(v.as<1>() == 1);
+
+ // Const lvalue.
+ const V& cv = v;
+ MOZ_RELEASE_ASSERT(cv.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!cv.is<uint32_t>());
+ MOZ_RELEASE_ASSERT(cv.as<uint64_t>() == 1);
+
+ MOZ_RELEASE_ASSERT(cv.is<1>());
+ MOZ_RELEASE_ASSERT(!cv.is<0>());
+ static_assert(std::is_same_v<decltype(cv.as<1>()), const uint64_t&>,
+ "cv.as<1>() should return a const uint64_t&");
+ MOZ_RELEASE_ASSERT(cv.as<1>() == 1);
+
+ // Non-const rvalue, using a function to create a temporary.
+ auto MakeV = []() { return V(uint64_t(1)); };
+ MOZ_RELEASE_ASSERT(MakeV().is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!MakeV().is<uint32_t>());
+ MOZ_RELEASE_ASSERT(MakeV().as<uint64_t>() == 1);
+
+ MOZ_RELEASE_ASSERT(MakeV().is<1>());
+ MOZ_RELEASE_ASSERT(!MakeV().is<0>());
+ static_assert(std::is_same_v<decltype(MakeV().as<1>()), uint64_t&&>,
+ "MakeV().as<1>() should return a uint64_t&&");
+ MOZ_RELEASE_ASSERT(MakeV().as<1>() == 1);
+
+ // Const rvalue, using a function to create a temporary.
+ auto MakeCV = []() -> const V { return V(uint64_t(1)); };
+ MOZ_RELEASE_ASSERT(MakeCV().is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!MakeCV().is<uint32_t>());
+ MOZ_RELEASE_ASSERT(MakeCV().as<uint64_t>() == 1);
+
+ MOZ_RELEASE_ASSERT(MakeCV().is<1>());
+ MOZ_RELEASE_ASSERT(!MakeCV().is<0>());
+ static_assert(std::is_same_v<decltype(MakeCV().as<1>()), const uint64_t&&>,
+ "MakeCV().as<1>() should return a const uint64_t&&");
+ MOZ_RELEASE_ASSERT(MakeCV().as<1>() == 1);
+}
+
+static void testDuplicate() {
+ printf("testDuplicate\n");
+ Variant<uint32_t, uint64_t, uint32_t> v(uint64_t(1));
+ MOZ_RELEASE_ASSERT(v.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(v.as<uint64_t>() == 1);
+ // Note: uint32_t is not unique, so `v.is<uint32_t>()` is not allowed.
+
+ MOZ_RELEASE_ASSERT(v.is<1>());
+ MOZ_RELEASE_ASSERT(!v.is<0>());
+ MOZ_RELEASE_ASSERT(!v.is<2>());
+  static_assert(std::is_same_v<decltype(v.as<0>()), uint32_t&>,
+                "as<0>() should return a uint32_t");
+  static_assert(std::is_same_v<decltype(v.as<1>()), uint64_t&>,
+                "as<1>() should return a uint64_t");
+  static_assert(std::is_same_v<decltype(v.as<2>()), uint32_t&>,
+                "as<2>() should return a uint32_t");
+ MOZ_RELEASE_ASSERT(v.as<1>() == 1);
+ MOZ_RELEASE_ASSERT(v.extract<1>() == 1);
+}
+
+static void testConstructionWithVariantType() {
+ Variant<uint32_t, uint64_t, uint32_t> v(mozilla::VariantType<uint64_t>{}, 3);
+ MOZ_RELEASE_ASSERT(v.is<uint64_t>());
+ // MOZ_RELEASE_ASSERT(!v.is<uint32_t>()); // uint32_t is not unique!
+ MOZ_RELEASE_ASSERT(v.as<uint64_t>() == 3);
+}
+
+static void testConstructionWithVariantIndex() {
+ Variant<uint32_t, uint64_t, uint32_t> v(mozilla::VariantIndex<2>{}, 2);
+ MOZ_RELEASE_ASSERT(!v.is<uint64_t>());
+ // Note: uint32_t is not unique, so `v.is<uint32_t>()` is not allowed.
+
+ MOZ_RELEASE_ASSERT(!v.is<1>());
+ MOZ_RELEASE_ASSERT(!v.is<0>());
+ MOZ_RELEASE_ASSERT(v.is<2>());
+ MOZ_RELEASE_ASSERT(v.as<2>() == 2);
+ MOZ_RELEASE_ASSERT(v.extract<2>() == 2);
+}
+
+static void testEmplaceWithType() {
+ printf("testEmplaceWithType\n");
+ Variant<uint32_t, uint64_t, uint32_t> v1(mozilla::VariantIndex<0>{}, 0);
+ v1.emplace<uint64_t>(3);
+ MOZ_RELEASE_ASSERT(v1.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(v1.as<uint64_t>() == 3);
+
+ Variant<UniquePtr<int>, char> v2('a');
+ v2.emplace<UniquePtr<int>>();
+ MOZ_RELEASE_ASSERT(v2.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(!v2.as<UniquePtr<int>>().get());
+
+ Variant<UniquePtr<int>, char> v3('a');
+ v3.emplace<UniquePtr<int>>(MakeUnique<int>(4));
+ MOZ_RELEASE_ASSERT(v3.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(*v3.as<UniquePtr<int>>().get() == 4);
+}
+
+static void testEmplaceWithIndex() {
+ printf("testEmplaceWithIndex\n");
+ Variant<uint32_t, uint64_t, uint32_t> v1(mozilla::VariantIndex<1>{}, 0);
+ v1.emplace<2>(2);
+ MOZ_RELEASE_ASSERT(!v1.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!v1.is<1>());
+ MOZ_RELEASE_ASSERT(!v1.is<0>());
+ MOZ_RELEASE_ASSERT(v1.is<2>());
+ MOZ_RELEASE_ASSERT(v1.as<2>() == 2);
+ MOZ_RELEASE_ASSERT(v1.extract<2>() == 2);
+
+ Variant<UniquePtr<int>, char> v2('a');
+ v2.emplace<0>();
+ MOZ_RELEASE_ASSERT(v2.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(!v2.is<1>());
+ MOZ_RELEASE_ASSERT(v2.is<0>());
+ MOZ_RELEASE_ASSERT(!v2.as<0>().get());
+ MOZ_RELEASE_ASSERT(!v2.extract<0>().get());
+
+ Variant<UniquePtr<int>, char> v3('a');
+ v3.emplace<0>(MakeUnique<int>(4));
+ MOZ_RELEASE_ASSERT(v3.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(!v3.is<1>());
+ MOZ_RELEASE_ASSERT(v3.is<0>());
+ MOZ_RELEASE_ASSERT(*v3.as<0>().get() == 4);
+ MOZ_RELEASE_ASSERT(*v3.extract<0>().get() == 4);
+}
+
+static void testCopy() {
+ printf("testCopy\n");
+ Variant<uint32_t, uint64_t> v1(uint64_t(1));
+ Variant<uint32_t, uint64_t> v2(v1);
+ MOZ_RELEASE_ASSERT(v2.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(!v2.is<uint32_t>());
+ MOZ_RELEASE_ASSERT(v2.as<uint64_t>() == 1);
+
+ Variant<uint32_t, uint64_t> v3(uint32_t(10));
+ v3 = v2;
+ MOZ_RELEASE_ASSERT(v3.is<uint64_t>());
+ MOZ_RELEASE_ASSERT(v3.as<uint64_t>() == 1);
+}
+
+static void testMove() {
+ printf("testMove\n");
+ Variant<UniquePtr<int>, char> v1(MakeUnique<int>(5));
+ Variant<UniquePtr<int>, char> v2(std::move(v1));
+
+ MOZ_RELEASE_ASSERT(v2.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(*v2.as<UniquePtr<int>>() == 5);
+
+ MOZ_RELEASE_ASSERT(v1.is<UniquePtr<int>>());
+ MOZ_RELEASE_ASSERT(v1.as<UniquePtr<int>>() == nullptr);
+
+ Destroyer::destroyedCount = 0;
+ {
+ Variant<char, UniquePtr<Destroyer>> v3(MakeUnique<Destroyer>());
+ Variant<char, UniquePtr<Destroyer>> v4(std::move(v3));
+
+ Variant<char, UniquePtr<Destroyer>> v5('a');
+ v5 = std::move(v4);
+
+ auto ptr = v5.extract<UniquePtr<Destroyer>>();
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount == 0);
+ }
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount == 1);
+}
+
+static void testDestructor() {
+ printf("testDestructor\n");
+ Destroyer::destroyedCount = 0;
+
+ {
+ Destroyer d;
+
+ {
+ Variant<char, UniquePtr<char[]>, Destroyer> v1(d);
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount ==
+ 0); // None destroyed yet.
+ }
+
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount ==
+ 1); // v1's copy of d is destroyed.
+
+ {
+ Variant<char, UniquePtr<char[]>, Destroyer> v2(
+ mozilla::VariantIndex<2>{});
+ v2.emplace<Destroyer>(d);
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount ==
+ 2); // v2's initial value is destroyed.
+ }
+
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount ==
+ 3); // v2's second value is destroyed.
+ }
+
+ MOZ_RELEASE_ASSERT(Destroyer::destroyedCount == 4); // d is destroyed.
+}
+
+static void testEquality() {
+ printf("testEquality\n");
+ using V = Variant<char, int>;
+
+ V v0('a');
+ V v1('b');
+ V v2('b');
+ V v3(42);
+ V v4(27);
+ V v5(27);
+ V v6(int('b'));
+
+ MOZ_RELEASE_ASSERT(v0 != v1);
+ MOZ_RELEASE_ASSERT(v1 == v2);
+ MOZ_RELEASE_ASSERT(v2 != v3);
+ MOZ_RELEASE_ASSERT(v3 != v4);
+ MOZ_RELEASE_ASSERT(v4 == v5);
+ MOZ_RELEASE_ASSERT(v1 != v6);
+
+ MOZ_RELEASE_ASSERT(v0 == v0);
+ MOZ_RELEASE_ASSERT(v1 == v1);
+ MOZ_RELEASE_ASSERT(v2 == v2);
+ MOZ_RELEASE_ASSERT(v3 == v3);
+ MOZ_RELEASE_ASSERT(v4 == v4);
+ MOZ_RELEASE_ASSERT(v5 == v5);
+ MOZ_RELEASE_ASSERT(v6 == v6);
+}
+
+// Matcher that returns a description of how its call-operator was invoked.
+struct Describer {
+ enum class ParameterSize { NA, U8, U32, U64 };
+ enum class ParameterQualifier {
+ NA,
+ ParamLREF,
+ ParamCLREF,
+ ParamRREF,
+ ParamCRREF
+ };
+ enum class ThisQualifier { NA, ThisLREF, ThisCLREF, ThisRREF, ThisCRREF };
+
+ using Result =
+ std::tuple<ParameterSize, ParameterQualifier, ThisQualifier, uint64_t>;
+
+#define RESULT(SIZE, PQUAL, TQUAL, VALUE) \
+ Describer::Result(Describer::ParameterSize::SIZE, \
+ Describer::ParameterQualifier::PQUAL, \
+ Describer::ThisQualifier::TQUAL, VALUE)
+
+#define CALL(TYPE, SIZE, PQUAL, TREF, TQUAL) \
+ Result operator()(TYPE aValue) TREF { \
+ return RESULT(SIZE, PQUAL, TQUAL, aValue); \
+ }
+
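+  // For example, CALL(uint8_t&, U8, ParamLREF, &, ThisLREF) expands to:
+  //   Result operator()(uint8_t& aValue) & {
+  //     return RESULT(U8, ParamLREF, ThisLREF, aValue);
+  //   }
+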
+ // All combinations of possible call operators:
+ // Every line, the parameter integer type changes.
+ // Every 3 lines, the parameter type changes constness.
+ // Every 6 lines, the parameter changes reference l/r-valueness.
+ // Every 12 lines, the member function qualifier changes constness.
+ // After 24 lines, the member function qualifier changes ref l/r-valueness.
+ CALL(uint8_t&, U8, ParamLREF, &, ThisLREF)
+ CALL(uint32_t&, U32, ParamLREF, &, ThisLREF)
+ CALL(uint64_t&, U64, ParamLREF, &, ThisLREF)
+
+ CALL(const uint8_t&, U8, ParamCLREF, &, ThisLREF)
+ CALL(const uint32_t&, U32, ParamCLREF, &, ThisLREF)
+ CALL(const uint64_t&, U64, ParamCLREF, &, ThisLREF)
+
+ CALL(uint8_t&&, U8, ParamRREF, &, ThisLREF)
+ CALL(uint32_t&&, U32, ParamRREF, &, ThisLREF)
+ CALL(uint64_t&&, U64, ParamRREF, &, ThisLREF)
+
+ CALL(const uint8_t&&, U8, ParamCRREF, &, ThisLREF)
+ CALL(const uint32_t&&, U32, ParamCRREF, &, ThisLREF)
+ CALL(const uint64_t&&, U64, ParamCRREF, &, ThisLREF)
+
+ CALL(uint8_t&, U8, ParamLREF, const&, ThisCLREF)
+ CALL(uint32_t&, U32, ParamLREF, const&, ThisCLREF)
+ CALL(uint64_t&, U64, ParamLREF, const&, ThisCLREF)
+
+ CALL(const uint8_t&, U8, ParamCLREF, const&, ThisCLREF)
+ CALL(const uint32_t&, U32, ParamCLREF, const&, ThisCLREF)
+ CALL(const uint64_t&, U64, ParamCLREF, const&, ThisCLREF)
+
+ CALL(uint8_t&&, U8, ParamRREF, const&, ThisCLREF)
+ CALL(uint32_t&&, U32, ParamRREF, const&, ThisCLREF)
+ CALL(uint64_t&&, U64, ParamRREF, const&, ThisCLREF)
+
+ CALL(const uint8_t&&, U8, ParamCRREF, const&, ThisCLREF)
+ CALL(const uint32_t&&, U32, ParamCRREF, const&, ThisCLREF)
+ CALL(const uint64_t&&, U64, ParamCRREF, const&, ThisCLREF)
+
+ CALL(uint8_t&, U8, ParamLREF, &&, ThisRREF)
+ CALL(uint32_t&, U32, ParamLREF, &&, ThisRREF)
+ CALL(uint64_t&, U64, ParamLREF, &&, ThisRREF)
+
+ CALL(const uint8_t&, U8, ParamCLREF, &&, ThisRREF)
+ CALL(const uint32_t&, U32, ParamCLREF, &&, ThisRREF)
+ CALL(const uint64_t&, U64, ParamCLREF, &&, ThisRREF)
+
+ CALL(uint8_t&&, U8, ParamRREF, &&, ThisRREF)
+ CALL(uint32_t&&, U32, ParamRREF, &&, ThisRREF)
+ CALL(uint64_t&&, U64, ParamRREF, &&, ThisRREF)
+
+ CALL(const uint8_t&&, U8, ParamCRREF, &&, ThisRREF)
+ CALL(const uint32_t&&, U32, ParamCRREF, &&, ThisRREF)
+ CALL(const uint64_t&&, U64, ParamCRREF, &&, ThisRREF)
+
+ CALL(uint8_t&, U8, ParamLREF, const&&, ThisCRREF)
+ CALL(uint32_t&, U32, ParamLREF, const&&, ThisCRREF)
+ CALL(uint64_t&, U64, ParamLREF, const&&, ThisCRREF)
+
+ CALL(const uint8_t&, U8, ParamCLREF, const&&, ThisCRREF)
+ CALL(const uint32_t&, U32, ParamCLREF, const&&, ThisCRREF)
+ CALL(const uint64_t&, U64, ParamCLREF, const&&, ThisCRREF)
+
+ CALL(uint8_t&&, U8, ParamRREF, const&&, ThisCRREF)
+ CALL(uint32_t&&, U32, ParamRREF, const&&, ThisCRREF)
+ CALL(uint64_t&&, U64, ParamRREF, const&&, ThisCRREF)
+
+ CALL(const uint8_t&&, U8, ParamCRREF, const&&, ThisCRREF)
+ CALL(const uint32_t&&, U32, ParamCRREF, const&&, ThisCRREF)
+ CALL(const uint64_t&&, U64, ParamCRREF, const&&, ThisCRREF)
+
+#undef CALL
+
+ // Catch-all, to verify that there is no call with any type other than the
+ // expected ones above.
+ template <typename Other>
+ Result operator()(const Other&) {
+ MOZ_RELEASE_ASSERT(false);
+ return RESULT(NA, NA, NA, 0);
+ }
+};
+
+static void testMatching() {
+ printf("testMatching\n");
+ using V = Variant<uint8_t, uint32_t, uint64_t>;
+
+ Describer desc;
+ const Describer descConst;
+ auto MakeDescriber = []() { return Describer(); };
+ auto MakeConstDescriber = []() -> const Describer { return Describer(); };
+
+ V v1(uint8_t(1));
+ V v2(uint32_t(2));
+ V v3(uint64_t(3));
+
+ const V& constRef1 = v1;
+ const V& constRef2 = v2;
+ const V& constRef3 = v3;
+
+ // Create a temporary variant by returning a copy of one.
+ auto CopyV = [](const V& aV) { return aV; };
+
+ // Create a temporary variant by returning a const copy of one.
+ auto CopyConstV = [](const V& aV) -> const V { return aV; };
+
+ // All combinations of possible calls:
+ // Every line, the variant integer type changes.
+ // Every 3 lines, the variant type changes constness.
+ // Every 6 lines, the variant changes reference l/r-valueness.
+ // Every 12 lines, the matcher changes constness.
+ // After 24 lines, the matcher changes ref l/r-valueness.
+ MOZ_RELEASE_ASSERT(v1.match(desc) == RESULT(U8, ParamLREF, ThisLREF, 1));
+ MOZ_RELEASE_ASSERT(v2.match(desc) == RESULT(U32, ParamLREF, ThisLREF, 2));
+ MOZ_RELEASE_ASSERT(v3.match(desc) == RESULT(U64, ParamLREF, ThisLREF, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(desc) ==
+ RESULT(U8, ParamCLREF, ThisLREF, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(desc) ==
+ RESULT(U32, ParamCLREF, ThisLREF, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(desc) ==
+ RESULT(U64, ParamCLREF, ThisLREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(desc) ==
+ RESULT(U8, ParamRREF, ThisLREF, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(desc) ==
+ RESULT(U32, ParamRREF, ThisLREF, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(desc) ==
+ RESULT(U64, ParamRREF, ThisLREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(desc) ==
+ RESULT(U8, ParamCRREF, ThisLREF, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(desc) ==
+ RESULT(U32, ParamCRREF, ThisLREF, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(desc) ==
+ RESULT(U64, ParamCRREF, ThisLREF, 3));
+
+ MOZ_RELEASE_ASSERT(v1.match(descConst) ==
+ RESULT(U8, ParamLREF, ThisCLREF, 1));
+ MOZ_RELEASE_ASSERT(v2.match(descConst) ==
+ RESULT(U32, ParamLREF, ThisCLREF, 2));
+ MOZ_RELEASE_ASSERT(v3.match(descConst) ==
+ RESULT(U64, ParamLREF, ThisCLREF, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(descConst) ==
+ RESULT(U8, ParamCLREF, ThisCLREF, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(descConst) ==
+ RESULT(U32, ParamCLREF, ThisCLREF, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(descConst) ==
+ RESULT(U64, ParamCLREF, ThisCLREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(descConst) ==
+ RESULT(U8, ParamRREF, ThisCLREF, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(descConst) ==
+ RESULT(U32, ParamRREF, ThisCLREF, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(descConst) ==
+ RESULT(U64, ParamRREF, ThisCLREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(descConst) ==
+ RESULT(U8, ParamCRREF, ThisCLREF, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(descConst) ==
+ RESULT(U32, ParamCRREF, ThisCLREF, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(descConst) ==
+ RESULT(U64, ParamCRREF, ThisCLREF, 3));
+
+ MOZ_RELEASE_ASSERT(v1.match(MakeDescriber()) ==
+ RESULT(U8, ParamLREF, ThisRREF, 1));
+ MOZ_RELEASE_ASSERT(v2.match(MakeDescriber()) ==
+ RESULT(U32, ParamLREF, ThisRREF, 2));
+ MOZ_RELEASE_ASSERT(v3.match(MakeDescriber()) ==
+ RESULT(U64, ParamLREF, ThisRREF, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(MakeDescriber()) ==
+ RESULT(U8, ParamCLREF, ThisRREF, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(MakeDescriber()) ==
+ RESULT(U32, ParamCLREF, ThisRREF, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(MakeDescriber()) ==
+ RESULT(U64, ParamCLREF, ThisRREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(MakeDescriber()) ==
+ RESULT(U8, ParamRREF, ThisRREF, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(MakeDescriber()) ==
+ RESULT(U32, ParamRREF, ThisRREF, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(MakeDescriber()) ==
+ RESULT(U64, ParamRREF, ThisRREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(MakeDescriber()) ==
+ RESULT(U8, ParamCRREF, ThisRREF, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(MakeDescriber()) ==
+ RESULT(U32, ParamCRREF, ThisRREF, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(MakeDescriber()) ==
+ RESULT(U64, ParamCRREF, ThisRREF, 3));
+
+ MOZ_RELEASE_ASSERT(v1.match(MakeConstDescriber()) ==
+ RESULT(U8, ParamLREF, ThisCRREF, 1));
+ MOZ_RELEASE_ASSERT(v2.match(MakeConstDescriber()) ==
+ RESULT(U32, ParamLREF, ThisCRREF, 2));
+ MOZ_RELEASE_ASSERT(v3.match(MakeConstDescriber()) ==
+ RESULT(U64, ParamLREF, ThisCRREF, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(MakeConstDescriber()) ==
+ RESULT(U8, ParamCLREF, ThisCRREF, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(MakeConstDescriber()) ==
+ RESULT(U32, ParamCLREF, ThisCRREF, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(MakeConstDescriber()) ==
+ RESULT(U64, ParamCLREF, ThisCRREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(MakeConstDescriber()) ==
+ RESULT(U8, ParamRREF, ThisCRREF, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(MakeConstDescriber()) ==
+ RESULT(U32, ParamRREF, ThisCRREF, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(MakeConstDescriber()) ==
+ RESULT(U64, ParamRREF, ThisCRREF, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(MakeConstDescriber()) ==
+ RESULT(U8, ParamCRREF, ThisCRREF, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(MakeConstDescriber()) ==
+ RESULT(U32, ParamCRREF, ThisCRREF, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(MakeConstDescriber()) ==
+ RESULT(U64, ParamCRREF, ThisCRREF, 3));
+}
+
+static void testMatchingLambda() {
+ printf("testMatchingLambda\n");
+ using V = Variant<uint8_t, uint32_t, uint64_t>;
+
+  // Note: Lambdas' call operators are const by default (unless the lambda is
+  // declared `mutable`).
+  // There is no need to test mutable lambdas, nor rvalue lambdas: with only
+  // one choice of call operator in each overload set, there would be no way
+  // to distinguish how each lambda is actually invoked.
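+  // Reminder: `a` is a forwarding reference, so `decltype(a)` is `T&` when
+  // an lvalue was passed and `T&&` for an rvalue (const is preserved either
+  // way); the is_same_v chain below relies on this.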
+ auto desc = [](auto&& a) {
+ if constexpr (std::is_same_v<decltype(a), uint8_t&>) {
+ return RESULT(U8, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&>) {
+ return RESULT(U8, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint8_t&&>) {
+ return RESULT(U8, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&&>) {
+ return RESULT(U8, ParamCRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&>) {
+ return RESULT(U32, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&>) {
+ return RESULT(U32, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&&>) {
+ return RESULT(U32, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&&>) {
+ return RESULT(U32, ParamCRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&>) {
+ return RESULT(U64, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&>) {
+ return RESULT(U64, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&&>) {
+ return RESULT(U64, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&&>) {
+ return RESULT(U64, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+
+ V v1(uint8_t(1));
+ V v2(uint32_t(2));
+ V v3(uint64_t(3));
+
+ const V& constRef1 = v1;
+ const V& constRef2 = v2;
+ const V& constRef3 = v3;
+
+ // Create a temporary variant by returning a copy of one.
+ auto CopyV = [](const V& aV) { return aV; };
+
+ // Create a temporary variant by returning a const copy of one.
+ auto CopyConstV = [](const V& aV) -> const V { return aV; };
+
+ MOZ_RELEASE_ASSERT(v1.match(desc) == RESULT(U8, ParamLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(v2.match(desc) == RESULT(U32, ParamLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(v3.match(desc) == RESULT(U64, ParamLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(desc) == RESULT(U8, ParamCLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(desc) == RESULT(U32, ParamCLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(desc) == RESULT(U64, ParamCLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(desc) == RESULT(U8, ParamRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(desc) == RESULT(U32, ParamRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(desc) == RESULT(U64, ParamRREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(desc) ==
+ RESULT(U8, ParamCRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(desc) ==
+ RESULT(U32, ParamCRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(desc) ==
+ RESULT(U64, ParamCRREF, NA, 3));
+}
+
+static void testMatchingLambdaWithIndex() {
+ printf("testMatchingLambdaWithIndex\n");
+ using V = Variant<uint8_t, uint32_t, uint64_t>;
+
+  // Note: Lambdas' call operators are const by default (unless the lambda is
+  // declared `mutable`).
+  // There is no need to test mutable lambdas, nor rvalue lambdas: with only
+  // one choice of call operator in each overload set, there would be no way
+  // to distinguish how each lambda is actually invoked.
+ auto desc = [](auto aIndex, auto&& a) {
+ static_assert(
+ std::is_same_v<decltype(aIndex), uint_fast8_t>,
+ "Expected a uint_fast8_t index for a Variant with 3 alternatives");
+ if constexpr (std::is_same_v<decltype(a), uint8_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 0);
+ return RESULT(U8, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 0);
+ return RESULT(U8, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint8_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 0);
+ return RESULT(U8, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 0);
+ return RESULT(U8, ParamCRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 1);
+ return RESULT(U32, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 1);
+ return RESULT(U32, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 1);
+ return RESULT(U32, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 1);
+ return RESULT(U32, ParamCRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 2);
+ return RESULT(U64, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 2);
+ return RESULT(U64, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 2);
+ return RESULT(U64, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&&>) {
+ MOZ_RELEASE_ASSERT(aIndex == 2);
+ return RESULT(U64, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+
+ V v1(uint8_t(1));
+ V v2(uint32_t(2));
+ V v3(uint64_t(3));
+
+ const V& constRef1 = v1;
+ const V& constRef2 = v2;
+ const V& constRef3 = v3;
+
+ // Create a temporary variant by returning a copy of one.
+ auto CopyV = [](const V& aV) { return aV; };
+
+ // Create a temporary variant by returning a const copy of one.
+ auto CopyConstV = [](const V& aV) -> const V { return aV; };
+
+ MOZ_RELEASE_ASSERT(v1.match(desc) == RESULT(U8, ParamLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(v2.match(desc) == RESULT(U32, ParamLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(v3.match(desc) == RESULT(U64, ParamLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(desc) == RESULT(U8, ParamCLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(desc) == RESULT(U32, ParamCLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(desc) == RESULT(U64, ParamCLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(desc) == RESULT(U8, ParamRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(desc) == RESULT(U32, ParamRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(desc) == RESULT(U64, ParamRREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(desc) ==
+ RESULT(U8, ParamCRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(desc) ==
+ RESULT(U32, ParamCRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(desc) ==
+ RESULT(U64, ParamCRREF, NA, 3));
+}
+
+static void testMatchingLambdas() {
+ printf("testMatchingLambdas\n");
+ using V = Variant<uint8_t, uint32_t, uint64_t>;
+
+ auto desc8 = [](auto&& a) {
+ if constexpr (std::is_same_v<decltype(a), uint8_t&>) {
+ return RESULT(U8, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&>) {
+ return RESULT(U8, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint8_t&&>) {
+ return RESULT(U8, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&&>) {
+ return RESULT(U8, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+ auto desc32 = [](auto&& a) {
+ if constexpr (std::is_same_v<decltype(a), uint32_t&>) {
+ return RESULT(U32, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&>) {
+ return RESULT(U32, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&&>) {
+ return RESULT(U32, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&&>) {
+ return RESULT(U32, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+ auto desc64 = [](auto&& a) {
+ if constexpr (std::is_same_v<decltype(a), uint64_t&>) {
+ return RESULT(U64, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&>) {
+ return RESULT(U64, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&&>) {
+ return RESULT(U64, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&&>) {
+ return RESULT(U64, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+
+ V v1(uint8_t(1));
+ V v2(uint32_t(2));
+ V v3(uint64_t(3));
+
+ const V& constRef1 = v1;
+ const V& constRef2 = v2;
+ const V& constRef3 = v3;
+
+ // Create a temporary variant by returning a copy of one.
+ auto CopyV = [](const V& aV) { return aV; };
+
+ // Create a temporary variant by returning a const copy of one.
+ auto CopyConstV = [](const V& aV) -> const V { return aV; };
+
+ MOZ_RELEASE_ASSERT(v1.match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(v2.match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(v3.match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamCLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamCLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamCLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamRREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamCRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamCRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamCRREF, NA, 3));
+}
+
+static void testMatchingLambdasWithIndex() {
+ printf("testMatchingLambdasWithIndex\n");
+ using V = Variant<uint8_t, uint32_t, uint64_t>;
+
+ auto desc8 = [](size_t aIndex, auto&& a) {
+ MOZ_RELEASE_ASSERT(aIndex == 0);
+ if constexpr (std::is_same_v<decltype(a), uint8_t&>) {
+ return RESULT(U8, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&>) {
+ return RESULT(U8, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint8_t&&>) {
+ return RESULT(U8, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint8_t&&>) {
+ return RESULT(U8, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+ auto desc32 = [](size_t aIndex, auto&& a) {
+ MOZ_RELEASE_ASSERT(aIndex == 1);
+ if constexpr (std::is_same_v<decltype(a), uint32_t&>) {
+ return RESULT(U32, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&>) {
+ return RESULT(U32, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint32_t&&>) {
+ return RESULT(U32, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint32_t&&>) {
+ return RESULT(U32, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+ auto desc64 = [](size_t aIndex, auto&& a) {
+ MOZ_RELEASE_ASSERT(aIndex == 2);
+ if constexpr (std::is_same_v<decltype(a), uint64_t&>) {
+ return RESULT(U64, ParamLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&>) {
+ return RESULT(U64, ParamCLREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), uint64_t&&>) {
+ return RESULT(U64, ParamRREF, NA, a);
+ } else if constexpr (std::is_same_v<decltype(a), const uint64_t&&>) {
+ return RESULT(U64, ParamCRREF, NA, a);
+ } else {
+ // We don't expect any other type.
+ // Tech note: We can't just do `static_assert(false)` which would always
+ // fail during the initial parsing. So we depend on the templated
+ // parameter to delay computing `false` until actual instantiation.
+ static_assert(sizeof(a) == size_t(-1));
+ return RESULT(NA, NA, NA, 0);
+ }
+ };
+
+ V v1(uint8_t(1));
+ V v2(uint32_t(2));
+ V v3(uint64_t(3));
+
+ const V& constRef1 = v1;
+ const V& constRef2 = v2;
+ const V& constRef3 = v3;
+
+ // Create a temporary variant by returning a copy of one.
+ auto CopyV = [](const V& aV) { return aV; };
+
+ // Create a temporary variant by returning a const copy of one.
+ auto CopyConstV = [](const V& aV) -> const V { return aV; };
+
+ MOZ_RELEASE_ASSERT(v1.match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(v2.match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(v3.match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(constRef1.match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamCLREF, NA, 1));
+ MOZ_RELEASE_ASSERT(constRef2.match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamCLREF, NA, 2));
+ MOZ_RELEASE_ASSERT(constRef3.match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamCLREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyV(v1).match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyV(v2).match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyV(v3).match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamRREF, NA, 3));
+
+ MOZ_RELEASE_ASSERT(CopyConstV(v1).match(desc8, desc32, desc64) ==
+ RESULT(U8, ParamCRREF, NA, 1));
+ MOZ_RELEASE_ASSERT(CopyConstV(v2).match(desc8, desc32, desc64) ==
+ RESULT(U32, ParamCRREF, NA, 2));
+ MOZ_RELEASE_ASSERT(CopyConstV(v3).match(desc8, desc32, desc64) ==
+ RESULT(U64, ParamCRREF, NA, 3));
+}
+
+#undef RESULT
+
+static void testAddTagToHash() {
+ printf("testAddToHash\n");
+ using V = Variant<uint8_t, uint16_t, uint32_t, uint64_t>;
+
+ // We don't know what our hash function is, and these are certainly not all
+ // true under all hash functions. But they are probably true under almost any
+ // decent hash function, and our aim is simply to establish that the tag
+ // *does* influence the hash value.
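+  // (For intuition only -- this is an assumption about the implementation,
+  // not a documented contract -- addTagToHash could fold the tag in with
+  // something like mozilla::AddToHash(aSeed, tag) from
+  // mozilla/HashFunctions.h.)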
+ {
+ mozilla::HashNumber h8 = V(uint8_t(1)).addTagToHash(0);
+ mozilla::HashNumber h16 = V(uint16_t(1)).addTagToHash(0);
+ mozilla::HashNumber h32 = V(uint32_t(1)).addTagToHash(0);
+ mozilla::HashNumber h64 = V(uint64_t(1)).addTagToHash(0);
+
+ MOZ_RELEASE_ASSERT(h8 != h16 && h8 != h32 && h8 != h64);
+ MOZ_RELEASE_ASSERT(h16 != h32 && h16 != h64);
+ MOZ_RELEASE_ASSERT(h32 != h64);
+ }
+
+ {
+ mozilla::HashNumber h8 = V(uint8_t(1)).addTagToHash(0x124356);
+ mozilla::HashNumber h16 = V(uint16_t(1)).addTagToHash(0x124356);
+ mozilla::HashNumber h32 = V(uint32_t(1)).addTagToHash(0x124356);
+ mozilla::HashNumber h64 = V(uint64_t(1)).addTagToHash(0x124356);
+
+ MOZ_RELEASE_ASSERT(h8 != h16 && h8 != h32 && h8 != h64);
+ MOZ_RELEASE_ASSERT(h16 != h32 && h16 != h64);
+ MOZ_RELEASE_ASSERT(h32 != h64);
+ }
+}
+
+int main() {
+ testDetails();
+ testSimple();
+ testDuplicate();
+ testConstructionWithVariantType();
+ testConstructionWithVariantIndex();
+ testEmplaceWithType();
+ testEmplaceWithIndex();
+ testCopy();
+ testMove();
+ testDestructor();
+ testEquality();
+ testMatching();
+ testMatchingLambda();
+ testMatchingLambdaWithIndex();
+ testMatchingLambdas();
+ testMatchingLambdasWithIndex();
+ testAddTagToHash();
+
+ printf("TestVariant OK!\n");
+ return 0;
+}
diff --git a/mfbt/tests/TestVector.cpp b/mfbt/tests/TestVector.cpp
new file mode 100644
index 0000000000..021d02976b
--- /dev/null
+++ b/mfbt/tests/TestVector.cpp
@@ -0,0 +1,792 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <utility>
+
+#include "mozilla/IntegerRange.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Vector.h"
+
+using mozilla::IntegerRange;
+using mozilla::MakeUnique;
+using mozilla::UniquePtr;
+using mozilla::Vector;
+using mozilla::detail::VectorTesting;
+
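+// VectorTesting is forward-declared in mozilla/Vector.h (presumably as a
+// friend of Vector's internals) so these static methods can inspect private
+// state; reserved() in particular appears to be debug-only, hence the
+// #ifdef DEBUG guards in some of the tests below.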
+struct mozilla::detail::VectorTesting {
+ static void testReserved();
+ static void testConstRange();
+ static void testEmplaceBack();
+ static void testReverse();
+ static void testExtractRawBuffer();
+ static void testExtractOrCopyRawBuffer();
+ static void testReplaceRawBuffer();
+ static void testInsert();
+ static void testErase();
+ static void testShrinkStorageToFit();
+ static void testAppend();
+};
+
+void mozilla::detail::VectorTesting::testReserved() {
+#ifdef DEBUG
+ Vector<bool> bv;
+ MOZ_RELEASE_ASSERT(bv.reserved() == 0);
+
+ MOZ_RELEASE_ASSERT(bv.append(true));
+ MOZ_RELEASE_ASSERT(bv.reserved() == 1);
+
+ Vector<bool> otherbv;
+ MOZ_RELEASE_ASSERT(otherbv.append(false));
+ MOZ_RELEASE_ASSERT(otherbv.append(true));
+ MOZ_RELEASE_ASSERT(bv.appendAll(otherbv));
+ MOZ_RELEASE_ASSERT(bv.reserved() == 3);
+
+ MOZ_RELEASE_ASSERT(bv.reserve(5));
+ MOZ_RELEASE_ASSERT(bv.reserved() == 5);
+
+ MOZ_RELEASE_ASSERT(bv.reserve(1));
+ MOZ_RELEASE_ASSERT(bv.reserved() == 5);
+
+ Vector<bool> bv2(std::move(bv));
+ MOZ_RELEASE_ASSERT(bv.reserved() == 0);
+ MOZ_RELEASE_ASSERT(bv2.reserved() == 5);
+
+ bv2.clearAndFree();
+ MOZ_RELEASE_ASSERT(bv2.reserved() == 0);
+
+ Vector<int, 42> iv;
+ MOZ_RELEASE_ASSERT(iv.reserved() == 0);
+
+ MOZ_RELEASE_ASSERT(iv.append(17));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 1);
+
+ Vector<int, 42> otheriv;
+ MOZ_RELEASE_ASSERT(otheriv.append(42));
+ MOZ_RELEASE_ASSERT(otheriv.append(37));
+ MOZ_RELEASE_ASSERT(iv.appendAll(otheriv));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 3);
+
+ MOZ_RELEASE_ASSERT(iv.reserve(5));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 5);
+
+ MOZ_RELEASE_ASSERT(iv.reserve(1));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 5);
+
+ MOZ_RELEASE_ASSERT(iv.reserve(55));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 55);
+
+ Vector<int, 42> iv2(std::move(iv));
+ MOZ_RELEASE_ASSERT(iv.reserved() == 0);
+ MOZ_RELEASE_ASSERT(iv2.reserved() == 55);
+
+ iv2.clearAndFree();
+ MOZ_RELEASE_ASSERT(iv2.reserved() == 0);
+#endif
+}
+
+void mozilla::detail::VectorTesting::testConstRange() {
+#ifdef DEBUG
+ Vector<int> vec;
+
+ for (int i = 0; i < 10; i++) {
+ MOZ_RELEASE_ASSERT(vec.append(i));
+ }
+
+ const auto& vecRef = vec;
+
+ Vector<int>::ConstRange range = vecRef.all();
+ for (int i = 0; i < 10; i++) {
+ MOZ_RELEASE_ASSERT(!range.empty());
+ MOZ_RELEASE_ASSERT(range.front() == i);
+ range.popFront();
+ }
+#endif
+}
+
+namespace {
+
+struct S {
+ size_t j;
+ UniquePtr<size_t> k;
+
+ static size_t constructCount;
+ static size_t moveCount;
+ static size_t destructCount;
+
+ static void resetCounts() {
+ constructCount = 0;
+ moveCount = 0;
+ destructCount = 0;
+ }
+
+ S(size_t j, size_t k) : j(j), k(MakeUnique<size_t>(k)) { constructCount++; }
+
+ S(S&& rhs) : j(rhs.j), k(std::move(rhs.k)) {
+ rhs.j = 0;
+ rhs.k.reset(0);
+ moveCount++;
+ }
+
+ ~S() { destructCount++; }
+
+ S& operator=(S&& rhs) {
+ j = rhs.j;
+ rhs.j = 0;
+ k = std::move(rhs.k);
+ rhs.k.reset();
+ moveCount++;
+ return *this;
+ }
+
+ bool operator==(const S& rhs) const { return j == rhs.j && *k == *rhs.k; }
+
+ S(const S&) = delete;
+ S& operator=(const S&) = delete;
+};
+
+size_t S::constructCount = 0;
+size_t S::moveCount = 0;
+size_t S::destructCount = 0;
+
+} // namespace
+
+void mozilla::detail::VectorTesting::testEmplaceBack() {
+ S::resetCounts();
+
+ Vector<S> vec;
+ MOZ_RELEASE_ASSERT(vec.reserve(20));
+
+ for (size_t i = 0; i < 10; i++) {
+ S s(i, i * i);
+ MOZ_RELEASE_ASSERT(vec.append(std::move(s)));
+ }
+
+ MOZ_RELEASE_ASSERT(vec.length() == 10);
+ MOZ_RELEASE_ASSERT(S::constructCount == 10);
+ MOZ_RELEASE_ASSERT(S::moveCount == 10);
+
+ for (size_t i = 10; i < 20; i++) {
+ MOZ_RELEASE_ASSERT(vec.emplaceBack(i, i * i));
+ }
+
+ MOZ_RELEASE_ASSERT(vec.length() == 20);
+ MOZ_RELEASE_ASSERT(S::constructCount == 20);
+ MOZ_RELEASE_ASSERT(S::moveCount == 10);
+
+ for (size_t i = 0; i < 20; i++) {
+ MOZ_RELEASE_ASSERT(vec[i].j == i);
+ MOZ_RELEASE_ASSERT(*vec[i].k == i * i);
+ }
+}
+
+void mozilla::detail::VectorTesting::testReverse() {
+  // Use UniquePtr to make sure that reverse() can handle move-only types.
+ Vector<UniquePtr<uint8_t>, 0> vec;
+
+ // Reverse an odd number of elements.
+
+ for (uint8_t i = 0; i < 5; i++) {
+ auto p = MakeUnique<uint8_t>(i);
+ MOZ_RELEASE_ASSERT(p);
+ MOZ_RELEASE_ASSERT(vec.append(std::move(p)));
+ }
+
+ vec.reverse();
+
+ MOZ_RELEASE_ASSERT(*vec[0] == 4);
+ MOZ_RELEASE_ASSERT(*vec[1] == 3);
+ MOZ_RELEASE_ASSERT(*vec[2] == 2);
+ MOZ_RELEASE_ASSERT(*vec[3] == 1);
+ MOZ_RELEASE_ASSERT(*vec[4] == 0);
+
+ // Reverse an even number of elements.
+
+ vec.popBack();
+ vec.reverse();
+
+ MOZ_RELEASE_ASSERT(*vec[0] == 1);
+ MOZ_RELEASE_ASSERT(*vec[1] == 2);
+ MOZ_RELEASE_ASSERT(*vec[2] == 3);
+ MOZ_RELEASE_ASSERT(*vec[3] == 4);
+
+ // Reverse an empty vector.
+
+ vec.clear();
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+ vec.reverse();
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+
+ // Reverse a vector using only inline storage.
+
+ Vector<UniquePtr<uint8_t>, 5> vec2;
+ for (uint8_t i = 0; i < 5; i++) {
+ auto p = MakeUnique<uint8_t>(i);
+ MOZ_RELEASE_ASSERT(p);
+ MOZ_RELEASE_ASSERT(vec2.append(std::move(p)));
+ }
+
+ vec2.reverse();
+
+ MOZ_RELEASE_ASSERT(*vec2[0] == 4);
+ MOZ_RELEASE_ASSERT(*vec2[1] == 3);
+ MOZ_RELEASE_ASSERT(*vec2[2] == 2);
+ MOZ_RELEASE_ASSERT(*vec2[3] == 1);
+ MOZ_RELEASE_ASSERT(*vec2[4] == 0);
+}
+
+void mozilla::detail::VectorTesting::testExtractRawBuffer() {
+ S::resetCounts();
+
+ Vector<S, 5> vec;
+ MOZ_RELEASE_ASSERT(vec.reserve(5));
+ for (size_t i = 0; i < 5; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+ MOZ_RELEASE_ASSERT(vec.length() == 5);
+ MOZ_ASSERT(vec.reserved() == 5);
+ MOZ_RELEASE_ASSERT(S::constructCount == 5);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ S* buf = vec.extractRawBuffer();
+ MOZ_RELEASE_ASSERT(!buf);
+ MOZ_RELEASE_ASSERT(vec.length() == 5);
+ MOZ_ASSERT(vec.reserved() == 5);
+ MOZ_RELEASE_ASSERT(S::constructCount == 5);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ MOZ_RELEASE_ASSERT(vec.reserve(10));
+ for (size_t i = 5; i < 10; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+ MOZ_RELEASE_ASSERT(vec.length() == 10);
+ MOZ_ASSERT(vec.reserved() == 10);
+ MOZ_RELEASE_ASSERT(S::constructCount == 10);
+ MOZ_RELEASE_ASSERT(S::moveCount == 5);
+ MOZ_RELEASE_ASSERT(S::destructCount == 5);
+
+ buf = vec.extractRawBuffer();
+ MOZ_RELEASE_ASSERT(buf);
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+ MOZ_ASSERT(vec.reserved() == 0);
+ MOZ_RELEASE_ASSERT(S::constructCount == 10);
+ MOZ_RELEASE_ASSERT(S::moveCount == 5);
+ MOZ_RELEASE_ASSERT(S::destructCount == 5);
+
+ for (size_t i = 0; i < 10; i++) {
+ MOZ_RELEASE_ASSERT(buf[i].j == i);
+ MOZ_RELEASE_ASSERT(*buf[i].k == i * i);
+ }
+
+ free(buf);
+}
+
+void mozilla::detail::VectorTesting::testExtractOrCopyRawBuffer() {
+ S::resetCounts();
+
+ Vector<S, 5> vec;
+ MOZ_RELEASE_ASSERT(vec.reserve(5));
+ for (size_t i = 0; i < 5; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+ MOZ_RELEASE_ASSERT(vec.length() == 5);
+ MOZ_ASSERT(vec.reserved() == 5);
+ MOZ_RELEASE_ASSERT(S::constructCount == 5);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ S* buf = vec.extractOrCopyRawBuffer();
+ MOZ_RELEASE_ASSERT(buf);
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+ MOZ_ASSERT(vec.reserved() == 0);
+ MOZ_RELEASE_ASSERT(S::constructCount == 5);
+ MOZ_RELEASE_ASSERT(S::moveCount == 5);
+ MOZ_RELEASE_ASSERT(S::destructCount == 5);
+
+ for (size_t i = 0; i < 5; i++) {
+ MOZ_RELEASE_ASSERT(buf[i].j == i);
+ MOZ_RELEASE_ASSERT(*buf[i].k == i * i);
+ }
+
+ S::resetCounts();
+
+ MOZ_RELEASE_ASSERT(vec.reserve(10));
+ for (size_t i = 0; i < 10; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+ MOZ_RELEASE_ASSERT(vec.length() == 10);
+ MOZ_ASSERT(vec.reserved() == 10);
+ MOZ_RELEASE_ASSERT(S::constructCount == 10);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ buf = vec.extractOrCopyRawBuffer();
+ MOZ_RELEASE_ASSERT(buf);
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+ MOZ_ASSERT(vec.reserved() == 0);
+ MOZ_RELEASE_ASSERT(S::constructCount == 10);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ for (size_t i = 0; i < 10; i++) {
+ MOZ_RELEASE_ASSERT(buf[i].j == i);
+ MOZ_RELEASE_ASSERT(*buf[i].k == i * i);
+ }
+
+ free(buf);
+}
+
+void mozilla::detail::VectorTesting::testReplaceRawBuffer() {
+ S::resetCounts();
+
+ S* s = nullptr;
+
+ {
+ Vector<S> v;
+ MOZ_RELEASE_ASSERT(v.reserve(4));
+ v.infallibleEmplaceBack(1, 2);
+ v.infallibleEmplaceBack(3, 4);
+ MOZ_ASSERT(S::constructCount == 2);
+ s = v.extractRawBuffer();
+ }
+
+ MOZ_ASSERT(S::constructCount == 2);
+ MOZ_ASSERT(S::moveCount == 0);
+ MOZ_ASSERT(S::destructCount == 0);
+
+ {
+ Vector<S, 10> v;
+ v.replaceRawBuffer(s, 2);
+ MOZ_ASSERT(v.length() == 2);
+ MOZ_ASSERT(v.reserved() == 2);
+ MOZ_ASSERT(v.capacity() == 10);
+ MOZ_ASSERT(v[0].j == 1);
+ MOZ_ASSERT(v[1].j == 3);
+ MOZ_ASSERT(S::destructCount == 2);
+ }
+
+ MOZ_ASSERT(S::constructCount == 2);
+ MOZ_ASSERT(S::moveCount == 2);
+ MOZ_ASSERT(S::destructCount == 4);
+
+ S::resetCounts();
+
+ {
+ Vector<S, 2> v;
+ MOZ_RELEASE_ASSERT(v.reserve(4));
+ v.infallibleEmplaceBack(9, 10);
+ MOZ_ASSERT(S::constructCount == 1);
+ s = v.extractRawBuffer();
+ MOZ_ASSERT(S::constructCount == 1);
+ MOZ_ASSERT(S::moveCount == 0);
+ }
+
+ MOZ_ASSERT(S::destructCount == 0);
+
+ {
+ Vector<S> v;
+ v.replaceRawBuffer(s, 1, 4);
+ MOZ_ASSERT(v.length() == 1);
+ MOZ_ASSERT(v.reserved() == 4);
+ MOZ_ASSERT(v.capacity() == 4);
+ MOZ_ASSERT(v[0].j == 9);
+ for (size_t i = 0; i < 5; i++) MOZ_RELEASE_ASSERT(v.emplaceBack(i, i));
+ MOZ_ASSERT(v.length() == 6);
+ MOZ_ASSERT(v.reserved() == 6);
+ MOZ_ASSERT(S::constructCount == 6);
+ MOZ_ASSERT(S::moveCount == 4);
+ MOZ_ASSERT(S::destructCount == 4);
+ }
+
+ MOZ_ASSERT(S::destructCount == 10);
+}
+
+void mozilla::detail::VectorTesting::testInsert() {
+ S::resetCounts();
+
+ Vector<S, 8> vec;
+ MOZ_RELEASE_ASSERT(vec.reserve(8));
+ for (size_t i = 0; i < 7; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+
+ MOZ_RELEASE_ASSERT(vec.length() == 7);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 7);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+
+ S s(42, 43);
+ MOZ_RELEASE_ASSERT(vec.insert(vec.begin() + 4, std::move(s)));
+
+ for (size_t i = 0; i < vec.length(); i++) {
+ const S& s = vec[i];
+ MOZ_RELEASE_ASSERT(s.k);
+ if (i < 4) {
+ MOZ_RELEASE_ASSERT(s.j == i && *s.k == i * i);
+ } else if (i == 4) {
+ MOZ_RELEASE_ASSERT(s.j == 42 && *s.k == 43);
+ } else {
+ MOZ_RELEASE_ASSERT(s.j == i - 1 && *s.k == (i - 1) * (i - 1));
+ }
+ }
+
+ MOZ_RELEASE_ASSERT(vec.length() == 8);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 8);
+ MOZ_RELEASE_ASSERT(S::moveCount == 1 /* move in insert() call */ +
+ 1 /* move the back() element */ +
+ 3 /* elements to shift */);
+ MOZ_RELEASE_ASSERT(S::destructCount == 1);
+}
+
+void mozilla::detail::VectorTesting::testErase() {
+ S::resetCounts();
+
+ Vector<S, 8> vec;
+ MOZ_RELEASE_ASSERT(vec.reserve(8));
+ for (size_t i = 0; i < 7; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+
+ // vec: [0, 1, 2, 3, 4, 5, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 7);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 7);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 0);
+ S::resetCounts();
+
+ vec.erase(&vec[4]);
+ // vec: [0, 1, 2, 3, 5, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 6);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ // 5 and 6 should have been moved into 4 and 5.
+ MOZ_RELEASE_ASSERT(S::moveCount == 2);
+ MOZ_RELEASE_ASSERT(S::destructCount == 1);
+ MOZ_RELEASE_ASSERT(vec[4] == S(5, 5 * 5));
+ MOZ_RELEASE_ASSERT(vec[5] == S(6, 6 * 6));
+ S::resetCounts();
+
+ vec.erase(&vec[3], &vec[5]);
+ // vec: [0, 1, 2, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 4);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ // 6 should have been moved into 3.
+ MOZ_RELEASE_ASSERT(S::moveCount == 1);
+ MOZ_RELEASE_ASSERT(S::destructCount == 2);
+ MOZ_RELEASE_ASSERT(vec[3] == S(6, 6 * 6));
+
+ S s2(2, 2 * 2);
+ S::resetCounts();
+
+ vec.eraseIfEqual(s2);
+ // vec: [0, 1, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 3);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ // 6 should have been moved into 2.
+ MOZ_RELEASE_ASSERT(S::moveCount == 1);
+ MOZ_RELEASE_ASSERT(S::destructCount == 1);
+ MOZ_RELEASE_ASSERT(vec[2] == S(6, 6 * 6));
+ S::resetCounts();
+
+ // Predicate to find one element.
+ vec.eraseIf([](const S& s) { return s.j == 1; });
+ // vec: [0, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 2);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ // 6 should have been moved into 1.
+ MOZ_RELEASE_ASSERT(S::moveCount == 1);
+ MOZ_RELEASE_ASSERT(S::destructCount == 1);
+ MOZ_RELEASE_ASSERT(vec[1] == S(6, 6 * 6));
+ S::resetCounts();
+
+ // Generic predicate that flags everything.
+ vec.eraseIf([](auto&&) { return true; });
+ // vec: []
+ MOZ_RELEASE_ASSERT(vec.length() == 0);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ MOZ_RELEASE_ASSERT(S::moveCount == 0);
+ MOZ_RELEASE_ASSERT(S::destructCount == 2);
+
+ for (size_t i = 0; i < 7; i++) {
+ vec.infallibleEmplaceBack(i, i * i);
+ }
+ // vec: [0, 1, 2, 3, 4, 5, 6]
+ MOZ_RELEASE_ASSERT(vec.length() == 7);
+ S::resetCounts();
+
+ // Predicate that flags all even numbers.
+ vec.eraseIf([](const S& s) { return s.j % 2 == 0; });
+ // vec: [1 (was 0), 3 (was 1), 5 (was 2)]
+ MOZ_RELEASE_ASSERT(vec.length() == 3);
+ MOZ_ASSERT(vec.reserved() == 8);
+ MOZ_RELEASE_ASSERT(S::constructCount == 0);
+ MOZ_RELEASE_ASSERT(S::moveCount == 3);
+ MOZ_RELEASE_ASSERT(S::destructCount == 4);
+}
+
+void mozilla::detail::VectorTesting::testShrinkStorageToFit() {
+ // Vectors not using inline storage realloc capacity to exact length.
+ {
+ Vector<int, 0> v1;
+ MOZ_RELEASE_ASSERT(v1.reserve(10));
+ v1.infallibleAppend(1);
+ MOZ_ASSERT(v1.length() == 1);
+ MOZ_ASSERT(v1.reserved() == 10);
+ MOZ_ASSERT(v1.capacity() >= 10);
+ v1.shrinkStorageToFit();
+ MOZ_ASSERT(v1.length() == 1);
+ MOZ_ASSERT(v1.reserved() == 1);
+ MOZ_ASSERT(v1.capacity() == 1);
+ }
+
+ // Vectors using inline storage do nothing.
+ {
+ Vector<int, 2> v2;
+ MOZ_RELEASE_ASSERT(v2.reserve(2));
+ v2.infallibleAppend(1);
+ MOZ_ASSERT(v2.length() == 1);
+ MOZ_ASSERT(v2.reserved() == 2);
+ MOZ_ASSERT(v2.capacity() == 2);
+ v2.shrinkStorageToFit();
+ MOZ_ASSERT(v2.length() == 1);
+ MOZ_ASSERT(v2.reserved() == 2);
+ MOZ_ASSERT(v2.capacity() == 2);
+ }
+
+ // shrinkStorageToFit uses inline storage if possible.
+ {
+ Vector<int, 2> v;
+ MOZ_RELEASE_ASSERT(v.reserve(4));
+ v.infallibleAppend(1);
+ MOZ_ASSERT(v.length() == 1);
+ MOZ_ASSERT(v.reserved() == 4);
+ MOZ_ASSERT(v.capacity() >= 4);
+ v.shrinkStorageToFit();
+ MOZ_ASSERT(v.length() == 1);
+ MOZ_ASSERT(v.reserved() == 1);
+ MOZ_ASSERT(v.capacity() == 2);
+ }
+
+  // Non-POD shrinking to non-inline storage.
+ {
+ static size_t sConstructCounter = 0;
+ static size_t sCopyCounter = 0;
+ static size_t sMoveCounter = 0;
+ static size_t sDestroyCounter = 0;
+ struct NonPod {
+ int mSomething = 10;
+
+ NonPod() { sConstructCounter++; }
+
+ NonPod(const NonPod& aOther) : mSomething(aOther.mSomething) {
+ sCopyCounter++;
+ }
+ NonPod(NonPod&& aOther) : mSomething(aOther.mSomething) {
+ sMoveCounter++;
+ }
+ ~NonPod() { sDestroyCounter++; }
+ };
+
+ Vector<NonPod, 5> v;
+ MOZ_RELEASE_ASSERT(v.reserve(10));
+ for (size_t i = 0; i < 8; ++i) {
+ v.infallibleEmplaceBack();
+ }
+ MOZ_RELEASE_ASSERT(sConstructCounter == 8);
+ MOZ_RELEASE_ASSERT(sCopyCounter == 0);
+ MOZ_RELEASE_ASSERT(sMoveCounter == 0);
+ MOZ_RELEASE_ASSERT(sDestroyCounter == 0);
+ MOZ_RELEASE_ASSERT(v.length() == 8);
+ MOZ_ASSERT(v.reserved() == 10);
+ MOZ_RELEASE_ASSERT(v.capacity() >= 10);
+ MOZ_RELEASE_ASSERT(v.shrinkStorageToFit());
+
+ MOZ_RELEASE_ASSERT(sConstructCounter == 8);
+ MOZ_RELEASE_ASSERT(sCopyCounter == 0);
+ MOZ_RELEASE_ASSERT(sMoveCounter == 8);
+ MOZ_RELEASE_ASSERT(sDestroyCounter == 8);
+ MOZ_RELEASE_ASSERT(v.length() == 8);
+ MOZ_ASSERT(v.reserved() == 8);
+ MOZ_RELEASE_ASSERT(v.capacity() == 8);
+ }
+
+ // Non-POD shrinking to inline storage.
+ {
+ static size_t sConstructCounter = 0;
+ static size_t sCopyCounter = 0;
+ static size_t sMoveCounter = 0;
+ static size_t sDestroyCounter = 0;
+ struct NonPod {
+ int mSomething = 10;
+
+ NonPod() { sConstructCounter++; }
+
+ NonPod(const NonPod& aOther) : mSomething(aOther.mSomething) {
+ sCopyCounter++;
+ }
+ NonPod(NonPod&& aOther) : mSomething(aOther.mSomething) {
+ sMoveCounter++;
+ }
+ ~NonPod() { sDestroyCounter++; }
+ };
+
+ Vector<NonPod, 5> v;
+ MOZ_RELEASE_ASSERT(v.reserve(10));
+ for (size_t i = 0; i < 3; ++i) {
+ v.infallibleEmplaceBack();
+ }
+ MOZ_RELEASE_ASSERT(sConstructCounter == 3);
+ MOZ_RELEASE_ASSERT(sCopyCounter == 0);
+ MOZ_RELEASE_ASSERT(sMoveCounter == 0);
+ MOZ_RELEASE_ASSERT(sDestroyCounter == 0);
+ MOZ_RELEASE_ASSERT(v.length() == 3);
+ MOZ_ASSERT(v.reserved() == 10);
+ MOZ_RELEASE_ASSERT(v.capacity() >= 10);
+ MOZ_RELEASE_ASSERT(v.shrinkStorageToFit());
+
+ MOZ_RELEASE_ASSERT(sConstructCounter == 3);
+ MOZ_RELEASE_ASSERT(sCopyCounter == 0);
+ MOZ_RELEASE_ASSERT(sMoveCounter == 3);
+ MOZ_RELEASE_ASSERT(sDestroyCounter == 3);
+ MOZ_RELEASE_ASSERT(v.length() == 3);
+ MOZ_ASSERT(v.reserved() == 3);
+ MOZ_RELEASE_ASSERT(v.capacity() == 5);
+ }
+}
+
+void mozilla::detail::VectorTesting::testAppend() {
+ // Test moving append/appendAll with a move-only type
+ Vector<UniquePtr<int>> bv;
+ for (const int val : IntegerRange<int>(0, 3)) {
+ MOZ_RELEASE_ASSERT(bv.append(MakeUnique<int>(val)));
+ }
+
+ Vector<UniquePtr<int>> otherbv;
+ for (const int val : IntegerRange<int>(3, 8)) {
+ MOZ_RELEASE_ASSERT(otherbv.append(MakeUnique<int>(val)));
+ }
+ MOZ_RELEASE_ASSERT(bv.appendAll(std::move(otherbv)));
+
+ MOZ_RELEASE_ASSERT(otherbv.length() == 0);
+ MOZ_RELEASE_ASSERT(bv.length() == 8);
+ for (const int val : IntegerRange<int>(0, 8)) {
+ MOZ_RELEASE_ASSERT(*bv[val] == val);
+ }
+}
+
+// Vector with no inline storage should occupy the absolute minimum space in
+// non-debug builds. (Debug adds a laundry list of other constraints, none
+// directly relevant to shipping builds, that aren't worth precisely modeling.)
+#ifndef DEBUG
+
+template <typename T>
+struct NoInlineStorageLayout {
+ T* mBegin;
+ size_t mLength;
+ struct CRAndStorage {
+ size_t mCapacity;
+ } mTail;
+};
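+
+// (CRAndStorage is assumed to mirror how Vector packs its capacity together
+// with the -- here empty -- inline storage; the static_asserts below compare
+// sizes against this layout.)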
+
+// Only one of these should be necessary, but test a few of them for good
+// measure.
+static_assert(sizeof(Vector<int, 0>) == sizeof(NoInlineStorageLayout<int>),
+ "Vector of int without inline storage shouldn't occupy dead "
+ "space for that absence of storage");
+
+static_assert(sizeof(Vector<bool, 0>) == sizeof(NoInlineStorageLayout<bool>),
+ "Vector of bool without inline storage shouldn't occupy dead "
+ "space for that absence of storage");
+
+static_assert(sizeof(Vector<S, 0>) == sizeof(NoInlineStorageLayout<S>),
+ "Vector of S without inline storage shouldn't occupy dead "
+ "space for that absence of storage");
+
+#endif // DEBUG
+
+static void TestVectorBeginNonNull() {
+ // Vector::begin() should never return nullptr, to accommodate callers that
+ // (either for hygiene, or for semantic reasons) need a non-null pointer even
+ // for zero elements.
+
+ Vector<bool, 0> bvec0;
+ MOZ_RELEASE_ASSERT(bvec0.length() == 0);
+ MOZ_RELEASE_ASSERT(bvec0.begin() != nullptr);
+
+ Vector<bool, 1> bvec1;
+ MOZ_RELEASE_ASSERT(bvec1.length() == 0);
+ MOZ_RELEASE_ASSERT(bvec1.begin() != nullptr);
+
+ Vector<bool, 64> bvec64;
+ MOZ_RELEASE_ASSERT(bvec64.length() == 0);
+ MOZ_RELEASE_ASSERT(bvec64.begin() != nullptr);
+
+ Vector<int, 0> ivec0;
+ MOZ_RELEASE_ASSERT(ivec0.length() == 0);
+ MOZ_RELEASE_ASSERT(ivec0.begin() != nullptr);
+
+ Vector<int, 1> ivec1;
+ MOZ_RELEASE_ASSERT(ivec1.length() == 0);
+ MOZ_RELEASE_ASSERT(ivec1.begin() != nullptr);
+
+ Vector<int, 64> ivec64;
+ MOZ_RELEASE_ASSERT(ivec64.length() == 0);
+ MOZ_RELEASE_ASSERT(ivec64.begin() != nullptr);
+
+ Vector<long, 0> lvec0;
+ MOZ_RELEASE_ASSERT(lvec0.length() == 0);
+ MOZ_RELEASE_ASSERT(lvec0.begin() != nullptr);
+
+ Vector<long, 1> lvec1;
+ MOZ_RELEASE_ASSERT(lvec1.length() == 0);
+ MOZ_RELEASE_ASSERT(lvec1.begin() != nullptr);
+
+ Vector<long, 64> lvec64;
+ MOZ_RELEASE_ASSERT(lvec64.length() == 0);
+ MOZ_RELEASE_ASSERT(lvec64.begin() != nullptr);
+
+ // Vector<T, N> doesn't guarantee N inline elements -- the actual count is
+ // capped so that any Vector fits in a not-crazy amount of space -- so the
+ // code below won't overflow stacks or anything crazy.
+ struct VeryBig {
+ int array[16 * 1024 * 1024];
+ };
+
+ Vector<VeryBig, 0> vbvec0;
+ MOZ_RELEASE_ASSERT(vbvec0.length() == 0);
+ MOZ_RELEASE_ASSERT(vbvec0.begin() != nullptr);
+
+ Vector<VeryBig, 1> vbvec1;
+ MOZ_RELEASE_ASSERT(vbvec1.length() == 0);
+ MOZ_RELEASE_ASSERT(vbvec1.begin() != nullptr);
+
+ Vector<VeryBig, 64> vbvec64;
+ MOZ_RELEASE_ASSERT(vbvec64.length() == 0);
+ MOZ_RELEASE_ASSERT(vbvec64.begin() != nullptr);
+}
+
+int main() {
+ VectorTesting::testReserved();
+ VectorTesting::testConstRange();
+ VectorTesting::testEmplaceBack();
+ VectorTesting::testReverse();
+ VectorTesting::testExtractRawBuffer();
+ VectorTesting::testExtractOrCopyRawBuffer();
+ VectorTesting::testReplaceRawBuffer();
+ VectorTesting::testInsert();
+ VectorTesting::testErase();
+ VectorTesting::testShrinkStorageToFit();
+ VectorTesting::testAppend();
+ TestVectorBeginNonNull();
+}
diff --git a/mfbt/tests/TestWeakPtr.cpp b/mfbt/tests/TestWeakPtr.cpp
new file mode 100644
index 0000000000..0599975a9c
--- /dev/null
+++ b/mfbt/tests/TestWeakPtr.cpp
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/WeakPtr.h"
+
+using mozilla::SupportsWeakPtr;
+using mozilla::WeakPtr;
+
+static char IamB[] = "B";
+static char IamC[] = "C";
+static char IamD[] = "D";
+
+class B : public SupportsWeakPtr {
+ public:
+ char const* whoAmI() const { return IamB; }
+};
+
+// To have a class C support weak pointers, inherit from SupportsWeakPtr.
+class C : public SupportsWeakPtr {
+ public:
+ int mNum;
+
+ C() : mNum(0) {}
+
+ ~C() {
+    // Setting mNum in the destructor allows us to test against use-after-free
+    // below.
+ mNum = 0xDEAD;
+ }
+
+ char const* whoAmI() const { return IamC; }
+
+ void act() {}
+
+ bool isConst() { return false; }
+
+ bool isConst() const { return true; }
+};
+
+// Derived from a class that supports WeakPtr, but doesn't inherit
+// SupportsWeakPtr itself, to check that upcasting works as expected.
+class D : public B {
+ public:
+ char const* whoAmI() const { return IamD; }
+};
+
+bool isConst(C*) { return false; }
+
+bool isConst(const C*) { return true; }
+
+int main() {
+ C* c1 = new C;
+ MOZ_RELEASE_ASSERT(c1->mNum == 0);
+
+ // Get weak pointers to c1. The first time,
+ // a reference-counted WeakReference object is created that
+ // can live beyond the lifetime of 'c1'. The WeakReference
+ // object will be notified of 'c1's destruction.
+ WeakPtr<C> w1 = c1;
+ // Test a weak pointer for validity before using it.
+ MOZ_RELEASE_ASSERT(w1);
+ MOZ_RELEASE_ASSERT(w1 == c1);
+ w1->mNum = 1;
+ w1->act();
+
+ // Test taking another WeakPtr<C> to c1
+ WeakPtr<C> w2 = c1;
+ MOZ_RELEASE_ASSERT(w2);
+ MOZ_RELEASE_ASSERT(w2 == c1);
+ MOZ_RELEASE_ASSERT(w2 == w1);
+ MOZ_RELEASE_ASSERT(w2->mNum == 1);
+
+ // Test a WeakPtr<const C>
+ WeakPtr<const C> w3const = c1;
+ MOZ_RELEASE_ASSERT(w3const);
+ MOZ_RELEASE_ASSERT(w3const == c1);
+ MOZ_RELEASE_ASSERT(w3const == w1);
+ MOZ_RELEASE_ASSERT(w3const == w2);
+ MOZ_RELEASE_ASSERT(w3const->mNum == 1);
+
+ // Test const-correctness of operator-> and operator T*
+ MOZ_RELEASE_ASSERT(!w1->isConst());
+ MOZ_RELEASE_ASSERT(w3const->isConst());
+ MOZ_RELEASE_ASSERT(!isConst(w1));
+ MOZ_RELEASE_ASSERT(isConst(w3const));
+
+ // Test that when a WeakPtr is destroyed, it does not destroy the object that
+ // it points to, and it does not affect other WeakPtrs pointing to the same
+ // object (e.g. it does not destroy the WeakReference object).
+ {
+ WeakPtr<C> w4local = c1;
+ MOZ_RELEASE_ASSERT(w4local == c1);
+ }
+ // Now w4local has gone out of scope. If that had destroyed c1, then the
+ // following would fail for sure (see C::~C()).
+ MOZ_RELEASE_ASSERT(c1->mNum == 1);
+  // Check that w4local going out of scope hasn't affected other WeakPtrs
+  // pointing to c1.
+ MOZ_RELEASE_ASSERT(w1 == c1);
+ MOZ_RELEASE_ASSERT(w2 == c1);
+
+ // Now construct another C object and test changing what object a WeakPtr
+ // points to
+ C* c2 = new C;
+ c2->mNum = 2;
+ MOZ_RELEASE_ASSERT(w2->mNum == 1); // w2 was pointing to c1
+ w2 = c2;
+ MOZ_RELEASE_ASSERT(w2);
+ MOZ_RELEASE_ASSERT(w2 == c2);
+ MOZ_RELEASE_ASSERT(w2 != c1);
+ MOZ_RELEASE_ASSERT(w2 != w1);
+ MOZ_RELEASE_ASSERT(w2->mNum == 2);
+
+ // Destroying the underlying object clears weak pointers to it.
+ // It should not affect pointers that are not currently pointing to it.
+ delete c1;
+  MOZ_RELEASE_ASSERT(!w1, "Deleting an object should clear WeakPtrs to it.");
+  MOZ_RELEASE_ASSERT(!w3const,
+                     "Deleting an object should clear WeakPtrs to it.");
+  MOZ_RELEASE_ASSERT(w2,
+                     "Deleting an object should not clear WeakPtrs that are "
+                     "not pointing to it.");
+
+ delete c2;
+  MOZ_RELEASE_ASSERT(!w2, "Deleting an object should clear WeakPtrs to it.");
+
+ // Check that we correctly upcast to the base class supporting weakptr
+ D* d = new D;
+ WeakPtr<B> db = d;
+
+  // You should be able to use WeakPtr<D> even though it's the base class B
+  // that inherits from SupportsWeakPtr.
+ WeakPtr<D> weakd = d;
+
+ MOZ_RELEASE_ASSERT(db->whoAmI() == IamB);
+ MOZ_RELEASE_ASSERT(weakd.get() == db.get());
+
+ delete d;
+
+ MOZ_RELEASE_ASSERT(!db);
+ MOZ_RELEASE_ASSERT(!weakd);
+}
diff --git a/mfbt/tests/TestWinArchDefs.cpp b/mfbt/tests/TestWinArchDefs.cpp
new file mode 100644
index 0000000000..d8965d3d7c
--- /dev/null
+++ b/mfbt/tests/TestWinArchDefs.cpp
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This code tests the consistency of architecture-specific predefined macros
+// inherited from MSVC, before and after windows.h inclusion. See
+// https://learn.microsoft.com/en-us/cpp/preprocessor/predefined-macros for a
+// list of such macros.
+
+// If this test compiles, it is successful. See bug 1866562 for an example
+// where mingwclang builds were failing to compile this code.
+
+#if defined(_M_IX86)
+constexpr auto kIX86 = _M_IX86;
+#endif
+
+#if defined(_M_X64)
+constexpr auto kX64 = _M_X64;
+#endif
+
+#if defined(_M_AMD64)
+constexpr auto kAMD64 = _M_AMD64;
+#endif
+
+#if defined(_M_ARM)
+constexpr auto kARM = _M_ARM;
+#endif
+
+#if defined(_M_ARM64)
+constexpr auto kARM64 = _M_ARM64;
+#endif
+
+#include <windows.h>
+
+#if defined(_M_IX86)
+static_assert(kIX86 == _M_IX86);
+#endif
+
+#if defined(_M_X64)
+static_assert(kX64 == _M_X64);
+#endif
+
+#if defined(_M_AMD64)
+static_assert(kAMD64 == _M_AMD64);
+#endif
+
+#if defined(_M_ARM)
+static_assert(kARM == _M_ARM);
+#endif
+
+#if defined(_M_ARM64)
+static_assert(kARM64 == _M_ARM64);
+#endif
+
+// If this test compiles, it is successful.
+int main() { return 0; }
diff --git a/mfbt/tests/TestWrappingOperations.cpp b/mfbt/tests/TestWrappingOperations.cpp
new file mode 100644
index 0000000000..c1cbb8ae6a
--- /dev/null
+++ b/mfbt/tests/TestWrappingOperations.cpp
@@ -0,0 +1,587 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/WrappingOperations.h"
+
+#include <stdint.h>
+
+using mozilla::WrappingAdd;
+using mozilla::WrappingMultiply;
+using mozilla::WrappingSubtract;
+using mozilla::WrapToSigned;
+
+// NOTE: In places below |-FOO_MAX - 1| is used instead of |-FOO_MIN| because
+// in C++ numeric literals are full expressions -- the |-| in a negative
+// number is technically separate. So with most compilers that limit
+// |int| to the signed 32-bit range, something like |-2147483648| is
+// operator-() applied to an *unsigned* expression. And MSVC, at least,
+// warns when you do that. (The operation is well-defined, but it likely
+// doesn't do what was intended.) So we do the usual workaround for this
+// (see your local copy of <stdint.h> for a likely demo of this), writing
+// it out by negating the max value and subtracting 1.
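+//
+//       For example, with 32-bit |int|, the literal 2147483648 doesn't fit
+//       in |int|, so |-2147483648| negates a value of some wider type and
+//       only then converts -- it is not an |int| constant. The portable
+//       spelling is:
+//
+//         static_assert(-2147483647 - 1 == INT32_MIN, "spelled-out minimum");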
+
+static_assert(WrapToSigned(uint8_t(17)) == 17,
+ "no wraparound should work, 8-bit");
+static_assert(WrapToSigned(uint8_t(128)) == -128,
+ "works for 8-bit numbers, wraparound low end");
+static_assert(WrapToSigned(uint8_t(128 + 7)) == -128 + 7,
+ "works for 8-bit numbers, wraparound mid");
+static_assert(WrapToSigned(uint8_t(128 + 127)) == -128 + 127,
+ "works for 8-bit numbers, wraparound high end");
+
+static_assert(WrapToSigned(uint16_t(12345)) == 12345,
+ "no wraparound should work, 16-bit");
+static_assert(WrapToSigned(uint16_t(32768)) == -32768,
+ "works for 16-bit numbers, wraparound low end");
+static_assert(WrapToSigned(uint16_t(32768 + 42)) == -32768 + 42,
+ "works for 16-bit numbers, wraparound mid");
+static_assert(WrapToSigned(uint16_t(32768 + 32767)) == -32768 + 32767,
+ "works for 16-bit numbers, wraparound high end");
+
+static_assert(WrapToSigned(uint32_t(8675309)) == 8675309,
+ "no wraparound should work, 32-bit");
+static_assert(WrapToSigned(uint32_t(2147483648)) == -2147483647 - 1,
+ "works for 32-bit numbers, wraparound low end");
+static_assert(WrapToSigned(uint32_t(2147483648 + 42)) == -2147483647 - 1 + 42,
+ "works for 32-bit numbers, wraparound mid");
+static_assert(WrapToSigned(uint32_t(2147483648 + 2147483647)) ==
+ -2147483647 - 1 + 2147483647,
+ "works for 32-bit numbers, wraparound high end");
+
+static_assert(WrapToSigned(uint64_t(4152739164)) == 4152739164,
+ "no wraparound should work, 64-bit");
+static_assert(WrapToSigned(uint64_t(9223372036854775808ULL)) ==
+ -9223372036854775807LL - 1,
+ "works for 64-bit numbers, wraparound low end");
+static_assert(WrapToSigned(uint64_t(9223372036854775808ULL + 8005552368LL)) ==
+ -9223372036854775807LL - 1 + 8005552368LL,
+ "works for 64-bit numbers, wraparound mid");
+static_assert(WrapToSigned(uint64_t(9223372036854775808ULL +
+ 9223372036854775807ULL)) ==
+ -9223372036854775807LL - 1 + 9223372036854775807LL,
+ "works for 64-bit numbers, wraparound high end");
+
+template <typename T>
+inline constexpr bool TestEqual(T aX, T aY) {
+ return aX == aY;
+}
+
+static void TestWrappingAdd8() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint8_t(0), uint8_t(128)), uint8_t(128)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint8_t(17), uint8_t(42)), uint8_t(59)),
+ "17 + 42 == 59");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint8_t(255), uint8_t(1)), uint8_t(0)),
+ "all bits plus one overflows to zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint8_t(128), uint8_t(127)), uint8_t(255)),
+ "high bit plus all lower bits is all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint8_t(128), uint8_t(193)), uint8_t(65)),
+ "128 + 193 is 256 + 65");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int8_t(0), int8_t(-128)), int8_t(-128)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int8_t(123), int8_t(8)), int8_t(-125)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int8_t(5), int8_t(-123)), int8_t(-118)),
+ "5 - 123 is -118");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int8_t(-85), int8_t(-73)), int8_t(98)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int8_t(-128), int8_t(127)), int8_t(-1)),
+ "high bit plus all lower bits is -1");
+}
+
+static void TestWrappingAdd16() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint16_t(0), uint16_t(32768)), uint16_t(32768)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint16_t(24389), uint16_t(2682)), uint16_t(27071)),
+ "24389 + 2682 == 27071");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint16_t(65535), uint16_t(1)), uint16_t(0)),
+ "all bits plus one overflows to zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint16_t(32768), uint16_t(32767)), uint16_t(65535)),
+ "high bit plus all lower bits is all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint16_t(32768), uint16_t(47582)), uint16_t(14814)),
+ "32768 + 47582 is 65536 + 14814");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int16_t(0), int16_t(-32768)), int16_t(-32768)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int16_t(32765), int16_t(8)), int16_t(-32763)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int16_t(5), int16_t(-28933)), int16_t(-28928)),
+ "5 - 28933 is -28928");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int16_t(-23892), int16_t(-12893)), int16_t(28751)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int16_t(-32768), int16_t(32767)), int16_t(-1)),
+ "high bit plus all lower bits is -1");
+}
+
+static void TestWrappingAdd32() {
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(uint32_t(0), uint32_t(2147483648)),
+ uint32_t(2147483648)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint32_t(1398742328), uint32_t(714192829)),
+ uint32_t(2112935157)),
+ "1398742328 + 714192829 == 2112935157");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint32_t(4294967295), uint32_t(1)), uint32_t(0)),
+ "all bits plus one overflows to zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint32_t(2147483648), uint32_t(2147483647)),
+ uint32_t(4294967295)),
+ "high bit plus all lower bits is all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint32_t(2147483648), uint32_t(3146492712)),
+ uint32_t(999009064)),
+ "2147483648 + 3146492712 is 4294967296 + 999009064");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int32_t(0), int32_t(-2147483647 - 1)),
+ int32_t(-2147483647 - 1)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(int32_t(2147483645), int32_t(8)),
+ int32_t(-2147483643)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(int32_t(257), int32_t(-23947248)),
+ int32_t(-23946991)),
+ "257 - 23947248 is -23946991");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int32_t(-2147483220), int32_t(-12893)),
+ int32_t(2147471183)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+      TestEqual(WrappingAdd(int32_t(-2147483647 - 1), int32_t(2147483647)),
+                int32_t(-1)),
+ "high bit plus all lower bits is -1");
+}
+
+static void TestWrappingAdd64() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint64_t(0), uint64_t(9223372036854775808ULL)),
+ uint64_t(9223372036854775808ULL)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint64_t(70368744177664), uint64_t(3740873592)),
+ uint64_t(70372485051256)),
+ "70368744177664 + 3740873592 == 70372485051256");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(uint64_t(18446744073709551615ULL), uint64_t(1)),
+ uint64_t(0)),
+ "all bits plus one overflows to zero");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(uint64_t(9223372036854775808ULL),
+ uint64_t(9223372036854775807ULL)),
+ uint64_t(18446744073709551615ULL)),
+ "high bit plus all lower bits is all bits");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(uint64_t(14552598638644786479ULL),
+ uint64_t(3894174382537247221ULL)),
+ uint64_t(28947472482084)),
+ "9223372036854775808 + 3146492712 is 18446744073709551616 "
+ "+ 28947472482084");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int64_t(0), int64_t(-9223372036854775807LL - 1)),
+ int64_t(-9223372036854775807LL - 1)),
+ "zero plus anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int64_t(9223372036854775802LL), int64_t(8)),
+ int64_t(-9223372036854775806LL)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingAdd(int64_t(37482739294298742LL),
+ int64_t(-437843573929483498LL)),
+ int64_t(-400360834635184756LL)),
+ "37482739294298742 - 437843573929483498 is -400360834635184756");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(int64_t(-9127837934058953374LL),
+ int64_t(-4173572032144775807LL)),
+ int64_t(5145334107505822435L)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingAdd(int64_t(-9223372036854775807LL - 1),
+ int64_t(9223372036854775807LL)),
+ int64_t(-1)),
+ "high bit plus all lower bits is -1");
+}
+
+static void TestWrappingAdd() {
+ TestWrappingAdd8();
+ TestWrappingAdd16();
+ TestWrappingAdd32();
+ TestWrappingAdd64();
+}
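
A sketch of the call sites these cases model, assuming nothing beyond mozilla/WrappingOperations.h (NextSequence is an illustrative name): free-running counters that are meant to wrap rather than saturate or trap.

#include <cstdint>
#include "mozilla/WrappingOperations.h"

// Advance a deliberately wrapping 32-bit sequence number; the result is
// well-defined even when aCurrent is near UINT32_MAX.
uint32_t NextSequence(uint32_t aCurrent, uint32_t aStep) {
  return mozilla::WrappingAdd(aCurrent, aStep);
}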
+
+static void TestWrappingSubtract8() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint8_t(0), uint8_t(128)), uint8_t(128)),
+ "zero minus half is half");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint8_t(17), uint8_t(42)), uint8_t(231)),
+ "17 - 42 == -25 added to 256 is 231");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint8_t(0), uint8_t(1)), uint8_t(255)),
+ "zero underflows to all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint8_t(128), uint8_t(127)), uint8_t(1)),
+ "128 - 127 == 1");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint8_t(128), uint8_t(193)), uint8_t(191)),
+ "128 - 193 is -65 so -65 + 256 == 191");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int8_t(0), int8_t(-128)), int8_t(-128)),
+ "zero minus high bit wraps to high bit");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int8_t(-126), int8_t(4)), int8_t(126)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int8_t(5), int8_t(-123)), int8_t(-128)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int8_t(-85), int8_t(-73)), int8_t(-12)),
+ "negative minus smaller negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int8_t(-128), int8_t(127)), int8_t(1)),
+ "underflow to 1");
+}
+
+static void TestWrappingSubtract16() {
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingSubtract(uint16_t(0), uint16_t(32768)),
+ uint16_t(32768)),
+ "zero minus half is half");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint16_t(24389), uint16_t(2682)),
+ uint16_t(21707)),
+ "24389 - 2682 == 21707");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint16_t(0), uint16_t(1)), uint16_t(65535)),
+ "zero underflows to all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint16_t(32768), uint16_t(32767)),
+ uint16_t(1)),
+ "high bit minus all lower bits is one");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint16_t(32768), uint16_t(47582)),
+ uint16_t(50722)),
+ "32768 - 47582 + 65536 is 50722");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int16_t(0), int16_t(-32768)), int16_t(-32768)),
+ "zero minus high bit wraps to high bit");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int16_t(-32766), int16_t(4)), int16_t(32766)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int16_t(5), int16_t(-28933)), int16_t(28938)),
+ "5 - -28933 is 28938");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int16_t(-23892), int16_t(-12893)),
+ int16_t(-10999)),
+ "negative minus smaller negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int16_t(-32768), int16_t(32767)), int16_t(1)),
+ "underflow to 1");
+}
+
+static void TestWrappingSubtract32() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint32_t(0), uint32_t(2147483648)),
+ uint32_t(2147483648)),
+ "zero minus half is half");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint32_t(1398742328), uint32_t(714192829)),
+ uint32_t(684549499)),
+ "1398742328 - 714192829 == 684549499");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingSubtract(uint32_t(0), uint32_t(1)),
+ uint32_t(4294967295)),
+ "zero underflows to all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint32_t(2147483648), uint32_t(2147483647)),
+ uint32_t(1)),
+ "high bit minus all lower bits is one");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint32_t(2147483648), uint32_t(3146492712)),
+ uint32_t(3295958232)),
+ "2147483648 - 3146492712 + 4294967296 is 3295958232");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int32_t(0), int32_t(-2147483647 - 1)),
+ int32_t(-2147483647 - 1)),
+ "zero minus high bit wraps to high bit");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int32_t(-2147483646), int32_t(4)),
+ int32_t(2147483646)),
+ "underflow to positive");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int32_t(257), int32_t(-23947248)),
+ int32_t(23947505)),
+ "257 - -23947248 is 23947505");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int32_t(-2147483220), int32_t(-12893)),
+ int32_t(-2147470327)),
+ "negative minus smaller negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int32_t(-2147483647 - 1), int32_t(2147483647)),
+ int32_t(1)),
+ "underflow to 1");
+}
+
+static void TestWrappingSubtract64() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint64_t(0), uint64_t(9223372036854775808ULL)),
+ uint64_t(9223372036854775808ULL)),
+ "zero minus half is half");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingSubtract(uint64_t(70368744177664),
+ uint64_t(3740873592)),
+ uint64_t(70365003304072)),
+ "70368744177664 - 3740873592 == 70365003304072");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingSubtract(uint64_t(0), uint64_t(1)),
+ uint64_t(18446744073709551615ULL)),
+ "zero underflows to all bits");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint64_t(9223372036854775808ULL),
+ uint64_t(9223372036854775807ULL)),
+ uint64_t(1)),
+ "high bit minus all lower bits is one");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(uint64_t(14552598638644786479ULL),
+ uint64_t(3894174382537247221ULL)),
+ uint64_t(10658424256107539258ULL)),
+ "14552598638644786479 - 39763621533397112216 is 10658424256107539258L");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(
+ WrappingSubtract(int64_t(0), int64_t(-9223372036854775807LL - 1)),
+ int64_t(-9223372036854775807LL - 1)),
+ "zero minus high bit wraps to high bit");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int64_t(-9223372036854775802LL), int64_t(8)),
+ int64_t(9223372036854775806LL)),
+ "overflow to negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int64_t(37482739294298742LL),
+ int64_t(-437843573929483498LL)),
+ int64_t(475326313223782240)),
+ "37482739294298742 - -437843573929483498 is 475326313223782240");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int64_t(-9127837934058953374LL),
+ int64_t(-4173572032144775807LL)),
+ int64_t(-4954265901914177567LL)),
+ "negative minus smaller negative");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingSubtract(int64_t(-9223372036854775807LL - 1),
+ int64_t(9223372036854775807LL)),
+ int64_t(1)),
+ "underflow to 1");
+}
+
+static void TestWrappingSubtract() {
+ TestWrappingSubtract8();
+ TestWrappingSubtract16();
+ TestWrappingSubtract32();
+ TestWrappingSubtract64();
+}
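
The complementary WrappingSubtract idiom, under the same assumptions (TicksBetween is an illustrative name): the distance between two reads of a wrapping counter, which stays correct across a wraparound.

#include <cstdint>
#include "mozilla/WrappingOperations.h"

// Elapsed ticks between two reads of a free-running 32-bit timestamp;
// correct even if the counter wrapped between aEarlier and aLater.
uint32_t TicksBetween(uint32_t aLater, uint32_t aEarlier) {
  return mozilla::WrappingSubtract(aLater, aEarlier);
}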
+
+static void TestWrappingMultiply8() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint8_t(0), uint8_t(128)), uint8_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint8_t(128), uint8_t(1)), uint8_t(128)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint8_t(2), uint8_t(128)), uint8_t(0)),
+ "2 times high bit overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint8_t(8), uint8_t(16)), uint8_t(128)),
+ "multiply that populates the high bit produces that value");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint8_t(127), uint8_t(127)), uint8_t(1)),
+ "multiplying signed maxvals overflows all the way to 1");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(0), int8_t(-128)), int8_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(-128), int8_t(1)), int8_t(-128)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(2), int8_t(-128)), int8_t(0)),
+ "2 times min overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(16), int8_t(24)), int8_t(-128)),
+ "multiply that populates the sign bit produces minval");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(8), int8_t(16)), int8_t(-128)),
+ "multiply that populates the sign bit produces minval");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int8_t(127), int8_t(127)), int8_t(1)),
+ "multiplying maxvals overflows all the way to 1");
+}
+
+static void TestWrappingMultiply16() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint16_t(0), uint16_t(32768)), uint16_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingMultiply(uint16_t(32768), uint16_t(1)),
+ uint16_t(32768)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint16_t(2), uint16_t(32768)), uint16_t(0)),
+ "2 times high bit overflows, produces zero");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingMultiply(uint16_t(3), uint16_t(32768)),
+ uint16_t(-32768)),
+ "3 * 32768 - 65536 is 32768");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint16_t(64), uint16_t(512)), uint16_t(32768)),
+ "multiply that populates the high bit produces that value");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint16_t(32767), uint16_t(32767)),
+ uint16_t(1)),
+ "multiplying signed maxvals overflows all the way to 1");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(0), int16_t(-32768)), int16_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(-32768), int16_t(1)), int16_t(-32768)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(-456), int16_t(123)), int16_t(9448)),
+ "multiply opposite signs, then add 2**16 for the result");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(2), int16_t(-32768)), int16_t(0)),
+ "2 times min overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(64), int16_t(512)), int16_t(-32768)),
+ "multiply that populates the sign bit produces minval");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int16_t(32767), int16_t(32767)), int16_t(1)),
+ "multiplying maxvals overflows all the way to 1");
+}
+
+static void TestWrappingMultiply32() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(0), uint32_t(2147483648)),
+ uint32_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(42), uint32_t(17)), uint32_t(714)),
+ "42 * 17 is 714 without wraparound");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(2147483648), uint32_t(1)),
+ uint32_t(2147483648)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(2), uint32_t(2147483648)),
+ uint32_t(0)),
+ "2 times high bit overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(8192), uint32_t(262144)),
+ uint32_t(2147483648)),
+ "multiply that populates the high bit produces that value");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint32_t(2147483647), uint32_t(2147483647)),
+ uint32_t(1)),
+ "multiplying signed maxvals overflows all the way to 1");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int32_t(0), int32_t(-2147483647 - 1)),
+ int32_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int32_t(-2147483647 - 1), int32_t(1)),
+ int32_t(-2147483647 - 1)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int32_t(2), int32_t(-2147483647 - 1)),
+ int32_t(0)),
+ "2 times min overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int32_t(-7), int32_t(-9)), int32_t(63)),
+ "-7 * -9 is 63, no wraparound needed");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingMultiply(int32_t(8192), int32_t(262144)),
+ int32_t(-2147483647 - 1)),
+ "multiply that populates the sign bit produces minval");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int32_t(2147483647), int32_t(2147483647)),
+ int32_t(1)),
+ "multiplying maxvals overflows all the way to 1");
+}
+
+static void TestWrappingMultiply64() {
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint64_t(0), uint64_t(9223372036854775808ULL)),
+ uint64_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint64_t(9223372036854775808ULL), uint64_t(1)),
+ uint64_t(9223372036854775808ULL)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint64_t(2), uint64_t(9223372036854775808ULL)),
+ uint64_t(0)),
+ "2 times high bit overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(uint64_t(131072), uint64_t(70368744177664)),
+ uint64_t(9223372036854775808ULL)),
+ "multiply that populates the high bit produces that value");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingMultiply(uint64_t(9223372036854775807),
+ uint64_t(9223372036854775807)),
+ uint64_t(1)),
+ "multiplying signed maxvals overflows all the way to 1");
+
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int64_t(0), int64_t(-9223372036854775807 - 1)),
+ int64_t(0)),
+ "zero times anything is zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int64_t(-9223372036854775807 - 1), int64_t(1)),
+ int64_t(-9223372036854775807 - 1)),
+ "1 times anything is anything");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int64_t(2), int64_t(-9223372036854775807 - 1)),
+ int64_t(0)),
+ "2 times min overflows, produces zero");
+ MOZ_RELEASE_ASSERT(
+ TestEqual(WrappingMultiply(int64_t(131072), int64_t(70368744177664)),
+ int64_t(-9223372036854775807 - 1)),
+ "multiply that populates the sign bit produces minval");
+ MOZ_RELEASE_ASSERT(TestEqual(WrappingMultiply(int64_t(9223372036854775807),
+ int64_t(9223372036854775807)),
+ int64_t(1)),
+ "multiplying maxvals overflows all the way to 1");
+}
+
+static void TestWrappingMultiply() {
+ TestWrappingMultiply8();
+ TestWrappingMultiply16();
+ TestWrappingMultiply32();
+ TestWrappingMultiply64();
+}
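
WrappingMultiply's natural habitat is mod-2^64 arithmetic such as linear congruential generators. A sketch using Knuth's MMIX constants (the constants are standard; LcgNext is an illustrative name):

#include <cstdint>
#include "mozilla/WrappingOperations.h"

// One LCG step, state' = state * a + c (mod 2^64), with the intended
// wraparound made explicit instead of relying on unsigned overflow rules.
uint64_t LcgNext(uint64_t aState) {
  return mozilla::WrappingAdd(
      mozilla::WrappingMultiply(aState, uint64_t(6364136223846793005ULL)),
      uint64_t(1442695040888963407ULL));
}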
+
+int main() {
+ TestWrappingAdd();
+ TestWrappingSubtract();
+ TestWrappingMultiply();
+ return 0;
+}
diff --git a/mfbt/tests/TestXorShift128PlusRNG.cpp b/mfbt/tests/TestXorShift128PlusRNG.cpp
new file mode 100644
index 0000000000..12b3c547ac
--- /dev/null
+++ b/mfbt/tests/TestXorShift128PlusRNG.cpp
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <math.h>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+using mozilla::non_crypto::XorShift128PlusRNG;
+
+static void TestDumbSequence() {
+ XorShift128PlusRNG rng(1, 4);
+
+ // Calculated by hand following the algorithm given in the paper. The upper
+ // bits are mostly zero because we started with a poor seed; once it has run
+ // for a while, we'll get an even mix of ones and zeros in all 64 bits.
+ MOZ_RELEASE_ASSERT(rng.next() == 0x800049);
+ MOZ_RELEASE_ASSERT(rng.next() == 0x3000186);
+ MOZ_RELEASE_ASSERT(rng.next() == 0x400003001145);
+
+ // Using ldexp here lets us write out the mantissa in hex, so we can compare
+ // them with the results generated by hand.
+ MOZ_RELEASE_ASSERT(rng.nextDouble() ==
+ ldexp(static_cast<double>(0x1400003105049), -53));
+ MOZ_RELEASE_ASSERT(rng.nextDouble() ==
+ ldexp(static_cast<double>(0x2000802e49146), -53));
+ MOZ_RELEASE_ASSERT(rng.nextDouble() ==
+ ldexp(static_cast<double>(0x248300468544d), -53));
+}
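
The ldexp comparisons work because nextDouble() keeps the low 53 bits of a 64-bit draw (a double's mantissa width) and scales them by 2^-53, which is what lets the expected mantissas be written out in hex. A sketch of that conversion (ToDoubleSketch is an illustrative name; the shape is inferred from the values this test checks):

#include <cmath>
#include <cstdint>

double ToDoubleSketch(uint64_t aBits) {
  const uint64_t kMantissaMask = (uint64_t(1) << 53) - 1;
  return ldexp(static_cast<double>(aBits & kMantissaMask), -53);
}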
+
+static size_t Population(uint64_t n) {
+ size_t pop = 0;
+
+ while (n > 0) {
+ n &= n - 1; // Clear the rightmost 1-bit in n.
+ pop++;
+ }
+
+ return pop;
+}
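
Since n &= n - 1 clears the lowest set bit, the loop above runs exactly once per set bit. A spot-check of the recurrence:

static_assert((0b1011000 & 0b1010111) == 0b1010000, "clears lowest set bit");
static_assert((0b1010000 & 0b1001111) == 0b1000000, "and again");
static_assert((0b1000000 & 0b0111111) == 0, "three clears: Population == 3");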
+
+static void TestPopulation() {
+ XorShift128PlusRNG rng(698079309544035222ULL, 6012389156611637584ULL);
+
+ // Give it some time to warm up; it should tend towards more
+ // even distributions of zeros and ones.
+ for (size_t i = 0; i < 40; i++) rng.next();
+
+ for (size_t i = 0; i < 40; i++) {
+ size_t pop = Population(rng.next());
+ MOZ_RELEASE_ASSERT(24 <= pop && pop <= 40);
+ }
+}
+
+static void TestSetState() {
+ static const uint64_t seed[2] = {1795644156779822404ULL,
+ 14162896116325912595ULL};
+ XorShift128PlusRNG rng(seed[0], seed[1]);
+
+ const size_t n = 10;
+ uint64_t log[n];
+
+ for (size_t i = 0; i < n; i++) log[i] = rng.next();
+
+ rng.setState(seed[0], seed[1]);
+
+ for (size_t i = 0; i < n; i++) MOZ_RELEASE_ASSERT(log[i] == rng.next());
+}
+
+static void TestDoubleDistribution() {
+ XorShift128PlusRNG rng(0xa207aaede6859736, 0xaca6ca5060804791);
+
+ const size_t n = 100;
+ size_t bins[n];
+ mozilla::PodArrayZero(bins);
+
+ // This entire file runs in 0.006s on my laptop. Generating
+ // more numbers lets us put tighter bounds on the bins.
+ for (size_t i = 0; i < 100000; i++) {
+ double d = rng.nextDouble();
+ MOZ_RELEASE_ASSERT(0.0 <= d && d < 1.0);
+ bins[(int)(d * n)]++;
+ }
+
+ for (size_t i = 0; i < n; i++) {
+ MOZ_RELEASE_ASSERT(900 <= bins[i] && bins[i] <= 1100);
+ }
+}
+
+int main() {
+ TestDumbSequence();
+ TestPopulation();
+ TestSetState();
+ TestDoubleDistribution();
+
+ return 0;
+}
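
For anyone re-deriving the TestDumbSequence constants by hand: the step is the xorshift128+ recurrence with shift constants 23, 17 and 26. A standalone sketch (XorShift128PlusStep and the bare two-word state are illustrative, not the class's actual layout); starting from the seed (1, 4) it yields 0x800049, 0x3000186, 0x400003001145 in turn:

#include <cstdint>

uint64_t XorShift128PlusStep(uint64_t aState[2]) {
  uint64_t s1 = aState[0];
  const uint64_t s0 = aState[1];
  aState[0] = s0;
  s1 ^= s1 << 23;
  aState[1] = s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26);
  return aState[1] + s0;
}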
diff --git a/mfbt/tests/gtest/TestAlgorithm.cpp b/mfbt/tests/gtest/TestAlgorithm.cpp
new file mode 100644
index 0000000000..a01531fa77
--- /dev/null
+++ b/mfbt/tests/gtest/TestAlgorithm.cpp
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/Algorithm.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ResultVariant.h"
+
+#include <iterator>
+#include <vector>
+
+using namespace mozilla;
+using std::begin;
+using std::end;
+
+namespace {
+struct MoveOnly {
+ explicit MoveOnly(int32_t aValue) : mValue{Some(aValue)} {}
+
+ MoveOnly(MoveOnly&&) = default;
+ MoveOnly& operator=(MoveOnly&&) = default;
+
+ Maybe<int32_t> mValue;
+};
+
+struct TestError {};
+
+constexpr static int32_t arr1[3] = {1, 2, 3};
+} // namespace
+
+TEST(MFBT_Algorithm_TransformAbortOnErr, NoError)
+{
+ std::vector<int64_t> out;
+ auto res = TransformAbortOnErr(
+ begin(arr1), end(arr1), std::back_inserter(out),
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isOk());
+
+ const std::vector<int64_t> expected = {10, 20, 30};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformAbortOnErr, NoError_Range)
+{
+ std::vector<int64_t> out;
+ auto res = TransformAbortOnErr(
+ arr1, std::back_inserter(out),
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isOk());
+
+ const std::vector<int64_t> expected = {10, 20, 30};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformAbortOnErr, ErrorOnFirst)
+{
+ std::vector<int64_t> out;
+ auto res = TransformAbortOnErr(
+ begin(arr1), end(arr1), std::back_inserter(out),
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ return Err(TestError{});
+ });
+ ASSERT_TRUE(res.isErr());
+ ASSERT_TRUE(out.empty());
+}
+
+TEST(MFBT_Algorithm_TransformAbortOnErr, ErrorOnOther)
+{
+ std::vector<int64_t> out;
+ auto res = TransformAbortOnErr(
+ begin(arr1), end(arr1), std::back_inserter(out),
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ if (value > 2) {
+ return Err(TestError{});
+ }
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isErr());
+
+ // XXX Should we assert on this, or is the content of out an implementation
+ // detail?
+ const std::vector<int64_t> expected = {10, 20};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformAbortOnErr, ErrorOnOther_Move)
+{
+ MoveOnly in[3] = {MoveOnly{1}, MoveOnly{2}, MoveOnly{3}};
+ std::vector<int64_t> out;
+ auto res = TransformAbortOnErr(
+ std::make_move_iterator(begin(in)), std::make_move_iterator(end(in)),
+ std::back_inserter(out),
+ [](MoveOnly value) -> Result<int64_t, TestError> {
+ if (*value.mValue > 1) {
+ return Err(TestError{});
+ }
+ return *value.mValue * 10;
+ });
+ ASSERT_TRUE(res.isErr());
+
+ ASSERT_FALSE(in[0].mValue);
+ ASSERT_FALSE(in[1].mValue);
+ ASSERT_TRUE(in[2].mValue);
+
+ // XXX Should we assert on this, or is the content of out an implementation
+ // detail?
+ const std::vector<int64_t> expected = {10};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformIfAbortOnErr, NoError)
+{
+ std::vector<int64_t> out;
+ auto res = TransformIfAbortOnErr(
+ begin(arr1), end(arr1), std::back_inserter(out),
+ [](const int32_t value) { return value % 2 == 1; },
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isOk());
+
+ const std::vector<int64_t> expected = {10, 30};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformIfAbortOnErr, NoError_Range)
+{
+ std::vector<int64_t> out;
+ auto res = TransformIfAbortOnErr(
+ arr1, std::back_inserter(out),
+ [](const int32_t value) { return value % 2 == 1; },
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isOk());
+
+ const std::vector<int64_t> expected = {10, 30};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformIfAbortOnErr, ErrorOnOther)
+{
+ std::vector<int64_t> out;
+ auto res = TransformIfAbortOnErr(
+ begin(arr1), end(arr1), std::back_inserter(out),
+ [](const int32_t value) { return value % 2 == 1; },
+ [](const int32_t value) -> Result<int64_t, TestError> {
+ if (value > 2) {
+ return Err(TestError{});
+ }
+ return value * 10;
+ });
+ ASSERT_TRUE(res.isErr());
+
+ const std::vector<int64_t> expected = {10};
+ ASSERT_EQ(expected, out);
+}
+
+TEST(MFBT_Algorithm_TransformIfAbortOnErr, ErrorOnOther_Move)
+{
+ MoveOnly in[3] = {MoveOnly{1}, MoveOnly{2}, MoveOnly{3}};
+ std::vector<int64_t> out;
+ auto res = TransformIfAbortOnErr(
+ std::make_move_iterator(begin(in)), std::make_move_iterator(end(in)),
+ std::back_inserter(out),
+ [](const MoveOnly& value) { return *value.mValue % 2 == 1; },
+ [](MoveOnly value) -> Result<int64_t, TestError> {
+ if (*value.mValue > 1) {
+ return Err(TestError{});
+ }
+ return *value.mValue * 10;
+ });
+ ASSERT_TRUE(res.isErr());
+
+ ASSERT_FALSE(in[0].mValue);
+ ASSERT_TRUE(in[1].mValue);
+ ASSERT_FALSE(in[2].mValue);
+
+ // XXX Should we assert on this, or is the content of out an implementation
+ // detail?
+ const std::vector<int64_t> expected = {10};
+ ASSERT_EQ(expected, out);
+}
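
The _Move cases above pin down the abort semantics: elements before the failure are consumed and written out, the failing element is consumed too, and later elements are untouched. Conceptually the algorithm behaves like this hand-rolled loop (a sketch of the observed semantics, specialized to TestError; not the actual mfbt implementation, which is generic over the error type):

template <typename Iter, typename OutIter, typename Fn>
mozilla::Result<mozilla::Ok, TestError> TransformAbortOnErrSketch(
    Iter aIter, Iter aEnd, OutIter aDst, Fn aFn) {
  for (; aIter != aEnd; ++aIter) {
    auto res = aFn(*aIter);  // consumes *aIter, even when it fails
    if (res.isErr()) {
      return mozilla::Err(res.unwrapErr());  // abort; prior writes remain
    }
    *aDst++ = res.unwrap();
  }
  return mozilla::Ok{};
}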
diff --git a/mfbt/tests/gtest/TestBuffer.cpp b/mfbt/tests/gtest/TestBuffer.cpp
new file mode 100644
index 0000000000..df36282be1
--- /dev/null
+++ b/mfbt/tests/gtest/TestBuffer.cpp
@@ -0,0 +1,96 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/Buffer.h"
+#include "mozilla/Array.h"
+
+using namespace mozilla;
+
+TEST(Buffer, TestBufferInfallible)
+{
+ const size_t LEN = 8;
+ Array<int32_t, LEN> arr = {1, 2, 3, 4, 5, 6, 7, 8};
+ Buffer<int32_t> buf(arr);
+
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(buf[i], arr[i]);
+ }
+
+ auto iter = buf.begin();
+ auto end = buf.end();
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(*iter, arr[i]);
+ iter++;
+ }
+ ASSERT_EQ(iter, end);
+
+ Span<int32_t> span = buf;
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(span[i], arr[i]);
+ }
+
+ auto spanIter = span.begin();
+ auto spanEnd = span.end();
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(*spanIter, arr[i]);
+ spanIter++;
+ }
+ ASSERT_EQ(spanIter, spanEnd);
+
+ span[3] = 42;
+ ASSERT_EQ(buf[3], 42);
+
+ Buffer<int32_t> another(std::move(buf));
+ ASSERT_EQ(another[3], 42);
+ ASSERT_EQ(buf.Length(), 0U);
+}
+
+TEST(Buffer, TestBufferFallible)
+{
+ const size_t LEN = 8;
+ Array<int32_t, LEN> arr = {1, 2, 3, 4, 5, 6, 7, 8};
+ auto maybe = Buffer<int32_t>::CopyFrom(arr);
+ ASSERT_TRUE(maybe.isSome());
+ Buffer<int32_t> buf(std::move(*maybe));
+
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(buf[i], arr[i]);
+ }
+
+ auto iter = buf.begin();
+ auto end = buf.end();
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(*iter, arr[i]);
+ iter++;
+ }
+ ASSERT_EQ(iter, end);
+
+ Span<int32_t> span = buf;
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(span[i], arr[i]);
+ }
+
+ auto spanIter = span.begin();
+ auto spanEnd = span.end();
+ for (size_t i = 0; i < LEN; i++) {
+ ASSERT_EQ(*spanIter, arr[i]);
+ spanIter++;
+ }
+ ASSERT_EQ(spanIter, spanEnd);
+
+ span[3] = 42;
+ ASSERT_EQ(buf[3], 42);
+
+ Buffer<int32_t> another(std::move(buf));
+ ASSERT_EQ(another[3], 42);
+ ASSERT_EQ(buf.Length(), 0U);
+}
+
+TEST(Buffer, TestBufferElements)
+{
+ ASSERT_EQ(Buffer<int32_t>().Elements(),
+ reinterpret_cast<int32_t*>(alignof(int32_t)));
+}
diff --git a/mfbt/tests/gtest/TestInitializedOnce.cpp b/mfbt/tests/gtest/TestInitializedOnce.cpp
new file mode 100644
index 0000000000..a043013451
--- /dev/null
+++ b/mfbt/tests/gtest/TestInitializedOnce.cpp
@@ -0,0 +1,200 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/InitializedOnce.h"
+
+#include <type_traits>
+
+using namespace mozilla;
+
+namespace {
+template <typename T>
+void AssertIsSome(const T& aVal) {
+ ASSERT_TRUE(aVal);
+ ASSERT_TRUE(aVal.isSome());
+ ASSERT_FALSE(aVal.isNothing());
+}
+
+template <typename T>
+void AssertIsNothing(const T& aVal) {
+ ASSERT_FALSE(aVal);
+ ASSERT_FALSE(aVal.isSome());
+ ASSERT_TRUE(aVal.isNothing());
+}
+
+static_assert(std::is_trivially_destructible_v<InitializedOnce<const int>>);
+static_assert(std::is_trivially_destructible_v<LazyInitializedOnce<const int>>);
+
+static_assert(!std::is_copy_constructible_v<InitializedOnce<const int>>);
+static_assert(!std::is_copy_assignable_v<InitializedOnce<const int>>);
+
+static_assert(!std::is_default_constructible_v<InitializedOnce<const int>>);
+static_assert(std::is_default_constructible_v<LazyInitializedOnce<const int>>);
+static_assert(std::is_default_constructible_v<
+ LazyInitializedOnceEarlyDestructible<const int>>);
+
+// XXX We cannot test for move-constructability/move-assignability at the
+// moment, since the operations are always defined, but trigger static_assert's
+// if they should not be used. This is not too bad, since we are never copyable.
+
+constexpr InitializedOnce<const int>* kPtrInitializedOnceIntLazyInitForbid =
+ nullptr;
+constexpr LazyInitializedOnce<const int>* kPtrInitializedOnceIntLazyInitAllow =
+ nullptr;
+constexpr LazyInitializedOnceEarlyDestructible<const int>*
+ kPtrInitializedOnceIntLazyInitAllowResettable = nullptr;
+
+template <class T, typename = decltype(std::declval<T*>()->destroy())>
+constexpr bool test_has_destroy_method(const T*) {
+ return true;
+}
+constexpr bool test_has_destroy_method(...) { return false; }
+
+static_assert(test_has_destroy_method(kPtrInitializedOnceIntLazyInitForbid));
+static_assert(!test_has_destroy_method(kPtrInitializedOnceIntLazyInitAllow));
+static_assert(
+ test_has_destroy_method(kPtrInitializedOnceIntLazyInitAllowResettable));
+
+template <class T,
+ typename = decltype(std::declval<T*>()->init(std::declval<int>()))>
+constexpr bool test_has_init_method(const T*) {
+ return true;
+}
+constexpr bool test_has_init_method(...) { return false; }
+
+static_assert(!test_has_init_method(kPtrInitializedOnceIntLazyInitForbid));
+static_assert(test_has_init_method(kPtrInitializedOnceIntLazyInitAllow));
+static_assert(
+ test_has_init_method(kPtrInitializedOnceIntLazyInitAllowResettable));
+
+struct MoveOnly {
+ explicit constexpr MoveOnly(int aValue) : mValue{aValue} {}
+
+ MoveOnly(MoveOnly&&) = default;
+ MoveOnly& operator=(MoveOnly&&) = default;
+
+ int mValue;
+};
+
+} // namespace
+
+constexpr int testValue = 32;
+
+TEST(InitializedOnce, ImmediateInit)
+{
+ constexpr InitializedOnce<const MoveOnly> val{testValue};
+
+ // compile-time assertions
+ static_assert(val);
+ static_assert(val.isSome());
+ static_assert(!val.isNothing());
+ static_assert(testValue == (*val).mValue);
+ static_assert(testValue == val->mValue);
+ static_assert(testValue == val.ref().mValue);
+
+ // run-time assertions
+ AssertIsSome(val);
+ ASSERT_EQ(testValue, (*val).mValue);
+ ASSERT_EQ(testValue, val->mValue);
+ ASSERT_EQ(testValue, val.ref().mValue);
+}
+
+TEST(InitializedOnce, ImmediateInitReset)
+{
+ InitializedOnce<const MoveOnly> val{testValue};
+ val.destroy();
+
+ AssertIsNothing(val);
+}
+
+TEST(InitializedOnce, MoveConstruct)
+{
+ InitializedOnce<const MoveOnly> oldVal{testValue};
+ InitializedOnce<const MoveOnly> val{std::move(oldVal)};
+
+ AssertIsNothing(oldVal);
+ AssertIsSome(val);
+}
+
+TEST(InitializedOnceAllowLazy, DefaultCtor)
+{
+ LazyInitializedOnce<const MoveOnly> val;
+
+ AssertIsNothing(val);
+}
+
+TEST(InitializedOnceAllowLazy, Init)
+{
+ LazyInitializedOnce<const MoveOnly> val;
+ val.init(testValue);
+
+ AssertIsSome(val);
+ ASSERT_EQ(testValue, (*val).mValue);
+ ASSERT_EQ(testValue, val->mValue);
+ ASSERT_EQ(testValue, val.ref().mValue);
+}
+
+TEST(InitializedOnceAllowLazy, do_Init)
+{
+ LazyInitializedOnce<const MoveOnly> val;
+ do_Init(val) = MoveOnly{testValue};
+
+ AssertIsSome(val);
+ ASSERT_EQ(testValue, (*val).mValue);
+ ASSERT_EQ(testValue, val->mValue);
+ ASSERT_EQ(testValue, val.ref().mValue);
+}
+
+TEST(InitializedOnceAllowLazyResettable, DefaultCtor)
+{
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> val;
+
+ AssertIsNothing(val);
+}
+
+TEST(InitializedOnceAllowLazyResettable, Init)
+{
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> val;
+ val.init(testValue);
+
+ AssertIsSome(val);
+ ASSERT_EQ(testValue, (*val).mValue);
+ ASSERT_EQ(testValue, val->mValue);
+ ASSERT_EQ(testValue, val.ref().mValue);
+}
+
+TEST(InitializedOnceAllowLazyResettable, InitReset)
+{
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> val;
+ val.init(testValue);
+ val.destroy();
+
+ AssertIsNothing(val);
+}
+
+TEST(InitializedOnceAllowLazyResettable, MoveConstruct)
+{
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> oldVal{testValue};
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> val{std::move(oldVal)};
+
+ AssertIsNothing(oldVal);
+ AssertIsSome(val);
+}
+
+TEST(InitializedOnceAllowLazyResettable, MoveAssign)
+{
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> oldVal{testValue};
+ LazyInitializedOnceEarlyDestructible<const MoveOnly> val;
+
+ val = std::move(oldVal);
+
+ AssertIsNothing(oldVal);
+ AssertIsSome(val);
+}
+
+// XXX How do we test for assertions to be hit?
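
One possible answer to the question above, assuming this suite may use gtest's death-test support and a debug build in which re-initialization trips a MOZ_ASSERT (a hypothetical test, not part of the patch):

#ifdef DEBUG
TEST(InitializedOnceAllowLazyDeathTest, DoubleInitAsserts)
{
  LazyInitializedOnce<const MoveOnly> val;
  val.init(testValue);
  EXPECT_DEATH(val.init(testValue), "");  // a second init should assert
}
#endif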
diff --git a/mfbt/tests/gtest/TestLinkedList.cpp b/mfbt/tests/gtest/TestLinkedList.cpp
new file mode 100644
index 0000000000..d53cba5920
--- /dev/null
+++ b/mfbt/tests/gtest/TestLinkedList.cpp
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/LinkedList.h"
+#include "mozilla/RefPtr.h"
+
+using mozilla::AutoCleanLinkedList;
+using mozilla::LinkedList;
+using mozilla::LinkedListElement;
+
+class PtrClass : public LinkedListElement<PtrClass> {
+ public:
+ bool* mResult;
+
+ explicit PtrClass(bool* result) : mResult(result) { EXPECT_TRUE(!*mResult); }
+
+ virtual ~PtrClass() { *mResult = true; }
+};
+
+class InheritedPtrClass : public PtrClass {
+ public:
+ bool* mInheritedResult;
+
+ InheritedPtrClass(bool* result, bool* inheritedResult)
+ : PtrClass(result), mInheritedResult(inheritedResult) {
+ EXPECT_TRUE(!*mInheritedResult);
+ }
+
+ virtual ~InheritedPtrClass() { *mInheritedResult = true; }
+};
+
+TEST(LinkedList, AutoCleanLinkedList)
+{
+ bool rv1 = false;
+ bool rv2 = false;
+ bool rv3 = false;
+ {
+ AutoCleanLinkedList<PtrClass> list;
+ list.insertBack(new PtrClass(&rv1));
+ list.insertBack(new InheritedPtrClass(&rv2, &rv3));
+ }
+
+ EXPECT_TRUE(rv1);
+ EXPECT_TRUE(rv2);
+ EXPECT_TRUE(rv3);
+}
+
+class CountedClass final : public LinkedListElement<RefPtr<CountedClass>> {
+ public:
+ int mCount;
+ void AddRef() { mCount++; }
+ void Release() { mCount--; }
+
+ CountedClass() : mCount(0) {}
+ ~CountedClass() { EXPECT_TRUE(mCount == 0); }
+};
+
+TEST(LinkedList, AutoCleanLinkedListRefPtr)
+{
+ RefPtr<CountedClass> elt1 = new CountedClass;
+ CountedClass* elt2 = new CountedClass;
+ {
+ AutoCleanLinkedList<RefPtr<CountedClass>> list;
+ list.insertBack(elt1);
+ list.insertBack(elt2);
+
+ EXPECT_TRUE(elt1->mCount == 2);
+ EXPECT_TRUE(elt2->mCount == 1);
+ }
+
+ EXPECT_TRUE(elt1->mCount == 1);
+ EXPECT_TRUE(elt2->mCount == 0);
+}
diff --git a/mfbt/tests/gtest/TestMainThreadWeakPtr.cpp b/mfbt/tests/gtest/TestMainThreadWeakPtr.cpp
new file mode 100644
index 0000000000..1722ade8c1
--- /dev/null
+++ b/mfbt/tests/gtest/TestMainThreadWeakPtr.cpp
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/WeakPtr.h"
+#include "mozilla/UniquePtr.h"
+#include <thread>
+
+using namespace mozilla;
+
+struct C : public SupportsWeakPtr {
+ int mNum{0};
+};
+
+struct HasWeakPtrToC {
+ explicit HasWeakPtrToC(C* c) : mPtr(c) {}
+
+ MainThreadWeakPtr<C> mPtr;
+
+ ~HasWeakPtrToC() {
+ MOZ_RELEASE_ASSERT(!NS_IsMainThread(), "Should be released OMT");
+ }
+};
+
+TEST(MFBT_MainThreadWeakPtr, Basic)
+{
+ auto c = MakeUnique<C>();
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
+
+ auto weakRef = MakeUnique<HasWeakPtrToC>(c.get());
+
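+  // Hand ownership of the holder to the new thread: the lambda's captured
+  // state is destroyed on that thread, so ~HasWeakPtrToC (and its release
+  // assertion) runs off the main thread.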
+ std::thread t([weakRef = std::move(weakRef)] {});
+
+ MOZ_RELEASE_ASSERT(!weakRef);
+ c = nullptr;
+
+ t.join();
+}
diff --git a/mfbt/tests/gtest/TestMozDbg.cpp b/mfbt/tests/gtest/TestMozDbg.cpp
new file mode 100644
index 0000000000..24ccd8ed37
--- /dev/null
+++ b/mfbt/tests/gtest/TestMozDbg.cpp
@@ -0,0 +1,170 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <iostream>
+#include <type_traits>
+
+#include "gtest/gtest.h"
+#include "mozilla/DbgMacro.h"
+#include "mozilla/Unused.h"
+
+using namespace mozilla;
+
+#define TEST_MOZ_DBG_TYPE_IS(type_, expression_...) \
+ static_assert(std::is_same_v<type_, decltype(MOZ_DBG(expression_))>, \
+ "MOZ_DBG should return the indicated type")
+
+#define TEST_MOZ_DBG_TYPE_SAME(expression_...) \
+ static_assert( \
+ std::is_same_v<decltype((expression_)), decltype(MOZ_DBG(expression_))>, \
+ "MOZ_DBG should return the same type")
+
+struct Number {
+ explicit Number(int aValue) : mValue(aValue) {}
+
+ Number(const Number& aOther) = default;
+
+ Number(Number&& aOther) : mValue(aOther.mValue) { aOther.mValue = 0; }
+
+ Number& operator=(const Number& aOther) = default;
+
+ Number& operator=(Number&& aOther) {
+ mValue = aOther.mValue;
+ aOther.mValue = 0;
+ return *this;
+ }
+
+ ~Number() { mValue = -999; }
+
+ int mValue;
+
+ MOZ_DEFINE_DBG(Number, mValue)
+};
+
+struct MoveOnly {
+ explicit MoveOnly(int aValue) : mValue(aValue) {}
+
+ MoveOnly(const MoveOnly& aOther) = delete;
+
+ MoveOnly(MoveOnly&& aOther) : mValue(aOther.mValue) { aOther.mValue = 0; }
+
+ MoveOnly& operator=(MoveOnly& aOther) = default;
+
+ MoveOnly& operator=(MoveOnly&& aOther) {
+ mValue = aOther.mValue;
+ aOther.mValue = 0;
+ return *this;
+ }
+
+ int mValue;
+
+ MOZ_DEFINE_DBG(MoveOnly)
+};
+
+void StaticAssertions() {
+ int x = 123;
+ Number y(123);
+ Number z(234);
+ MoveOnly w(456);
+
+ // Static assertions.
+
+ // lvalues
+ TEST_MOZ_DBG_TYPE_SAME(x); // int&
+ TEST_MOZ_DBG_TYPE_SAME(y); // Number&
+ TEST_MOZ_DBG_TYPE_SAME(x = 234); // int&
+ TEST_MOZ_DBG_TYPE_SAME(y = z); // Number&
+ TEST_MOZ_DBG_TYPE_SAME(w); // MoveOnly&
+
+ // prvalues (which MOZ_DBG turns into xvalues by creating objects for them)
+ TEST_MOZ_DBG_TYPE_IS(int&&, 123);
+ TEST_MOZ_DBG_TYPE_IS(int&&, 1 + 2);
+ TEST_MOZ_DBG_TYPE_IS(int*&&, &x);
+ TEST_MOZ_DBG_TYPE_IS(int&&, x++);
+ TEST_MOZ_DBG_TYPE_IS(Number&&, Number(123));
+ TEST_MOZ_DBG_TYPE_IS(MoveOnly&&, MoveOnly(123));
+
+ // xvalues
+  TEST_MOZ_DBG_TYPE_SAME(std::move(x));  // int&&
+  TEST_MOZ_DBG_TYPE_SAME(std::move(y));  // Number&&
+  TEST_MOZ_DBG_TYPE_SAME(std::move(w));  // MoveOnly&&
+
+ Unused << x;
+ Unused << y;
+ Unused << z;
+}
+
+TEST(MozDbg, ObjectValues)
+{
+ // Test that moves and assignments all operate correctly with MOZ_DBG wrapped
+ // around various parts of the expression.
+
+ Number a(1);
+ Number b(4);
+
+ ASSERT_EQ(a.mValue, 1);
+
+ MOZ_DBG(a.mValue);
+ ASSERT_EQ(a.mValue, 1);
+
+ MOZ_DBG(a.mValue + 1);
+ ASSERT_EQ(a.mValue, 1);
+
+ MOZ_DBG(a.mValue = 2);
+ ASSERT_EQ(a.mValue, 2);
+
+ MOZ_DBG(a).mValue = 3;
+ ASSERT_EQ(a.mValue, 3);
+
+ MOZ_DBG(a = b);
+ ASSERT_EQ(a.mValue, 4);
+ ASSERT_EQ(b.mValue, 4);
+
+ b.mValue = 5;
+ MOZ_DBG(a) = b;
+ ASSERT_EQ(a.mValue, 5);
+ ASSERT_EQ(b.mValue, 5);
+
+ b.mValue = 6;
+ MOZ_DBG(a = std::move(b));
+ ASSERT_EQ(a.mValue, 6);
+ ASSERT_EQ(b.mValue, 0);
+
+ b.mValue = 7;
+ MOZ_DBG(a) = std::move(b);
+ ASSERT_EQ(a.mValue, 7);
+ ASSERT_EQ(b.mValue, 0);
+
+ b.mValue = 8;
+ a = std::move(MOZ_DBG(b));
+ ASSERT_EQ(a.mValue, 8);
+ ASSERT_EQ(b.mValue, 0);
+
+ a = MOZ_DBG(Number(9));
+ ASSERT_EQ(a.mValue, 9);
+
+ MoveOnly c(1);
+ MoveOnly d(2);
+
+ c = std::move(MOZ_DBG(d));
+ ASSERT_EQ(c.mValue, 2);
+ ASSERT_EQ(d.mValue, 0);
+
+ c.mValue = 3;
+ d.mValue = 4;
+ c = MOZ_DBG(std::move(d));
+ ASSERT_EQ(c.mValue, 4);
+ ASSERT_EQ(d.mValue, 0);
+
+ c.mValue = 5;
+ d.mValue = 6;
+ MOZ_DBG(c = std::move(d));
+ ASSERT_EQ(c.mValue, 6);
+ ASSERT_EQ(d.mValue, 0);
+
+ c = MOZ_DBG(MoveOnly(7));
+ ASSERT_EQ(c.mValue, 7);
+}
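
For context, the invariant all of the above protects is that MOZ_DBG is expression-transparent: it logs its operand and yields it with value category preserved, so wrapping any subexpression changes nothing but the logging. A typical use (ComputeSketch is an illustrative name; the log text is approximate):

int ComputeSketch(int a, int b) {
  // Prints something like "[file:line] a + b = 7" to stderr, then
  // evaluates to a + b, so the function still returns (a + b) * 2.
  return MOZ_DBG(a + b) * 2;
}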
diff --git a/mfbt/tests/gtest/TestResultExtensions.cpp b/mfbt/tests/gtest/TestResultExtensions.cpp
new file mode 100644
index 0000000000..711e4f33e4
--- /dev/null
+++ b/mfbt/tests/gtest/TestResultExtensions.cpp
@@ -0,0 +1,579 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/ResultExtensions.h"
+#include "nsLocalFile.h"
+
+#include <functional>
+
+using namespace mozilla;
+
+namespace {
+class TestClass {
+ public:
+ static constexpr int kTestValue = 42;
+
+ nsresult NonOverloadedNoInput(int* aOut) {
+ *aOut = kTestValue;
+ return NS_OK;
+ }
+ nsresult NonOverloadedNoInputFails(int* aOut) { return NS_ERROR_FAILURE; }
+
+ nsresult NonOverloadedNoInputConst(int* aOut) const {
+ *aOut = kTestValue;
+ return NS_OK;
+ }
+ nsresult NonOverloadedNoInputFailsConst(int* aOut) const {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult NonOverloadedNoInputRef(int& aOut) {
+ aOut = kTestValue;
+ return NS_OK;
+ }
+ nsresult NonOverloadedNoInputFailsRef(int& aOut) { return NS_ERROR_FAILURE; }
+
+ nsresult NonOverloadedNoInputComplex(std::pair<int, int>* aOut) {
+ *aOut = std::pair{kTestValue, kTestValue};
+ return NS_OK;
+ }
+ nsresult NonOverloadedNoInputFailsComplex(std::pair<int, int>* aOut) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult NonOverloadedWithInput(int aIn, int* aOut) {
+ *aOut = aIn;
+ return NS_OK;
+ }
+ nsresult NonOverloadedWithInputFails(int aIn, int* aOut) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult NonOverloadedNoOutput(int aIn) { return NS_OK; }
+ nsresult NonOverloadedNoOutputFails(int aIn) { return NS_ERROR_FAILURE; }
+
+ nsresult PolymorphicNoInput(nsIFile** aOut) {
+ *aOut = MakeAndAddRef<nsLocalFile>().take();
+ return NS_OK;
+ }
+ nsresult PolymorphicNoInputFails(nsIFile** aOut) { return NS_ERROR_FAILURE; }
+};
+
+class RefCountedTestClass {
+ public:
+ NS_INLINE_DECL_REFCOUNTING(RefCountedTestClass);
+
+ static constexpr int kTestValue = 42;
+
+ nsresult NonOverloadedNoInput(int* aOut) {
+ *aOut = kTestValue;
+ return NS_OK;
+ }
+ nsresult NonOverloadedNoInputFails(int* aOut) { return NS_ERROR_FAILURE; }
+
+ private:
+ ~RefCountedTestClass() = default;
+};
+
+// Check that DerefedType deduces the types as expected
+static_assert(std::is_same_v<mozilla::detail::DerefedType<RefCountedTestClass&>,
+ RefCountedTestClass>);
+static_assert(std::is_same_v<mozilla::detail::DerefedType<RefCountedTestClass*>,
+ RefCountedTestClass>);
+static_assert(
+ std::is_same_v<mozilla::detail::DerefedType<RefPtr<RefCountedTestClass>>,
+ RefCountedTestClass>);
+
+static_assert(std::is_same_v<mozilla::detail::DerefedType<nsIFile&>, nsIFile>);
+static_assert(std::is_same_v<mozilla::detail::DerefedType<nsIFile*>, nsIFile>);
+static_assert(
+ std::is_same_v<mozilla::detail::DerefedType<nsCOMPtr<nsIFile>>, nsIFile>);
+} // namespace
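
Every case below exercises one conversion scheme: a method with the XPCOM outparam signature nsresult Method(Args..., T* aOut) is invoked, and the status/outparam pair is repackaged as Result<T, nsresult>. A minimal sketch of that repackaging for the no-argument case (ToResultInvokeSketch is an illustrative name; the real templates are far more general about out-parameter kinds):

template <typename T, typename Fn>
mozilla::Result<T, nsresult> ToResultInvokeSketch(Fn aFn) {
  T out{};
  nsresult rv = aFn(&out);
  if (NS_FAILED(rv)) {
    return mozilla::Err(rv);
  }
  return out;
}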
+
+TEST(ResultExtensions_ToResultInvoke, Lambda_NoInput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = ToResultInvoke<int>(
+ [&foo](int* out) { return foo.NonOverloadedNoInput(out); });
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvoke<int>(
+ [&foo](int* out) { return foo.NonOverloadedNoInputFails(out); });
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvoke, MemFn_NoInput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr =
+ ToResultInvoke<int>(std::mem_fn(&TestClass::NonOverloadedNoInput), foo);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvoke<int>(
+ std::mem_fn(&TestClass::NonOverloadedNoInputFails), foo);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvoke, MemFn_Polymorphic_NoInput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = ToResultInvoke<nsCOMPtr<nsIFile>>(
+ std::mem_fn(&TestClass::PolymorphicNoInput), foo);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<nsCOMPtr<nsIFile>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_NE(nullptr, valOrErr.inspect());
+
+ ASSERT_EQ(ToResultInvoke<nsString>(std::mem_fn(&nsIFile::GetPath),
+ *MakeRefPtr<nsLocalFile>())
+ .inspect(),
+ ToResultInvoke<nsString>(std::mem_fn(&nsIFile::GetPath),
+ valOrErr.inspect())
+ .inspect());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvoke<nsCOMPtr<nsIFile>>(
+ std::mem_fn(&TestClass::PolymorphicNoInputFails), foo);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<nsCOMPtr<nsIFile>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInput);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputFails);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Const)
+{
+ const TestClass foo;
+
+ // success
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputConst);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputFailsConst);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Ref)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputRef);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputFailsRef);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Complex)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputComplex);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ((std::pair{TestClass::kTestValue, TestClass::kTestValue}),
+ valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &TestClass::NonOverloadedNoInputFailsComplex);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, WithInput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = ToResultInvokeMember(
+ foo, &TestClass::NonOverloadedWithInput, -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(-TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvokeMember(
+ foo, &TestClass::NonOverloadedWithInputFails, -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoOutput)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = ToResultInvokeMember(foo, &TestClass::NonOverloadedNoOutput,
+ -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<Ok, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvokeMember(
+ foo, &TestClass::NonOverloadedNoOutputFails, -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<Ok, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Macro)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInput);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputFails);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Const_Macro)
+{
+ const TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputConst);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputFailsConst);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Ref_Macro)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputRef);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputFailsRef);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Complex_Macro)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputComplex);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ((std::pair{TestClass::kTestValue, TestClass::kTestValue}),
+ valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputFailsComplex);
+
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, WithInput_Macro)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedWithInput,
+ -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(-TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(
+ foo, NonOverloadedWithInputFails, -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoOutput_Macro)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoOutput,
+ -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<Ok, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ }
+
+ // failure
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoOutputFails,
+ -TestClass::kTestValue);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<Ok, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, NoInput_Complex_Macro_Typed)
+{
+ TestClass foo;
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER_TYPED(
+ (std::pair<int, int>), foo, NonOverloadedNoInputComplex);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ((std::pair{TestClass::kTestValue, TestClass::kTestValue}),
+ valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER_TYPED(
+ (std::pair<int, int>), foo, NonOverloadedNoInputFailsComplex);
+ static_assert(std::is_same_v<decltype(valOrErr),
+ Result<std::pair<int, int>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, RefPtr_NoInput)
+{
+ auto foo = MakeRefPtr<RefCountedTestClass>();
+
+ // success
+ {
+ auto valOrErr =
+ ToResultInvokeMember(foo, &RefCountedTestClass::NonOverloadedNoInput);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = ToResultInvokeMember(
+ foo, &RefCountedTestClass::NonOverloadedNoInputFails);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, RefPtr_NoInput_Macro)
+{
+ auto foo = MakeRefPtr<RefCountedTestClass>();
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInput);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(foo, NonOverloadedNoInputFails);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, RawPtr_NoInput_Macro)
+{
+ auto foo = MakeRefPtr<RefCountedTestClass>();
+ auto* fooPtr = foo.get();
+
+ // success
+ {
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(fooPtr, NonOverloadedNoInput);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(TestClass::kTestValue, valOrErr.unwrap());
+ }
+
+ // failure
+ {
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER(fooPtr, NonOverloadedNoInputFails);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<int, nsresult>>);
+ ASSERT_TRUE(valOrErr.isErr());
+ ASSERT_EQ(NS_ERROR_FAILURE, valOrErr.unwrapErr());
+ }
+}
+
+TEST(ResultExtensions_ToResultInvokeMember, nsCOMPtr_AbstractClass_WithInput)
+{
+ nsCOMPtr<nsIFile> file = MakeAndAddRef<nsLocalFile>();
+
+ auto valOrErr = ToResultInvokeMember(file, &nsIFile::Equals, file);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<bool, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(true, valOrErr.unwrap());
+}
+
+TEST(ResultExtensions_ToResultInvokeMember,
+ RawPtr_AbstractClass_WithInput_Macro)
+{
+ nsCOMPtr<nsIFile> file = MakeAndAddRef<nsLocalFile>();
+ auto* filePtr = file.get();
+
+ auto valOrErr = MOZ_TO_RESULT_INVOKE_MEMBER(filePtr, Equals, file);
+ static_assert(std::is_same_v<decltype(valOrErr), Result<bool, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_EQ(true, valOrErr.unwrap());
+}
+
+TEST(ResultExtensions_ToResultInvokeMember,
+ RawPtr_AbstractClass_NoInput_Macro_Typed)
+{
+ nsCOMPtr<nsIFile> file = MakeAndAddRef<nsLocalFile>();
+ auto* filePtr = file.get();
+
+ auto valOrErr =
+ MOZ_TO_RESULT_INVOKE_MEMBER_TYPED(nsCOMPtr<nsIFile>, filePtr, Clone);
+ static_assert(
+ std::is_same_v<decltype(valOrErr), Result<nsCOMPtr<nsIFile>, nsresult>>);
+ ASSERT_TRUE(valOrErr.isOk());
+ ASSERT_NE(nullptr, valOrErr.unwrap());
+}
diff --git a/mfbt/tests/gtest/TestReverseIterator.cpp b/mfbt/tests/gtest/TestReverseIterator.cpp
new file mode 100644
index 0000000000..a1ba019aa1
--- /dev/null
+++ b/mfbt/tests/gtest/TestReverseIterator.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/ReverseIterator.h"
+
+using namespace mozilla;
+
+TEST(ReverseIterator, Const_RangeBasedFor)
+{
+ const std::vector<int> in = {1, 2, 3, 4};
+ const auto reversedRange =
+ detail::IteratorRange<ReverseIterator<std::vector<int>::const_iterator>>{
+ ReverseIterator{in.end()}, ReverseIterator{in.begin()}};
+
+ const std::vector<int> expected = {4, 3, 2, 1};
+ std::vector<int> out;
+ for (auto i : reversedRange) {
+ out.emplace_back(i);
+ }
+
+ EXPECT_EQ(expected, out);
+}
+
+TEST(ReverseIterator, NonConst_RangeBasedFor)
+{
+ std::vector<int> in = {1, 2, 3, 4};
+ auto reversedRange =
+ detail::IteratorRange<ReverseIterator<std::vector<int>::iterator>>{
+ ReverseIterator{in.end()}, ReverseIterator{in.begin()}};
+
+ const std::vector<int> expected = {-1, -2, -3, -4};
+ for (auto& i : reversedRange) {
+ i = -i;
+ }
+
+ EXPECT_EQ(expected, in);
+}
+
+TEST(ReverseIterator, Difference)
+{
+ const std::vector<int> in = {1, 2, 3, 4};
+ using reverse_iterator = ReverseIterator<std::vector<int>::const_iterator>;
+
+ reverse_iterator rbegin = reverse_iterator{in.end()},
+ rend = reverse_iterator{in.begin()};
+ EXPECT_EQ(4, rend - rbegin);
+ EXPECT_EQ(0, rend - rend);
+ EXPECT_EQ(0, rbegin - rbegin);
+
+ --rend;
+ EXPECT_EQ(3, rend - rbegin);
+
+ ++rbegin;
+ EXPECT_EQ(2, rend - rbegin);
+
+ rend--;
+ EXPECT_EQ(1, rend - rbegin);
+
+ rbegin++;
+ EXPECT_EQ(0, rend - rbegin);
+}
+
+TEST(ReverseIterator, Comparison)
+{
+ const std::vector<int> in = {1, 2, 3, 4};
+ using reverse_iterator = ReverseIterator<std::vector<int>::const_iterator>;
+
+ reverse_iterator rbegin = reverse_iterator{in.end()},
+ rend = reverse_iterator{in.begin()};
+ EXPECT_TRUE(rbegin < rend);
+ EXPECT_FALSE(rend < rbegin);
+ EXPECT_FALSE(rend < rend);
+ EXPECT_FALSE(rbegin < rbegin);
+
+ EXPECT_TRUE(rend > rbegin);
+ EXPECT_FALSE(rbegin > rend);
+ EXPECT_FALSE(rend > rend);
+ EXPECT_FALSE(rbegin > rbegin);
+
+ EXPECT_TRUE(rbegin <= rend);
+ EXPECT_FALSE(rend <= rbegin);
+ EXPECT_TRUE(rend <= rend);
+ EXPECT_TRUE(rbegin <= rbegin);
+
+ EXPECT_TRUE(rend >= rbegin);
+ EXPECT_FALSE(rbegin >= rend);
+ EXPECT_TRUE(rend >= rend);
+ EXPECT_TRUE(rbegin >= rbegin);
+
+ EXPECT_FALSE(rend == rbegin);
+ EXPECT_FALSE(rbegin == rend);
+ EXPECT_TRUE(rend == rend);
+ EXPECT_TRUE(rbegin == rbegin);
+
+ EXPECT_TRUE(rend != rbegin);
+ EXPECT_TRUE(rbegin != rend);
+ EXPECT_FALSE(rend != rend);
+ EXPECT_FALSE(rbegin != rbegin);
+}
diff --git a/mfbt/tests/gtest/TestSpan.cpp b/mfbt/tests/gtest/TestSpan.cpp
new file mode 100644
index 0000000000..fb7db0d158
--- /dev/null
+++ b/mfbt/tests/gtest/TestSpan.cpp
@@ -0,0 +1,2355 @@
+///////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
+//
+// This code is licensed under the MIT License (MIT).
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// Adapted from
+// https://github.com/Microsoft/GSL/blob/3819df6e378ffccf0e29465afe99c3b324c2aa70/tests/Span_tests.cpp
+
+#include "gtest/gtest.h"
+
+#include "mozilla/Array.h"
+#include "mozilla/Span.h"
+
+#include "nsString.h"
+#include "nsTArray.h"
+#include "mozilla/Range.h"
+
+#include <type_traits>
+
+#define SPAN_TEST(name) TEST(SpanTest, name)
+#define CHECK_THROW(a, b)
+
+using namespace mozilla;
+
+static_assert(std::is_convertible_v<Range<int>, Span<const int>>,
+ "Range should convert into const");
+static_assert(std::is_convertible_v<Range<const int>, Span<const int>>,
+ "const Range should convert into const");
+static_assert(!std::is_convertible_v<Range<const int>, Span<int>>,
+ "Range should not drop const in conversion");
+static_assert(std::is_convertible_v<Span<int>, Range<const int>>,
+ "Span should convert into const");
+static_assert(std::is_convertible_v<Span<const int>, Range<const int>>,
+ "const Span should convert into const");
+static_assert(!std::is_convertible_v<Span<const int>, Range<int>>,
+ "Span should not drop const in conversion");
+static_assert(std::is_convertible_v<Span<const int>, Span<const int>>,
+ "const Span should convert into const");
+static_assert(std::is_convertible_v<Span<int>, Span<const int>>,
+ "Span should convert into const");
+static_assert(!std::is_convertible_v<Span<const int>, Span<int>>,
+ "Span should not drop const in conversion");
+static_assert(std::is_convertible_v<const nsTArray<int>, Span<const int>>,
+ "const nsTArray should convert into const");
+static_assert(std::is_convertible_v<nsTArray<int>, Span<const int>>,
+ "nsTArray should convert into const");
+static_assert(!std::is_convertible_v<const nsTArray<int>, Span<int>>,
+ "nsTArray should not drop const in conversion");
+static_assert(std::is_convertible_v<nsTArray<const int>, Span<const int>>,
+ "nsTArray should convert into const");
+static_assert(!std::is_convertible_v<nsTArray<const int>, Span<int>>,
+ "nsTArray should not drop const in conversion");
+
+static_assert(std::is_convertible_v<const std::vector<int>, Span<const int>>,
+ "const std::vector should convert into const");
+static_assert(std::is_convertible_v<std::vector<int>, Span<const int>>,
+ "std::vector should convert into const");
+static_assert(!std::is_convertible_v<const std::vector<int>, Span<int>>,
+ "std::vector should not drop const in conversion");
+
+/**
+ * Rust slice-compatible nullptr replacement value.
+ */
+#define SLICE_CONST_INT_PTR reinterpret_cast<const int*>(alignof(const int))
+
+/**
+ * Rust slice-compatible nullptr replacement value.
+ */
+#define SLICE_INT_PTR reinterpret_cast<int*>(alignof(int))
+
+/**
+ * Rust slice-compatible nullptr replacement value.
+ */
+#define SLICE_CONST_INT_PTR_PTR \
+ reinterpret_cast<const int**>(alignof(const int*))
+
+/**
+ * Rust slice-compatible nullptr replacement value.
+ */
+#define SLICE_INT_PTR_PTR reinterpret_cast<int**>(alignof(int*))
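+
+// The sentinels above reflect how an empty Span is expected to represent its
+// data pointer: like a Rust slice, it holds a non-null, aligned dangling
+// pointer (alignof(T)) rather than nullptr, which is why the tests below
+// compare against these constants instead of comparing against nullptr.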
+
+namespace {
+struct BaseClass {};
+struct DerivedClass : BaseClass {};
+} // namespace
+
+void AssertSpanOfThreeInts(Span<const int> s) {
+ ASSERT_EQ(s.size(), 3U);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ ASSERT_EQ(s[2], 3);
+}
+
+void AssertSpanOfThreeChars(Span<const char> s) {
+ ASSERT_EQ(s.size(), 3U);
+ ASSERT_EQ(s[0], 'a');
+ ASSERT_EQ(s[1], 'b');
+ ASSERT_EQ(s[2], 'c');
+}
+
+void AssertSpanOfThreeChar16s(Span<const char16_t> s) {
+ ASSERT_EQ(s.size(), 3U);
+ ASSERT_EQ(s[0], 'a');
+ ASSERT_EQ(s[1], 'b');
+ ASSERT_EQ(s[2], 'c');
+}
+
+void AssertSpanOfThreeCharsViaString(const nsACString& aStr) {
+ AssertSpanOfThreeChars(aStr);
+}
+
+void AssertSpanOfThreeChar16sViaString(const nsAString& aStr) {
+ AssertSpanOfThreeChar16s(aStr);
+}
+
+SPAN_TEST(default_constructor) {
+ {
+ Span<int> s;
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int> cs;
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+ Span<int, 0> s;
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int, 0> cs;
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<int, 1> s;
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR); // explains why it can't compile
+#endif
+ }
+
+ {
+ Span<int> s{};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int> cs{};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+}
+
+SPAN_TEST(size_optimization) {
+ {
+ Span<int> s;
+ ASSERT_EQ(sizeof(s), sizeof(int*) + sizeof(size_t));
+ }
+
+ {
+ Span<int, 0> s;
+ ASSERT_EQ(sizeof(s), sizeof(int*));
+ }
+}
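+
+// The sizeof checks above document the storage layout: a dynamic-extent Span
+// carries a pointer plus a length, whereas a fixed-extent Span such as
+// Span<int, 0> stores only the pointer, because its length is encoded in the
+// type.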
+
+SPAN_TEST(from_nullptr_constructor) {
+ {
+ Span<int> s = nullptr;
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int> cs = nullptr;
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+ Span<int, 0> s = nullptr;
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int, 0> cs = nullptr;
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<int, 1> s = nullptr;
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR); // explains why it can't compile
+#endif
+ }
+
+ {
+ Span<int> s{nullptr};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int> cs{nullptr};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+ Span<int*> s{nullptr};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR_PTR);
+
+ Span<const int*> cs{nullptr};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR_PTR);
+ }
+}
+
+SPAN_TEST(from_nullptr_length_constructor) {
+ {
+ Span<int> s{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int> cs{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+ {
+ Span<int, 0> s{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+
+ Span<const int, 0> cs{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR);
+ }
+
+#if 0
+ {
+ auto workaround_macro = []() { Span<int, 1> s{ nullptr, static_cast<Span<int>::index_type>(0) }; };
+ CHECK_THROW(workaround_macro(), fail_fast);
+ }
+
+ {
+ auto workaround_macro = []() { Span<int> s{nullptr, 1}; };
+ CHECK_THROW(workaround_macro(), fail_fast);
+
+ auto const_workaround_macro = []() { Span<const int> cs{nullptr, 1}; };
+ CHECK_THROW(const_workaround_macro(), fail_fast);
+ }
+
+ {
+ auto workaround_macro = []() { Span<int, 0> s{nullptr, 1}; };
+ CHECK_THROW(workaround_macro(), fail_fast);
+
+ auto const_workaround_macro = []() { Span<const int, 0> s{nullptr, 1}; };
+ CHECK_THROW(const_workaround_macro(), fail_fast);
+ }
+#endif
+ {
+ Span<int*> s{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR_PTR);
+
+ Span<const int*> cs{nullptr, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(cs.Length(), 0U);
+ ASSERT_EQ(cs.data(), SLICE_CONST_INT_PTR_PTR);
+ }
+}
+
+SPAN_TEST(from_pointer_length_constructor) {
+ int arr[4] = {1, 2, 3, 4};
+
+ {
+ Span<int> s{&arr[0], 2};
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ Span<int, 2> s{&arr[0], 2};
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ int* p = nullptr;
+ Span<int> s{p, static_cast<Span<int>::index_type>(0)};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+ }
+
+#if 0
+ {
+ int* p = nullptr;
+ auto workaround_macro = [=]() { Span<int> s{p, 2}; };
+ CHECK_THROW(workaround_macro(), fail_fast);
+ }
+#endif
+
+ {
+ auto s = Span(&arr[0], 2);
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ int* p = nullptr;
+ auto s = Span(p, static_cast<Span<int>::index_type>(0));
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+ }
+
+#if 0
+ {
+ int* p = nullptr;
+ auto workaround_macro = [=]() { Span(p, 2); };
+ CHECK_THROW(workaround_macro(), fail_fast);
+ }
+#endif
+}
+
+SPAN_TEST(from_pointer_pointer_constructor) {
+ int arr[4] = {1, 2, 3, 4};
+
+ {
+ Span<int> s{&arr[0], &arr[2]};
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ Span<int, 2> s{&arr[0], &arr[2]};
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ Span<int> s{&arr[0], &arr[0]};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<int, 0> s{&arr[0], &arr[0]};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ // this will fail the std::distance() precondition, which asserts on MSVC
+ // debug builds
+ //{
+ // auto workaround_macro = [&]() { Span<int> s{&arr[1], &arr[0]}; };
+ // CHECK_THROW(workaround_macro(), fail_fast);
+ //}
+
+ // this will fail the std::distance() precondition, which asserts on MSVC
+ // debug builds
+ //{
+ // int* p = nullptr;
+ // auto workaround_macro = [&]() { Span<int> s{&arr[0], p}; };
+ // CHECK_THROW(workaround_macro(), fail_fast);
+ //}
+
+ {
+ int* p = nullptr;
+ Span<int> s{p, p};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+ }
+
+ {
+ int* p = nullptr;
+ Span<int, 0> s{p, p};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+ }
+
+ // this will fail the std::distance() precondition, which asserts on MSVC
+ // debug builds
+ //{
+ // int* p = nullptr;
+ // auto workaround_macro = [&]() { Span<int> s{&arr[0], p}; };
+ // CHECK_THROW(workaround_macro(), fail_fast);
+ //}
+
+ {
+ auto s = Span(&arr[0], &arr[2]);
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[1], 2);
+ }
+
+ {
+ auto s = Span(&arr[0], &arr[0]);
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ int* p = nullptr;
+ auto s = Span(p, p);
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), SLICE_INT_PTR);
+ }
+}
+
+SPAN_TEST(from_array_constructor) {
+ int arr[5] = {1, 2, 3, 4, 5};
+
+ {
+ Span<int> s{arr};
+ ASSERT_EQ(s.Length(), 5U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<int, 5> s{arr};
+ ASSERT_EQ(s.Length(), 5U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ int arr2d[2][3] = {{1, 2, 3}, {4, 5, 6}};
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ { Span<int, 6> s{arr}; }
+
+ {
+ Span<int, 0> s{arr};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<int> s{arr2d};
+ ASSERT_EQ(s.Length(), 6U);
+ ASSERT_EQ(s.data(), &arr2d[0][0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[5], 6);
+ }
+
+ {
+ Span<int, 0> s{arr2d};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr2d[0][0]);
+ }
+
+ { Span<int, 6> s{arr2d}; }
+#endif
+ {
+ Span<int[3]> s{&(arr2d[0]), 1};
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), &arr2d[0]);
+ }
+
+ int arr3d[2][3][2] = {{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}};
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<int> s{arr3d};
+ ASSERT_EQ(s.Length(), 12U);
+ ASSERT_EQ(s.data(), &arr3d[0][0][0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[11], 12);
+ }
+
+ {
+ Span<int, 0> s{arr3d};
+ ASSERT_EQ(s.Length(), 0U);
+ ASSERT_EQ(s.data(), &arr3d[0][0][0]);
+ }
+
+ { Span<int, 11> s{arr3d}; }
+
+ {
+ Span<int, 12> s{arr3d};
+ ASSERT_EQ(s.Length(), 12U);
+ ASSERT_EQ(s.data(), &arr3d[0][0][0]);
+ ASSERT_EQ(s[0], 1);
+ ASSERT_EQ(s[5], 6);
+ }
+#endif
+ {
+ Span<int[3][2]> s{&arr3d[0], 1};
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), &arr3d[0]);
+ }
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.Length(), 5U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ auto s = Span(&(arr2d[0]), 1);
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), &arr2d[0]);
+ }
+
+ {
+ auto s = Span(&arr3d[0], 1);
+ ASSERT_EQ(s.Length(), 1U);
+ ASSERT_EQ(s.data(), &arr3d[0]);
+ }
+}
+
+SPAN_TEST(from_dynamic_array_constructor) {
+ double(*arr)[3][4] = new double[100][3][4];
+
+ {
+ Span<double> s(&arr[0][0][0], 10);
+ ASSERT_EQ(s.Length(), 10U);
+ ASSERT_EQ(s.data(), &arr[0][0][0]);
+ }
+
+ {
+ auto s = Span(&arr[0][0][0], 10);
+ ASSERT_EQ(s.Length(), 10U);
+ ASSERT_EQ(s.data(), &arr[0][0][0]);
+ }
+
+ delete[] arr;
+}
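+
+// Note: a Span is a non-owning view, so the heap allocation above has to
+// outlive the Span and still be released by the caller, hence the explicit
+// delete[].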
+
+SPAN_TEST(from_std_array_constructor) {
+ std::array<int, 4> arr = {{1, 2, 3, 4}};
+
+ {
+ Span<int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+
+ Span<const int> cs{arr};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(cs.data(), arr.data());
+ }
+
+ {
+ Span<int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+
+ Span<const int, 4> cs{arr};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(cs.data(), arr.data());
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), arr.data());
+
+ Span<const int, 2> cs{arr};
+ ASSERT_EQ(cs.size(), 2U);
+ ASSERT_EQ(cs.data(), arr.data());
+ }
+
+ {
+ Span<int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), arr.data());
+
+ Span<const int, 0> cs{arr};
+ ASSERT_EQ(cs.size(), 0U);
+ ASSERT_EQ(cs.data(), arr.data());
+ }
+
+ { Span<int, 5> s{arr}; }
+
+ {
+ auto get_an_array = []() -> std::array<int, 4> { return {1, 2, 3, 4}; };
+ auto take_a_Span = [](Span<int> s) { static_cast<void>(s); };
+ // try to take a temporary std::array
+ take_a_Span(get_an_array());
+ }
+#endif
+
+ {
+ auto get_an_array = []() -> std::array<int, 4> { return {{1, 2, 3, 4}}; };
+ auto take_a_Span = [](Span<const int> s) { static_cast<void>(s); };
+ // try to take a temporary std::array
+ take_a_Span(get_an_array());
+ }
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+}
+
+SPAN_TEST(from_const_std_array_constructor) {
+ const std::array<int, 4> arr = {{1, 2, 3, 4}};
+
+ {
+ Span<const int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ {
+ Span<const int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<const int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ {
+ Span<const int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ { Span<const int, 5> s{arr}; }
+#endif
+
+ {
+ auto get_an_array = []() -> const std::array<int, 4> {
+ return {{1, 2, 3, 4}};
+ };
+ auto take_a_Span = [](Span<const int> s) { static_cast<void>(s); };
+ // try to take a temporary std::array
+ take_a_Span(get_an_array());
+ }
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+}
+
+SPAN_TEST(from_std_array_const_constructor) {
+ std::array<const int, 4> arr = {{1, 2, 3, 4}};
+
+ {
+ Span<const int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ {
+ Span<const int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<const int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ {
+ Span<const int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), arr.data());
+ }
+
+ { Span<const int, 5> s{arr}; }
+
+ { Span<int, 4> s{arr}; }
+#endif
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.size()));
+ ASSERT_EQ(s.data(), arr.data());
+ }
+}
+
+SPAN_TEST(from_mozilla_array_constructor) {
+ mozilla::Array<int, 4> arr(1, 2, 3, 4);
+
+ {
+ Span<int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+
+ Span<const int> cs{arr};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(cs.data(), &arr[0]);
+ }
+
+ {
+ Span<int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+
+ Span<const int, 4> cs{arr};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(cs.data(), &arr[0]);
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+
+ Span<const int, 2> cs{arr};
+ ASSERT_EQ(cs.size(), 2U);
+ ASSERT_EQ(cs.data(), &arr[0]);
+ }
+
+ {
+ Span<int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+
+ Span<const int, 0> cs{arr};
+ ASSERT_EQ(cs.size(), 0U);
+ ASSERT_EQ(cs.data(), &arr[0]);
+ }
+
+ { Span<int, 5> s{arr}; }
+
+ {
+ auto get_an_array = []() -> mozilla::Array<int, 4> { return {1, 2, 3, 4}; };
+ auto take_a_Span = [](Span<int> s) { static_cast<void>(s); };
+ // try to take a temporary mozilla::Array
+ take_a_Span(get_an_array());
+ }
+#endif
+
+ {
+ auto get_an_array = []() -> mozilla::Array<int, 4> { return {1, 2, 3, 4}; };
+ auto take_a_Span = [](Span<const int> s) { static_cast<void>(s); };
+ // try to take a temporary mozilla::Array
+ take_a_Span(get_an_array());
+ }
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+}
+
+SPAN_TEST(from_const_mozilla_array_constructor) {
+ const mozilla::Array<int, 4> arr(1, 2, 3, 4);
+
+ {
+ Span<const int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<const int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<const int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<const int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ { Span<const int, 5> s{arr}; }
+#endif
+
+#if 0
+ {
+ auto get_an_array = []() -> const mozilla::Array<int, 4> {
+ return { 1, 2, 3, 4 };
+ };
+ auto take_a_Span = [](Span<const int> s) { static_cast<void>(s); };
+ // try to take a temporary mozilla::Array
+ take_a_Span(get_an_array());
+ }
+#endif
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+}
+
+SPAN_TEST(from_mozilla_array_const_constructor) {
+ mozilla::Array<const int, 4> arr(1, 2, 3, 4);
+
+ {
+ Span<const int> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<const int, 4> s{arr};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<const int, 2> s{arr};
+ ASSERT_EQ(s.size(), 2U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ {
+ Span<const int, 0> s{arr};
+ ASSERT_EQ(s.size(), 0U);
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+
+ { Span<const int, 5> s{arr}; }
+
+ { Span<int, 4> s{arr}; }
+#endif
+
+ {
+ auto s = Span(arr);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(arr.cend() - arr.cbegin()));
+ ASSERT_EQ(s.data(), &arr[0]);
+ }
+}
+
+SPAN_TEST(from_container_constructor) {
+ std::vector<int> v = {1, 2, 3};
+ const std::vector<int> cv = v;
+
+ {
+ AssertSpanOfThreeInts(v);
+
+ Span<int> s{v};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.size()));
+ ASSERT_EQ(s.data(), v.data());
+
+ Span<const int> cs{v};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(v.size()));
+ ASSERT_EQ(cs.data(), v.data());
+ }
+
+ std::string str = "hello";
+ const std::string cstr = "hello";
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<char> s{str};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(str.size()));
+ ASSERT_EQ(s.data(), str.data());
+#endif
+ Span<const char> cs{str};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(str.size()));
+ ASSERT_EQ(cs.data(), str.data());
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<char> s{cstr};
+#endif
+ Span<const char> cs{cstr};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(cstr.size()));
+ ASSERT_EQ(cs.data(), cstr.data());
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ auto get_temp_vector = []() -> std::vector<int> { return {}; };
+ auto use_Span = [](Span<int> s) { static_cast<void>(s); };
+ use_Span(get_temp_vector());
+#endif
+ }
+
+ {
+ auto get_temp_vector = []() -> std::vector<int> { return {}; };
+ auto use_Span = [](Span<const int> s) { static_cast<void>(s); };
+ use_Span(get_temp_vector());
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ auto get_temp_string = []() -> std::string { return {}; };
+ auto use_Span = [](Span<char> s) { static_cast<void>(s); };
+ use_Span(get_temp_string());
+#endif
+ }
+
+ {
+ auto get_temp_string = []() -> std::string { return {}; };
+ auto use_Span = [](Span<const char> s) { static_cast<void>(s); };
+ use_Span(get_temp_string());
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ auto get_temp_vector = []() -> const std::vector<int> { return {}; };
+ auto use_Span = [](Span<const char> s) { static_cast<void>(s); };
+ use_Span(get_temp_vector());
+#endif
+ }
+
+ {
+ auto get_temp_string = []() -> const std::string { return {}; };
+ auto use_Span = [](Span<const char> s) { static_cast<void>(s); };
+ use_Span(get_temp_string());
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ std::map<int, int> m;
+ Span<int> s{m};
+#endif
+ }
+
+ {
+ auto s = Span(v);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.size()));
+ ASSERT_EQ(s.data(), v.data());
+
+ auto cs = Span(cv);
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(cv.size()));
+ ASSERT_EQ(cs.data(), cv.data());
+ }
+}
+
+SPAN_TEST(from_xpcom_collections) {
+ {
+ nsTArray<int> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ AssertSpanOfThreeInts(v);
+
+ Span<int> s{v};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+
+ Span<const int> cs{v};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(cs.data(), v.Elements());
+ ASSERT_EQ(cs[2], 3);
+ }
+ {
+ nsTArray<int> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ AssertSpanOfThreeInts(v);
+
+ auto s = Span(v);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+ }
+ {
+ AutoTArray<int, 5> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ AssertSpanOfThreeInts(v);
+
+ Span<int> s{v};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+
+ Span<const int> cs{v};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(cs.data(), v.Elements());
+ ASSERT_EQ(cs[2], 3);
+ }
+ {
+ AutoTArray<int, 5> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ AssertSpanOfThreeInts(v);
+
+ auto s = Span(v);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+ }
+ {
+ FallibleTArray<int> v;
+ *(v.AppendElement(fallible)) = 1;
+ *(v.AppendElement(fallible)) = 2;
+ *(v.AppendElement(fallible)) = 3;
+
+ AssertSpanOfThreeInts(v);
+
+ Span<int> s{v};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+
+ Span<const int> cs{v};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(cs.data(), v.Elements());
+ ASSERT_EQ(cs[2], 3);
+ }
+ {
+ FallibleTArray<int> v;
+ *(v.AppendElement(fallible)) = 1;
+ *(v.AppendElement(fallible)) = 2;
+ *(v.AppendElement(fallible)) = 3;
+
+ AssertSpanOfThreeInts(v);
+
+ auto s = Span(v);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+ }
+ {
+ nsAutoString str;
+ str.AssignLiteral(u"abc");
+
+ AssertSpanOfThreeChar16s(str);
+ AssertSpanOfThreeChar16sViaString(str);
+
+ Span<char16_t> s{str.GetMutableData()};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(str.Length()));
+ ASSERT_EQ(s.data(), str.BeginWriting());
+ ASSERT_EQ(s[2], 'c');
+
+ Span<const char16_t> cs{str};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(str.Length()));
+ ASSERT_EQ(cs.data(), str.BeginReading());
+ ASSERT_EQ(cs[2], 'c');
+ }
+ {
+ nsAutoString str;
+ str.AssignLiteral(u"abc");
+
+ AssertSpanOfThreeChar16s(str);
+ AssertSpanOfThreeChar16sViaString(str);
+
+ auto s = Span(str);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(str.Length()));
+ ASSERT_EQ(s.data(), str.BeginReading());
+ ASSERT_EQ(s[2], 'c');
+ }
+ {
+ nsAutoCString str;
+ str.AssignLiteral("abc");
+
+ AssertSpanOfThreeChars(str);
+ AssertSpanOfThreeCharsViaString(str);
+
+ Span<const uint8_t> cs{str};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(str.Length()));
+ ASSERT_EQ(cs.data(), reinterpret_cast<const uint8_t*>(str.BeginReading()));
+ ASSERT_EQ(cs[2], 'c');
+ }
+ {
+ nsAutoCString str;
+ str.AssignLiteral("abc");
+
+ AssertSpanOfThreeChars(str);
+ AssertSpanOfThreeCharsViaString(str);
+
+ auto s = Span(str);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(str.Length()));
+ ASSERT_EQ(s.data(), str.BeginReading());
+ ASSERT_EQ(s[2], 'c');
+ }
+ {
+ nsTArray<int> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ Range<int> r(v.Elements(), v.Length());
+
+ AssertSpanOfThreeInts(r);
+
+ Span<int> s{r};
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+
+ Span<const int> cs{r};
+ ASSERT_EQ(cs.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(cs.data(), v.Elements());
+ ASSERT_EQ(cs[2], 3);
+ }
+ {
+ nsTArray<int> v;
+ v.AppendElement(1);
+ v.AppendElement(2);
+ v.AppendElement(3);
+
+ Range<int> r(v.Elements(), v.Length());
+
+ AssertSpanOfThreeInts(r);
+
+ auto s = Span(r);
+ ASSERT_EQ(s.size(), narrow_cast<size_t>(v.Length()));
+ ASSERT_EQ(s.data(), v.Elements());
+ ASSERT_EQ(s[2], 3);
+ }
+}
+
+SPAN_TEST(from_cstring) {
+ {
+ const char* str = nullptr;
+ auto cs = MakeStringSpan(str);
+ ASSERT_EQ(cs.size(), 0U);
+ }
+ {
+ const char* str = "abc";
+
+ auto cs = MakeStringSpan(str);
+ ASSERT_EQ(cs.size(), 3U);
+ ASSERT_EQ(cs.data(), str);
+ ASSERT_EQ(cs[2], 'c');
+
+ static_assert(MakeStringSpan("abc").size() == 3U);
+ static_assert(MakeStringSpan("abc")[2] == 'c');
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<const char> scccl("literal"); // error
+
+ Span<const char> sccel;
+ sccel = "literal"; // error
+
+ cs = Span("literal"); // error
+#endif
+ }
+ {
+ char arr[4] = {'a', 'b', 'c', 0};
+
+ auto cs = MakeStringSpan(arr);
+ ASSERT_EQ(cs.size(), 3U);
+ ASSERT_EQ(cs.data(), arr);
+ ASSERT_EQ(cs[2], 'c');
+
+ cs = Span(arr);
+ ASSERT_EQ(cs.size(), 4U); // zero terminator is part of the array span.
+ ASSERT_EQ(cs.data(), arr);
+ ASSERT_EQ(cs[2], 'c');
+ ASSERT_EQ(cs[3], '\0'); // zero terminator is part of the array span.
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<char> scca(arr); // error
+ Span<const char> sccca(arr); // error
+
+ Span<const char> scccea;
+ scccea = arr; // error
+#endif
+ }
+ {
+ const char16_t* str = nullptr;
+ auto cs = MakeStringSpan(str);
+ ASSERT_EQ(cs.size(), 0U);
+ }
+ {
+ char16_t arr[4] = {'a', 'b', 'c', 0};
+ const char16_t* str = arr;
+
+ auto cs = MakeStringSpan(str);
+ ASSERT_EQ(cs.size(), 3U);
+ ASSERT_EQ(cs.data(), str);
+ ASSERT_EQ(cs[2], 'c');
+
+ static_assert(MakeStringSpan(u"abc").size() == 3U);
+ static_assert(MakeStringSpan(u"abc")[2] == u'c');
+
+ cs = MakeStringSpan(arr);
+ ASSERT_EQ(cs.size(), 3U);
+ ASSERT_EQ(cs.data(), str);
+ ASSERT_EQ(cs[2], 'c');
+
+ cs = Span(arr);
+ ASSERT_EQ(cs.size(), 4U); // zero terminator is part of the array span.
+ ASSERT_EQ(cs.data(), str);
+ ASSERT_EQ(cs[2], 'c');
+ ASSERT_EQ(cs[3], '\0'); // zero terminator is part of the array span.
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<char16_t> scca(arr); // error
+
+ Span<const char16_t> scccea;
+ scccea = arr; // error
+
+ Span<const char16_t> scccl(u"literal"); // error
+
+ Span<const char16_t>* sccel;
+ *sccel = u"literal"; // error
+
+ cs = Span(u"literal"); // error
+#endif
+ }
+}
+
+SPAN_TEST(from_convertible_Span_constructor) {
+ {
+ Span<DerivedClass> avd;
+ Span<const DerivedClass> avcd = avd;
+ static_cast<void>(avcd);
+ }
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ Span<DerivedClass> avd;
+ Span<BaseClass> avb = avd;
+ static_cast<void>(avb);
+#endif
+ }
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ {
+ Span<int> s;
+ Span<unsigned int> s2 = s;
+ static_cast<void>(s2);
+ }
+
+ {
+ Span<int> s;
+ Span<const unsigned int> s2 = s;
+ static_cast<void>(s2);
+ }
+
+ {
+ Span<int> s;
+ Span<short> s2 = s;
+ static_cast<void>(s2);
+ }
+#endif
+}
+
+SPAN_TEST(copy_move_and_assignment) {
+ Span<int> s1;
+ ASSERT_TRUE(s1.empty());
+
+ int arr[] = {3, 4, 5};
+
+ Span<const int> s2 = arr;
+ ASSERT_EQ(s2.Length(), 3U);
+ ASSERT_EQ(s2.data(), &arr[0]);
+
+ s2 = s1;
+ ASSERT_TRUE(s2.empty());
+
+ auto get_temp_Span = [&]() -> Span<int> { return {&arr[1], 2}; };
+ auto use_Span = [&](Span<const int> s) {
+ ASSERT_EQ(s.Length(), 2U);
+ ASSERT_EQ(s.data(), &arr[1]);
+ };
+ use_Span(get_temp_Span());
+
+ s1 = get_temp_Span();
+ ASSERT_EQ(s1.Length(), 2U);
+ ASSERT_EQ(s1.data(), &arr[1]);
+}
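+
+// Copying or assigning a Span rebinds the view (pointer and length) without
+// touching the viewed elements; assigning the empty s1 to s2 above changes
+// what s2 refers to, not the contents of arr.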
+
+SPAN_TEST(first) {
+ int arr[5] = {1, 2, 3, 4, 5};
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.First<2>().Length(), 2U);
+ ASSERT_EQ(av.First(2).Length(), 2U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.First<0>().Length(), 0U);
+ ASSERT_EQ(av.First(0).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.First<5>().Length(), 5U);
+ ASSERT_EQ(av.First(5).Length(), 5U);
+ }
+
+#if 0
+ {
+ Span<int, 5> av = arr;
+# ifdef CONFIRM_COMPILATION_ERRORS
+ ASSERT_EQ(av.First<6>().Length(), 6U);
+ ASSERT_EQ(av.First<-1>().Length(), -1);
+# endif
+ CHECK_THROW(av.First(6).Length(), fail_fast);
+ }
+#endif
+
+ {
+ Span<int> av;
+ ASSERT_EQ(av.First<0>().Length(), 0U);
+ ASSERT_EQ(av.First(0).Length(), 0U);
+ }
+}
+
+SPAN_TEST(last) {
+ int arr[5] = {1, 2, 3, 4, 5};
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.Last<2>().Length(), 2U);
+ ASSERT_EQ(av.Last(2).Length(), 2U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.Last<0>().Length(), 0U);
+ ASSERT_EQ(av.Last(0).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.Last<5>().Length(), 5U);
+ ASSERT_EQ(av.Last(5).Length(), 5U);
+ }
+
+#if 0
+ {
+ Span<int, 5> av = arr;
+# ifdef CONFIRM_COMPILATION_ERRORS
+ ASSERT_EQ(av.Last<6>().Length(), 6U);
+# endif
+ CHECK_THROW(av.Last(6).Length(), fail_fast);
+ }
+#endif
+
+ {
+ Span<int> av;
+ ASSERT_EQ(av.Last<0>().Length(), 0U);
+ ASSERT_EQ(av.Last(0).Length(), 0U);
+ }
+}
+
+SPAN_TEST(from_to) {
+ int arr[5] = {1, 2, 3, 4, 5};
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.From(3).Length(), 2U);
+ ASSERT_EQ(av.From(2)[1], 4);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.From(5).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.From(0).Length(), 5U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.To(3).Length(), 3U);
+ ASSERT_EQ(av.To(3)[1], 2);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.To(0).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.To(5).Length(), 5U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.FromTo(1, 4).Length(), 3U);
+ ASSERT_EQ(av.FromTo(1, 4)[1], 3);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.FromTo(2, 2).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.FromTo(0, 5).Length(), 5U);
+ }
+}
+
+SPAN_TEST(Subspan) {
+ int arr[5] = {1, 2, 3, 4, 5};
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ((av.Subspan<2, 2>().Length()), 2U);
+ ASSERT_EQ(av.Subspan(2, 2).Length(), 2U);
+ ASSERT_EQ(av.Subspan(2, 3).Length(), 3U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ((av.Subspan<0, 0>().Length()), 0U);
+ ASSERT_EQ(av.Subspan(0, 0).Length(), 0U);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ((av.Subspan<0, 5>().Length()), 5U);
+ ASSERT_EQ(av.Subspan(0, 5).Length(), 5U);
+ CHECK_THROW(av.Subspan(0, 6).Length(), fail_fast);
+ CHECK_THROW(av.Subspan(1, 5).Length(), fail_fast);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ((av.Subspan<4, 0>().Length()), 0U);
+ ASSERT_EQ(av.Subspan(4, 0).Length(), 0U);
+ ASSERT_EQ(av.Subspan(5, 0).Length(), 0U);
+ CHECK_THROW(av.Subspan(6, 0).Length(), fail_fast);
+ }
+
+ {
+ Span<int> av;
+ ASSERT_EQ((av.Subspan<0, 0>().Length()), 0U);
+ ASSERT_EQ(av.Subspan(0, 0).Length(), 0U);
+ CHECK_THROW((av.Subspan<1, 0>().Length()), fail_fast);
+ }
+
+ {
+ Span<int> av;
+ ASSERT_EQ(av.Subspan(0).Length(), 0U);
+ CHECK_THROW(av.Subspan(1).Length(), fail_fast);
+ }
+
+ {
+ Span<int> av = arr;
+ ASSERT_EQ(av.Subspan(0).Length(), 5U);
+ ASSERT_EQ(av.Subspan(1).Length(), 4U);
+ ASSERT_EQ(av.Subspan(4).Length(), 1U);
+ ASSERT_EQ(av.Subspan(5).Length(), 0U);
+ CHECK_THROW(av.Subspan(6).Length(), fail_fast);
+ auto av2 = av.Subspan(1);
+ for (int i = 0; i < 4; ++i) ASSERT_EQ(av2[i], i + 2);
+ }
+
+ {
+ Span<int, 5> av = arr;
+ ASSERT_EQ(av.Subspan(0).Length(), 5U);
+ ASSERT_EQ(av.Subspan(1).Length(), 4U);
+ ASSERT_EQ(av.Subspan(4).Length(), 1U);
+ ASSERT_EQ(av.Subspan(5).Length(), 0U);
+ CHECK_THROW(av.Subspan(6).Length(), fail_fast);
+ auto av2 = av.Subspan(1);
+ for (int i = 0; i < 4; ++i) ASSERT_EQ(av2[i], i + 2);
+ }
+}
+
+SPAN_TEST(at_call) {
+ int arr[4] = {1, 2, 3, 4};
+
+ {
+ Span<int> s = arr;
+ ASSERT_EQ(s.at(0), 1);
+ CHECK_THROW(s.at(5), fail_fast);
+ }
+
+ {
+ int arr2d[2] = {1, 6};
+ Span<int, 2> s = arr2d;
+ ASSERT_EQ(s.at(0), 1);
+ ASSERT_EQ(s.at(1), 6);
+ CHECK_THROW(s.at(2), fail_fast);
+ }
+}
+
+SPAN_TEST(operator_function_call) {
+ int arr[4] = {1, 2, 3, 4};
+
+ {
+ Span<int> s = arr;
+ ASSERT_EQ(s(0), 1);
+ CHECK_THROW(s(5), fail_fast);
+ }
+
+ {
+ int arr2d[2] = {1, 6};
+ Span<int, 2> s = arr2d;
+ ASSERT_EQ(s(0), 1);
+ ASSERT_EQ(s(1), 6);
+ CHECK_THROW(s(2), fail_fast);
+ }
+}
+
+SPAN_TEST(iterator_default_init) {
+ Span<int>::iterator it1;
+ Span<int>::iterator it2;
+ ASSERT_EQ(it1, it2);
+}
+
+SPAN_TEST(const_iterator_default_init) {
+ Span<int>::const_iterator it1;
+ Span<int>::const_iterator it2;
+ ASSERT_EQ(it1, it2);
+}
+
+SPAN_TEST(iterator_conversions) {
+ Span<int>::iterator badIt;
+ Span<int>::const_iterator badConstIt;
+ ASSERT_EQ(badIt, badConstIt);
+
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ auto it = s.begin();
+ auto cit = s.cbegin();
+
+ ASSERT_EQ(it, cit);
+ ASSERT_EQ(cit, it);
+
+ Span<int>::const_iterator cit2 = it;
+ ASSERT_EQ(cit2, cit);
+
+ Span<int>::const_iterator cit3 = it + 4;
+ ASSERT_EQ(cit3, s.cend());
+}
+
+SPAN_TEST(iterator_comparisons) {
+ int a[] = {1, 2, 3, 4};
+ {
+ Span<int> s = a;
+ Span<int>::iterator it = s.begin();
+ auto it2 = it + 1;
+ Span<int>::const_iterator cit = s.cbegin();
+
+ ASSERT_EQ(it, cit);
+ ASSERT_EQ(cit, it);
+ ASSERT_EQ(it, it);
+ ASSERT_EQ(cit, cit);
+ ASSERT_EQ(cit, s.begin());
+ ASSERT_EQ(s.begin(), cit);
+ ASSERT_EQ(s.cbegin(), cit);
+ ASSERT_EQ(it, s.begin());
+ ASSERT_EQ(s.begin(), it);
+
+ ASSERT_NE(it, it2);
+ ASSERT_NE(it2, it);
+ ASSERT_NE(it, s.end());
+ ASSERT_NE(it2, s.end());
+ ASSERT_NE(s.end(), it);
+ ASSERT_NE(it2, cit);
+ ASSERT_NE(cit, it2);
+
+ ASSERT_LT(it, it2);
+ ASSERT_LE(it, it2);
+ ASSERT_LE(it2, s.end());
+ ASSERT_LT(it, s.end());
+ ASSERT_LE(it, cit);
+ ASSERT_LE(cit, it);
+ ASSERT_LT(cit, it2);
+ ASSERT_LE(cit, it2);
+ ASSERT_LT(cit, s.end());
+ ASSERT_LE(cit, s.end());
+
+ ASSERT_GT(it2, it);
+ ASSERT_GE(it2, it);
+ ASSERT_GT(s.end(), it2);
+ ASSERT_GE(s.end(), it2);
+ ASSERT_GT(it2, cit);
+ ASSERT_GE(it2, cit);
+ }
+}
+
+SPAN_TEST(begin_end) {
+ {
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ Span<int>::iterator it = s.begin();
+ Span<int>::iterator it2 = std::begin(s);
+ ASSERT_EQ(it, it2);
+
+ it = s.end();
+ it2 = std::end(s);
+ ASSERT_EQ(it, it2);
+ }
+
+ {
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ auto it = s.begin();
+ auto first = it;
+ ASSERT_EQ(it, first);
+ ASSERT_EQ(*it, 1);
+
+ auto beyond = s.end();
+ ASSERT_NE(it, beyond);
+ CHECK_THROW(*beyond, fail_fast);
+
+ ASSERT_EQ(beyond - first, 4);
+ ASSERT_EQ(first - first, 0);
+ ASSERT_EQ(beyond - beyond, 0);
+
+ ++it;
+ ASSERT_EQ(it - first, 1);
+ ASSERT_EQ(*it, 2);
+ *it = 22;
+ ASSERT_EQ(*it, 22);
+ ASSERT_EQ(beyond - it, 3);
+
+ it = first;
+ ASSERT_EQ(it, first);
+ while (it != s.end()) {
+ *it = 5;
+ ++it;
+ }
+
+ ASSERT_EQ(it, beyond);
+ ASSERT_EQ(it - beyond, 0);
+
+ for (auto& n : s) {
+ ASSERT_EQ(n, 5);
+ }
+ }
+}
+
+SPAN_TEST(cbegin_cend) {
+#if 0
+ {
+ int a[] = { 1, 2, 3, 4 };
+ Span<int> s = a;
+
+ Span<int>::const_iterator cit = s.cbegin();
+ Span<int>::const_iterator cit2 = std::cbegin(s);
+ ASSERT_EQ(cit, cit2);
+
+ cit = s.cend();
+ cit2 = std::cend(s);
+ ASSERT_EQ(cit, cit2);
+ }
+#endif
+ {
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ auto it = s.cbegin();
+ auto first = it;
+ ASSERT_EQ(it, first);
+ ASSERT_EQ(*it, 1);
+
+ auto beyond = s.cend();
+ ASSERT_NE(it, beyond);
+ CHECK_THROW(*beyond, fail_fast);
+
+ ASSERT_EQ(beyond - first, 4);
+ ASSERT_EQ(first - first, 0);
+ ASSERT_EQ(beyond - beyond, 0);
+
+ ++it;
+ ASSERT_EQ(it - first, 1);
+ ASSERT_EQ(*it, 2);
+ ASSERT_EQ(beyond - it, 3);
+
+ int last = 0;
+ it = first;
+ ASSERT_EQ(it, first);
+ while (it != s.cend()) {
+ ASSERT_EQ(*it, last + 1);
+
+ last = *it;
+ ++it;
+ }
+
+ ASSERT_EQ(it, beyond);
+ ASSERT_EQ(it - beyond, 0);
+ }
+}
+
+SPAN_TEST(rbegin_rend) {
+ {
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ auto it = s.rbegin();
+ auto first = it;
+ ASSERT_EQ(it, first);
+ ASSERT_EQ(*it, 4);
+
+ auto beyond = s.rend();
+ ASSERT_NE(it, beyond);
+ CHECK_THROW(*beyond, fail_fast);
+
+ ASSERT_EQ(beyond - first, 4);
+ ASSERT_EQ(first - first, 0);
+ ASSERT_EQ(beyond - beyond, 0);
+
+ ++it;
+ ASSERT_EQ(it - first, 1);
+ ASSERT_EQ(*it, 3);
+ *it = 22;
+ ASSERT_EQ(*it, 22);
+ ASSERT_EQ(beyond - it, 3);
+
+ it = first;
+ ASSERT_EQ(it, first);
+ while (it != s.rend()) {
+ *it = 5;
+ ++it;
+ }
+
+ ASSERT_EQ(it, beyond);
+ ASSERT_EQ(it - beyond, 0);
+
+ for (auto& n : s) {
+ ASSERT_EQ(n, 5);
+ }
+ }
+}
+
+SPAN_TEST(crbegin_crend) {
+ {
+ int a[] = {1, 2, 3, 4};
+ Span<int> s = a;
+
+ auto it = s.crbegin();
+ auto first = it;
+ ASSERT_EQ(it, first);
+ ASSERT_EQ(*it, 4);
+
+ auto beyond = s.crend();
+ ASSERT_NE(it, beyond);
+ CHECK_THROW(*beyond, fail_fast);
+
+ ASSERT_EQ(beyond - first, 4);
+ ASSERT_EQ(first - first, 0);
+ ASSERT_EQ(beyond - beyond, 0);
+
+ ++it;
+ ASSERT_EQ(it - first, 1);
+ ASSERT_EQ(*it, 3);
+ ASSERT_EQ(beyond - it, 3);
+
+ it = first;
+ ASSERT_EQ(it, first);
+ int last = 5;
+ while (it != s.crend()) {
+ ASSERT_EQ(*it, last - 1);
+ last = *it;
+
+ ++it;
+ }
+
+ ASSERT_EQ(it, beyond);
+ ASSERT_EQ(it - beyond, 0);
+ }
+}
+
+SPAN_TEST(comparison_operators) {
+ {
+ Span<int> s1 = nullptr;
+ Span<int> s2 = nullptr;
+ ASSERT_EQ(s1, s2);
+ ASSERT_FALSE(s1 != s2);
+ ASSERT_FALSE(s1 < s2);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s1, s2);
+ ASSERT_EQ(s2, s1);
+ ASSERT_FALSE(s2 != s1);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s2, s1);
+ ASSERT_FALSE(s2 > s1);
+ ASSERT_GE(s2, s1);
+ }
+
+ {
+ int arr[] = {2, 1};
+ Span<int> s1 = arr;
+ Span<int> s2 = arr;
+
+ ASSERT_EQ(s1, s2);
+ ASSERT_FALSE(s1 != s2);
+ ASSERT_FALSE(s1 < s2);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s1, s2);
+ ASSERT_EQ(s2, s1);
+ ASSERT_FALSE(s2 != s1);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s2, s1);
+ ASSERT_FALSE(s2 > s1);
+ ASSERT_GE(s2, s1);
+ }
+
+ {
+ int arr[] = {2, 1}; // bigger
+
+ Span<int> s1 = nullptr;
+ Span<int> s2 = arr;
+
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_LT(s1, s2);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s2 <= s1);
+ ASSERT_GT(s2, s1);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s2, s1);
+ ASSERT_FALSE(s1 >= s2);
+ }
+
+ {
+ int arr1[] = {1, 2};
+ int arr2[] = {1, 2};
+ Span<int> s1 = arr1;
+ Span<int> s2 = arr2;
+
+ ASSERT_EQ(s1, s2);
+ ASSERT_FALSE(s1 != s2);
+ ASSERT_FALSE(s1 < s2);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s1, s2);
+ ASSERT_EQ(s2, s1);
+ ASSERT_FALSE(s2 != s1);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s2, s1);
+ ASSERT_FALSE(s2 > s1);
+ ASSERT_GE(s2, s1);
+ }
+
+ {
+ int arr[] = {1, 2, 3};
+
+ AssertSpanOfThreeInts(arr);
+
+ Span<int> s1 = {&arr[0], 2}; // shorter
+ Span<int> s2 = arr; // longer
+
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_LT(s1, s2);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s2 <= s1);
+ ASSERT_GT(s2, s1);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s2, s1);
+ ASSERT_FALSE(s1 >= s2);
+ }
+
+ {
+ int arr1[] = {1, 2}; // smaller
+ int arr2[] = {2, 1}; // bigger
+
+ Span<int> s1 = arr1;
+ Span<int> s2 = arr2;
+
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_NE(s1, s2);
+ ASSERT_NE(s2, s1);
+ ASSERT_LT(s1, s2);
+ ASSERT_FALSE(s2 < s1);
+ ASSERT_LE(s1, s2);
+ ASSERT_FALSE(s2 <= s1);
+ ASSERT_GT(s2, s1);
+ ASSERT_FALSE(s1 > s2);
+ ASSERT_GE(s2, s1);
+ ASSERT_FALSE(s1 >= s2);
+ }
+}
+
+SPAN_TEST(as_bytes) {
+ int a[] = {1, 2, 3, 4};
+
+ {
+ Span<const int> s = a;
+ ASSERT_EQ(s.Length(), 4U);
+ Span<const uint8_t> bs = AsBytes(s);
+ ASSERT_EQ(static_cast<const void*>(bs.data()),
+ static_cast<const void*>(s.data()));
+ ASSERT_EQ(bs.Length(), s.LengthBytes());
+ }
+
+ {
+ Span<int> s;
+ auto bs = AsBytes(s);
+ ASSERT_EQ(bs.Length(), s.Length());
+ ASSERT_EQ(bs.Length(), 0U);
+ ASSERT_EQ(bs.size_bytes(), 0U);
+ ASSERT_EQ(static_cast<const void*>(bs.data()),
+ static_cast<const void*>(s.data()));
+ ASSERT_EQ(bs.data(), reinterpret_cast<const uint8_t*>(SLICE_INT_PTR));
+ }
+
+ {
+ Span<int> s = a;
+ auto bs = AsBytes(s);
+ ASSERT_EQ(static_cast<const void*>(bs.data()),
+ static_cast<const void*>(s.data()));
+ ASSERT_EQ(bs.Length(), s.LengthBytes());
+ }
+}
+
+SPAN_TEST(as_writable_bytes) {
+ int a[] = {1, 2, 3, 4};
+
+ {
+#ifdef CONFIRM_COMPILATION_ERRORS
+ // you should not be able to get writeable bytes for const objects
+ Span<const int> s = a;
+ ASSERT_EQ(s.Length(), 4U);
+ Span<const byte> bs = AsWritableBytes(s);
+ ASSERT_EQ(static_cast<void*>(bs.data()), static_cast<void*>(s.data()));
+ ASSERT_EQ(bs.Length(), s.LengthBytes());
+#endif
+ }
+
+ {
+ Span<int> s;
+ auto bs = AsWritableBytes(s);
+ ASSERT_EQ(bs.Length(), s.Length());
+ ASSERT_EQ(bs.Length(), 0U);
+ ASSERT_EQ(bs.size_bytes(), 0U);
+ ASSERT_EQ(static_cast<void*>(bs.data()), static_cast<void*>(s.data()));
+ ASSERT_EQ(bs.data(), reinterpret_cast<uint8_t*>(SLICE_INT_PTR));
+ }
+
+ {
+ Span<int> s = a;
+ auto bs = AsWritableBytes(s);
+ ASSERT_EQ(static_cast<void*>(bs.data()), static_cast<void*>(s.data()));
+ ASSERT_EQ(bs.Length(), s.LengthBytes());
+ }
+}
+
+SPAN_TEST(as_chars) {
+ const uint8_t a[] = {1, 2, 3, 4};
+ Span<const uint8_t> u = Span(a);
+ Span<const char> c = AsChars(u);
+ ASSERT_EQ(static_cast<const void*>(u.data()),
+ static_cast<const void*>(c.data()));
+ ASSERT_EQ(u.size(), c.size());
+}
+
+SPAN_TEST(as_writable_chars) {
+ uint8_t a[] = {1, 2, 3, 4};
+ Span<uint8_t> u = Span(a);
+ Span<char> c = AsWritableChars(u);
+ ASSERT_EQ(static_cast<void*>(u.data()), static_cast<void*>(c.data()));
+ ASSERT_EQ(u.size(), c.size());
+}
+
+SPAN_TEST(fixed_size_conversions) {
+ int arr[] = {1, 2, 3, 4};
+
+ // converting to an Span from an equal size array is ok
+ Span<int, 4> s4 = arr;
+ ASSERT_EQ(s4.Length(), 4U);
+
+ // converting to dynamic_range is always ok
+ {
+ Span<int> s = s4;
+ ASSERT_EQ(s.Length(), s4.Length());
+ static_cast<void>(s);
+ }
+
+ // initialization or assignment to a static Span that REDUCES size is NOT ok
+#ifdef CONFIRM_COMPILATION_ERRORS
+ { Span<int, 2> s = arr; }
+ {
+ Span<int, 2> s2 = s4;
+ static_cast<void>(s2);
+ }
+#endif
+
+#if 0
+ // even when done dynamically
+ {
+ Span<int> s = arr;
+ auto f = [&]() {
+ Span<int, 2> s2 = s;
+ static_cast<void>(s2);
+ };
+ CHECK_THROW(f(), fail_fast);
+ }
+#endif
+
+ // but doing so explicitly is ok
+
+ // you can convert statically
+ {
+ Span<int, 2> s2 = {arr, 2};
+ static_cast<void>(s2);
+ }
+ {
+ Span<int, 1> s1 = s4.First<1>();
+ static_cast<void>(s1);
+ }
+
+ // ...or dynamically
+ {
+ // NB: implicit conversion to Span<int,1> from Span<int>
+ Span<int, 1> s1 = s4.First(1);
+ static_cast<void>(s1);
+ }
+
+#if 0
+ // initialization or assignment to a static Span that requires a size
+ // INCREASE is not ok.
+ int arr2[2] = {1, 2};
+#endif
+
+#ifdef CONFIRM_COMPILATION_ERRORS
+ { Span<int, 4> s3 = arr2; }
+ {
+ Span<int, 2> s2 = arr2;
+ Span<int, 4> s4a = s2;
+ }
+#endif
+
+#if 0
+ {
+ auto f = [&]() {
+ Span<int, 4> _s4 = {arr2, 2};
+ static_cast<void>(_s4);
+ };
+ CHECK_THROW(f(), fail_fast);
+ }
+
+ // this should fail - we are trying to assign a small dynamic Span to a fixed_size larger one
+ Span<int> av = arr2;
+ auto f = [&]() {
+ Span<int, 4> _s4 = av;
+ static_cast<void>(_s4);
+ };
+ CHECK_THROW(f(), fail_fast);
+#endif
+}
+
+#if 0
+SPAN_TEST(interop_with_std_regex) {
+ char lat[] = {'1', '2', '3', '4', '5', '6', 'E', 'F', 'G'};
+ Span<char> s = lat;
+ auto f_it = s.begin() + 7;
+
+ std::match_results<Span<char>::iterator> match;
+
+ std::regex_match(s.begin(), s.end(), match, std::regex(".*"));
+ ASSERT_TRUE(match.ready());
+ ASSERT_TRUE(!match.empty());
+ ASSERT_TRUE(match[0].matched);
+ ASSERT_EQ(match[0].first, s.begin());
+ ASSERT_EQ(match[0].second, s.end());
+
+ std::regex_search(s.begin(), s.end(), match, std::regex("F"));
+ ASSERT_TRUE(match.ready());
+ ASSERT_TRUE(!match.empty());
+ ASSERT_TRUE(match[0].matched);
+ ASSERT_EQ(match[0].first, f_it);
+ ASSERT_EQ(match[0].second, f_it + 1);
+}
+
+SPAN_TEST(interop_with_gsl_at) {
+ int arr[5] = {1, 2, 3, 4, 5};
+ Span<int> s{arr};
+ ASSERT_EQ(at(s, 0), 1);
+ ASSERT_EQ(at(s, 1), 2);
+}
+#endif
+
+SPAN_TEST(default_constructible) {
+ ASSERT_TRUE((std::is_default_constructible<Span<int>>::value));
+ ASSERT_TRUE((std::is_default_constructible<Span<int, 0>>::value));
+ ASSERT_TRUE((!std::is_default_constructible<Span<int, 42>>::value));
+}
+
+SPAN_TEST(type_inference) {
+ static constexpr int arr[5] = {1, 2, 3, 4, 5};
+ constexpr auto s = Span{arr};
+ static_assert(std::is_same_v<const Span<const int, 5>, decltype(s)>);
+ static_assert(arr == s.Elements());
+}
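+
+// Class template argument deduction infers both the element type and the
+// static extent here: Span{arr} over a const int[5] deduces
+// Span<const int, 5>, as the static_assert above verifies.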
+
+SPAN_TEST(split_at_dynamic_with_dynamic_extent) {
+ static constexpr int arr[5] = {1, 2, 3, 4, 5};
+ constexpr Span<const int> s = Span{arr};
+
+ { // Split at begin.
+ constexpr auto splitAt0Result = s.SplitAt(0);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt0Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt0Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt0Result.second.Elements());
+ ASSERT_EQ(0u, splitAt0Result.first.Length());
+ ASSERT_EQ(5u, splitAt0Result.second.Length());
+ }
+
+ { // Split at end.
+ constexpr auto splitAt5Result = s.SplitAt(s.Length());
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt5Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt5Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt5Result.first.Elements());
+ ASSERT_EQ(5u, splitAt5Result.first.Length());
+ ASSERT_EQ(0u, splitAt5Result.second.Length());
+ }
+
+ {
+ // Split inside.
+ constexpr auto splitAt3Result = s.SplitAt(3);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt3Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt3Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt3Result.first.Elements());
+ ASSERT_EQ(s.Elements() + 3, splitAt3Result.second.Elements());
+ ASSERT_EQ(3u, splitAt3Result.first.Length());
+ ASSERT_EQ(2u, splitAt3Result.second.Length());
+ }
+}
+
+SPAN_TEST(split_at_dynamic_with_static_extent) {
+ static constexpr int arr[5] = {1, 2, 3, 4, 5};
+ constexpr auto s = Span{arr};
+
+ {
+ // Split at begin.
+ constexpr auto splitAt0Result = s.SplitAt(0);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt0Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt0Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt0Result.second.Elements());
+ }
+
+ {
+ // Split at end.
+ constexpr auto splitAt5Result = s.SplitAt(s.Length());
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt5Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt5Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt5Result.first.Elements());
+ }
+
+ {
+ // Split inside.
+ constexpr auto splitAt3Result = s.SplitAt(3);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt3Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int>, decltype(splitAt3Result.second)>);
+ ASSERT_EQ(s.Elements(), splitAt3Result.first.Elements());
+ ASSERT_EQ(s.Elements() + 3, splitAt3Result.second.Elements());
+ }
+}
+
+SPAN_TEST(split_at_static) {
+ static constexpr int arr[5] = {1, 2, 3, 4, 5};
+ constexpr auto s = Span{arr};
+
+ // Split at begin.
+ constexpr auto splitAt0Result = s.SplitAt<0>();
+ static_assert(
+ std::is_same_v<Span<const int, 0>, decltype(splitAt0Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int, 5>, decltype(splitAt0Result.second)>);
+ static_assert(splitAt0Result.second.Elements() == s.Elements());
+
+ // Split at end.
+ constexpr auto splitAt5Result = s.SplitAt<s.Length()>();
+ static_assert(std::is_same_v<Span<const int, s.Length()>,
+ decltype(splitAt5Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int, 0>, decltype(splitAt5Result.second)>);
+ static_assert(splitAt5Result.first.Elements() == s.Elements());
+
+ // Split inside.
+ constexpr auto splitAt3Result = s.SplitAt<3>();
+ static_assert(
+ std::is_same_v<Span<const int, 3>, decltype(splitAt3Result.first)>);
+ static_assert(
+ std::is_same_v<Span<const int, 2>, decltype(splitAt3Result.second)>);
+ static_assert(splitAt3Result.first.Elements() == s.Elements());
+ static_assert(splitAt3Result.second.Elements() == s.Elements() + 3);
+}
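+
+// With a compile-time split index N, both halves keep static extents (N and
+// 5 - N here), which is why all of the checks above can be evaluated via
+// static_assert.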
+
+SPAN_TEST(as_const_dynamic) {
+ static int arr[5] = {1, 2, 3, 4, 5};
+ auto span = Span{arr, 5};
+ auto constSpan = span.AsConst();
+ static_assert(std::is_same_v<Span<const int>, decltype(constSpan)>);
+}
+
+SPAN_TEST(as_const_static) {
+ {
+ static constexpr int constArr[5] = {1, 2, 3, 4, 5};
+    constexpr auto span = Span{constArr};  // already a Span<const int, 5>
+ constexpr auto constSpan = span.AsConst();
+
+ static_assert(
+ std::is_same_v<const Span<const int, 5>, decltype(constSpan)>);
+ }
+
+ {
+ static int arr[5] = {1, 2, 3, 4, 5};
+ auto span = Span{arr};
+ auto constSpan = span.AsConst();
+ static_assert(std::is_same_v<Span<const int, 5>, decltype(constSpan)>);
+ }
+}
+
+SPAN_TEST(construct_from_iterators_dynamic) {
+ const int constArr[5] = {1, 2, 3, 4, 5};
+ auto constSpan = Span{constArr};
+
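+  // The iterator-pair deduction guide picks the element type from the
+  // iterators: a (cbegin, cend) pair yields Span<const int>, while a
+  // (begin, end) pair over mutable storage yields Span<int>.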
+ // const from const
+ {
+ const auto wholeSpan = Span{constSpan.cbegin(), constSpan.cend()};
+ static_assert(std::is_same_v<decltype(wholeSpan), const Span<const int>>);
+ ASSERT_TRUE(constSpan == wholeSpan);
+
+ const auto emptyBeginSpan = Span{constSpan.cbegin(), constSpan.cbegin()};
+ ASSERT_TRUE(emptyBeginSpan.IsEmpty());
+
+ const auto emptyEndSpan = Span{constSpan.cend(), constSpan.cend()};
+ ASSERT_TRUE(emptyEndSpan.IsEmpty());
+
+ const auto subSpan = Span{constSpan.cbegin() + 1, constSpan.cend() - 1};
+ ASSERT_EQ(constSpan.Length() - 2, subSpan.Length());
+ ASSERT_EQ(constSpan.Elements() + 1, subSpan.Elements());
+ }
+
+ int arr[5] = {1, 2, 3, 4, 5};
+ auto span = Span{arr};
+
+ // const from non-const
+ {
+ const auto wholeSpan = Span{span.cbegin(), span.cend()};
+ static_assert(std::is_same_v<decltype(wholeSpan), const Span<const int>>);
+ // XXX Can't use span == wholeSpan because of difference in constness.
+ ASSERT_EQ(span.Elements(), wholeSpan.Elements());
+ ASSERT_EQ(span.Length(), wholeSpan.Length());
+
+ const auto emptyBeginSpan = Span{span.cbegin(), span.cbegin()};
+ ASSERT_TRUE(emptyBeginSpan.IsEmpty());
+
+ const auto emptyEndSpan = Span{span.cend(), span.cend()};
+ ASSERT_TRUE(emptyEndSpan.IsEmpty());
+
+ const auto subSpan = Span{span.cbegin() + 1, span.cend() - 1};
+ ASSERT_EQ(span.Length() - 2, subSpan.Length());
+ ASSERT_EQ(span.Elements() + 1, subSpan.Elements());
+ }
+
+ // non-const from non-const
+ {
+ const auto wholeSpan = Span{span.begin(), span.end()};
+ static_assert(std::is_same_v<decltype(wholeSpan), const Span<int>>);
+ ASSERT_TRUE(span == wholeSpan);
+
+ const auto emptyBeginSpan = Span{span.begin(), span.begin()};
+ ASSERT_TRUE(emptyBeginSpan.IsEmpty());
+
+ const auto emptyEndSpan = Span{span.end(), span.end()};
+ ASSERT_TRUE(emptyEndSpan.IsEmpty());
+
+ const auto subSpan = Span{span.begin() + 1, span.end() - 1};
+    ASSERT_EQ(span.Length() - 2, subSpan.Length());
+    ASSERT_EQ(span.Elements() + 1, subSpan.Elements());
+ }
+}
+
+SPAN_TEST(construct_from_iterators_static) {
+ static constexpr int arr[5] = {1, 2, 3, 4, 5};
+ constexpr auto constSpan = Span{arr};
+
+ // const
+ {
+ const auto wholeSpan = Span{constSpan.cbegin(), constSpan.cend()};
+ static_assert(std::is_same_v<decltype(wholeSpan), const Span<const int>>);
+ ASSERT_TRUE(constSpan == wholeSpan);
+
+ const auto emptyBeginSpan = Span{constSpan.cbegin(), constSpan.cbegin()};
+ ASSERT_TRUE(emptyBeginSpan.IsEmpty());
+
+ const auto emptyEndSpan = Span{constSpan.cend(), constSpan.cend()};
+ ASSERT_TRUE(emptyEndSpan.IsEmpty());
+
+ const auto subSpan = Span{constSpan.cbegin() + 1, constSpan.cend() - 1};
+ ASSERT_EQ(constSpan.Length() - 2, subSpan.Length());
+ ASSERT_EQ(constSpan.Elements() + 1, subSpan.Elements());
+ }
+}
+
+SPAN_TEST(construct_from_container_with_type_deduction) {
+ std::vector<int> vec = {1, 2, 3, 4, 5};
+
+ // from const
+ {
+ const auto& constVecRef = vec;
+
+ auto span = Span{constVecRef};
+ static_assert(std::is_same_v<decltype(span), Span<const int>>);
+ }
+
+ // from non-const
+ {
+ auto span = Span{vec};
+ static_assert(std::is_same_v<decltype(span), Span<int>>);
+ }
+}
diff --git a/mfbt/tests/gtest/TestTainting.cpp b/mfbt/tests/gtest/TestTainting.cpp
new file mode 100644
index 0000000000..0025819c06
--- /dev/null
+++ b/mfbt/tests/gtest/TestTainting.cpp
@@ -0,0 +1,485 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include <math.h>
+
+#include "mozilla/Array.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Range.h"
+#include "mozilla/Tainting.h"
+#include "nsTHashtable.h"
+#include "nsHashKeys.h"
+#include "nsTArray.h"
+#include <array>
+#include <deque>
+#include <forward_list>
+#include <list>
+#include <map>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+using mozilla::Tainted;
+
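+// Tainted<T> wraps a value that arrived from an untrusted source (typically
+// IPC). The wrapped value cannot be used directly; it must be unwrapped
+// through one of the MOZ_VALIDATE_* / MOZ_NO_VALIDATE macros exercised below.
+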
+#define EXPECTED_INT 10
+#define EXPECTED_CHAR 'z'
+
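+// Free-function predicate used by the tests below to check that validators
+// need not be inline lambdas. With EXPECTED_INT == 10,
+// externalFunction(EXPECTED_INT) is true.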
+static bool externalFunction(int arg) { return arg > 2; }
+
+// ==================================================================
+// MOZ_VALIDATE_AND_GET =============================================
+TEST(Tainting, moz_validate_and_get)
+{
+ int bar;
+ int comparisonVariable = 20;
+ Tainted<int> foo = Tainted<int>(EXPECTED_INT);
+
+ bar = MOZ_VALIDATE_AND_GET(foo, foo < 20);
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // This test is for comparison to an external variable, testing the
+ // default capture mode of the lambda used inside the macro.
+ bar = MOZ_VALIDATE_AND_GET(foo, foo < comparisonVariable);
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ bar = MOZ_VALIDATE_AND_GET(
+ foo, foo < 20,
+ "foo must be less than 20 because higher values represent decibel"
+ "levels greater than a a jet engine inside your ear.");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // Test an external variable with a comment.
+ bar = MOZ_VALIDATE_AND_GET(foo, foo < comparisonVariable, "Test comment");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // Test an external function with a comment.
+ bar = MOZ_VALIDATE_AND_GET(foo, externalFunction(foo), "Test comment");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // Lambda Tests
+ bar =
+ MOZ_VALIDATE_AND_GET(foo, ([&foo]() { return externalFunction(foo); }()));
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // This test is for the lambda variant with a supplied assertion
+ // string.
+ bar =
+ MOZ_VALIDATE_AND_GET(foo, ([&foo]() { return externalFunction(foo); }()),
+ "This tests a comment");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+  // This test is for the lambda variant with a captured variable.
+ bar = MOZ_VALIDATE_AND_GET(foo, ([&foo, &comparisonVariable] {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult ||
+ comparisonVariable < 4;
+ }()),
+ "This tests a comment");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+  // This test is for the lambda variant with default by-reference capture.
+ bar = MOZ_VALIDATE_AND_GET(foo, ([&] {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult ||
+ comparisonVariable < 4;
+ }()),
+ "This tests a comment");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // External lambdas
+ auto lambda1 = [](int foo) { return externalFunction(foo); };
+
+ auto lambda2 = [&](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ // Test with an explicit capture
+ auto lambda3 = [&comparisonVariable](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ bar = MOZ_VALIDATE_AND_GET(foo, lambda1(foo));
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // Test with a comment
+ bar = MOZ_VALIDATE_AND_GET(foo, lambda1(foo), "Test comment.");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // Test with a default capture mode
+ bar = MOZ_VALIDATE_AND_GET(foo, lambda2(foo), "Test comment.");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ bar = MOZ_VALIDATE_AND_GET(foo, lambda3(foo), "Test comment.");
+ ASSERT_EQ(bar, EXPECTED_INT);
+
+ // We can't test MOZ_VALIDATE_AND_GET failing, because that triggers
+ // a release assert.
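+  // For illustration only (deliberately left out of the compiled test): a
+  // failing predicate such as
+  //   bar = MOZ_VALIDATE_AND_GET(foo, foo > 20);
+  // would hit the release assert and crash the process.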
+}
+
+// ==================================================================
+// MOZ_IS_VALID =====================================================
+TEST(Tainting, moz_is_valid)
+{
+ int comparisonVariable = 20;
+ Tainted<int> foo = Tainted<int>(EXPECTED_INT);
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, foo < 20));
+
+ ASSERT_FALSE(MOZ_IS_VALID(foo, foo > 20));
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, foo < comparisonVariable));
+
+ ASSERT_TRUE(
+ MOZ_IS_VALID(foo, ([&foo]() { return externalFunction(foo); }())));
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, ([&foo, &comparisonVariable]() {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult ||
+ comparisonVariable < 4;
+ }())));
+
+ // External lambdas
+ auto lambda1 = [](int foo) { return externalFunction(foo); };
+
+ auto lambda2 = [&](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ // Test with an explicit capture
+ auto lambda3 = [&comparisonVariable](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, lambda1(foo)));
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, lambda2(foo)));
+
+ ASSERT_TRUE(MOZ_IS_VALID(foo, lambda3(foo)));
+}
+
+// ==================================================================
+// MOZ_VALIDATE_OR ==================================================
+TEST(Tainting, moz_validate_or)
+{
+ int result;
+ int comparisonVariable = 20;
+ Tainted<int> foo = Tainted<int>(EXPECTED_INT);
+
+ result = MOZ_VALIDATE_OR(foo, foo < 20, 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+ result = MOZ_VALIDATE_OR(foo, foo > 20, 100);
+ ASSERT_EQ(result, 100);
+
+ result = MOZ_VALIDATE_OR(foo, foo < comparisonVariable, 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+ // External lambdas
+ auto lambda1 = [](int foo) { return externalFunction(foo); };
+
+ auto lambda2 = [&](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ // Test with an explicit capture
+ auto lambda3 = [&comparisonVariable](int foo) {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ };
+
+ result = MOZ_VALIDATE_OR(foo, lambda1(foo), 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+ result = MOZ_VALIDATE_OR(foo, lambda2(foo), 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+ result = MOZ_VALIDATE_OR(foo, lambda3(foo), 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+ result =
+ MOZ_VALIDATE_OR(foo, ([&foo]() { return externalFunction(foo); }()), 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+  // This test is for the lambda variant that omits the empty parameter
+  // list.
+ result =
+ MOZ_VALIDATE_OR(foo, ([&foo] { return externalFunction(foo); }()), 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+  // This test is for the lambda variant with a captured variable.
+ result =
+ MOZ_VALIDATE_OR(foo, ([&foo, &comparisonVariable] {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ }()),
+ 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+
+  // This test is for the lambda variant with default by-reference capture.
+ result =
+ MOZ_VALIDATE_OR(foo, ([&] {
+ bool intermediateResult = externalFunction(foo);
+ return intermediateResult || comparisonVariable < 4;
+ }()),
+ 100);
+ ASSERT_EQ(result, EXPECTED_INT);
+}
+
+// ==================================================================
+// MOZ_FIND_AND_VALIDATE ============================================
+TEST(Tainting, moz_find_and_validate)
+{
+ Tainted<int> foo = Tainted<int>(EXPECTED_INT);
+ Tainted<char> baz = Tainted<char>(EXPECTED_CHAR);
+
+ //-------------------------------
+ const mozilla::Array<int, 6> mozarrayWithFoo(0, 5, EXPECTED_INT, 15, 20, 25);
+ const mozilla::Array<int, 5> mozarrayWithoutFoo(0, 5, 15, 20, 25);
+
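+  // In the condition passed to MOZ_FIND_AND_VALIDATE, `list_item` is the
+  // name the macro binds to each element of the collection; the macro
+  // evaluates to a pointer to the first matching element, or to nullptr if
+  // nothing matches.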
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, mozarrayWithFoo) ==
+ mozarrayWithFoo[2]);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo,
+ mozarrayWithoutFoo) == nullptr);
+
+ //-------------------------------
+ class TestClass {
+ public:
+ int a;
+ int b;
+
+    TestClass(int a, int b) : a(a), b(b) {}
+
+ bool operator==(const TestClass& other) const {
+ return this->a == other.a && this->b == other.b;
+ }
+ };
+
+ const mozilla::Array<TestClass, 5> mozarrayOfClassesWithFoo(
+ TestClass(0, 1), TestClass(2, 3), TestClass(EXPECTED_INT, EXPECTED_INT),
+ TestClass(4, 5), TestClass(6, 7));
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(
+ foo, foo == list_item.a && foo == list_item.b,
+ mozarrayOfClassesWithFoo) == mozarrayOfClassesWithFoo[2]);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(
+ foo, (foo == list_item.a && foo == list_item.b),
+ mozarrayOfClassesWithFoo) == mozarrayOfClassesWithFoo[2]);
+
+ ASSERT_TRUE(
+ *MOZ_FIND_AND_VALIDATE(
+ foo,
+ (foo == list_item.a && foo == list_item.b && externalFunction(foo)),
+ mozarrayOfClassesWithFoo) == mozarrayOfClassesWithFoo[2]);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(
+ foo, ([](int tainted_val, TestClass list_item) {
+ return tainted_val == list_item.a &&
+ tainted_val == list_item.b;
+ }(foo, list_item)),
+ mozarrayOfClassesWithFoo) == mozarrayOfClassesWithFoo[2]);
+
+ auto lambda4 = [](int tainted_val, TestClass list_item) {
+ return tainted_val == list_item.a && tainted_val == list_item.b;
+ };
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, lambda4(foo, list_item),
+ mozarrayOfClassesWithFoo) ==
+ mozarrayOfClassesWithFoo[2]);
+
+ //-------------------------------
+ const char m[] = "m";
+ const char o[] = "o";
+ const char z[] = {EXPECTED_CHAR, '\0'};
+ const char l[] = "l";
+ const char a[] = "a";
+
+ nsTHashtable<nsCharPtrHashKey> hashtableWithBaz;
+ hashtableWithBaz.PutEntry(m);
+ hashtableWithBaz.PutEntry(o);
+ hashtableWithBaz.PutEntry(z);
+ hashtableWithBaz.PutEntry(l);
+ hashtableWithBaz.PutEntry(a);
+ nsTHashtable<nsCharPtrHashKey> hashtableWithoutBaz;
+ hashtableWithoutBaz.PutEntry(m);
+ hashtableWithoutBaz.PutEntry(o);
+ hashtableWithoutBaz.PutEntry(l);
+ hashtableWithoutBaz.PutEntry(a);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(baz, *list_item.GetKey() == baz,
+ hashtableWithBaz) ==
+ hashtableWithBaz.GetEntry(z));
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(baz, *list_item.GetKey() == baz,
+ hashtableWithoutBaz) == nullptr);
+
+ //-------------------------------
+ const nsTArray<int> nsTArrayWithFoo = {0, 5, EXPECTED_INT, 15, 20, 25};
+ const nsTArray<int> nsTArrayWithoutFoo = {0, 5, 15, 20, 25};
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, nsTArrayWithFoo) ==
+ nsTArrayWithFoo[2]);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo,
+ nsTArrayWithoutFoo) == nullptr);
+
+ //-------------------------------
+ const std::array<int, 6> arrayWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::array<int, 5> arrayWithoutFoo{0, 5, 15, 20, 25};
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, arrayWithFoo) ==
+ arrayWithFoo[2]);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, arrayWithoutFoo) ==
+ nullptr);
+
+ //-------------------------------
+ const std::deque<int> dequeWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::deque<int> dequeWithoutFoo{0, 5, 15, 20, 25};
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, dequeWithFoo) ==
+ dequeWithFoo[2]);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, dequeWithoutFoo) ==
+ nullptr);
+
+ //-------------------------------
+ const std::forward_list<int> forwardWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::forward_list<int> forwardWithoutFoo{0, 5, 15, 20, 25};
+
+ auto forwardListIt = forwardWithFoo.begin();
+ std::advance(forwardListIt, 2);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, forwardWithFoo) ==
+ *forwardListIt);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, forwardWithoutFoo) ==
+ nullptr);
+
+ //-------------------------------
+ const std::list<int> listWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::list<int> listWithoutFoo{0, 5, 15, 20, 25};
+
+ auto listIt = listWithFoo.begin();
+ std::advance(listIt, 2);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, listWithFoo) ==
+ *listIt);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, listWithoutFoo) ==
+ nullptr);
+
+ //-------------------------------
+ const std::map<std::string, int> mapWithFoo{{
+ {"zero", 0},
+ {"five", 5},
+ {"ten", EXPECTED_INT},
+ {"fifteen", 15},
+ {"twenty", 20},
+ {"twenty-five", 25},
+ }};
+ const std::map<std::string, int> mapWithoutFoo{{
+ {"zero", 0},
+ {"five", 5},
+ {"fifteen", 15},
+ {"twenty", 20},
+ {"twenty-five", 25},
+ }};
+
+  const auto mapIt = mapWithFoo.find("ten");
+
+  ASSERT_TRUE(
+      MOZ_FIND_AND_VALIDATE(foo, list_item.second == foo, mapWithFoo)->second ==
+      mapIt->second);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item.second == foo,
+ mapWithoutFoo) == nullptr);
+
+ //-------------------------------
+ const std::set<int> setWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::set<int> setWithoutFoo{0, 5, 15, 20, 25};
+
+ auto setIt = setWithFoo.find(EXPECTED_INT);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, setWithFoo) ==
+ *setIt);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, setWithoutFoo) ==
+ nullptr);
+
+ //-------------------------------
+ const std::unordered_map<std::string, int> unordermapWithFoo = {
+ {"zero", 0}, {"five", 5}, {"ten", EXPECTED_INT},
+ {"fifteen", 15}, {"twenty", 20}, {"twenty-five", 25},
+ };
+ const std::unordered_map<std::string, int> unordermapWithoutFoo{{
+ {"zero", 0},
+ {"five", 5},
+ {"fifteen", 15},
+ {"twenty", 20},
+ {"twenty-five", 25},
+ }};
+
+ auto unorderedMapIt = unordermapWithFoo.find("ten");
+
+ ASSERT_TRUE(
+ MOZ_FIND_AND_VALIDATE(foo, list_item.second == foo, unordermapWithFoo)
+ ->second == unorderedMapIt->second);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item.second == foo,
+ unordermapWithoutFoo) == nullptr);
+
+ //-------------------------------
+ const std::unordered_set<int> unorderedsetWithFoo{0, 5, EXPECTED_INT,
+ 15, 20, 25};
+ const std::unordered_set<int> unorderedsetWithoutFoo{0, 5, 15, 20, 25};
+
+ auto unorderedSetIt = unorderedsetWithFoo.find(EXPECTED_INT);
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo,
+ unorderedsetWithFoo) == *unorderedSetIt);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo,
+ unorderedsetWithoutFoo) == nullptr);
+
+ //-------------------------------
+ const std::vector<int> vectorWithFoo{0, 5, EXPECTED_INT, 15, 20, 25};
+ const std::vector<int> vectorWithoutFoo{0, 5, 15, 20, 25};
+
+ ASSERT_TRUE(*MOZ_FIND_AND_VALIDATE(foo, list_item == foo, vectorWithFoo) ==
+ vectorWithFoo[2]);
+
+ ASSERT_TRUE(MOZ_FIND_AND_VALIDATE(foo, list_item == foo, vectorWithoutFoo) ==
+ nullptr);
+}
+
+// ==================================================================
+// MOZ_NO_VALIDATE ==================================================
+TEST(Tainting, moz_no_validate)
+{
+ int result;
+ Tainted<int> foo = Tainted<int>(EXPECTED_INT);
+
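+  // MOZ_NO_VALIDATE unwraps the tainted value without any check; the string
+  // argument only documents why skipping validation is safe at this call
+  // site.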
+ result = MOZ_NO_VALIDATE(
+ foo,
+ "Value is used to match against a dictionary key in the parent."
+ "If there's no key present, there won't be a match."
+ "There is no risk of grabbing a cross-origin value from the dictionary,"
+ "because the IPC actor is instatiated per-content-process and the "
+ "dictionary is not shared between actors.");
+ ASSERT_TRUE(result == EXPECTED_INT);
+}
diff --git a/mfbt/tests/gtest/moz.build b/mfbt/tests/gtest/moz.build
new file mode 100644
index 0000000000..0af8d1ea75
--- /dev/null
+++ b/mfbt/tests/gtest/moz.build
@@ -0,0 +1,32 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ "TestBuffer.cpp",
+ "TestLinkedList.cpp",
+ "TestReverseIterator.cpp",
+ "TestSpan.cpp",
+ "TestTainting.cpp",
+]
+
+SOURCES += [
+ "TestAlgorithm.cpp",
+ "TestInitializedOnce.cpp",
+ "TestMainThreadWeakPtr.cpp",
+ "TestResultExtensions.cpp",
+]
+
+if not CONFIG["MOZILLA_OFFICIAL"]:
+ UNIFIED_SOURCES += [
+ # MOZ_DBG is not defined in MOZILLA_OFFICIAL builds.
+ "TestMozDbg.cpp",
+ ]
+
+# LOCAL_INCLUDES += [
+# "../../base",
+# ]
+
+FINAL_LIBRARY = "xul-gtest"
diff --git a/mfbt/tests/moz.build b/mfbt/tests/moz.build
new file mode 100644
index 0000000000..231bec84a3
--- /dev/null
+++ b/mfbt/tests/moz.build
@@ -0,0 +1,117 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+if CONFIG["MOZ_WIDGET_TOOLKIT"]:
+ TEST_DIRS += [
+ "gtest",
+ ]
+
+# Important: for these tests to be run, they also need to be added
+# to testing/cppunittest.toml.
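+# (For reference: an entry in that manifest is a single TOML table header of
+# the form ["TestName"]; illustrative, not copied from the current manifest.)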
+CppUnitTests(
+ [
+ "TestAlgorithm",
+ "TestArray",
+ "TestArrayUtils",
+ "TestAtomicBitfields",
+ "TestAtomics",
+ "TestBinarySearch",
+ "TestBitSet",
+ "TestBloomFilter",
+ "TestBufferList",
+ "TestCasting",
+ "TestCeilingFloor",
+ "TestCheckedInt",
+ "TestCompactPair",
+ "TestCountPopulation",
+ "TestCountZeroes",
+ "TestDefineEnum",
+ "TestDoublyLinkedList",
+ "TestEndian",
+ "TestEnumeratedArray",
+ "TestEnumSet",
+ "TestEnumTypeTraits",
+ "TestFastBernoulliTrial",
+ "TestFloatingPoint",
+ "TestFunctionRef",
+ "TestFunctionTypeTraits",
+ "TestHashTable",
+ "TestIntegerRange",
+ "TestJSONWriter",
+ "TestLinkedList",
+ "TestMacroArgs",
+ "TestMacroForEach",
+ "TestMathAlgorithms",
+ "TestMaybe",
+ "TestNonDereferenceable",
+ "TestNotNull",
+ "TestRandomNum",
+ "TestRange",
+ "TestRefPtr",
+ "TestResult",
+ "TestRollingMean",
+ "TestSaturate",
+ "TestScopeExit",
+ "TestSegmentedVector",
+ "TestSHA1",
+ "TestSIMD",
+ "TestSmallPointerArray",
+ "TestSplayTree",
+ "TestTextUtils",
+ "TestTypedEnum",
+ "TestUniquePtr",
+ "TestVariant",
+ "TestVector",
+ "TestWeakPtr",
+ "TestWrappingOperations",
+ "TestXorShift128PlusRNG",
+ ]
+)
+
+# We don't support these tests yet because WASI lacks thread support.
+if CONFIG["OS_ARCH"] != "WASI":
+ CppUnitTests(
+ [
+ "TestSPSCQueue",
+ "TestThreadSafeWeakPtr",
+ ]
+ )
+
+if CONFIG["OS_ARCH"] == "WINNT":
+ CppUnitTests(
+ [
+ "TestWinArchDefs",
+ ]
+ )
+
+# Not to be unified with the rest, because this test
+# sets MOZ_PRETEND_NO_JSRUST, which changes the behavior
+# of the included headers.
+CppUnitTests(
+ [
+ "TestUtf8",
+ ]
+)
+
+# WASI doesn't support <signal.h> yet, so skip this test there; it is also
+# disabled under ASan and TSan.
+if not CONFIG["MOZ_ASAN"] and not CONFIG["MOZ_TSAN"] and CONFIG["OS_ARCH"] != "WASI":
+ CppUnitTests(
+ [
+ "TestPoisonArea",
+ ]
+ )
+
+DisableStlWrapping()
+
+if CONFIG["CC_TYPE"] == "clang-cl":
+ CXXFLAGS += [
+ "-wd4275", # non dll-interface class used as base for dll-interface class
+ "-wd4530", # C++ exception handler used, but unwind semantics are not enabled
+ ]
+
+USE_LIBS += [
+ "mozglue",
+]