From 6bf0a5cb5034a7e684dcc3500e841785237ce2dd Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 19:32:43 +0200 Subject: Adding upstream version 1:115.7.0. Signed-off-by: Daniel Baumann --- mfbt/Algorithm.h | 128 + mfbt/Alignment.h | 138 + mfbt/AllocPolicy.h | 175 + mfbt/AlreadyAddRefed.h | 180 + mfbt/Array.h | 110 + mfbt/ArrayUtils.h | 188 + mfbt/Assertions.cpp | 52 + mfbt/Assertions.h | 647 +++ mfbt/AtomicBitfields.h | 468 ++ mfbt/Atomics.h | 521 ++ mfbt/Attributes.h | 983 ++++ mfbt/BinarySearch.h | 249 + mfbt/BitSet.h | 177 + mfbt/BloomFilter.h | 338 ++ mfbt/Buffer.h | 197 + mfbt/BufferList.h | 598 +++ mfbt/Casting.h | 203 + mfbt/ChaosMode.cpp | 17 + mfbt/ChaosMode.h | 90 + mfbt/Char16.h | 142 + mfbt/CheckedInt.h | 804 +++ mfbt/CompactPair.h | 244 + mfbt/Compiler.h | 34 + mfbt/Compression.cpp | 182 + mfbt/Compression.h | 218 + mfbt/DbgMacro.h | 206 + mfbt/DebugOnly.h | 102 + mfbt/DefineEnum.h | 156 + mfbt/DoublyLinkedList.h | 578 ++ mfbt/EndianUtils.h | 611 +++ mfbt/EnumSet.h | 340 ++ mfbt/EnumTypeTraits.h | 113 + mfbt/EnumeratedArray.h | 89 + mfbt/EnumeratedRange.h | 206 + mfbt/FStream.h | 124 + mfbt/FastBernoulliTrial.h | 381 ++ mfbt/FloatingPoint.cpp | 41 + mfbt/FloatingPoint.h | 606 +++ mfbt/FunctionRef.h | 226 + mfbt/FunctionTypeTraits.h | 114 + mfbt/Fuzzing.h | 91 + mfbt/HashFunctions.cpp | 37 + mfbt/HashFunctions.h | 417 ++ mfbt/HashTable.h | 2275 ++++++++ mfbt/HelperMacros.h | 18 + mfbt/InitializedOnce.h | 247 + mfbt/IntegerRange.h | 192 + mfbt/IntegerTypeTraits.h | 86 + mfbt/JSONWriter.cpp | 47 + mfbt/JSONWriter.h | 545 ++ mfbt/JsRust.h | 21 + mfbt/Latin1.h | 262 + mfbt/Likely.h | 23 + mfbt/LinkedList.h | 748 +++ mfbt/MacroArgs.h | 97 + mfbt/MacroForEach.h | 219 + mfbt/MathAlgorithms.h | 492 ++ mfbt/Maybe.h | 977 ++++ mfbt/MaybeOneOf.h | 172 + mfbt/MaybeStorageBase.h | 92 + mfbt/MemoryChecking.h | 127 + mfbt/MemoryReporting.h | 30 + mfbt/MoveOnlyFunction.h | 47 + mfbt/MruCache.h | 165 + mfbt/NonDereferenceable.h | 125 + mfbt/NotNull.h | 449 ++ mfbt/Opaque.h | 41 + mfbt/OperatorNewExtensions.h | 50 + mfbt/PairHash.h | 75 + mfbt/Path.h | 31 + mfbt/PodOperations.h | 160 + mfbt/Poison.cpp | 206 + mfbt/Poison.h | 109 + mfbt/RandomNum.cpp | 146 + mfbt/RandomNum.h | 51 + mfbt/Range.h | 82 + mfbt/RangedArray.h | 66 + mfbt/RangedPtr.h | 308 ++ mfbt/ReentrancyGuard.h | 50 + mfbt/RefCountType.h | 37 + mfbt/RefCounted.h | 323 ++ mfbt/RefPtr.h | 657 +++ mfbt/Result.h | 861 +++ mfbt/ResultExtensions.h | 371 ++ mfbt/ResultVariant.h | 61 + mfbt/ReverseIterator.h | 173 + mfbt/RollingMean.h | 93 + mfbt/SHA1.cpp | 405 ++ mfbt/SHA1.h | 61 + mfbt/SPSCQueue.h | 407 ++ mfbt/STYLE | 11 + mfbt/Saturate.h | 248 + mfbt/ScopeExit.h | 126 + mfbt/Scoped.h | 225 + mfbt/SegmentedVector.h | 352 ++ mfbt/SharedLibrary.h | 47 + mfbt/SmallPointerArray.h | 270 + mfbt/Span.h | 972 ++++ mfbt/SplayTree.h | 305 ++ mfbt/StaticAnalysisFunctions.h | 70 + mfbt/TaggedAnonymousMemory.cpp | 93 + mfbt/TaggedAnonymousMemory.h | 87 + mfbt/Tainting.h | 348 ++ mfbt/TemplateLib.h | 126 + mfbt/TextUtils.h | 288 + mfbt/ThreadLocal.h | 256 + mfbt/ThreadSafeWeakPtr.h | 307 ++ mfbt/ThreadSafety.h | 140 + mfbt/ToString.h | 30 + mfbt/TsanOptions.h | 90 + mfbt/TypedEnumBits.h | 135 + mfbt/Types.h | 140 + mfbt/UniquePtr.h | 648 +++ mfbt/UniquePtrExtensions.cpp | 35 + mfbt/UniquePtrExtensions.h | 310 ++ mfbt/Unused.cpp | 13 + mfbt/Unused.h | 41 + mfbt/Utf8.cpp | 38 + mfbt/Utf8.h | 591 +++ mfbt/Variant.h | 928 ++++ mfbt/Vector.h | 1653 ++++++ mfbt/WasiAtomic.h | 199 + mfbt/WeakPtr.h | 358 ++ mfbt/WindowsVersion.h | 215 + 
mfbt/WrappingOperations.h | 262 + mfbt/XorShift128PlusRNG.h | 122 + mfbt/double-conversion/GIT-INFO | 5 + mfbt/double-conversion/add-mfbt-api-markers.patch | 207 + mfbt/double-conversion/debug-only-functions.patch | 39 + mfbt/double-conversion/double-conversion/LICENSE | 26 + mfbt/double-conversion/double-conversion/README.md | 55 + .../double-conversion/bignum-dtoa.cc | 641 +++ .../double-conversion/bignum-dtoa.h | 84 + mfbt/double-conversion/double-conversion/bignum.cc | 797 +++ mfbt/double-conversion/double-conversion/bignum.h | 152 + .../double-conversion/cached-powers.cc | 175 + .../double-conversion/cached-powers.h | 64 + mfbt/double-conversion/double-conversion/diy-fp.h | 137 + .../double-conversion/double-conversion.h | 34 + .../double-conversion/double-to-string.cc | 443 ++ .../double-conversion/double-to-string.h | 471 ++ .../double-conversion/fast-dtoa.cc | 665 +++ .../double-conversion/fast-dtoa.h | 88 + .../double-conversion/fixed-dtoa.cc | 405 ++ .../double-conversion/fixed-dtoa.h | 56 + mfbt/double-conversion/double-conversion/ieee.h | 447 ++ .../double-conversion/string-to-double.cc | 818 +++ .../double-conversion/string-to-double.h | 239 + mfbt/double-conversion/double-conversion/strtod.cc | 610 +++ mfbt/double-conversion/double-conversion/strtod.h | 64 + mfbt/double-conversion/double-conversion/utils.h | 421 ++ mfbt/double-conversion/to-fixed-dbl-max.patch | 51 + mfbt/double-conversion/update.sh | 76 + .../double-conversion/use-mozilla-assertions.patch | 60 + mfbt/fallible.h | 64 + mfbt/lz4/LICENSE | 24 + mfbt/lz4/README.md | 169 + mfbt/lz4/README.mozilla | 18 + mfbt/lz4/lz4.c | 2722 ++++++++++ mfbt/lz4/lz4.h | 842 +++ mfbt/lz4/lz4file.c | 311 ++ mfbt/lz4/lz4file.h | 93 + mfbt/lz4/lz4frame.c | 2078 ++++++++ mfbt/lz4/lz4frame.h | 692 +++ mfbt/lz4/lz4frame_static.h | 47 + mfbt/lz4/lz4hc.c | 1631 ++++++ mfbt/lz4/lz4hc.h | 413 ++ mfbt/lz4/xxhash.c | 43 + mfbt/lz4/xxhash.h | 5583 ++++++++++++++++++++ mfbt/moz.build | 207 + mfbt/tests/TestAlgorithm.cpp | 68 + mfbt/tests/TestArray.cpp | 31 + mfbt/tests/TestArrayUtils.cpp | 301 ++ mfbt/tests/TestAtomicBitfields.cpp | 189 + mfbt/tests/TestAtomics.cpp | 274 + mfbt/tests/TestBinarySearch.cpp | 158 + mfbt/tests/TestBitSet.cpp | 117 + mfbt/tests/TestBloomFilter.cpp | 142 + mfbt/tests/TestBufferList.cpp | 372 ++ mfbt/tests/TestCasting.cpp | 255 + mfbt/tests/TestCeilingFloor.cpp | 81 + mfbt/tests/TestCheckedInt.cpp | 615 +++ mfbt/tests/TestCompactPair.cpp | 160 + mfbt/tests/TestCountPopulation.cpp | 30 + mfbt/tests/TestCountZeroes.cpp | 92 + mfbt/tests/TestDefineEnum.cpp | 78 + mfbt/tests/TestDoublyLinkedList.cpp | 306 ++ mfbt/tests/TestEndian.cpp | 501 ++ mfbt/tests/TestEnumSet.cpp | 306 ++ mfbt/tests/TestEnumTypeTraits.cpp | 159 + mfbt/tests/TestEnumeratedArray.cpp | 46 + mfbt/tests/TestFastBernoulliTrial.cpp | 177 + mfbt/tests/TestFloatingPoint.cpp | 730 +++ mfbt/tests/TestFunctionRef.cpp | 142 + mfbt/tests/TestFunctionTypeTraits.cpp | 232 + mfbt/tests/TestHashTable.cpp | 103 + mfbt/tests/TestIntegerRange.cpp | 150 + mfbt/tests/TestJSONWriter.cpp | 657 +++ mfbt/tests/TestLinkedList.cpp | 399 ++ mfbt/tests/TestMacroArgs.cpp | 38 + mfbt/tests/TestMacroForEach.cpp | 44 + mfbt/tests/TestMathAlgorithms.cpp | 545 ++ mfbt/tests/TestMaybe.cpp | 1473 ++++++ mfbt/tests/TestNonDereferenceable.cpp | 171 + mfbt/tests/TestNotNull.cpp | 386 ++ mfbt/tests/TestPoisonArea.cpp | 530 ++ mfbt/tests/TestRandomNum.cpp | 61 + mfbt/tests/TestRange.cpp | 29 + mfbt/tests/TestRefPtr.cpp | 131 + mfbt/tests/TestResult.cpp | 671 +++ mfbt/tests/TestRollingMean.cpp 
| 114 + mfbt/tests/TestSHA1.cpp | 204 + mfbt/tests/TestSIMD.cpp | 631 +++ mfbt/tests/TestSPSCQueue.cpp | 248 + mfbt/tests/TestSaturate.cpp | 181 + mfbt/tests/TestScopeExit.cpp | 55 + mfbt/tests/TestSegmentedVector.cpp | 369 ++ mfbt/tests/TestSmallPointerArray.cpp | 237 + mfbt/tests/TestSplayTree.cpp | 208 + mfbt/tests/TestTextUtils.cpp | 1064 ++++ mfbt/tests/TestThreadSafeWeakPtr.cpp | 127 + mfbt/tests/TestTypedEnum.cpp | 502 ++ mfbt/tests/TestUniquePtr.cpp | 575 ++ mfbt/tests/TestUtf8.cpp | 755 +++ mfbt/tests/TestVariant.cpp | 1153 ++++ mfbt/tests/TestVector.cpp | 792 +++ mfbt/tests/TestWeakPtr.cpp | 145 + mfbt/tests/TestWrappingOperations.cpp | 587 ++ mfbt/tests/TestXorShift128PlusRNG.cpp | 101 + mfbt/tests/gtest/TestAlgorithm.cpp | 191 + mfbt/tests/gtest/TestBuffer.cpp | 96 + mfbt/tests/gtest/TestInitializedOnce.cpp | 200 + mfbt/tests/gtest/TestLinkedList.cpp | 78 + mfbt/tests/gtest/TestMainThreadWeakPtr.cpp | 42 + mfbt/tests/gtest/TestMozDbg.cpp | 170 + mfbt/tests/gtest/TestResultExtensions.cpp | 579 ++ mfbt/tests/gtest/TestReverseIterator.cpp | 104 + mfbt/tests/gtest/TestSpan.cpp | 2355 +++++++++ mfbt/tests/gtest/TestTainting.cpp | 485 ++ mfbt/tests/gtest/moz.build | 32 + mfbt/tests/moz.build | 110 + 241 files changed, 79587 insertions(+) create mode 100644 mfbt/Algorithm.h create mode 100644 mfbt/Alignment.h create mode 100644 mfbt/AllocPolicy.h create mode 100644 mfbt/AlreadyAddRefed.h create mode 100644 mfbt/Array.h create mode 100644 mfbt/ArrayUtils.h create mode 100644 mfbt/Assertions.cpp create mode 100644 mfbt/Assertions.h create mode 100644 mfbt/AtomicBitfields.h create mode 100644 mfbt/Atomics.h create mode 100644 mfbt/Attributes.h create mode 100644 mfbt/BinarySearch.h create mode 100644 mfbt/BitSet.h create mode 100644 mfbt/BloomFilter.h create mode 100644 mfbt/Buffer.h create mode 100644 mfbt/BufferList.h create mode 100644 mfbt/Casting.h create mode 100644 mfbt/ChaosMode.cpp create mode 100644 mfbt/ChaosMode.h create mode 100644 mfbt/Char16.h create mode 100644 mfbt/CheckedInt.h create mode 100644 mfbt/CompactPair.h create mode 100644 mfbt/Compiler.h create mode 100644 mfbt/Compression.cpp create mode 100644 mfbt/Compression.h create mode 100644 mfbt/DbgMacro.h create mode 100644 mfbt/DebugOnly.h create mode 100644 mfbt/DefineEnum.h create mode 100644 mfbt/DoublyLinkedList.h create mode 100644 mfbt/EndianUtils.h create mode 100644 mfbt/EnumSet.h create mode 100644 mfbt/EnumTypeTraits.h create mode 100644 mfbt/EnumeratedArray.h create mode 100644 mfbt/EnumeratedRange.h create mode 100644 mfbt/FStream.h create mode 100644 mfbt/FastBernoulliTrial.h create mode 100644 mfbt/FloatingPoint.cpp create mode 100644 mfbt/FloatingPoint.h create mode 100644 mfbt/FunctionRef.h create mode 100644 mfbt/FunctionTypeTraits.h create mode 100644 mfbt/Fuzzing.h create mode 100644 mfbt/HashFunctions.cpp create mode 100644 mfbt/HashFunctions.h create mode 100644 mfbt/HashTable.h create mode 100644 mfbt/HelperMacros.h create mode 100644 mfbt/InitializedOnce.h create mode 100644 mfbt/IntegerRange.h create mode 100644 mfbt/IntegerTypeTraits.h create mode 100644 mfbt/JSONWriter.cpp create mode 100644 mfbt/JSONWriter.h create mode 100644 mfbt/JsRust.h create mode 100644 mfbt/Latin1.h create mode 100644 mfbt/Likely.h create mode 100644 mfbt/LinkedList.h create mode 100644 mfbt/MacroArgs.h create mode 100644 mfbt/MacroForEach.h create mode 100644 mfbt/MathAlgorithms.h create mode 100644 mfbt/Maybe.h create mode 100644 mfbt/MaybeOneOf.h create mode 100644 mfbt/MaybeStorageBase.h create mode 100644 
mfbt/MemoryChecking.h create mode 100644 mfbt/MemoryReporting.h create mode 100644 mfbt/MoveOnlyFunction.h create mode 100644 mfbt/MruCache.h create mode 100644 mfbt/NonDereferenceable.h create mode 100644 mfbt/NotNull.h create mode 100644 mfbt/Opaque.h create mode 100644 mfbt/OperatorNewExtensions.h create mode 100644 mfbt/PairHash.h create mode 100644 mfbt/Path.h create mode 100644 mfbt/PodOperations.h create mode 100644 mfbt/Poison.cpp create mode 100644 mfbt/Poison.h create mode 100644 mfbt/RandomNum.cpp create mode 100644 mfbt/RandomNum.h create mode 100644 mfbt/Range.h create mode 100644 mfbt/RangedArray.h create mode 100644 mfbt/RangedPtr.h create mode 100644 mfbt/ReentrancyGuard.h create mode 100644 mfbt/RefCountType.h create mode 100644 mfbt/RefCounted.h create mode 100644 mfbt/RefPtr.h create mode 100644 mfbt/Result.h create mode 100644 mfbt/ResultExtensions.h create mode 100644 mfbt/ResultVariant.h create mode 100644 mfbt/ReverseIterator.h create mode 100644 mfbt/RollingMean.h create mode 100644 mfbt/SHA1.cpp create mode 100644 mfbt/SHA1.h create mode 100644 mfbt/SPSCQueue.h create mode 100644 mfbt/STYLE create mode 100644 mfbt/Saturate.h create mode 100644 mfbt/ScopeExit.h create mode 100644 mfbt/Scoped.h create mode 100644 mfbt/SegmentedVector.h create mode 100644 mfbt/SharedLibrary.h create mode 100644 mfbt/SmallPointerArray.h create mode 100644 mfbt/Span.h create mode 100644 mfbt/SplayTree.h create mode 100644 mfbt/StaticAnalysisFunctions.h create mode 100644 mfbt/TaggedAnonymousMemory.cpp create mode 100644 mfbt/TaggedAnonymousMemory.h create mode 100644 mfbt/Tainting.h create mode 100644 mfbt/TemplateLib.h create mode 100644 mfbt/TextUtils.h create mode 100644 mfbt/ThreadLocal.h create mode 100644 mfbt/ThreadSafeWeakPtr.h create mode 100644 mfbt/ThreadSafety.h create mode 100644 mfbt/ToString.h create mode 100644 mfbt/TsanOptions.h create mode 100644 mfbt/TypedEnumBits.h create mode 100644 mfbt/Types.h create mode 100644 mfbt/UniquePtr.h create mode 100644 mfbt/UniquePtrExtensions.cpp create mode 100644 mfbt/UniquePtrExtensions.h create mode 100644 mfbt/Unused.cpp create mode 100644 mfbt/Unused.h create mode 100644 mfbt/Utf8.cpp create mode 100644 mfbt/Utf8.h create mode 100644 mfbt/Variant.h create mode 100644 mfbt/Vector.h create mode 100644 mfbt/WasiAtomic.h create mode 100644 mfbt/WeakPtr.h create mode 100644 mfbt/WindowsVersion.h create mode 100644 mfbt/WrappingOperations.h create mode 100644 mfbt/XorShift128PlusRNG.h create mode 100644 mfbt/double-conversion/GIT-INFO create mode 100644 mfbt/double-conversion/add-mfbt-api-markers.patch create mode 100644 mfbt/double-conversion/debug-only-functions.patch create mode 100644 mfbt/double-conversion/double-conversion/LICENSE create mode 100644 mfbt/double-conversion/double-conversion/README.md create mode 100644 mfbt/double-conversion/double-conversion/bignum-dtoa.cc create mode 100644 mfbt/double-conversion/double-conversion/bignum-dtoa.h create mode 100644 mfbt/double-conversion/double-conversion/bignum.cc create mode 100644 mfbt/double-conversion/double-conversion/bignum.h create mode 100644 mfbt/double-conversion/double-conversion/cached-powers.cc create mode 100644 mfbt/double-conversion/double-conversion/cached-powers.h create mode 100644 mfbt/double-conversion/double-conversion/diy-fp.h create mode 100644 mfbt/double-conversion/double-conversion/double-conversion.h create mode 100644 mfbt/double-conversion/double-conversion/double-to-string.cc create mode 100644 
mfbt/double-conversion/double-conversion/double-to-string.h create mode 100644 mfbt/double-conversion/double-conversion/fast-dtoa.cc create mode 100644 mfbt/double-conversion/double-conversion/fast-dtoa.h create mode 100644 mfbt/double-conversion/double-conversion/fixed-dtoa.cc create mode 100644 mfbt/double-conversion/double-conversion/fixed-dtoa.h create mode 100644 mfbt/double-conversion/double-conversion/ieee.h create mode 100644 mfbt/double-conversion/double-conversion/string-to-double.cc create mode 100644 mfbt/double-conversion/double-conversion/string-to-double.h create mode 100644 mfbt/double-conversion/double-conversion/strtod.cc create mode 100644 mfbt/double-conversion/double-conversion/strtod.h create mode 100644 mfbt/double-conversion/double-conversion/utils.h create mode 100644 mfbt/double-conversion/to-fixed-dbl-max.patch create mode 100755 mfbt/double-conversion/update.sh create mode 100644 mfbt/double-conversion/use-mozilla-assertions.patch create mode 100644 mfbt/fallible.h create mode 100644 mfbt/lz4/LICENSE create mode 100644 mfbt/lz4/README.md create mode 100644 mfbt/lz4/README.mozilla create mode 100644 mfbt/lz4/lz4.c create mode 100644 mfbt/lz4/lz4.h create mode 100644 mfbt/lz4/lz4file.c create mode 100644 mfbt/lz4/lz4file.h create mode 100644 mfbt/lz4/lz4frame.c create mode 100644 mfbt/lz4/lz4frame.h create mode 100644 mfbt/lz4/lz4frame_static.h create mode 100644 mfbt/lz4/lz4hc.c create mode 100644 mfbt/lz4/lz4hc.h create mode 100644 mfbt/lz4/xxhash.c create mode 100644 mfbt/lz4/xxhash.h create mode 100644 mfbt/moz.build create mode 100644 mfbt/tests/TestAlgorithm.cpp create mode 100644 mfbt/tests/TestArray.cpp create mode 100644 mfbt/tests/TestArrayUtils.cpp create mode 100644 mfbt/tests/TestAtomicBitfields.cpp create mode 100644 mfbt/tests/TestAtomics.cpp create mode 100644 mfbt/tests/TestBinarySearch.cpp create mode 100644 mfbt/tests/TestBitSet.cpp create mode 100644 mfbt/tests/TestBloomFilter.cpp create mode 100644 mfbt/tests/TestBufferList.cpp create mode 100644 mfbt/tests/TestCasting.cpp create mode 100644 mfbt/tests/TestCeilingFloor.cpp create mode 100644 mfbt/tests/TestCheckedInt.cpp create mode 100644 mfbt/tests/TestCompactPair.cpp create mode 100644 mfbt/tests/TestCountPopulation.cpp create mode 100644 mfbt/tests/TestCountZeroes.cpp create mode 100644 mfbt/tests/TestDefineEnum.cpp create mode 100644 mfbt/tests/TestDoublyLinkedList.cpp create mode 100644 mfbt/tests/TestEndian.cpp create mode 100644 mfbt/tests/TestEnumSet.cpp create mode 100644 mfbt/tests/TestEnumTypeTraits.cpp create mode 100644 mfbt/tests/TestEnumeratedArray.cpp create mode 100644 mfbt/tests/TestFastBernoulliTrial.cpp create mode 100644 mfbt/tests/TestFloatingPoint.cpp create mode 100644 mfbt/tests/TestFunctionRef.cpp create mode 100644 mfbt/tests/TestFunctionTypeTraits.cpp create mode 100644 mfbt/tests/TestHashTable.cpp create mode 100644 mfbt/tests/TestIntegerRange.cpp create mode 100644 mfbt/tests/TestJSONWriter.cpp create mode 100644 mfbt/tests/TestLinkedList.cpp create mode 100644 mfbt/tests/TestMacroArgs.cpp create mode 100644 mfbt/tests/TestMacroForEach.cpp create mode 100644 mfbt/tests/TestMathAlgorithms.cpp create mode 100644 mfbt/tests/TestMaybe.cpp create mode 100644 mfbt/tests/TestNonDereferenceable.cpp create mode 100644 mfbt/tests/TestNotNull.cpp create mode 100644 mfbt/tests/TestPoisonArea.cpp create mode 100644 mfbt/tests/TestRandomNum.cpp create mode 100644 mfbt/tests/TestRange.cpp create mode 100644 mfbt/tests/TestRefPtr.cpp create mode 100644 mfbt/tests/TestResult.cpp 
create mode 100644 mfbt/tests/TestRollingMean.cpp
create mode 100644 mfbt/tests/TestSHA1.cpp
create mode 100644 mfbt/tests/TestSIMD.cpp
create mode 100644 mfbt/tests/TestSPSCQueue.cpp
create mode 100644 mfbt/tests/TestSaturate.cpp
create mode 100644 mfbt/tests/TestScopeExit.cpp
create mode 100644 mfbt/tests/TestSegmentedVector.cpp
create mode 100644 mfbt/tests/TestSmallPointerArray.cpp
create mode 100644 mfbt/tests/TestSplayTree.cpp
create mode 100644 mfbt/tests/TestTextUtils.cpp
create mode 100644 mfbt/tests/TestThreadSafeWeakPtr.cpp
create mode 100644 mfbt/tests/TestTypedEnum.cpp
create mode 100644 mfbt/tests/TestUniquePtr.cpp
create mode 100644 mfbt/tests/TestUtf8.cpp
create mode 100644 mfbt/tests/TestVariant.cpp
create mode 100644 mfbt/tests/TestVector.cpp
create mode 100644 mfbt/tests/TestWeakPtr.cpp
create mode 100644 mfbt/tests/TestWrappingOperations.cpp
create mode 100644 mfbt/tests/TestXorShift128PlusRNG.cpp
create mode 100644 mfbt/tests/gtest/TestAlgorithm.cpp
create mode 100644 mfbt/tests/gtest/TestBuffer.cpp
create mode 100644 mfbt/tests/gtest/TestInitializedOnce.cpp
create mode 100644 mfbt/tests/gtest/TestLinkedList.cpp
create mode 100644 mfbt/tests/gtest/TestMainThreadWeakPtr.cpp
create mode 100644 mfbt/tests/gtest/TestMozDbg.cpp
create mode 100644 mfbt/tests/gtest/TestResultExtensions.cpp
create mode 100644 mfbt/tests/gtest/TestReverseIterator.cpp
create mode 100644 mfbt/tests/gtest/TestSpan.cpp
create mode 100644 mfbt/tests/gtest/TestTainting.cpp
create mode 100644 mfbt/tests/gtest/moz.build
create mode 100644 mfbt/tests/moz.build
(limited to 'mfbt')

diff --git a/mfbt/Algorithm.h b/mfbt/Algorithm.h
new file mode 100644
index 0000000000..33d666de49
--- /dev/null
+++ b/mfbt/Algorithm.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A polyfill for `<algorithm>`. */
+
+#ifndef mozilla_Algorithm_h
+#define mozilla_Algorithm_h
+
+#include "mozilla/Result.h"
+
+#include <iterator>
+#include <type_traits>
+
+namespace mozilla {
+
+// Returns true if all elements in the range [aFirst, aLast)
+// satisfy the predicate aPred.
+template <typename Iter, typename Pred>
+constexpr bool AllOf(Iter aFirst, Iter aLast, Pred aPred) {
+  for (; aFirst != aLast; ++aFirst) {
+    if (!aPred(*aFirst)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Like C++20's `std::any_of`.
+template <typename Iter, typename Pred>
+constexpr bool AnyOf(Iter aFirst, Iter aLast, Pred aPred) {
+  for (; aFirst != aLast; ++aFirst) {
+    if (aPred(*aFirst)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+namespace detail {
+template <typename Transform, typename SrcIter>
+using ArrayElementTransformType = typename std::invoke_result_t<
+    Transform, typename std::iterator_traits<SrcIter>::reference>;
+
+template <typename Transform, typename SrcIter>
+struct TransformTraits {
+  using result_type = ArrayElementTransformType<Transform, SrcIter>;
+
+  using result_ok_type = typename result_type::ok_type;
+  using result_err_type = typename result_type::err_type;
+};
+} // namespace detail
+
+// An algorithm similar to TransformAbortOnErr combined with a condition
+// that allows skipping elements. At most std::distance(aIter, aEnd)
+// elements will be inserted into aDst.
+//
+// Type requirements, in addition to those specified in TransformAbortOnErr:
+// - Cond must be compatible with signature
+//   bool (const SrcIter::value_type&)
+template <typename SrcIter, typename DstIter, typename Cond,
+          typename Transform>
+Result<Ok, typename detail::TransformTraits<Transform, SrcIter>::result_err_type>
+TransformIfAbortOnErr(SrcIter aIter, SrcIter aEnd, DstIter aDst, Cond aCond,
+                      Transform aTransform) {
+  for (; aIter != aEnd; ++aIter) {
+    if (!aCond(static_cast<std::add_const_t<
+                   typename std::iterator_traits<SrcIter>::value_type>&>(
+            *aIter))) {
+      continue;
+    }
+
+    auto res = aTransform(*aIter);
+    if (res.isErr()) {
+      return Err(res.unwrapErr());
+    }
+
+    *aDst++ = res.unwrap();
+  }
+  return Ok{};
+}
+
+template <typename SrcRange, typename DstIter, typename Cond,
+          typename Transform>
+auto TransformIfAbortOnErr(SrcRange& aRange, DstIter aDst, Cond aCond,
+                           Transform aTransform) {
+  using std::begin;
+  using std::end;
+  return TransformIfAbortOnErr(begin(aRange), end(aRange), aDst, aCond,
+                               aTransform);
+}
+
+// An algorithm similar to std::transform, adapted to error handling based on
+// mozilla::Result. It iterates through the input range [aIter, aEnd) and
+// inserts the result of applying aTransform to each element into aDst, if
+// aTransform returns a success result. On the first error result, iterating is
+// aborted, and the error result is returned as an overall result. If all
+// transformations return a success result, Ok is returned as an overall result.
+//
+// Type requirements:
+// - SrcIter must be an InputIterator.
+// - DstIter must be an OutputIterator.
+// - Transform must be compatible with signature
+//   Result<D, E> (SrcIter::reference)
+template <typename SrcIter, typename DstIter, typename Transform>
+Result<Ok, typename detail::TransformTraits<Transform, SrcIter>::result_err_type>
+TransformAbortOnErr(SrcIter aIter, SrcIter aEnd, DstIter aDst,
+                    Transform aTransform) {
+  return TransformIfAbortOnErr(
+      aIter, aEnd, aDst, [](const auto&) { return true; }, aTransform);
+}
+
+template <typename SrcRange, typename DstIter, typename Transform>
+auto TransformAbortOnErr(SrcRange& aRange, DstIter aDst, Transform aTransform) {
+  using std::begin;
+  using std::end;
+  return TransformIfAbortOnErr(
+      begin(aRange), end(aRange), aDst, [](const auto&) { return true; },
+      aTransform);
+}
+
+} // namespace mozilla
+
+#endif // mozilla_Algorithm_h
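A usage sketch (illustrative only, not part of the patch): TransformAbortOnErr
maps a range through a fallible function and stops at the first error. The
Error enum and the ParseDigit/ParseDigits helpers below are hypothetical names
invented for this example.

#include <cstdint>
#include <iterator>
#include <vector>

#include "mozilla/Algorithm.h"
#include "mozilla/Result.h"

enum class Error { BadDigit };

static mozilla::Result<uint8_t, Error> ParseDigit(char aChar) {
  if (aChar < '0' || aChar > '9') {
    return mozilla::Err(Error::BadDigit);
  }
  return static_cast<uint8_t>(aChar - '0');
}

// Returns Ok on success; aborts and propagates Error::BadDigit on the first
// non-digit character, leaving aOut holding the elements converted so far.
static mozilla::Result<mozilla::Ok, Error> ParseDigits(
    const std::vector<char>& aIn, std::vector<uint8_t>& aOut) {
  return mozilla::TransformAbortOnErr(aIn.begin(), aIn.end(),
                                      std::back_inserter(aOut), ParseDigit);
}

The range overload (passing aIn directly instead of an iterator pair) behaves
identically.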
diff --git a/mfbt/Alignment.h b/mfbt/Alignment.h
new file mode 100644
index 0000000000..c38e00d12c
--- /dev/null
+++ b/mfbt/Alignment.h
@@ -0,0 +1,138 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Functionality related to memory alignment. */
+
+#ifndef mozilla_Alignment_h
+#define mozilla_Alignment_h
+
+#include "mozilla/Attributes.h"
+#include <stddef.h>
+#include <stdint.h>
+
+namespace mozilla {
+
+/*
+ * This class, and the corresponding macro MOZ_ALIGNOF, figures out how many
+ * bytes of alignment a given type needs.
+ */
+template <typename T>
+class AlignmentFinder {
+  struct Aligner {
+    char mChar;
+    T mT;
+
+    // Aligner may be used to check alignment of types with deleted dtors. This
+    // results in such specializations having implicitly deleted dtors, which
+    // causes fatal warnings on MSVC (see bug 1481005). As we don't create
+    // Aligners, we can avoid this warning by explicitly deleting the dtor.
+    ~Aligner() = delete;
+  };
+
+ public:
+  static const size_t alignment = sizeof(Aligner) - sizeof(T);
+};
+
+#define MOZ_ALIGNOF(T) mozilla::AlignmentFinder<T>::alignment
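A quick illustrative check (sketch only, not part of the patch): MOZ_ALIGNOF
mirrors the alignof operator and is usable in constant expressions, since
AlignmentFinder::alignment is a compile-time constant.

#include "mozilla/Alignment.h"

static_assert(MOZ_ALIGNOF(double) == alignof(double),
              "AlignmentFinder agrees with the alignof operator");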
+
+namespace detail {
+template <typename T>
+struct AlignasHelper {
+  T mT;
+};
+} // namespace detail
+
+/*
+ * Use this instead of alignof to align a struct field as if it is inside
+ * a struct. On some platforms, there exist types which have different
+ * alignment between when it is used on its own and when it is used on
+ * a struct field.
+ *
+ * Known examples are 64bit types (uint64_t, double) on 32bit Linux,
+ * where they have 8byte alignment on their own, and 4byte alignment
+ * when in struct.
+ */
+#define MOZ_ALIGNAS_IN_STRUCT(T) alignas(mozilla::detail::AlignasHelper<T>)
+
+/*
+ * Declare the MOZ_ALIGNED_DECL macro for declaring aligned types.
+ *
+ * For instance,
+ *
+ *   MOZ_ALIGNED_DECL(8, char arr[2]);
+ *
+ * will declare a two-character array |arr| aligned to 8 bytes.
+ */
+
+#if defined(__GNUC__)
+#  define MOZ_ALIGNED_DECL(_align, _type) _type __attribute__((aligned(_align)))
+#elif defined(_MSC_VER)
+#  define MOZ_ALIGNED_DECL(_align, _type) __declspec(align(_align)) _type
+#else
+#  warning "We don't know how to align variables on this compiler."
+#  define MOZ_ALIGNED_DECL(_align, _type) _type
+#endif
+
+/*
+ * AlignedElem is a structure whose alignment is guaranteed to be at least N
+ * bytes.
+ *
+ * We support 1, 2, 4, 8, and 16-byte alignment.
+ */
+template <size_t N>
+struct AlignedElem;
+
+/*
+ * We have to specialize this template because GCC doesn't like
+ * __attribute__((aligned(foo))) where foo is a template parameter.
+ */
+
+template <>
+struct AlignedElem<1> {
+  MOZ_ALIGNED_DECL(1, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<2> {
+  MOZ_ALIGNED_DECL(2, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<4> {
+  MOZ_ALIGNED_DECL(4, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<8> {
+  MOZ_ALIGNED_DECL(8, uint8_t elem);
+};
+
+template <>
+struct AlignedElem<16> {
+  MOZ_ALIGNED_DECL(16, uint8_t elem);
+};
+
+template <typename T>
+struct MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS AlignedStorage2 {
+  union U {
+    char mBytes[sizeof(T)];
+    uint64_t mDummy;
+  } u;
+
+  const T* addr() const { return reinterpret_cast<const T*>(u.mBytes); }
+  T* addr() { return static_cast<T*>(static_cast<void*>(u.mBytes)); }
+
+  AlignedStorage2() = default;
+
+  // AlignedStorage2 is non-copyable: the default copy constructor violates
+  // strict aliasing rules, per bug 1269319.
+  AlignedStorage2(const AlignedStorage2&) = delete;
+  void operator=(const AlignedStorage2&) = delete;
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Alignment_h */
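A usage sketch for the helpers above (TaggedSlot is a made-up struct used only
for illustration):

#include <stdint.h>

#include "mozilla/Alignment.h"

// mStorage gets the alignment uint64_t has as a struct *field*, which on
// 32-bit Linux is 4 bytes rather than the standalone 8.
struct TaggedSlot {
  char mTag;
  MOZ_ALIGNAS_IN_STRUCT(uint64_t) unsigned char mStorage[sizeof(uint64_t)];
};

// AlignedElem<N> guarantees N-byte alignment, e.g. as a placement-new target.
static_assert(alignof(mozilla::AlignedElem<16>) == 16,
              "AlignedElem<16> is 16-byte aligned");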
diff --git a/mfbt/AllocPolicy.h b/mfbt/AllocPolicy.h
new file mode 100644
index 0000000000..e5c62bcd64
--- /dev/null
+++ b/mfbt/AllocPolicy.h
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * An allocation policy concept, usable for structures and algorithms to
+ * control how memory is allocated and how failures are handled.
+ */
+
+#ifndef mozilla_AllocPolicy_h
+#define mozilla_AllocPolicy_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/TemplateLib.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+namespace mozilla {
+
+/*
+ * Allocation policies are used to implement the standard allocation behaviors
+ * in a customizable way. Additionally, custom behaviors may be added to these
+ * behaviors, such as additionally reporting an error through an out-of-band
+ * mechanism when OOM occurs. The concept modeled here is as follows:
+ *
+ *  - public copy constructor, assignment, destructor
+ *  - template <typename T> T* maybe_pod_malloc(size_t)
+ *      Fallible, but doesn't report an error on OOM.
+ *  - template <typename T> T* maybe_pod_calloc(size_t)
+ *      Fallible, but doesn't report an error on OOM.
+ *  - template <typename T> T* maybe_pod_realloc(T*, size_t, size_t)
+ *      Fallible, but doesn't report an error on OOM. The old allocation
+ *      size is passed in, in addition to the new allocation size requested.
+ *  - template <typename T> T* pod_malloc(size_t)
+ *      Responsible for OOM reporting when null is returned.
+ *  - template <typename T> T* pod_calloc(size_t)
+ *      Responsible for OOM reporting when null is returned.
+ *  - template <typename T> T* pod_realloc(T*, size_t, size_t)
+ *      Responsible for OOM reporting when null is returned. The old allocation
+ *      size is passed in, in addition to the new allocation size requested.
+ *  - template <typename T> void free_(T*, size_t)
+ *      The capacity passed in must match the old allocation size.
+ *  - template <typename T> void free_(T*)
+ *      Frees a buffer without knowing its allocated size. This might not be
+ *      implemented by allocation policies that need the allocation size.
+ *  - void reportAllocOverflow() const
+ *      Called on allocation overflow (that is, an allocation implicitly tried
+ *      to allocate more than the available memory space -- think allocating an
+ *      array of large-size objects, where N * size overflows) before null is
+ *      returned.
+ *  - bool checkSimulatedOOM() const
+ *      Some clients generally allocate memory yet in some circumstances won't
+ *      need to do so. For example, appending to a vector with a small amount of
+ *      inline storage generally allocates memory, but no allocation occurs
+ *      unless appending exceeds inline storage. But for testing purposes, it
+ *      can be useful to treat *every* operation as allocating.
+ *      Clients (such as this hypothetical append method implementation) should
+ *      call this method in situations that don't allocate, but could generally,
+ *      to support this. The default behavior should return true; more
+ *      complicated behavior might be to return false only after a certain
+ *      number of allocations-or-check-simulated-OOMs (coordinating with the
+ *      other AllocPolicy methods) have occurred.
+ *
+ * mfbt provides (and typically uses by default) only MallocAllocPolicy, which
+ * does nothing more than delegate to the malloc/alloc/free functions.
+ */
+
+/*
+ * A policy that straightforwardly uses malloc/calloc/realloc/free and adds no
+ * extra behaviors.
+ */
+class MallocAllocPolicy {
+ public:
+  template <typename T>
+  T* maybe_pod_malloc(size_t aNumElems) {
+    if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+      return nullptr;
+    }
+    return static_cast<T*>(malloc(aNumElems * sizeof(T)));
+  }
+
+  template <typename T>
+  T* maybe_pod_calloc(size_t aNumElems) {
+    return static_cast<T*>(calloc(aNumElems, sizeof(T)));
+  }
+
+  template <typename T>
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+      return nullptr;
+    }
+    return static_cast<T*>(realloc(aPtr, aNewSize * sizeof(T)));
+  }
+
+  template <typename T>
+  T* pod_malloc(size_t aNumElems) {
+    return maybe_pod_malloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems) {
+    return maybe_pod_calloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
+  }
+
+  template <typename T>
+  void free_(T* aPtr, size_t aNumElems = 0) {
+    free(aPtr);
+  }
+
+  void reportAllocOverflow() const {}
+
+  [[nodiscard]] bool checkSimulatedOOM() const { return true; }
+};
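The concept is duck-typed, so a custom policy can delegate to MallocAllocPolicy
and decorate it. A minimal sketch (CountingAllocPolicy is a made-up name, not
part of the patch):

#include <stddef.h>

#include "mozilla/AllocPolicy.h"

// Counts successful pod_malloc calls while otherwise behaving like
// MallocAllocPolicy.
class CountingAllocPolicy : public mozilla::MallocAllocPolicy {
 public:
  template <typename T>
  T* pod_malloc(size_t aNumElems) {
    T* p = mozilla::MallocAllocPolicy::pod_malloc<T>(aNumElems);
    if (p) {
      ++mAllocCount;
    }
    return p;
  }

  size_t mAllocCount = 0;
};

Such a policy can then be plugged into containers like mozilla::Vector as the
allocation-policy template parameter.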
+
+/*
+ * A policy which always fails to allocate memory, returning nullptr. Methods
+ * which expect an existing allocation assert.
+ *
+ * This type should be used in situations where you want to use an MFBT type
+ * with inline storage, and don't want to allow it to allocate on the heap.
+ */
+class NeverAllocPolicy {
+ public:
+  template <typename T>
+  T* maybe_pod_malloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* maybe_pod_calloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    MOZ_CRASH("NeverAllocPolicy::maybe_pod_realloc");
+  }
+
+  template <typename T>
+  T* pod_malloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems) {
+    return nullptr;
+  }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    MOZ_CRASH("NeverAllocPolicy::pod_realloc");
+  }
+
+  template <typename T>
+  void free_(T* aPtr, size_t aNumElems = 0) {
+    MOZ_CRASH("NeverAllocPolicy::free_");
+  }
+
+  void reportAllocOverflow() const {}
+
+  [[nodiscard]] bool checkSimulatedOOM() const { return true; }
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_AllocPolicy_h */
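A sketch of the inline-storage use case described above (assuming
mozilla::Vector from Vector.h in this same patch, whose third template
parameter is the allocation policy):

#include "mozilla/AllocPolicy.h"
#include "mozilla/Vector.h"

// Appends succeed while the 16-element inline buffer has room, and fail
// (returning false) instead of touching the heap afterwards.
bool TryAppend(int aValue) {
  mozilla::Vector<int, 16, mozilla::NeverAllocPolicy> vec;
  return vec.append(aValue);
}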
diff --git a/mfbt/AlreadyAddRefed.h b/mfbt/AlreadyAddRefed.h
new file mode 100644
index 0000000000..3b4ae88855
--- /dev/null
+++ b/mfbt/AlreadyAddRefed.h
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Typed temporary pointers for reference-counted smart pointers. */
+
+#ifndef AlreadyAddRefed_h
+#define AlreadyAddRefed_h
+
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+struct unused_t;
+
+} // namespace mozilla
+
+/**
+ * already_AddRefed cooperates with reference counting smart pointers to enable
+ * you to assign in a pointer _without_ |AddRef|ing it. You might want to use
+ * this as a return type from a function that returns an already |AddRef|ed
+ * pointer.
+ *
+ * TODO Move already_AddRefed to namespace mozilla. This has not yet been done
+ * because of the sheer number of usages of already_AddRefed.
+ *
+ * When should you use already_AddRefed<>?
+ * * Ensure a consumer takes ownership of a reference
+ * * Pass ownership without calling AddRef/Release (sometimes required in
+ *   off-main-thread code)
+ * * The ref pointer type you're using doesn't support move construction
+ *
+ * Otherwise, use std::move(RefPtr/nsCOMPtr/etc).
+ */
+template <class T>
+struct
+#if !defined(MOZ_CLANG_PLUGIN) && !defined(XGILL_PLUGIN)
+    [[nodiscard]]
+#endif
+    MOZ_NON_AUTOABLE already_AddRefed {
+  already_AddRefed() : mRawPtr(nullptr) {}
+
+  // For simplicity, allow returning nullptr from functions returning
+  // already_AddRefed. Don't permit returning raw T*, though; it's preferred
+  // to create already_AddRefed from a reference-counting smart pointer.
+  MOZ_IMPLICIT already_AddRefed(decltype(nullptr)) : mRawPtr(nullptr) {}
+  explicit already_AddRefed(T* aRawPtr) : mRawPtr(aRawPtr) {}
+
+  // Disallow copy constructor and copy assignment operator: move semantics
+  // used instead.
+  already_AddRefed(const already_AddRefed& aOther) = delete;
+  already_AddRefed& operator=(const already_AddRefed& aOther) = delete;
+
+  // WARNING: sketchiness ahead.
+  //
+  // The x86-64 ABI for Unix-like operating systems requires structures to be
+  // returned via invisible reference if they are non-trivial for the purposes
+  // of calls according to the C++ ABI[1]. For our consideration here, that
+  // means that if we have a non-trivial move constructor or destructor,
+  // already_AddRefed must be returned by invisible reference. But
+  // already_AddRefed is small enough and so commonly used that it would be
+  // beneficial to return it via registers instead. So we need to figure out
+  // a way to make the move constructor and the destructor trivial.
+  //
+  // Our destructor is normally non-trivial, because it asserts that the
+  // stored pointer has been taken by somebody else prior to destruction.
+  // However, since the assert in question is compiled only for DEBUG builds,
+  // we can make the destructor trivial in non-DEBUG builds by simply defining
+  // it with `= default`.
+  //
+  // We now have to make the move constructor trivial as well. It is normally
+  // non-trivial, because the incoming object has its pointer null-ed during
+  // the move. This null-ing is done to satisfy the assert in the destructor.
+  // But since that destructor has no assert in non-DEBUG builds, the clearing
+  // is unnecessary in such builds; all we really need to perform is a copy of
+  // the pointer from the incoming object. So we can let the compiler define
+  // a trivial move constructor for us, and already_AddRefed can now be
+  // returned in registers rather than needing to allocate a stack slot for
+  // an invisible reference.
+  //
+  // The above considerations apply to Unix-like operating systems only; the
+  // conditions for the same optimization to apply on x86-64 Windows are much
+  // more stringent and are basically impossible for already_AddRefed to
+  // satisfy[2]. But we do get some benefit from this optimization on Windows
+  // because we removed the nulling of the pointer during the move, so that's
+  // a codesize win.
+  //
+  // [1] https://itanium-cxx-abi.github.io/cxx-abi/abi.html#non-trivial
+  // [2] https://docs.microsoft.com/en-us/cpp/build/return-values-cpp
+
+  already_AddRefed(already_AddRefed&& aOther)
+#ifdef DEBUG
+      : mRawPtr(aOther.take()){}
+#else
+      = default;
+#endif
+
+  already_AddRefed& operator=(already_AddRefed&& aOther) {
+    mRawPtr = aOther.take();
+    return *this;
+  }
+
+  /**
+   * This helper is useful in cases like
+   *
+   *  already_AddRefed<T>
+   *  Foo()
+   *  {
+   *    RefPtr<T> x = ...;
+   *    return x.forget();
+   *  }
+   *
+   * The autoconversion allows one to omit the idiom
+   *
+   *  RefPtr<T> y = x.forget();
+   *  return y.forget();
+   *
+   * Note that nsRefPtr is the XPCOM reference counting smart pointer class.
+   */
+  template <typename U>
+  MOZ_IMPLICIT already_AddRefed(already_AddRefed<U>&& aOther)
+      : mRawPtr(aOther.take()) {}
+
+  ~already_AddRefed()
+#ifdef DEBUG
+  {
+    MOZ_ASSERT(!mRawPtr);
+  }
+#else
+      = default;
+#endif
+
+  // Specialize the unused operator<< for already_AddRefed, to allow
+  // nsCOMPtr<nsIFoo> foo;
+  // Unused << foo.forget();
+  // Note that nsCOMPtr is the XPCOM reference counting smart pointer class.
+  friend void operator<<(const mozilla::unused_t& aUnused,
+                         const already_AddRefed& aRhs) {
+    auto mutableAlreadyAddRefed = const_cast<already_AddRefed<T>*>(&aRhs);
+    aUnused << mutableAlreadyAddRefed->take();
+  }
+
+  [[nodiscard]] T* take() {
+    T* rawPtr = mRawPtr;
+    mRawPtr = nullptr;
+    return rawPtr;
+  }
+
+  /**
+   * This helper provides a static_cast replacement for already_AddRefed, so
+   * if you have
+   *
+   *   already_AddRefed<Parent> F();
+   *
+   * you can write
+   *
+   *   already_AddRefed<Child>
+   *   G()
+   *   {
+   *     return F().downcast<Child>();
+   *   }
+   */
+  template <class U>
+  already_AddRefed<U> downcast() {
+    U* tmp = static_cast<U*>(mRawPtr);
+    mRawPtr = nullptr;
+    return already_AddRefed<U>(tmp);
+  }
+
+ private:
+  T* MOZ_OWNING_REF mRawPtr;
+};
+
+#endif // AlreadyAddRefed_h
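A producer/consumer sketch (Foo is a hypothetical class, refcounted via
mozilla::RefCounted from RefCounted.h in this same patch):

#include "mozilla/AlreadyAddRefed.h"
#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"

class Foo : public mozilla::RefCounted<Foo> {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(Foo)
};

// The returned reference is already AddRef'ed; the caller must adopt it.
already_AddRefed<Foo> MakeFoo() {
  RefPtr<Foo> foo = new Foo();
  return foo.forget();
}

void UseFoo() {
  RefPtr<Foo> foo = MakeFoo();  // adopts the reference; no extra AddRef
}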
diff --git a/mfbt/Array.h b/mfbt/Array.h
new file mode 100644
index 0000000000..55b724a288
--- /dev/null
+++ b/mfbt/Array.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A compile-time constant-length array with bounds-checking assertions. */
+
+#ifndef mozilla_Array_h
+#define mozilla_Array_h
+
+#include <stddef.h>
+
+#include <iterator>
+#include <ostream>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+
+namespace mozilla {
+
+template <typename T, size_t _Length>
+class Array {
+  T mArr[_Length];
+
+ public:
+  using ElementType = T;
+  static constexpr size_t Length = _Length;
+
+  constexpr Array() = default;
+
+  template <typename... Args>
+  MOZ_IMPLICIT constexpr Array(Args&&... aArgs)
+      : mArr{std::forward<Args>(aArgs)...} {
+    static_assert(sizeof...(aArgs) == Length,
+                  "The number of arguments should be equal to the template "
+                  "parameter Length");
+  }
+
+  T& operator[](size_t aIndex) {
+    if (MOZ_UNLIKELY(aIndex >= Length)) {
+      detail::InvalidArrayIndex_CRASH(aIndex, Length);
+    }
+    return mArr[aIndex];
+  }
+
+  const T& operator[](size_t aIndex) const {
+    if (MOZ_UNLIKELY(aIndex >= Length)) {
+      detail::InvalidArrayIndex_CRASH(aIndex, Length);
+    }
+    return mArr[aIndex];
+  }
+
+  bool operator==(const Array& aOther) const {
+    for (size_t i = 0; i < Length; i++) {
+      if (mArr[i] != aOther[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  typedef T* iterator;
+  typedef const T* const_iterator;
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+  // Methods for range-based for loops.
+  iterator begin() { return mArr; }
+  constexpr const_iterator begin() const { return mArr; }
+  constexpr const_iterator cbegin() const { return begin(); }
+  iterator end() { return mArr + Length; }
+  constexpr const_iterator end() const { return mArr + Length; }
+  constexpr const_iterator cend() const { return end(); }
+
+  // Methods for reverse iterating.
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  const_reverse_iterator crbegin() const { return rbegin(); }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+  const_reverse_iterator crend() const { return rend(); }
+};
+
+template <typename T>
+class Array<T, 0> {
+ public:
+  T& operator[](size_t aIndex) { MOZ_CRASH("indexing into zero-length array"); }
+
+  const T& operator[](size_t aIndex) const {
+    MOZ_CRASH("indexing into zero-length array");
+  }
+};
+
+// MOZ_DBG support
+
+template <typename T, size_t N>
+std::ostream& operator<<(std::ostream& aOut, const Array<T, N>& aArray) {
+  return aOut << Span(aArray);
+}
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Array_h */
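An illustration of the bounds-checking behavior (sketch only):

#include <stddef.h>

#include "mozilla/Array.h"

mozilla::Array<int, 3> gArr(1, 2, 3);  // must pass exactly Length arguments

int Get(size_t aIndex) {
  // aIndex >= 3 deliberately crashes (InvalidArrayIndex_CRASH) instead of
  // reading out of bounds.
  return gArr[aIndex];
}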
diff --git a/mfbt/ArrayUtils.h b/mfbt/ArrayUtils.h
new file mode 100644
index 0000000000..0d55bb1f65
--- /dev/null
+++ b/mfbt/ArrayUtils.h
@@ -0,0 +1,188 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements various helper functions related to arrays.
+ */
+
+#ifndef mozilla_ArrayUtils_h
+#define mozilla_ArrayUtils_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+#  include <algorithm>
+#  include <type_traits>
+
+#  include "mozilla/Alignment.h"
+
+namespace mozilla {
+
+template <typename T, size_t Length>
+class Array;
+template <typename E, E N, typename T>
+class EnumeratedArray;
+
+/*
+ * Safely subtract two pointers when it is known that aEnd >= aBegin, yielding a
+ * size_t result.
+ *
+ * Ordinary pointer subtraction yields a ptrdiff_t result, which, being signed,
+ * has insufficient range to express the distance between pointers at opposite
+ * ends of the address space. Furthermore, most compilers use ptrdiff_t to
+ * represent the intermediate byte address distance, before dividing by
+ * sizeof(T); if that intermediate result overflows, they'll produce results
+ * with the wrong sign even when the correct scaled distance would fit in a
+ * ptrdiff_t.
+ */
+template <class T>
+MOZ_ALWAYS_INLINE size_t PointerRangeSize(T* aBegin, T* aEnd) {
+  MOZ_ASSERT(aEnd >= aBegin);
+  return (size_t(aEnd) - size_t(aBegin)) / sizeof(T);
+}
+
+/*
+ * Compute the length of an array with constant length. (Use of this method
+ * with a non-array pointer will not compile.)
+ *
+ * Beware of the implicit trailing '\0' when using this with string constants.
+ */
+template <typename T, size_t N>
+constexpr size_t ArrayLength(T (&aArr)[N]) {
+  return N;
+}
+
+template <typename T, size_t N>
+constexpr size_t ArrayLength(const Array<T, N>& aArr) {
+  return N;
+}
+
+template <typename E, E N, typename T>
+constexpr size_t ArrayLength(const EnumeratedArray<E, N, T>& aArr) {
+  return size_t(N);
+}
+
+/*
+ * Compute the address one past the last element of a constant-length array.
+ *
+ * Beware of the implicit trailing '\0' when using this with string constants.
+ */
+template <typename T, size_t N>
+constexpr T* ArrayEnd(T (&aArr)[N]) {
+  return aArr + ArrayLength(aArr);
+}
+
+template <typename T, size_t N>
+constexpr T* ArrayEnd(Array<T, N>& aArr) {
+  return &aArr[0] + ArrayLength(aArr);
+}
+
+template <typename T, size_t N>
+constexpr const T* ArrayEnd(const Array<T, N>& aArr) {
+  return &aArr[0] + ArrayLength(aArr);
+}
+
+/**
+ * std::equal has subpar ergonomics.
+ */
+
+template <typename T, typename U, size_t N>
+bool ArrayEqual(const T (&a)[N], const U (&b)[N]) {
+  return std::equal(a, a + N, b);
+}
+
+template <typename T, typename U>
+bool ArrayEqual(const T* const a, const U* const b, const size_t n) {
+  return std::equal(a, a + n, b);
+}
+
+namespace detail {
+
+template <typename AlignType, typename Pointee, typename = void>
+struct AlignedChecker {
+  static void test(const Pointee* aPtr) {
+    MOZ_ASSERT((uintptr_t(aPtr) % MOZ_ALIGNOF(AlignType)) == 0,
+               "performing a range-check with a misaligned pointer");
+  }
+};
+
+template <typename AlignType, typename Pointee>
+struct AlignedChecker<AlignType, Pointee,
+                      std::enable_if_t<std::is_void_v<AlignType>>> {
+  static void test(const Pointee* aPtr) {}
+};
+
+} // namespace detail
+
+/**
+ * Determines whether |aPtr| points at an object in the range [aBegin, aEnd).
+ *
+ * |aPtr| must have the same alignment as |aBegin| and |aEnd|. This usually
+ * should be achieved by ensuring |aPtr| points at a |U|, not just that it
+ * points at a |T|.
+ *
+ * It is a usage error for any argument to be misaligned.
+ *
+ * It's okay for T* to be void*, and if so U* may also be void*. In the latter
+ * case no argument is required to be aligned (obviously, as void* implies no
+ * particular alignment).
+ */
+template <typename T, typename U>
+inline std::enable_if_t<std::is_same_v<T, U> || std::is_base_of<T, U>::value ||
+                            std::is_void_v<T>,
+                        bool>
+IsInRange(const T* aPtr, const U* aBegin, const U* aEnd) {
+  MOZ_ASSERT(aBegin <= aEnd);
+  detail::AlignedChecker<U, T>::test(aPtr);
+  detail::AlignedChecker<U, U>::test(aBegin);
+  detail::AlignedChecker<U, U>::test(aEnd);
+  return aBegin <= reinterpret_cast<const U*>(aPtr) &&
+         reinterpret_cast<const U*>(aPtr) < aEnd;
+}
+
+/**
+ * Convenience version of the above method when the valid range is specified as
+ * uintptr_t values. As above, |aPtr| must be aligned, and |aBegin| and |aEnd|
+ * must be aligned with respect to |T|.
+ */
+template <typename T>
+inline bool IsInRange(const T* aPtr, uintptr_t aBegin, uintptr_t aEnd) {
+  return IsInRange(aPtr, reinterpret_cast<const T*>(aBegin),
+                   reinterpret_cast<const T*>(aEnd));
+}
+
+namespace detail {
+
+/*
+ * Helper for the MOZ_ARRAY_LENGTH() macro to make the length a typesafe
+ * compile-time constant even on compilers lacking constexpr support.
+ */
+template <typename T, size_t N>
+char (&ArrayLengthHelper(T (&array)[N]))[N];
+
+} /* namespace detail */
+
+} /* namespace mozilla */
+
+#endif /* __cplusplus */
+
+/*
+ * MOZ_ARRAY_LENGTH() is an alternative to mozilla::ArrayLength() for C files
+ * that can't use C++ template functions and for static_assert() calls that
+ * can't call ArrayLength() when it is not a C++11 constexpr function.
+ */
+#ifdef __cplusplus
+#  define MOZ_ARRAY_LENGTH(array) \
+    sizeof(mozilla::detail::ArrayLengthHelper(array))
+#else
+#  define MOZ_ARRAY_LENGTH(array) (sizeof(array) / sizeof((array)[0]))
+#endif
+
+#endif /* mozilla_ArrayUtils_h */
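An illustration of the helpers above (kTable is a made-up table):

#include "mozilla/ArrayUtils.h"

static const int kTable[] = {10, 20, 30, 40};

static_assert(mozilla::ArrayLength(kTable) == 4, "constexpr in C++");
static_assert(MOZ_ARRAY_LENGTH(kTable) == 4, "also usable from C code");

bool PointsIntoTable(const int* aPtr) {
  // Debug builds additionally assert that aPtr is suitably aligned.
  return mozilla::IsInRange(aPtr, kTable, mozilla::ArrayEnd(kTable));
}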
diff --git a/mfbt/Assertions.cpp b/mfbt/Assertions.cpp
new file mode 100644
index 0000000000..7721677f19
--- /dev/null
+++ b/mfbt/Assertions.cpp
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Sprintf.h"
+
+#include <stdarg.h>
+
+MOZ_BEGIN_EXTERN_C
+
+/*
+ * The crash reason is defined as a global variable here rather than in the
+ * crash reporter itself to make it available to all code, even libraries like
+ * JS that don't link with the crash reporter directly. This value will only
+ * be consumed if the crash reporter is used by the target application.
+ */
+MFBT_DATA const char* gMozCrashReason = nullptr;
+
+static char sPrintfCrashReason[sPrintfCrashReasonSize] = {};
+
+// Accesses to this atomic are not included in web replay recordings, so that
+// if we crash in an area where recorded events are not allowed the true reason
+// for the crash is not obscured by a record/replay error.
+static mozilla::Atomic<bool> sCrashing(false);
+
+MFBT_API MOZ_COLD MOZ_NEVER_INLINE MOZ_FORMAT_PRINTF(1, 2) const
+    char* MOZ_CrashPrintf(const char* aFormat, ...) {
+  if (!sCrashing.compareExchange(false, true)) {
+    // In the unlikely event of a race condition, skip
+    // setting the crash reason and just crash safely.
+    MOZ_RELEASE_ASSERT(false);
+  }
+  va_list aArgs;
+  va_start(aArgs, aFormat);
+  int ret = VsprintfLiteral(sPrintfCrashReason, aFormat, aArgs);
+  va_end(aArgs);
+  MOZ_RELEASE_ASSERT(
+      ret >= 0 && size_t(ret) < sPrintfCrashReasonSize,
+      "Could not write the explanation string to the supplied buffer!");
+  return sPrintfCrashReason;
+}
+
+MOZ_END_EXTERN_C
+
+MFBT_API MOZ_NORETURN MOZ_COLD void mozilla::detail::InvalidArrayIndex_CRASH(
+    size_t aIndex, size_t aLength) {
+  MOZ_CRASH_UNSAFE_PRINTF("ElementAt(aIndex = %zu, aLength = %zu)", aIndex,
+                          aLength);
+}
diff --git a/mfbt/Assertions.h b/mfbt/Assertions.h
new file mode 100644
index 0000000000..8c8b52405e
--- /dev/null
+++ b/mfbt/Assertions.h
@@ -0,0 +1,647 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of runtime and static assertion macros for C and C++. */
+
+#ifndef mozilla_Assertions_h
+#define mozilla_Assertions_h
+
+#if (defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)) && \
+    !defined(__wasi__)
+#  define MOZ_DUMP_ASSERTION_STACK
+#endif
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/Fuzzing.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MacroArgs.h"
+#include "mozilla/StaticAnalysisFunctions.h"
+#include "mozilla/Types.h"
+#ifdef MOZ_DUMP_ASSERTION_STACK
+#  include "mozilla/StackWalk.h"
+#endif
+
+/*
+ * The crash reason set by MOZ_CRASH_ANNOTATE is consumed by the crash reporter
+ * if present. It is declared here (and defined in Assertions.cpp) to make it
+ * available to all code, even libraries that don't link with the crash
+ * reporter directly.
+ */
+MOZ_BEGIN_EXTERN_C
+extern MFBT_DATA const char* gMozCrashReason;
+MOZ_END_EXTERN_C
+
+#if defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)
+static inline void AnnotateMozCrashReason(const char* reason) {
+  gMozCrashReason = reason;
+  // See bug 1681846, on 32-bit Android ARM the compiler removes the store to
+  // gMozCrashReason if this barrier is not present.
+  asm volatile("" ::: "memory");
+}
+#  define MOZ_CRASH_ANNOTATE(...) AnnotateMozCrashReason(__VA_ARGS__)
+#else
+#  define MOZ_CRASH_ANNOTATE(...) \
+    do { /* nothing */            \
+    } while (false)
+#endif
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+/*
+ * TerminateProcess and GetCurrentProcess are defined in <processthreadsapi.h>,
+ * which further depends on <windef.h>. We hardcode these few definitions
+ * manually because those headers clutter the global namespace with a
+ * significant number of undesired macros and symbols.
+ */
+MOZ_BEGIN_EXTERN_C
+__declspec(dllimport) int __stdcall TerminateProcess(void* hProcess,
+                                                     unsigned int uExitCode);
+__declspec(dllimport) void* __stdcall GetCurrentProcess(void);
+MOZ_END_EXTERN_C
+#elif defined(__wasi__)
+/*
+ * On Wasm/WASI platforms, we just call __builtin_trap().
+ */
+#else
+#  include <signal.h>
+#endif
+#ifdef ANDROID
+#  include <android/log.h>
+#endif
+
+MOZ_BEGIN_EXTERN_C
+
+#if defined(ANDROID) && defined(MOZ_DUMP_ASSERTION_STACK)
+MOZ_MAYBE_UNUSED static void MOZ_ReportAssertionFailurePrintFrame(
+    const char* aBuf) {
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_Assert", "%s\n", aBuf);
+}
+#endif
+
+/*
+ * Prints |aStr| as an assertion failure (using aFilename and aLine as the
+ * location of the assertion) to the standard debug-output channel.
+ *
+ * Usually you should use MOZ_ASSERT or MOZ_CRASH instead of this method. This
+ * method is primarily for internal use in this header, and only secondarily
+ * for use in implementing release-build assertions.
+ */
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NEVER_INLINE void
+MOZ_ReportAssertionFailure(const char* aStr, const char* aFilename,
+                           int aLine) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS {
+  MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_ASSERT", aFilename, aLine, aStr);
+#ifdef ANDROID
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_Assert",
+                      "Assertion failure: %s, at %s:%d\n", aStr, aFilename,
+                      aLine);
+#  if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStackWithWriter(MOZ_ReportAssertionFailurePrintFrame, CallerPC(),
+                            /* aMaxFrames */ 0);
+#  endif
+#else
+  fprintf(stderr, "Assertion failure: %s, at %s:%d\n", aStr, aFilename, aLine);
+#  if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStack(stderr, CallerPC(), /* aMaxFrames */ 0);
+#  endif
+  fflush(stderr);
+#endif
+}
+
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NEVER_INLINE void MOZ_ReportCrash(
+    const char* aStr, const char* aFilename,
+    int aLine) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS {
+#ifdef ANDROID
+  __android_log_print(ANDROID_LOG_FATAL, "MOZ_CRASH",
+                      "Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
+#else
+  fprintf(stderr, "Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
+#  if defined(MOZ_DUMP_ASSERTION_STACK)
+  MozWalkTheStack(stderr, CallerPC(), /* aMaxFrames */ 0);
+#  endif
+  fflush(stderr);
+#endif
+}
+
+/**
+ * MOZ_REALLY_CRASH is used in the implementation of MOZ_CRASH(). You should
+ * call MOZ_CRASH instead.
+ */
+#if defined(_MSC_VER)
+/*
+ * On MSVC use the __debugbreak compiler intrinsic, which produces an inline
+ * (not nested in a system function) breakpoint. This distinctively invokes
+ * Breakpad without requiring system library symbols on all stack-processing
+ * machines, as a nested breakpoint would require.
+ *
+ * We use __LINE__ to prevent the compiler from folding multiple crash sites
+ * together, which would make crash reports hard to understand.
+ *
+ * We use TerminateProcess with the exit code aborting would generate
+ * because we don't want to invoke atexit handlers, destructors, library
+ * unload handlers, and so on when our process might be in a compromised
+ * state.
+ *
+ * We don't use abort() because it'd cause Windows to annoyingly pop up the
+ * process error dialog multiple times. See bug 345118 and bug 426163.
+ *
+ * (Technically these are Windows requirements, not MSVC requirements. But
+ * practically you need MSVC for debugging, and we only ship builds created
+ * by MSVC, so doing it this way reduces complexity.)
+ */
+
+MOZ_MAYBE_UNUSED static MOZ_COLD MOZ_NORETURN MOZ_NEVER_INLINE void
+MOZ_NoReturn(int aLine) {
+  *((volatile int*)NULL) = aLine;
+  TerminateProcess(GetCurrentProcess(), 3);
+}
+
+#  define MOZ_REALLY_CRASH(line) \
+    do {                         \
+      __debugbreak();            \
+      MOZ_NoReturn(line);        \
+    } while (false)
+
+#elif __wasi__
+
+#  define MOZ_REALLY_CRASH(line) __builtin_trap()
+
+#else
+
+/*
+ * MOZ_CRASH_WRITE_ADDR is the address to be used when performing a forced
+ * crash. NULL is preferred however if for some reason NULL cannot be used
+ * this makes choosing another value possible.
+ *
+ * In the case of UBSan certain checks, bounds specifically, cause the compiler
+ * to emit the 'ud2' instruction when storing to 0x0. This causes forced
+ * crashes to manifest as ILL (at an arbitrary address) instead of the expected
+ * SEGV at 0x0.
+ */
+#  ifdef MOZ_UBSAN
+#    define MOZ_CRASH_WRITE_ADDR 0x1
+#  else
+#    define MOZ_CRASH_WRITE_ADDR NULL
+#  endif
+
+#  ifdef __cplusplus
+#    define MOZ_REALLY_CRASH(line)                                 \
+      do {                                                         \
+        *((volatile int*)MOZ_CRASH_WRITE_ADDR) = line; /* NOLINT */ \
+        ::abort();                                                 \
+      } while (false)
+#  else
+#    define MOZ_REALLY_CRASH(line)                                 \
+      do {                                                         \
+        *((volatile int*)MOZ_CRASH_WRITE_ADDR) = line; /* NOLINT */ \
+        abort();                                                   \
+      } while (false)
+#  endif
+#endif
+
+/*
+ * MOZ_CRASH([explanation-string]) crashes the program, plain and simple, in a
+ * Breakpad-compatible way, in both debug and release builds.
+ *
+ * MOZ_CRASH is a good solution for "handling" failure cases when you're
+ * unwilling or unable to handle them more cleanly -- for OOM, for likely
+ * memory corruption, and so on. It's also a good solution if you need safe
+ * behavior in release builds as well as debug builds. But if the failure is
+ * one that should be debugged and fixed, MOZ_ASSERT is generally preferable.
+ *
+ * The optional explanation-string, if provided, must be a string literal
+ * explaining why we're crashing. This argument is intended for use with
+ * MOZ_CRASH() calls whose rationale is non-obvious; don't use it if it's
+ * obvious why we're crashing.
+ *
+ * If we're a DEBUG build and we crash at a MOZ_CRASH which provides an
+ * explanation-string, we print the string to stderr. Otherwise, we don't
+ * print anything; this is because we want MOZ_CRASH to be 100% safe in release
+ * builds, and it's hard to print to stderr safely when memory might have been
+ * corrupted.
+ */
+#if !(defined(DEBUG) || defined(FUZZING))
+#  define MOZ_CRASH(...)                                                     \
+    do {                                                                     \
+      MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \
+      MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")");                      \
+      MOZ_REALLY_CRASH(__LINE__);                                            \
+    } while (false)
+#else
+#  define MOZ_CRASH(...)                                                     \
+    do {                                                                     \
+      MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \
+      MOZ_ReportCrash("" __VA_ARGS__, __FILE__, __LINE__);                   \
+      MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")");                      \
+      MOZ_REALLY_CRASH(__LINE__);                                            \
+    } while (false)
+#endif
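Typical usage (a sketch, not from the patch): MOZ_CRASH marks states that must
be unreachable even in release builds.

#include "mozilla/Assertions.h"

enum class Mode { A, B };

int Dispatch(Mode aMode) {
  switch (aMode) {
    case Mode::A:
      return 1;
    case Mode::B:
      return 2;
  }
  MOZ_CRASH("unknown Mode");  // unreachable unless the value is corrupted
}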
\ + do { \ + MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \ + MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")"); \ + MOZ_REALLY_CRASH(__LINE__); \ + } while (false) +#else +# define MOZ_CRASH(...) \ + do { \ + MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", __FILE__, __LINE__, NULL); \ + MOZ_ReportCrash("" __VA_ARGS__, __FILE__, __LINE__); \ + MOZ_CRASH_ANNOTATE("MOZ_CRASH(" __VA_ARGS__ ")"); \ + MOZ_REALLY_CRASH(__LINE__); \ + } while (false) +#endif + +/* + * MOZ_CRASH_UNSAFE(explanation-string) can be used if the explanation string + * cannot be a string literal (but no other processing needs to be done on it). + * A regular MOZ_CRASH() is preferred wherever possible, as passing arbitrary + * strings from a potentially compromised process is not without risk. If the + * string being passed is the result of a printf-style function, consider using + * MOZ_CRASH_UNSAFE_PRINTF instead. + * + * @note This macro causes data collection because crash strings are annotated + * to crash-stats and are publicly visible. Firefox data stewards must do data + * review on usages of this macro. + */ +static MOZ_ALWAYS_INLINE_EVEN_DEBUG MOZ_COLD MOZ_NORETURN void MOZ_Crash( + const char* aFilename, int aLine, const char* aReason) { + MOZ_FUZZING_HANDLE_CRASH_EVENT4("MOZ_CRASH", aFilename, aLine, aReason); +#if defined(DEBUG) || defined(FUZZING) + MOZ_ReportCrash(aReason, aFilename, aLine); +#endif + MOZ_CRASH_ANNOTATE(aReason); + MOZ_REALLY_CRASH(aLine); +} +#define MOZ_CRASH_UNSAFE(reason) MOZ_Crash(__FILE__, __LINE__, reason) + +static const size_t sPrintfMaxArgs = 4; +static const size_t sPrintfCrashReasonSize = 1024; + +MFBT_API MOZ_COLD MOZ_NEVER_INLINE MOZ_FORMAT_PRINTF(1, 2) const + char* MOZ_CrashPrintf(const char* aFormat, ...); + +/* + * MOZ_CRASH_UNSAFE_PRINTF(format, arg1 [, args]) can be used when more + * information is desired than a string literal can supply. The caller provides + * a printf-style format string, which must be a string literal and between + * 1 and 4 additional arguments. A regular MOZ_CRASH() is preferred wherever + * possible, as passing arbitrary strings to printf from a potentially + * compromised process is not without risk. + * + * @note This macro causes data collection because crash strings are annotated + * to crash-stats and are publicly visible. Firefox data stewards must do data + * review on usages of this macro. + */ +#define MOZ_CRASH_UNSAFE_PRINTF(format, ...) \ + do { \ + static_assert(MOZ_ARG_COUNT(__VA_ARGS__) > 0, \ + "Did you forget arguments to MOZ_CRASH_UNSAFE_PRINTF? " \ + "Or maybe you want MOZ_CRASH instead?"); \ + static_assert(MOZ_ARG_COUNT(__VA_ARGS__) <= sPrintfMaxArgs, \ + "Only up to 4 additional arguments are allowed!"); \ + static_assert(sizeof(format) <= sPrintfCrashReasonSize, \ + "The supplied format string is too long!"); \ + MOZ_Crash(__FILE__, __LINE__, MOZ_CrashPrintf("" format, __VA_ARGS__)); \ + } while (false) + +MOZ_END_EXTERN_C + +/* + * MOZ_ASSERT(expr [, explanation-string]) asserts that |expr| must be truthy in + * debug builds. If it is, execution continues. Otherwise, an error message + * including the expression and the explanation-string (if provided) is printed, + * an attempt is made to invoke any existing debugger, and execution halts. + * MOZ_ASSERT is fatal: no recovery is possible. Do not assert a condition + * which can correctly be falsy. + * + * The optional explanation-string, if provided, must be a string literal + * explaining the assertion. 
It is intended for use with assertions whose
+ * correctness or rationale is non-obvious, and for assertions where the "real"
+ * condition being tested is best described prosaically. Don't provide an
+ * explanation if it's not actually helpful.
+ *
+ *   // No explanation needed: pointer arguments often must not be NULL.
+ *   MOZ_ASSERT(arg);
+ *
+ *   // An explanation can be helpful to explain exactly how we know an
+ *   // assertion is valid.
+ *   MOZ_ASSERT(state == WAITING_FOR_RESPONSE,
+ *              "given that <thingA> and <thingB>, we must have...");
+ *
+ *   // Or it might disambiguate multiple identical (save for their location)
+ *   // assertions of the same expression.
+ *   MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ *              "we already set [[PrimitiveThis]] for this Boolean object");
+ *   MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ *              "we already set [[PrimitiveThis]] for this String object");
+ *
+ * MOZ_ASSERT has no effect in non-debug builds. It is designed to catch bugs
+ * *only* during debugging, not "in the field". If you want the latter, use
+ * MOZ_RELEASE_ASSERT, which applies to non-debug builds as well.
+ *
+ * MOZ_DIAGNOSTIC_ASSERT works like MOZ_RELEASE_ASSERT in Nightly/Aurora and
+ * MOZ_ASSERT in Beta/Release - use this when a condition is potentially rare
+ * enough to require real user testing to hit, but is not security-sensitive.
+ * This can cause user pain, so use it sparingly. If a MOZ_DIAGNOSTIC_ASSERT
+ * is firing, it should promptly be converted to a MOZ_ASSERT while the failure
+ * is being investigated, rather than letting users suffer.
+ *
+ * MOZ_DIAGNOSTIC_ASSERT_ENABLED is defined when MOZ_DIAGNOSTIC_ASSERT is like
+ * MOZ_RELEASE_ASSERT rather than MOZ_ASSERT.
+ */
+
+/*
+ * Implement MOZ_VALIDATE_ASSERT_CONDITION_TYPE, which is used to guard against
+ * accidentally passing something unintended in lieu of an assertion condition.
+ */
+
+#ifdef __cplusplus
+#  include <type_traits>
+namespace mozilla {
+namespace detail {
+
+template <typename T>
+struct AssertionConditionType {
+  using ValueT = std::remove_reference_t<T>;
+  static_assert(!std::is_array_v<ValueT>,
+                "Expected boolean assertion condition, got an array or a "
+                "string!");
+  static_assert(!std::is_function_v<ValueT>,
+                "Expected boolean assertion condition, got a function! Did "
+                "you intend to call that function?");
+  static_assert(!std::is_floating_point_v<ValueT>,
+                "It's often a bad idea to assert that a floating-point number "
+                "is nonzero, because such assertions tend to intermittently "
+                "fail. Shouldn't your code gracefully handle this case instead "
+                "of asserting? Anyway, if you really want to do that, write an "
+                "explicit boolean condition, like !!x or x!=0.");
+
+  static const bool isValid = true;
+};
+
+}  // namespace detail
+}  // namespace mozilla
+#  define MOZ_VALIDATE_ASSERT_CONDITION_TYPE(x)                        \
+    static_assert(                                                     \
+        mozilla::detail::AssertionConditionType<decltype(x)>::isValid, \
+        "invalid assertion condition")
+#else
+#  define MOZ_VALIDATE_ASSERT_CONDITION_TYPE(x)
+#endif
+
+#if defined(DEBUG) || defined(MOZ_ASAN)
+#  define MOZ_REPORT_ASSERTION_FAILURE(...) \
+    MOZ_ReportAssertionFailure(__VA_ARGS__)
+#else
+#  define MOZ_REPORT_ASSERTION_FAILURE(...) \
+    do { /* nothing */                      \
+    } while (false)
+#endif
+
+/* First the single-argument form.
*/ +#define MOZ_ASSERT_HELPER1(kind, expr) \ + do { \ + MOZ_VALIDATE_ASSERT_CONDITION_TYPE(expr); \ + if (MOZ_UNLIKELY(!MOZ_CHECK_ASSERT_ASSIGNMENT(expr))) { \ + MOZ_FUZZING_HANDLE_CRASH_EVENT2(kind, #expr); \ + MOZ_REPORT_ASSERTION_FAILURE(#expr, __FILE__, __LINE__); \ + MOZ_CRASH_ANNOTATE(kind "(" #expr ")"); \ + MOZ_REALLY_CRASH(__LINE__); \ + } \ + } while (false) +/* Now the two-argument form. */ +#define MOZ_ASSERT_HELPER2(kind, expr, explain) \ + do { \ + MOZ_VALIDATE_ASSERT_CONDITION_TYPE(expr); \ + if (MOZ_UNLIKELY(!MOZ_CHECK_ASSERT_ASSIGNMENT(expr))) { \ + MOZ_FUZZING_HANDLE_CRASH_EVENT2(kind, #expr); \ + MOZ_REPORT_ASSERTION_FAILURE(#expr " (" explain ")", __FILE__, \ + __LINE__); \ + MOZ_CRASH_ANNOTATE(kind "(" #expr ") (" explain ")"); \ + MOZ_REALLY_CRASH(__LINE__); \ + } \ + } while (false) + +#define MOZ_ASSERT_GLUE(a, b) a b +#define MOZ_RELEASE_ASSERT(...) \ + MOZ_ASSERT_GLUE( \ + MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \ + ("MOZ_RELEASE_ASSERT", __VA_ARGS__)) + +#ifdef DEBUG +# define MOZ_ASSERT(...) \ + MOZ_ASSERT_GLUE( \ + MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \ + ("MOZ_ASSERT", __VA_ARGS__)) +#else +# define MOZ_ASSERT(...) \ + do { \ + } while (false) +#endif /* DEBUG */ + +#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED) +# define MOZ_DIAGNOSTIC_ASSERT(...) \ + MOZ_ASSERT_GLUE( \ + MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \ + ("MOZ_DIAGNOSTIC_ASSERT", __VA_ARGS__)) +#else +# define MOZ_DIAGNOSTIC_ASSERT(...) \ + do { \ + } while (false) +#endif + +/* + * MOZ_ASSERT_IF(cond1, cond2) is equivalent to MOZ_ASSERT(cond2) if cond1 is + * true. + * + * MOZ_ASSERT_IF(isPrime(num), num == 2 || isOdd(num)); + * + * As with MOZ_ASSERT, MOZ_ASSERT_IF has effect only in debug builds. It is + * designed to catch bugs during debugging, not "in the field". + */ +#ifdef DEBUG +# define MOZ_ASSERT_IF(cond, expr) \ + do { \ + if (cond) { \ + MOZ_ASSERT(expr); \ + } \ + } while (false) +#else +# define MOZ_ASSERT_IF(cond, expr) \ + do { \ + } while (false) +#endif + +/* + * MOZ_DIAGNOSTIC_ASSERT_IF is like MOZ_ASSERT_IF, but using + * MOZ_DIAGNOSTIC_ASSERT as the underlying assert. + * + * See the block comment for MOZ_DIAGNOSTIC_ASSERT above for more details on how + * diagnostic assertions work and how to use them. + */ +#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED +# define MOZ_DIAGNOSTIC_ASSERT_IF(cond, expr) \ + do { \ + if (cond) { \ + MOZ_DIAGNOSTIC_ASSERT(expr); \ + } \ + } while (false) +#else +# define MOZ_DIAGNOSTIC_ASSERT_IF(cond, expr) \ + do { \ + } while (false) +#endif + +/* + * MOZ_ASSUME_UNREACHABLE_MARKER() expands to an expression which states that + * it is undefined behavior for execution to reach this point. No guarantees + * are made about what will happen if this is reached at runtime. Most code + * should use MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE because it has extra + * asserts. + */ +#if defined(__clang__) || defined(__GNUC__) +# define MOZ_ASSUME_UNREACHABLE_MARKER() __builtin_unreachable() +#elif defined(_MSC_VER) +# define MOZ_ASSUME_UNREACHABLE_MARKER() __assume(0) +#else +# ifdef __cplusplus +# define MOZ_ASSUME_UNREACHABLE_MARKER() ::abort() +# else +# define MOZ_ASSUME_UNREACHABLE_MARKER() abort() +# endif +#endif + +/* + * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE([reason]) tells the compiler that it + * can assume that the macro call cannot be reached during execution. 
This lets + * the compiler generate better-optimized code under some circumstances, at the + * expense of the program's behavior being undefined if control reaches the + * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE. + * + * In Gecko, you probably should not use this macro outside of performance- or + * size-critical code, because it's unsafe. If you don't care about code size + * or performance, you should probably use MOZ_ASSERT or MOZ_CRASH. + * + * SpiderMonkey is a different beast, and there it's acceptable to use + * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE more widely. + * + * Note that MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE is noreturn, so it's valid + * not to return a value following a MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE + * call. + * + * Example usage: + * + * enum ValueType { + * VALUE_STRING, + * VALUE_INT, + * VALUE_FLOAT + * }; + * + * int ptrToInt(ValueType type, void* value) { + * { + * // We know for sure that type is either INT or FLOAT, and we want this + * // code to run as quickly as possible. + * switch (type) { + * case VALUE_INT: + * return *(int*) value; + * case VALUE_FLOAT: + * return (int) *(float*) value; + * default: + * MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected ValueType"); + * } + * } + */ + +/* + * Unconditional assert in debug builds for (assumed) unreachable code paths + * that have a safe return without crashing in release builds. + */ +#define MOZ_ASSERT_UNREACHABLE(reason) \ + MOZ_ASSERT(false, "MOZ_ASSERT_UNREACHABLE: " reason) + +#define MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(reason) \ + do { \ + MOZ_ASSERT_UNREACHABLE(reason); \ + MOZ_ASSUME_UNREACHABLE_MARKER(); \ + } while (false) + +/** + * MOZ_FALLTHROUGH_ASSERT is an annotation to suppress compiler warnings about + * switch cases that MOZ_ASSERT(false) (or its alias MOZ_ASSERT_UNREACHABLE) in + * debug builds, but intentionally fall through in release builds to handle + * unexpected values. + * + * Why do we need MOZ_FALLTHROUGH_ASSERT in addition to [[fallthrough]]? In + * release builds, the MOZ_ASSERT(false) will expand to `do { } while (false)`, + * requiring a [[fallthrough]] annotation to suppress a -Wimplicit-fallthrough + * warning. In debug builds, the MOZ_ASSERT(false) will expand to something like + * `if (true) { MOZ_CRASH(); }` and the [[fallthrough]] annotation will cause + * a -Wunreachable-code warning. The MOZ_FALLTHROUGH_ASSERT macro breaks this + * warning stalemate. + * + * // Example before MOZ_FALLTHROUGH_ASSERT: + * switch (foo) { + * default: + * // This case wants to assert in debug builds, fall through in release. + * MOZ_ASSERT(false); // -Wimplicit-fallthrough warning in release builds! + * [[fallthrough]]; // but -Wunreachable-code warning in debug builds! + * case 5: + * return 5; + * } + * + * // Example with MOZ_FALLTHROUGH_ASSERT: + * switch (foo) { + * default: + * // This case asserts in debug builds, falls through in release. + * MOZ_FALLTHROUGH_ASSERT("Unexpected foo value?!"); + * case 5: + * return 5; + * } + */ +#ifdef DEBUG +# define MOZ_FALLTHROUGH_ASSERT(...) \ + MOZ_CRASH("MOZ_FALLTHROUGH_ASSERT: " __VA_ARGS__) +#else +# define MOZ_FALLTHROUGH_ASSERT(...) [[fallthrough]] +#endif + +/* + * MOZ_ALWAYS_TRUE(expr) and friends always evaluate the provided expression, + * in debug builds and in release builds both. Then, in debug builds and + * Nightly and DevEdition release builds, the value of the expression is + * asserted either true or false using MOZ_DIAGNOSTIC_ASSERT. 
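+ *
+ * A brief illustrative sketch (hypothetical names, not from this header):
+ *
+ *   MOZ_ALWAYS_TRUE(mMap.Remove(aKey));  // side effect kept in release builds
+ *   MOZ_ALWAYS_OK(stream.Flush());       // for Result-returning calls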
+ */
+#define MOZ_ALWAYS_TRUE(expr)                \
+  do {                                       \
+    if (MOZ_LIKELY(expr)) {                  \
+      /* Silence [[nodiscard]]. */           \
+    } else {                                 \
+      MOZ_DIAGNOSTIC_ASSERT(false, #expr);   \
+    }                                        \
+  } while (false)
+
+#define MOZ_ALWAYS_FALSE(expr) MOZ_ALWAYS_TRUE(!(expr))
+#define MOZ_ALWAYS_OK(expr) MOZ_ALWAYS_TRUE((expr).isOk())
+#define MOZ_ALWAYS_ERR(expr) MOZ_ALWAYS_TRUE((expr).isErr())
+
+/*
+ * These are disabled when fuzzing
+ */
+#ifdef FUZZING
+#  define MOZ_CRASH_UNLESS_FUZZING(...) \
+    do {                                \
+    } while (0)
+#  define MOZ_ASSERT_UNLESS_FUZZING(...) \
+    do {                                 \
+    } while (0)
+#else
+#  define MOZ_CRASH_UNLESS_FUZZING(...) MOZ_CRASH(__VA_ARGS__)
+#  define MOZ_ASSERT_UNLESS_FUZZING(...) MOZ_ASSERT(__VA_ARGS__)
+#endif
+
+#undef MOZ_DUMP_ASSERTION_STACK
+#undef MOZ_CRASH_CRASHREPORT
+
+/*
+ * This is only used by Array and nsTArray classes, therefore it is not
+ * required when included from C code.
+ */
+#ifdef __cplusplus
+namespace mozilla::detail {
+MFBT_API MOZ_NORETURN MOZ_COLD void InvalidArrayIndex_CRASH(size_t aIndex,
+                                                            size_t aLength);
+}  // namespace mozilla::detail
+#endif  // __cplusplus
+
+#endif /* mozilla_Assertions_h */
diff --git a/mfbt/AtomicBitfields.h b/mfbt/AtomicBitfields.h
new file mode 100644
index 0000000000..c61dc4df46
--- /dev/null
+++ b/mfbt/AtomicBitfields.h
@@ -0,0 +1,468 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AtomicBitfields_h
+#define mozilla_AtomicBitfields_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MacroArgs.h"
+#include "mozilla/MacroForEach.h"
+
+#include <limits>
+#include <stdint.h>
+#include <type_traits>
+
+#ifdef __wasi__
+#  include "mozilla/WasiAtomic.h"
+#else
+#  include <atomic>
+#endif  // __wasi__
+
+namespace mozilla {
+
+// Creates a series of atomic bitfields.
+//
+// |aBitfields| is the name of the underlying storage for the bitfields.
+// |aBitFieldsSize| is the size of the underlying storage (8, 16, 32, or 64).
+//
+// Bitfields are specified as a triplet of (type, name, size), which mirrors
+// the way you declare native C++ bitfields (bool mMyField1: 1). Trailing
+// commas are not supported in the list of bitfields.
+//
+// Signed integer types are not supported by this macro to avoid dealing with
+// packing/unpacking the sign bit and C++'s general messiness around signed
+// integer representations not being fully defined.
+//
+// You cannot request a single field that's the size of the entire bitfield
+// storage. Just use a normal atomic integer!
+//
+//
+// ========================== SEMANTICS AND SAFETY ============================
+//
+// All fields are default-initialized to 0.
+//
+// In debug builds, storing a value to a bitfield that's larger than its bits
+// can fit will trigger an assertion. In release builds, the value will just be
+// masked off.
+//
+// If you request anything unsupported by this macro it should result in
+// a compile-time error (either a static assert or just weird macro errors).
+// For instance, this macro will statically prevent using more bits than
+// |aBitFieldsSize|, so specifying the size is just to prevent accidentally
+// making the storage bigger.
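+//
+// An illustrative note (not from the original docs): storing 5 into a 2-bit
+// field asserts in a debug build, while a release build masks the value to
+// 5 & 0b11 == 1.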
+//
+// Each field will get a Load$NAME and Store$NAME method which will atomically
+// load and store the requested value with a Sequentially Consistent memory
+// order (to be on the safe side). Storing a field requires a compare-exchange,
+// so a thread may get stalled if there's a lot of contention on the bitfields.
+//
+//
+// ============================== MOTIVATION ==================================
+//
+// You might be wondering: why would I need atomic bitfields? Well as it turns
+// out, bitfields and concurrency mess a lot of people up!
+//
+// CPUs don't have operations to write to a handful of bits -- they generally
+// only have the precision of a byte. So when you use C++'s native bitfields,
+// the compiler generates code to mask and shift the values in for you. This
+// means writing to a single field will actually overwrite all the other
+// bitfields that are packed in with it!
+//
+// In single-threaded code this is fine; the old values are loaded and written
+// back by the compiler's generated code. But in concurrent code, it means
+// that accessing two different fields can be an unexpected Data Race (which is
+// Undefined Behavior!).
+//
+// By using MOZ_ATOMIC_BITFIELDS, you protect yourself from these Data Races,
+// and don't have to worry about writes getting lost.
+//
+//
+// ================================ EXAMPLE ===================================
+//
+//   #include "mozilla/AtomicBitfields.h"
+//   #include <stdint.h>
+//
+//
+//   struct MyType {
+//     MOZ_ATOMIC_BITFIELDS(mAtomicFields, 8, (
+//       (bool, IsDownloaded, 1),
+//       (uint32_t, SomeData, 2),
+//       (uint8_t, OtherData, 5)
+//     ))
+//
+//     int32_t aNormalInteger;
+//
+//     explicit MyType(uint32_t aSomeData): aNormalInteger(7) {
+//       StoreSomeData(aSomeData);
+//       // Other bitfields were already default initialized to 0/false
+//     }
+//   };
+//
+//
+//   int main() {
+//     MyType val(3);
+//
+//     if (!val.LoadIsDownloaded()) {
+//       val.StoreOtherData(2);
+//       val.StoreIsDownloaded(true);
+//     }
+//   }
+//
+//
+// ============================== GENERATED ===================================
+//
+// This macro is a real mess to read because, well, it's a macro. So for the
+// sake of anyone who has to review or modify its internals, here's a rough
+// sketch of what the above example would expand to:
+//
+//   struct MyType {
+//     // The actual storage of the bitfields, initialized to 0.
+//     std::atomic_uint8_t mAtomicFields{0};
+//
+//     // How many bits were actually used (in this case, all of them).
+//     static const size_t mAtomicFields_USED_BITS = 8;
+//
+//     // The offset values for each field.
+//     static const size_t mAtomicFieldsIsDownloaded = 0;
+//     static const size_t mAtomicFieldsSomeData = 1;
+//     static const size_t mAtomicFieldsOtherData = 3;
+//
+//     // Quick safety guard to prevent capacity overflow.
+//     static_assert(mAtomicFields_USED_BITS <= 8);
+//
+//     // Asserts that fields are reasonable.
+//     static_assert(8>1, "mAtomicFields: MOZ_ATOMIC_BITFIELDS field too big");
+//     static_assert(std::is_unsigned<uint32_t>(), "mAtomicFields:
+//                   MOZ_ATOMIC_BITFIELDS doesn't support signed payloads");
+//     // ...and so on
+//
+//     // Load/Store methods for all the fields.
+//
+//     bool LoadIsDownloaded() { ... }
+//     void StoreIsDownloaded(bool aValue) { ... }
+//
+//     uint32_t LoadSomeData() { ... }
+//     void StoreSomeData(uint32_t aValue) { ... }
+//
+//     uint8_t LoadOtherData() { ... }
+//     void StoreOtherData(uint8_t aValue) { ... }
+//
+//
+//     // Remainder of the struct body continues normally.
+//     int32_t aNormalInteger;
+//     explicit MyType(uint32_t aSomeData): aNormalInteger(7) {
+//       StoreSomeData(aSomeData);
+//       // Other bitfields were already default initialized to 0/false.
+//     }
+//   }
+//
+// Also if you're wondering why there's so many MOZ_CONCATs -- it's because
+// the preprocessor sometimes gets confused if we use ## on certain arguments.
+// MOZ_CONCAT reliably kept the preprocessor happy, sorry it's so ugly!
+//
+//
+// ==================== FIXMES / FUTURE WORK ==================================
+//
+// * It would be nice if LoadField could be IsField for booleans.
+//
+// * For the case of setting something to all 1's or 0's, we can use
+//   |fetch_or| or |fetch_and| instead of |compare_exchange_weak|. Is this
+//   worth providing? (Possibly for 1-bit boolean fields?)
+//
+// * Try harder to hide the atomic/enum/array internals from
+//   the outer struct?
+//
+#define MOZ_ATOMIC_BITFIELDS(aBitfields, aBitfieldsSize, aFields)             \
+  std::atomic_uint##aBitfieldsSize##_t aBitfields{0};                         \
+                                                                              \
+  static const size_t MOZ_CONCAT(aBitfields, _USED_BITS) =                    \
+      MOZ_FOR_EACH_SEPARATED(MOZ_ATOMIC_BITFIELDS_FIELD_SIZE, (+), (),        \
+                             aFields);                                        \
+                                                                              \
+  MOZ_ROLL_EACH(MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER1, (aBitfields, ), aFields) \
+                                                                              \
+  static_assert(MOZ_CONCAT(aBitfields, _USED_BITS) <= aBitfieldsSize,         \
+                #aBitfields ": Maximum bits (" #aBitfieldsSize                \
+                            ") exceeded for MOZ_ATOMIC_BITFIELDS instance");  \
+                                                                              \
+  MOZ_FOR_EACH(MOZ_ATOMIC_BITFIELDS_FIELD_HELPER,                             \
+               (aBitfields, aBitfieldsSize, ), aFields)
+
+// Just a helper to unpack the head of the list.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER1(aBitfields, aFields) \
+  MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER2(aBitfields, MOZ_ARG_1 aFields, aFields);
+
+// Just a helper to unpack the name and call the real function.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET_HELPER2(aBitfields, aField, aFields) \
+  MOZ_ATOMIC_BITFIELDS_OFFSET(aBitfields, MOZ_ARG_2 aField, aFields)
+
+// To compute the offset of a field, we sum up the sizes of that field and all
+// the fields after it, and subtract that from the total sum itself. We do this
+// to swap the rolling sum that |MOZ_ROLL_EACH| gets us from descending to
+// ascending.
+#define MOZ_ATOMIC_BITFIELDS_OFFSET(aBitfields, aFieldName, aFields)    \
+  static const size_t MOZ_CONCAT(aBitfields, aFieldName) =              \
+      MOZ_CONCAT(aBitfields, _USED_BITS) -                              \
+      (MOZ_FOR_EACH_SEPARATED(MOZ_ATOMIC_BITFIELDS_FIELD_SIZE, (+), (), \
+                              aFields));
+
+// Just a more clearly named way of unpacking the size.
+#define MOZ_ATOMIC_BITFIELDS_FIELD_SIZE(aArgs) MOZ_ARG_3 aArgs
+
+// Just a helper to unpack the tuple and call the real function.
+#define MOZ_ATOMIC_BITFIELDS_FIELD_HELPER(aBitfields, aBitfieldsSize, aArgs) \
+  MOZ_ATOMIC_BITFIELDS_FIELD(aBitfields, aBitfieldsSize, MOZ_ARG_1 aArgs,    \
+                             MOZ_ARG_2 aArgs, MOZ_ARG_3 aArgs)
+
+// We need to disable this with coverity because it doesn't like checking that
+// booleans are < 2 (because they always are).
+#ifdef __COVERITY__
+#  define MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize)
+#else
+#  define MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize) \
+    MOZ_ASSERT(((uint64_t)aValue) < (1ull << aFieldSize),      \
+               "Stored value exceeded capacity of bitfield!")
+#endif
+
+// Generates the Load and Store methods for each field.
+//
+// Some comments here because inline macro comments are a pain in the neck:
+//
+// Most of the locals are forward declared to minimize messy macroified
+// type declaration.
Also a lot of locals are used to try to make things +// a little more clear, while also avoiding integer promotion issues. +// This is why some locals are literally just copying a value we already have: +// to force it to the right size. +// +// There's an annoying overflow case where a bitfields instance has a field +// that is the same size as the bitfields. Rather than trying to handle that, +// we just static_assert against it. +// +// +// BITMATH EXPLAINED: +// +// For |Load$Name|: +// +// mask = ((1 << fieldSize) - 1) << offset +// +// If you subtract 1 from a value with 1 bit set you get all 1's below that bit. +// This is perfect for ANDing out |fieldSize| bits. We shift by |offset| to get +// it in the right place. +// +// value = (aBitfields.load() & mask) >> offset +// +// This sets every bit we're not interested in to 0. Shifting the result by +// |offset| converts the value back to its native format, ready to be cast +// up to an integer type. +// +// +// For |Store$Name|: +// +// packedValue = (resizedValue << offset) & mask +// +// This converts a native value to the packed format. If the value is in bounds, +// the AND will do nothing. If it's out of bounds (not checked in release), +// then it will cause the value to wrap around by modulo 2^aFieldSize, just like +// a normal uint. +// +// clearedValue = oldValue & ~mask; +// +// This clears the bits where our field is stored on our bitfield storage by +// ANDing it with an inverted (NOTed) mask. +// +// newValue = clearedValue | packedValue; +// +// Once we have |packedValue| and |clearedValue| they just need to be ORed +// together to merge the new field value with the old values of all the other +// fields. +// +// This last step is done in a while loop because someone else can modify +// the bits before we have a chance to. If we didn't guard against this, +// our write would undo the write the other thread did. |compare_exchange_weak| +// is specifically designed to handle this. We give it what we expect the +// current value to be, and what we want it to be. If someone else modifies +// the bitfields before us, then we will reload the value and try again. +// +// Note that |compare_exchange_weak| writes back the actual value to the +// "expected" argument (it's passed by-reference), so we don't need to do +// another load in the body of the loop when we fail to write our result. 
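+//
+// A concrete walkthrough of the math above (illustrative values, not part of
+// the macro): with 8-bit storage, offset == 1 and fieldSize == 2,
+//
+//   mask  == ((1 << 2) - 1) << 1                      == 0b00000110
+//   Load:  value    == (bits & 0b00000110) >> 1
+//   Store: newValue == (bits & ~0b00000110) | ((value << 1) & 0b00000110)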
+#define MOZ_ATOMIC_BITFIELDS_FIELD(aBitfields, aBitfieldsSize, aFieldType,    \
+                                   aFieldName, aFieldSize)                    \
+  static_assert(aBitfieldsSize > aFieldSize,                                  \
+                #aBitfields ": MOZ_ATOMIC_BITFIELDS field too big");          \
+  static_assert(std::is_unsigned<aFieldType>(), #aBitfields                   \
+                ": MOZ_ATOMIC_BITFIELDS doesn't support signed payloads");    \
+                                                                              \
+  aFieldType MOZ_CONCAT(Load, aFieldName)() const {                           \
+    uint##aBitfieldsSize##_t fieldSize, mask, masked, value;                  \
+    size_t offset = MOZ_CONCAT(aBitfields, aFieldName);                       \
+    fieldSize = aFieldSize;                                                   \
+    mask = ((1ull << fieldSize) - 1ull) << offset;                            \
+    masked = aBitfields.load() & mask;                                        \
+    value = (masked >> offset);                                               \
+    return value;                                                             \
+  }                                                                           \
+                                                                              \
+  void MOZ_CONCAT(Store, aFieldName)(aFieldType aValue) {                     \
+    MOZ_ATOMIC_BITFIELDS_STORE_GUARD(aValue, aFieldSize);                     \
+    uint##aBitfieldsSize##_t fieldSize, mask, resizedValue, packedValue,      \
+        oldValue, clearedValue, newValue;                                     \
+    size_t offset = MOZ_CONCAT(aBitfields, aFieldName);                       \
+    fieldSize = aFieldSize;                                                   \
+    mask = ((1ull << fieldSize) - 1ull) << offset;                            \
+    resizedValue = aValue;                                                    \
+    packedValue = (resizedValue << offset) & mask;                            \
+    oldValue = aBitfields.load();                                             \
+    do {                                                                      \
+      clearedValue = oldValue & ~mask;                                        \
+      newValue = clearedValue | packedValue;                                  \
+    } while (!aBitfields.compare_exchange_weak(oldValue, newValue));          \
+  }
+
+// OK SO THIS IS A GROSS HACK. GCC 10.2 (and below) has a bug[1] where it
+// doesn't allow a static array to reference itself in its initializer, so we
+// need to create a hacky way to produce a rolling sum of all the offsets.
+//
+// To do this, we make a tweaked version of |MOZ_FOR_EACH| which instead of
+// passing just one argument to |aMacro| it passes the remaining values of
+// |aArgs|.
+//
+// This allows us to expand an input (a, b, c, d) quadratically to:
+//
+//   int sum1 = a + b + c + d;
+//   int sum2 = b + c + d;
+//   int sum3 = c + d;
+//   int sum4 = d;
+//
+// So all of this is a copy-paste of |MOZ_FOR_EACH| except the definition
+// of |MOZ_FOR_EACH_HELPER| no longer extracts an argument with |MOZ_ARG_1|.
+// Also this is restricted to 32 arguments just to reduce footprint a little.
+//
+// If the GCC bug is ever fixed, then this hack can be removed, and we can
+// use the non-quadratic version that was originally written[2]. In case
+// that link dies, a brief summary of that implementation:
+//
+// * Associate each field with an index by creating an `enum class` with
+//   entries for each field (an existing gecko pattern).
+//
+// * Calculate offsets with a constexpr static array whose initializer
+//   self-referentially adds the contents of the previous index to the
+//   compute the current one.
+//
+// * Index into this array with the enum.
+//
+// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97234
+// [2] https://phabricator.services.mozilla.com/D91622?id=346499
+#define MOZ_ROLL_EACH_EXPAND_HELPER(...)
__VA_ARGS__ +#define MOZ_ROLL_EACH_GLUE(a, b) a b +#define MOZ_ROLL_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) \ + MOZ_ROLL_EACH_GLUE(MOZ_PASTE_PREFIX_AND_ARG_COUNT( \ + MOZ_ROLL_EACH_, MOZ_ROLL_EACH_EXPAND_HELPER aArgs), \ + (aMacro, aSeparator, aFixedArgs, aArgs)) +#define MOZ_ROLL_EACH(aMacro, aFixedArgs, aArgs) \ + MOZ_ROLL_EACH_SEPARATED(aMacro, (), aFixedArgs, aArgs) + +#define MOZ_ROLL_EACH_HELPER_GLUE(a, b) a b +#define MOZ_ROLL_EACH_HELPER(aMacro, aFixedArgs, aArgs) \ + MOZ_ROLL_EACH_HELPER_GLUE(aMacro, \ + (MOZ_ROLL_EACH_EXPAND_HELPER aFixedArgs aArgs)) + +#define MOZ_ROLL_EACH_0(m, s, fa, a) +#define MOZ_ROLL_EACH_1(m, s, fa, a) MOZ_ROLL_EACH_HELPER(m, fa, a) +#define MOZ_ROLL_EACH_2(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_1(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_3(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_2(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_4(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_3(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_5(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_4(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_6(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_5(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_7(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_6(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_8(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_7(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_9(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_8(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_10(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_9(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_11(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_10(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_12(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_11(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_13(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_12(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_14(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_13(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_15(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_14(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_16(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_15(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_17(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_16(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_18(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_17(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_19(m, s, fa, a) \ + MOZ_ROLL_EACH_HELPER(m, fa, a) \ + MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_18(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_ROLL_EACH_20(m, s, fa, a) \ + 
MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_19(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_21(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_20(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_22(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_21(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_23(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_22(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_24(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_23(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_25(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_24(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_26(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_25(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_27(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_26(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_28(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_27(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_29(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_28(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_30(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_29(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_31(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_30(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+#define MOZ_ROLL_EACH_32(m, s, fa, a)                                         \
+  MOZ_ROLL_EACH_HELPER(m, fa, a)                                              \
+  MOZ_ROLL_EACH_EXPAND_HELPER s MOZ_ROLL_EACH_31(m, s, fa, (MOZ_ARGS_AFTER_1 a))
+}  // namespace mozilla
+#endif /* mozilla_AtomicBitfields_h */
diff --git a/mfbt/Atomics.h b/mfbt/Atomics.h
new file mode 100644
index 0000000000..4373e08af7
--- /dev/null
+++ b/mfbt/Atomics.h
@@ -0,0 +1,521 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements (almost always) lock-free atomic operations. The operations here
+ * are a subset of that which can be found in C++11's <atomic> header, with a
+ * different API to enforce consistent memory ordering constraints.
+ *
+ * Anyone caught using |volatile| for inter-thread memory safety needs to be
+ * sent a copy of this header and the C++11 standard.
+ */
+
+#ifndef mozilla_Atomics_h
+#define mozilla_Atomics_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+
+#ifdef __wasi__
+#  include "mozilla/WasiAtomic.h"
+#else
+#  include <atomic>
+#endif  // __wasi__
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * An enum of memory ordering possibilities for atomics.
+ *
+ * Memory ordering is the observable state of distinct values in memory.
+ * (It's a separate concept from atomicity, which concerns whether an
+ * operation can ever be observed in an intermediate state. Don't
+ * conflate the two!)
Given a sequence of operations in source code on + * memory, it is *not* always the case that, at all times and on all + * cores, those operations will appear to have occurred in that exact + * sequence. First, the compiler might reorder that sequence, if it + * thinks another ordering will be more efficient. Second, the CPU may + * not expose so consistent a view of memory. CPUs will often perform + * their own instruction reordering, above and beyond that performed by + * the compiler. And each core has its own memory caches, and accesses + * (reads and writes both) to "memory" may only resolve to out-of-date + * cache entries -- not to the "most recently" performed operation in + * some global sense. Any access to a value that may be used by + * multiple threads, potentially across multiple cores, must therefore + * have a memory ordering imposed on it, for all code on all + * threads/cores to have a sufficiently coherent worldview. + * + * http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync and + * http://en.cppreference.com/w/cpp/atomic/memory_order go into more + * detail on all this, including examples of how each mode works. + * + * Note that for simplicity and practicality, not all of the modes in + * C++11 are supported. The missing C++11 modes are either subsumed by + * the modes we provide below, or not relevant for the CPUs we support + * in Gecko. These three modes are confusing enough as it is! + */ +enum MemoryOrdering { + /* + * Relaxed ordering is the simplest memory ordering: none at all. + * When the result of a write is observed, nothing may be inferred + * about other memory. Writes ostensibly performed "before" on the + * writing thread may not yet be visible. Writes performed "after" on + * the writing thread may already be visible, if the compiler or CPU + * reordered them. (The latter can happen if reads and/or writes get + * held up in per-processor caches.) Relaxed ordering means + * operations can always use cached values (as long as the actual + * updates to atomic values actually occur, correctly, eventually), so + * it's usually the fastest sort of atomic access. For this reason, + * *it's also the most dangerous kind of access*. + * + * Relaxed ordering is good for things like process-wide statistics + * counters that don't need to be consistent with anything else, so + * long as updates themselves are atomic. (And so long as any + * observations of that value can tolerate being out-of-date -- if you + * need some sort of up-to-date value, you need some sort of other + * synchronizing operation.) It's *not* good for locks, mutexes, + * reference counts, etc. that mediate access to other memory, or must + * be observably consistent with other memory. + * + * x86 architectures don't take advantage of the optimization + * opportunities that relaxed ordering permits. Thus it's possible + * that using relaxed ordering will "work" on x86 but fail elsewhere + * (ARM, say, which *does* implement non-sequentially-consistent + * relaxed ordering semantics). Be extra-careful using relaxed + * ordering if you can't easily test non-x86 architectures! + */ + Relaxed, + + /* + * When an atomic value is updated with ReleaseAcquire ordering, and + * that new value is observed with ReleaseAcquire ordering, prior + * writes (atomic or not) are also observable. What ReleaseAcquire + * *doesn't* give you is any observable ordering guarantees for + * ReleaseAcquire-ordered operations on different objects. 
For
+ * example, if there are two cores that each perform ReleaseAcquire
+ * operations on separate objects, each core may or may not observe
+ * the operations made by the other core. The only way the cores can
+ * be synchronized with ReleaseAcquire is if they both
+ * ReleaseAcquire-access the same object. This implies that you can't
+ * necessarily describe some global total ordering of ReleaseAcquire
+ * operations.
+ *
+ * ReleaseAcquire ordering is good for (as the name implies) atomic
+ * operations on values controlling ownership of things: reference
+ * counts, mutexes, and the like. However, if you are thinking about
+ * using these to implement your own locks or mutexes, you should take
+ * a good, hard look at actual lock or mutex primitives first.
+ */
+  ReleaseAcquire,
+
+/*
+ * When an atomic value is updated with SequentiallyConsistent
+ * ordering, all writes are observable when the update is observed, just
+ * as with ReleaseAcquire ordering. But, furthermore, a global total
+ * ordering of SequentiallyConsistent operations *can* be described.
+ * For example, if two cores perform SequentiallyConsistent operations
+ * on separate objects, one core will observably perform its update
+ * (and all previous operations will have completed), then the other
+ * core will observably perform its update (and all previous
+ * operations will have completed). (Although those previous
+ * operations aren't themselves ordered -- they could be intermixed,
+ * or ordered if they occur on atomic values with ordering
+ * requirements.) SequentiallyConsistent is the *simplest and safest*
+ * ordering of atomic operations -- it's always as if one operation
+ * happens, then another, then another, in some order -- and every
+ * core observes updates to happen in that single order. Because it
+ * has the most synchronization requirements, operations ordered this
+ * way also tend to be slowest.
+ *
+ * SequentiallyConsistent ordering can be desirable when multiple
+ * threads observe objects, and they all have to agree on the
+ * observable order of changes to them. People expect
+ * SequentiallyConsistent ordering, even if they shouldn't, when
+ * writing code, atomic or otherwise. SequentiallyConsistent is also
+ * the ordering of choice when designing lockless data structures. If
+ * you don't know what order to use, use this one.
+ */
+  SequentiallyConsistent,
+};
+
+namespace detail {
+
+/*
+ * We provide CompareExchangeFailureOrder to work around a bug in some
+ * versions of GCC's <atomic> header. See bug 898491.
+ */
+template <MemoryOrdering Order>
+struct AtomicOrderConstraints;
+
+template <>
+struct AtomicOrderConstraints<Relaxed> {
+  static const std::memory_order AtomicRMWOrder = std::memory_order_relaxed;
+  static const std::memory_order LoadOrder = std::memory_order_relaxed;
+  static const std::memory_order StoreOrder = std::memory_order_relaxed;
+  static const std::memory_order CompareExchangeFailureOrder =
+      std::memory_order_relaxed;
+};
+
+template <>
+struct AtomicOrderConstraints<ReleaseAcquire> {
+  static const std::memory_order AtomicRMWOrder = std::memory_order_acq_rel;
+  static const std::memory_order LoadOrder = std::memory_order_acquire;
+  static const std::memory_order StoreOrder = std::memory_order_release;
+  static const std::memory_order CompareExchangeFailureOrder =
+      std::memory_order_acquire;
+};
+
+template <>
+struct AtomicOrderConstraints<SequentiallyConsistent> {
+  static const std::memory_order AtomicRMWOrder = std::memory_order_seq_cst;
+  static const std::memory_order LoadOrder = std::memory_order_seq_cst;
+  static const std::memory_order StoreOrder = std::memory_order_seq_cst;
+  static const std::memory_order CompareExchangeFailureOrder =
+      std::memory_order_seq_cst;
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicBase {
+  typedef std::atomic<T> ValueType;
+  typedef AtomicOrderConstraints<Order> OrderedOp;
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicMemoryOps : public IntrinsicBase<T, Order> {
+  typedef IntrinsicBase<T, Order> Base;
+
+  static T load(const typename Base::ValueType& aPtr) {
+    return aPtr.load(Base::OrderedOp::LoadOrder);
+  }
+
+  static void store(typename Base::ValueType& aPtr, T aVal) {
+    aPtr.store(aVal, Base::OrderedOp::StoreOrder);
+  }
+
+  static T exchange(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.exchange(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+
+  static bool compareExchange(typename Base::ValueType& aPtr, T aOldVal,
+                              T aNewVal) {
+    return aPtr.compare_exchange_strong(
+        aOldVal, aNewVal, Base::OrderedOp::AtomicRMWOrder,
+        Base::OrderedOp::CompareExchangeFailureOrder);
+  }
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicAddSub : public IntrinsicBase<T, Order> {
+  typedef IntrinsicBase<T, Order> Base;
+
+  static T add(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+
+  static T sub(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicAddSub<T*, Order> : public IntrinsicBase<T*, Order> {
+  typedef IntrinsicBase<T*, Order> Base;
+
+  static T* add(typename Base::ValueType& aPtr, ptrdiff_t aVal) {
+    return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+
+  static T* sub(typename Base::ValueType& aPtr, ptrdiff_t aVal) {
+    return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+};
+
+template <typename T, MemoryOrdering Order>
+struct IntrinsicIncDec : public IntrinsicAddSub<T, Order> {
+  typedef IntrinsicBase<T, Order> Base;
+
+  static T inc(typename Base::ValueType& aPtr) {
+    return IntrinsicAddSub<T, Order>::add(aPtr, 1);
+  }
+
+  static T dec(typename Base::ValueType& aPtr) {
+    return IntrinsicAddSub<T, Order>::sub(aPtr, 1);
+  }
+};
+
+template <typename T, MemoryOrdering Order>
+struct AtomicIntrinsics : public IntrinsicMemoryOps<T, Order>,
+                          public IntrinsicIncDec<T, Order> {
+  typedef IntrinsicBase<T, Order> Base;
+
+  static T or_(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.fetch_or(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+
+  static T xor_(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.fetch_xor(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+
+  static T and_(typename Base::ValueType& aPtr, T aVal) {
+    return aPtr.fetch_and(aVal, Base::OrderedOp::AtomicRMWOrder);
+  }
+};
+
+template <typename T, MemoryOrdering Order>
+struct AtomicIntrinsics<T*, Order> : public IntrinsicMemoryOps<T*, Order>,
+                                     public IntrinsicIncDec<T*, Order> {};
+
+template <typename T>
+struct ToStorageTypeArgument {
+  static constexpr T convert(T aT) { return aT; }
+};
+
+template <typename T, MemoryOrdering Order>
+class AtomicBase {
+  static_assert(sizeof(T) == 4 || sizeof(T) == 8,
+                "mozilla/Atomics.h only supports 32-bit and 64-bit types");
+
+ protected:
+  typedef typename detail::AtomicIntrinsics<T, Order> Intrinsics;
+  typedef typename Intrinsics::ValueType ValueType;
+  ValueType mValue;
+
+ public:
+  constexpr AtomicBase() : mValue() {}
+  explicit constexpr AtomicBase(T aInit)
+      : mValue(ToStorageTypeArgument<T>::convert(aInit)) {}
+
+  // Note: we can't provide operator T() here because Atomic<bool> inherits
+  // from AtomicBase with T=uint32_t and not T=bool. If we implemented
+  // operator T() here, it would cause errors when comparing Atomic<bool> with
+  // a regular bool.
+
+  T operator=(T aVal) {
+    Intrinsics::store(mValue, aVal);
+    return aVal;
+  }
+
+  /**
+   * Performs an atomic swap operation. aVal is stored and the previous
+   * value of this variable is returned.
+   */
+  T exchange(T aVal) { return Intrinsics::exchange(mValue, aVal); }
+
+  /**
+   * Performs an atomic compare-and-swap operation and returns true if it
+   * succeeded. This is equivalent to atomically doing
+   *
+   *   if (mValue == aOldValue) {
+   *     mValue = aNewValue;
+   *     return true;
+   *   } else {
+   *     return false;
+   *   }
+   */
+  bool compareExchange(T aOldValue, T aNewValue) {
+    return Intrinsics::compareExchange(mValue, aOldValue, aNewValue);
+  }
+
+ private:
+  AtomicBase(const AtomicBase& aCopy) = delete;
+};
+
+template <typename T, MemoryOrdering Order>
+class AtomicBaseIncDec : public AtomicBase<T, Order> {
+  typedef typename detail::AtomicBase<T, Order> Base;
+
+ public:
+  constexpr AtomicBaseIncDec() : Base() {}
+  explicit constexpr AtomicBaseIncDec(T aInit) : Base(aInit) {}
+
+  using Base::operator=;
+
+  operator T() const { return Base::Intrinsics::load(Base::mValue); }
+  T operator++(int) { return Base::Intrinsics::inc(Base::mValue); }
+  T operator--(int) { return Base::Intrinsics::dec(Base::mValue); }
+  T operator++() { return Base::Intrinsics::inc(Base::mValue) + 1; }
+  T operator--() { return Base::Intrinsics::dec(Base::mValue) - 1; }
+
+ private:
+  AtomicBaseIncDec(const AtomicBaseIncDec& aCopy) = delete;
+};
+
+}  // namespace detail
+
+/**
+ * A wrapper for a type that enforces that all memory accesses are atomic.
+ *
+ * In general, where a variable |T foo| exists, |Atomic<T> foo| can be used in
+ * its place. Implementations for integral and pointer types are provided
+ * below.
+ *
+ * Atomic accesses are sequentially consistent by default. You should
+ * use the default unless you are tall enough to ride the
+ * memory-ordering roller coaster (if you're not sure, you aren't) and
+ * you have a compelling reason to do otherwise.
+ *
+ * There is one exception to the case of atomic memory accesses: providing an
+ * initial value of the atomic value is not guaranteed to be atomic. This is a
+ * deliberate design choice that enables static atomic variables to be declared
+ * without introducing extra static constructors.
+ */
+template <typename T, MemoryOrdering Order = SequentiallyConsistent,
+          typename Enable = void>
+class Atomic;
+
+/**
+ * Atomic implementation for integral types.
+ *
+ * In addition to atomic store and load operations, compound assignment and
+ * increment/decrement operators are implemented which perform the
+ * corresponding read-modify-write operation atomically. Finally, an atomic
+ * swap method is provided.
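+ *
+ * For example (an illustrative sketch, not from this header):
+ *
+ *   Atomic<uint32_t> sCounter(0);
+ *   sCounter++;              // atomic increment
+ *   sCounter |= 0x4;         // atomic read-modify-write
+ *   uint32_t v = sCounter;   // atomic load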
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<
+    T, Order,
+    std::enable_if_t<std::is_integral_v<T> && !std::is_same_v<T, bool>>>
+    : public detail::AtomicBaseIncDec<T, Order> {
+  typedef typename detail::AtomicBaseIncDec<T, Order> Base;
+
+ public:
+  constexpr Atomic() : Base() {}
+  explicit constexpr Atomic(T aInit) : Base(aInit) {}
+
+  using Base::operator=;
+
+  T operator+=(T aDelta) {
+    return Base::Intrinsics::add(Base::mValue, aDelta) + aDelta;
+  }
+
+  T operator-=(T aDelta) {
+    return Base::Intrinsics::sub(Base::mValue, aDelta) - aDelta;
+  }
+
+  T operator|=(T aVal) {
+    return Base::Intrinsics::or_(Base::mValue, aVal) | aVal;
+  }
+
+  T operator^=(T aVal) {
+    return Base::Intrinsics::xor_(Base::mValue, aVal) ^ aVal;
+  }
+
+  T operator&=(T aVal) {
+    return Base::Intrinsics::and_(Base::mValue, aVal) & aVal;
+  }
+
+ private:
+  Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic implementation for pointer types.
+ *
+ * An atomic compare-and-swap primitive for pointer variables is provided, as
+ * are atomic increment and decrement operators. Also provided are the compound
+ * assignment operators for addition and subtraction. Atomic swap (via
+ * exchange()) is included as well.
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<T*, Order> : public detail::AtomicBaseIncDec<T*, Order> {
+  typedef typename detail::AtomicBaseIncDec<T*, Order> Base;
+
+ public:
+  constexpr Atomic() : Base() {}
+  explicit constexpr Atomic(T* aInit) : Base(aInit) {}
+
+  using Base::operator=;
+
+  T* operator+=(ptrdiff_t aDelta) {
+    return Base::Intrinsics::add(Base::mValue, aDelta) + aDelta;
+  }
+
+  T* operator-=(ptrdiff_t aDelta) {
+    return Base::Intrinsics::sub(Base::mValue, aDelta) - aDelta;
+  }
+
+ private:
+  Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic implementation for enum types.
+ *
+ * The atomic store and load operations and the atomic swap method are
+ * provided.
+ */
+template <typename T, MemoryOrdering Order>
+class Atomic<T, Order, std::enable_if_t<std::is_enum_v<T>>>
+    : public detail::AtomicBase<T, Order> {
+  typedef typename detail::AtomicBase<T, Order> Base;
+
+ public:
+  constexpr Atomic() : Base() {}
+  explicit constexpr Atomic(T aInit) : Base(aInit) {}
+
+  operator T() const { return T(Base::Intrinsics::load(Base::mValue)); }
+
+  using Base::operator=;
+
+ private:
+  Atomic(Atomic& aOther) = delete;
+};
+
+/**
+ * Atomic implementation for boolean types.
+ *
+ * The atomic store and load operations and the atomic swap method are
+ * provided.
+ *
+ * Note:
+ *
+ * - sizeof(Atomic<bool>) != sizeof(bool) for some implementations of
+ *   bool and/or some implementations of std::atomic. This is allowed in
+ *   [atomic.types.generic]p9.
+ *
+ * - It's not obvious whether the 8-bit atomic functions on Windows are always
+ *   inlined or not. If they are not inlined, the corresponding functions in
+ *   the runtime library are not available on Windows XP. This is why we
+ *   implement Atomic<bool> with an underlying type of uint32_t.
+ */
+template <MemoryOrdering Order>
+class Atomic<bool, Order> : protected detail::AtomicBase<uint32_t, Order> {
+  typedef typename detail::AtomicBase<uint32_t, Order> Base;
+
+ public:
+  constexpr Atomic() : Base() {}
+  explicit constexpr Atomic(bool aInit) : Base(aInit) {}
+
+  // We provide boolean wrappers for the underlying AtomicBase methods.
+  MOZ_IMPLICIT operator bool() const {
+    return Base::Intrinsics::load(Base::mValue);
+  }
+
+  bool operator=(bool aVal) { return Base::operator=(aVal); }
+
+  bool exchange(bool aVal) { return Base::exchange(aVal); }
+
+  bool compareExchange(bool aOldValue, bool aNewValue) {
+    return Base::compareExchange(aOldValue, aNewValue);
+  }
+
+ private:
+  Atomic(Atomic& aOther) = delete;
+};
+
+}  // namespace mozilla
+
+namespace std {
+
+// If you want to atomically swap two atomic values, use exchange().
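+//
+// An illustrative sketch (not from this header) -- note that the pair of
+// operations below is not itself atomic:
+//
+//   T oldA = a.exchange(b);  // atomically store b's current value into a
+//   b = oldA;                // then store a's old value into b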
+template <typename T, mozilla::MemoryOrdering Order>
+void swap(mozilla::Atomic<T, Order>&, mozilla::Atomic<T, Order>&) = delete;
+
+}  // namespace std
+
+#endif /* mozilla_Atomics_h */
diff --git a/mfbt/Attributes.h b/mfbt/Attributes.h
new file mode 100644
index 0000000000..d6e6293066
--- /dev/null
+++ b/mfbt/Attributes.h
@@ -0,0 +1,983 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of various class and method modifier attributes. */
+
+#ifndef mozilla_Attributes_h
+#define mozilla_Attributes_h
+
+#include "mozilla/Compiler.h"
+
+/*
+ * MOZ_ALWAYS_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must be inlined, even if the compiler thinks
+ * otherwise. This is only a (much) stronger version of the inline hint:
+ * compilers are not guaranteed to respect it (although they're much more
+ * likely to do so).
+ *
+ * The MOZ_ALWAYS_INLINE_EVEN_DEBUG macro is yet stronger. It tells the
+ * compiler to inline even in DEBUG builds. It should be used very rarely.
+ */
+#if defined(_MSC_VER)
+#  define MOZ_ALWAYS_INLINE_EVEN_DEBUG __forceinline
+#elif defined(__GNUC__)
+#  define MOZ_ALWAYS_INLINE_EVEN_DEBUG __attribute__((always_inline)) inline
+#else
+#  define MOZ_ALWAYS_INLINE_EVEN_DEBUG inline
+#endif
+
+#if !defined(DEBUG)
+#  define MOZ_ALWAYS_INLINE MOZ_ALWAYS_INLINE_EVEN_DEBUG
+#elif defined(_MSC_VER) && !defined(__cplusplus)
+#  define MOZ_ALWAYS_INLINE __inline
+#else
+#  define MOZ_ALWAYS_INLINE inline
+#endif
+
+#if defined(_MSC_VER)
+/*
+ * g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
+ * without warnings (functionality used by the macros below). These modes are
+ * detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or,
+ * more standardly, by checking whether __cplusplus has a C++11 or greater
+ * value. Current versions of g++ do not correctly set __cplusplus, so we
+ * check both for forward compatibility.
+ */
+#  define MOZ_HAVE_NEVER_INLINE __declspec(noinline)
+#  define MOZ_HAVE_NORETURN __declspec(noreturn)
+#elif defined(__clang__)
+/*
+ * Per Clang documentation, "Note that marketing version numbers should not
+ * be used to check for language features, as different vendors use different
+ * numbering schemes. Instead, use the feature checking macros."
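+ *
+ * For instance (an illustrative sketch), prefer
+ *
+ *   #if __has_attribute(noinline)
+ *
+ * over comparing __clang_major__ against a particular vendor release.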
+ */
+#  ifndef __has_extension
+#    define __has_extension \
+      __has_feature /* compatibility, for older versions of clang */
+#  endif
+#  if __has_attribute(noinline)
+#    define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+#  endif
+#  if __has_attribute(noreturn)
+#    define MOZ_HAVE_NORETURN __attribute__((noreturn))
+#  endif
+#elif defined(__GNUC__)
+#  define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+#  define MOZ_HAVE_NORETURN __attribute__((noreturn))
+#  define MOZ_HAVE_NORETURN_PTR __attribute__((noreturn))
+#endif
+
+#if defined(__clang__)
+#  if __has_attribute(no_stack_protector)
+#    define MOZ_HAVE_NO_STACK_PROTECTOR __attribute__((no_stack_protector))
+#  endif
+#elif defined(__GNUC__)
+#  define MOZ_HAVE_NO_STACK_PROTECTOR __attribute__((no_stack_protector))
+#endif
+
+/*
+ * When built with the clang analyzer (a.k.a. scan-build), define
+ * MOZ_HAVE_ANALYZER_NORETURN so that we can mark some false positives.
+ */
+#ifdef __clang_analyzer__
+#  if __has_extension(attribute_analyzer_noreturn)
+#    define MOZ_HAVE_ANALYZER_NORETURN __attribute__((analyzer_noreturn))
+#  endif
+#endif
+
+/*
+ * MOZ_NEVER_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must never be inlined, even if the compiler would
+ * otherwise choose to inline the method. Compilers aren't absolutely
+ * guaranteed to support this, but most do.
+ */
+#if defined(MOZ_HAVE_NEVER_INLINE)
+#  define MOZ_NEVER_INLINE MOZ_HAVE_NEVER_INLINE
+#else
+#  define MOZ_NEVER_INLINE /* no support */
+#endif
+
+/*
+ * MOZ_NEVER_INLINE_DEBUG is a macro which expands to MOZ_NEVER_INLINE
+ * in debug builds, and nothing in opt builds.
+ */
+#if defined(DEBUG)
+#  define MOZ_NEVER_INLINE_DEBUG MOZ_NEVER_INLINE
+#else
+#  define MOZ_NEVER_INLINE_DEBUG /* don't inline in opt builds */
+#endif
+
+/*
+ * MOZ_NORETURN, specified at the start of a function declaration, indicates
+ * that the given function does not return. (The function definition does not
+ * need to be annotated.)
+ *
+ *   MOZ_NORETURN void abort(const char* msg);
+ *
+ * This modifier permits the compiler to optimize code assuming a call to such
+ * a function will never return. It also enables the compiler to avoid
+ * spurious warnings about not initializing variables, or about any other
+ * seemingly-dodgy operations performed after the function returns.
+ *
+ * There are two variants. The GCC version of NORETURN may be applied to a
+ * function pointer, while for MSVC it may not.
+ *
+ * This modifier does not affect the corresponding function's linking
+ * behavior.
+ */
+#if defined(MOZ_HAVE_NORETURN)
+#  define MOZ_NORETURN MOZ_HAVE_NORETURN
+#else
+#  define MOZ_NORETURN /* no support */
+#endif
+#if defined(MOZ_HAVE_NORETURN_PTR)
+#  define MOZ_NORETURN_PTR MOZ_HAVE_NORETURN_PTR
+#else
+#  define MOZ_NORETURN_PTR /* no support */
+#endif
+
+/**
+ * MOZ_COLD tells the compiler that a function is "cold", meaning infrequently
+ * executed. This may lead it to optimize for size more aggressively than
+ * speed, or to allocate the body of the function in a distant part of the
+ * text segment to help keep it from taking up unnecessary icache when it
+ * isn't in use.
+ *
+ * Place this attribute at the very beginning of a function definition. For
+ * example, write
+ *
+ *   MOZ_COLD int foo();
+ *
+ * or
+ *
+ *   MOZ_COLD int foo() { return 42; }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_COLD __attribute__((cold))
+#else
+#  define MOZ_COLD
+#endif
+
+/**
+ * MOZ_NONNULL tells the compiler that some of the arguments to a function are
+ * known to be non-null. The arguments are a list of 1-based argument indexes
+ * identifying arguments which are known to be non-null.
+ *
+ * Place this attribute at the very beginning of a function definition. For
+ * example, write
+ *
+ *   MOZ_NONNULL(1, 2) int foo(char *p, char *q);
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_NONNULL(...) __attribute__((nonnull(__VA_ARGS__)))
+#else
+#  define MOZ_NONNULL(...)
+#endif
+
+/**
+ * MOZ_NONNULL_RETURN tells the compiler that the function's return value is
+ * guaranteed to be a non-null pointer, which may enable the compiler to
+ * optimize better at call sites.
+ *
+ * Place this attribute at the end of a function declaration. For example,
+ *
+ *   char* foo(char *p, char *q) MOZ_NONNULL_RETURN;
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_NONNULL_RETURN __attribute__((returns_nonnull))
+#else
+#  define MOZ_NONNULL_RETURN
+#endif
+
+/*
+ * MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS, specified at the end of a
+ * function declaration, indicates that for the purposes of static analysis,
+ * this function does not return. (The function definition does not need to
+ * be annotated.)
+ *
+ *   MOZ_ReportCrash(const char* s, const char* file, int ln)
+ *     MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
+ *
+ * Some static analyzers, like scan-build from clang, can use this information
+ * to eliminate false positives. From the upstream documentation of
+ * scan-build: "This attribute is useful for annotating assertion handlers
+ * that actually can return, but for the purpose of using the analyzer we
+ * want to pretend that such functions do not return."
+ */
+#if defined(MOZ_HAVE_ANALYZER_NORETURN)
+#  define MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS MOZ_HAVE_ANALYZER_NORETURN
+#else
+#  define MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS /* no support */
+#endif
+
+/*
+ * MOZ_ASAN_IGNORE is a macro to tell AddressSanitizer (a compile-time
+ * instrumentation shipped with Clang and GCC) to not instrument the annotated
+ * function. Furthermore, it will prevent the compiler from inlining the
+ * function because inlining currently breaks the blocklisting mechanism of
+ * AddressSanitizer.
+ */
+#if defined(__has_feature)
+#  if __has_feature(address_sanitizer)
+#    define MOZ_HAVE_ASAN_IGNORE
+#  endif
+#elif defined(__GNUC__)
+#  if defined(__SANITIZE_ADDRESS__)
+#    define MOZ_HAVE_ASAN_IGNORE
+#  endif
+#endif
+
+#if defined(MOZ_HAVE_ASAN_IGNORE)
+#  define MOZ_ASAN_IGNORE MOZ_NEVER_INLINE __attribute__((no_sanitize_address))
+#else
+#  define MOZ_ASAN_IGNORE /* nothing */
+#endif
+
+/*
+ * MOZ_TSAN_IGNORE is a macro to tell ThreadSanitizer (a compile-time
+ * instrumentation shipped with Clang) to not instrument the annotated
+ * function. Furthermore, it will prevent the compiler from inlining the
+ * function because inlining currently breaks the blocklisting mechanism of
+ * ThreadSanitizer.
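+ *
+ * An illustrative sketch (a hypothetical function and counter, not part of
+ * the original header): a deliberately unsynchronized statistics read that
+ * TSan would otherwise flag.
+ *
+ *   MOZ_TSAN_IGNORE uint64_t ReadApproximateCount() {
+ *     return sApproximateCount;  // racy by design; an estimate is fine here
+ *   }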
+ */
+#if defined(__has_feature)
+#  if __has_feature(thread_sanitizer)
+#    define MOZ_TSAN_IGNORE MOZ_NEVER_INLINE __attribute__((no_sanitize_thread))
+#  else
+#    define MOZ_TSAN_IGNORE /* nothing */
+#  endif
+#else
+#  define MOZ_TSAN_IGNORE /* nothing */
+#endif
+
+#if defined(__has_attribute)
+#  if __has_attribute(no_sanitize)
+#    define MOZ_HAVE_NO_SANITIZE_ATTR
+#  endif
+#endif
+
+#ifdef __clang__
+#  ifdef MOZ_HAVE_NO_SANITIZE_ATTR
+#    define MOZ_HAVE_UNSIGNED_OVERFLOW_SANITIZE_ATTR
+#    define MOZ_HAVE_SIGNED_OVERFLOW_SANITIZE_ATTR
+#  endif
+#endif
+
+/*
+ * MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW disables *un*signed integer overflow
+ * checking on the function it annotates, in builds configured to perform it.
+ * (Currently this is only Clang using -fsanitize=unsigned-integer-overflow,
+ * or via --enable-unsigned-overflow-sanitizer in Mozilla's build system.) It
+ * has no effect in other builds.
+ *
+ * Place this attribute at the very beginning of a function declaration.
+ *
+ * Unsigned integer overflow isn't *necessarily* a bug. It's well-defined in
+ * C/C++, and code may reasonably depend upon it. For example,
+ *
+ *   MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline bool
+ *   IsDecimal(char aChar)
+ *   {
+ *     // For chars less than '0', unsigned integer underflow occurs, to a
+ *     // value much greater than 10, so the overall test is false.
+ *     // For chars greater than '0', no overflow occurs, and only '0' to '9'
+ *     // pass the overall test.
+ *     return static_cast<uint32_t>(aChar) - '0' < 10;
+ *   }
+ *
+ * But even well-defined unsigned overflow often causes bugs when it occurs,
+ * so it should be restricted to functions annotated with this attribute.
+ *
+ * The compiler instrumentation to detect unsigned integer overflow has costs
+ * both at compile time and at runtime. Functions that are repeatedly inlined
+ * at compile time will also implicitly inline the necessary instrumentation,
+ * increasing compile time. Similarly, frequently-executed functions that
+ * require large amounts of instrumentation will also notice significant
+ * runtime slowdown to execute that instrumentation. Use this attribute to
+ * eliminate those costs -- but only after carefully verifying that no
+ * overflow can occur.
+ */
+#ifdef MOZ_HAVE_UNSIGNED_OVERFLOW_SANITIZE_ATTR
+#  define MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW \
+    __attribute__((no_sanitize("unsigned-integer-overflow")))
+#else
+#  define MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW /* nothing */
+#endif
+
+/*
+ * MOZ_NO_SANITIZE_SIGNED_OVERFLOW disables *signed* integer overflow checking
+ * on the function it annotates, in builds configured to perform it.
+ * (Currently this is only Clang using -fsanitize=signed-integer-overflow, or
+ * via --enable-signed-overflow-sanitizer in Mozilla's build system. GCC
+ * support will probably be added in the future.) It has no effect in other
+ * builds.
+ *
+ * Place this attribute at the very beginning of a function declaration.
+ *
+ * Signed integer overflow is undefined behavior in C/C++: *anything* can
+ * happen when it occurs. *Maybe* wraparound behavior will occur, but maybe
+ * also the compiler will assume no overflow happens and will adversely
+ * optimize the rest of your code. Code that contains signed integer overflow
+ * needs to be fixed.
+ *
+ * The compiler instrumentation to detect signed integer overflow has costs
+ * both at compile time and at runtime. Functions that are repeatedly inlined
+ * at compile time will also implicitly inline the necessary instrumentation,
+ * increasing compile time. Similarly, frequently-executed functions that
+ * require large amounts of instrumentation will also notice significant
+ * runtime slowdown to execute that instrumentation. Use this attribute to
+ * eliminate those costs -- but only after carefully verifying that no
+ * overflow can occur.
+ */
+#ifdef MOZ_HAVE_SIGNED_OVERFLOW_SANITIZE_ATTR
+#  define MOZ_NO_SANITIZE_SIGNED_OVERFLOW \
+    __attribute__((no_sanitize("signed-integer-overflow")))
+#else
+#  define MOZ_NO_SANITIZE_SIGNED_OVERFLOW /* nothing */
+#endif
+
+#undef MOZ_HAVE_NO_SANITIZE_ATTR
+
+/**
+ * MOZ_ALLOCATOR tells the compiler that the function it marks returns either
+ * a "fresh", "pointer-free" block of memory, or nullptr. "Fresh" means that
+ * the block is not pointed to by any other reachable pointer in the program.
+ * "Pointer-free" means that the block contains no pointers to any valid
+ * object in the program. It may be initialized with other (non-pointer)
+ * values.
+ *
+ * Placing this attribute on appropriate functions helps GCC analyze pointer
+ * aliasing more accurately in their callers.
+ *
+ * GCC warns if a caller ignores the value returned by a function marked with
+ * MOZ_ALLOCATOR: it is hard to imagine cases where dropping the value
+ * returned by a function that meets the criteria above would be intentional.
+ *
+ * Place this attribute after the argument list and 'this' qualifiers of a
+ * function definition. For example, write
+ *
+ *   void *my_allocator(size_t) MOZ_ALLOCATOR;
+ *
+ * or
+ *
+ *   void *my_allocator(size_t bytes) MOZ_ALLOCATOR { ... }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_ALLOCATOR __attribute__((malloc, warn_unused_result))
+#  define MOZ_INFALLIBLE_ALLOCATOR \
+    __attribute__((malloc, warn_unused_result, returns_nonnull))
+#else
+#  define MOZ_ALLOCATOR
+#  define MOZ_INFALLIBLE_ALLOCATOR
+#endif
+
+/**
+ * MOZ_MAYBE_UNUSED suppresses compiler warnings about functions that are
+ * never called (in this build configuration, at least).
+ *
+ * Place this attribute at the very beginning of a function declaration. For
+ * example, write
+ *
+ *   MOZ_MAYBE_UNUSED int foo();
+ *
+ * or
+ *
+ *   MOZ_MAYBE_UNUSED int foo() { return 42; }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_MAYBE_UNUSED __attribute__((__unused__))
+#elif defined(_MSC_VER)
+#  define MOZ_MAYBE_UNUSED __pragma(warning(suppress : 4505))
+#else
+#  define MOZ_MAYBE_UNUSED
+#endif
+
+/*
+ * MOZ_NO_STACK_PROTECTOR, specified at the start of a function declaration,
+ * indicates that the given function should *NOT* be instrumented to detect
+ * stack buffer overflows at runtime. (The function definition does not need
+ * to be annotated.)
+ *
+ *   MOZ_NO_STACK_PROTECTOR int foo();
+ *
+ * Detecting stack buffer overflows at runtime is a security feature. This
+ * modifier should thus only be used on functions which are provably exempt
+ * from stack buffer overflows, for example because they do not use stack
+ * buffers.
+ *
+ * This modifier does not affect the corresponding function's linking
+ * behavior.
+ */
+#if defined(MOZ_HAVE_NO_STACK_PROTECTOR)
+#  define MOZ_NO_STACK_PROTECTOR MOZ_HAVE_NO_STACK_PROTECTOR
+#else
+#  define MOZ_NO_STACK_PROTECTOR /* no support */
+#endif
+
+#ifdef __cplusplus
+
+/**
+ * C++11 lets unions contain members that have non-trivial special member
+ * functions (default/copy/move constructor, copy/move assignment operator,
+ * destructor) if the user defines the corresponding functions on the union.
+ * (Such user-defined functions must rely on external knowledge about which
+ * arm is active to be safe. Be extra-careful defining these functions!)
+ *
+ * MSVC unfortunately warns/errors for this bog-standard C++11 pattern. Use
+ * these macro-guards around such member functions to disable the warnings:
+ *
+ *   union U
+ *   {
+ *     std::string s;
+ *     int x;
+ *
+ *     MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ *
+ *     // |U| must have a user-defined default constructor because
+ *     // |std::string| has a non-trivial default constructor.
+ *     U() ... { ... }
+ *
+ *     // |U| must have a user-defined destructor because |std::string| has a
+ *     // non-trivial destructor.
+ *     ~U() { ... }
+ *
+ *     MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ *   };
+ */
+#  if defined(_MSC_VER)
+#    define MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS \
+      __pragma(warning(push)) __pragma(warning(disable : 4582)) \
+          __pragma(warning(disable : 4583))
+#    define MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS __pragma(warning(pop))
+#  else
+#    define MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS /* nothing */
+#    define MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS /* nothing */
+#  endif
+
+/*
+ * The following macros are attributes that support the static analysis
+ * plugin included with Mozilla, and will be implemented (when such support
+ * is enabled) as C++11 attributes. Since such attributes are legal pretty
+ * much everywhere and have subtly different semantics depending on their
+ * placement, the following is a guide on where to place the attributes.
+ *
+ * Attributes that apply to a struct or class precede the name of the class:
+ * (Note that this is different from the placement of final for classes!)
+ *
+ *   class MOZ_CLASS_ATTRIBUTE SomeClass {};
+ *
+ * Attributes that apply to functions follow the parentheses and const
+ * qualifiers but precede final, override and the function body:
+ *
+ *   void DeclaredFunction() MOZ_FUNCTION_ATTRIBUTE;
+ *   void SomeFunction() MOZ_FUNCTION_ATTRIBUTE {}
+ *   void PureFunction() const MOZ_FUNCTION_ATTRIBUTE = 0;
+ *   void OverriddenFunction() MOZ_FUNCTION_ATTRIBUTE override;
+ *
+ * Attributes that apply to variables or parameters follow the variable's
+ * name:
+ *
+ *   int variable MOZ_VARIABLE_ATTRIBUTE;
+ *
+ * Attributes that apply to types follow the type name:
+ *
+ *   typedef int MOZ_TYPE_ATTRIBUTE MagicInt;
+ *   int MOZ_TYPE_ATTRIBUTE someVariable;
+ *   int* MOZ_TYPE_ATTRIBUTE magicPtrInt;
+ *   int MOZ_TYPE_ATTRIBUTE* ptrToMagicInt;
+ *
+ * Attributes that apply to statements precede the statement:
+ *
+ *   MOZ_IF_ATTRIBUTE if (x == 0)
+ *   MOZ_DO_ATTRIBUTE do { } while (0);
+ *
+ * Attributes that apply to labels precede the label:
+ *
+ *   MOZ_LABEL_ATTRIBUTE target:
+ *     goto target;
+ *   MOZ_CASE_ATTRIBUTE case 5:
+ *   MOZ_DEFAULT_ATTRIBUTE default:
+ *
+ * The static analyses that are performed by the plugin are as follows:
+ *
+ * MOZ_CAN_RUN_SCRIPT: Applies to functions which can run script. Callers of
+ * this function must also be marked as MOZ_CAN_RUN_SCRIPT, and all
+ * refcounted arguments must be strongly held in the caller. Note that
+ * MOZ_CAN_RUN_SCRIPT should only be applied to function declarations, not
+ * definitions. If you need to apply it to a definition (eg because both are
+ * generated by a macro) use MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION. (An
+ * illustrative sketch follows this comment block.)
+ *
+ * MOZ_CAN_RUN_SCRIPT can be applied to XPIDL-generated declarations by
+ * annotating the method or attribute as [can_run_script] in the .idl file.
+ *
+ * MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION: Same as MOZ_CAN_RUN_SCRIPT, but usable
+ * on a definition. If the declaration is in a header file, users of that
+ * header file may not see the annotation.
+ * MOZ_CAN_RUN_SCRIPT_BOUNDARY: Applies to functions which need to call
+ * MOZ_CAN_RUN_SCRIPT functions, but should not themselves be considered
+ * MOZ_CAN_RUN_SCRIPT. This should generally be avoided but can be used in
+ * two cases:
+ *   1) As a temporary measure to limit the scope of changes when adding
+ *      MOZ_CAN_RUN_SCRIPT. Such a use must be accompanied by a follow-up bug
+ *      to replace the MOZ_CAN_RUN_SCRIPT_BOUNDARY with MOZ_CAN_RUN_SCRIPT
+ *      and a comment linking to that bug.
+ *   2) If we can reason that the MOZ_CAN_RUN_SCRIPT callees of the function
+ *      do not in fact run script (for example, because their behavior
+ *      depends on arguments and we pass the arguments that don't allow
+ *      script execution). Such a use must be accompanied by a comment that
+ *      explains why it's OK to have the MOZ_CAN_RUN_SCRIPT_BOUNDARY, as well
+ *      as comments in the callee pointing out that if its behavior changes
+ *      the caller might need adjusting. And perhaps also a followup bug to
+ *      refactor things so the "script" and "no script" codepaths do not
+ *      share a chokepoint.
+ * Importantly, any use MUST be accompanied by a comment explaining why it's
+ * there, and should ideally have an action plan for getting rid of the
+ * MOZ_CAN_RUN_SCRIPT_BOUNDARY annotation.
+ * MOZ_MUST_OVERRIDE: Applies to all C++ member functions. All immediate
+ * subclasses must provide an exact override of this method; if a subclass
+ * does not override this method, the compiler will emit an error. This
+ * attribute is not limited to virtual methods, so if it is applied to a
+ * nonvirtual method and the subclass does not provide an equivalent
+ * definition, the compiler will emit an error.
+ * MOZ_STATIC_CLASS: Applies to all classes. Any class with this annotation
+ * is expected to live in static memory, so it is a compile-time error to use
+ * it, or an array of such objects, as the type of a variable declaration, or
+ * as a temporary object, or as the type of a new expression (unless
+ * placement new is being used). If a member of another class uses this
+ * class, or if another class inherits from this class, then it is considered
+ * to be a static class as well, although this attribute need not be provided
+ * in such cases.
+ * MOZ_STATIC_LOCAL_CLASS: Applies to all classes. Any class with this
+ * annotation is expected to be a static local variable, so it is a
+ * compile-time error to use it, or an array of such objects, or as a
+ * temporary object, or as the type of a new expression. If another class
+ * inherits from this class then it is considered to be a static local class
+ * as well, although this attribute need not be provided in such cases. It is
+ * also a compile-time error for any class with this annotation to have a
+ * non-trivial destructor.
+ * MOZ_STACK_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live on the stack, so it is a compile-time error to use it, or
+ * an array of such objects, as a global or static variable, or as the type
+ * of a new expression (unless placement new is being used). If a member of
+ * another class uses this class, or if another class inherits from this
+ * class, then it is considered to be a stack class as well, although this
+ * attribute need not be provided in such cases.
+ * MOZ_NONHEAP_CLASS: Applies to all classes. Any class with this annotation
+ * is expected to live on the stack or in static storage, so it is a
+ * compile-time error to use it, or an array of such objects, as the type of
+ * a new expression. If a member of another class uses this class, or if
+ * another class inherits from this class, then it is considered to be a
+ * non-heap class as well, although this attribute need not be provided in
+ * such cases.
+ * MOZ_HEAP_CLASS: Applies to all classes. Any class with this annotation is
+ * expected to live on the heap, so it is a compile-time error to use it, or
+ * an array of such objects, as the type of a variable declaration, or as a
+ * temporary object. If a member of another class uses this class, or if
+ * another class inherits from this class, then it is considered to be a heap
+ * class as well, although this attribute need not be provided in such cases.
+ * MOZ_NON_TEMPORARY_CLASS: Applies to all classes. Any class with this
+ * annotation is expected not to live in a temporary. If a member of another
+ * class uses this class or if another class inherits from this class, then
+ * it is considered to be a non-temporary class as well, although this
+ * attribute need not be provided in such cases.
+ * MOZ_TEMPORARY_CLASS: Applies to all classes. Any class with this
+ * annotation is expected to only live in a temporary. If another class
+ * inherits from this class, then it is considered to be a temporary class as
+ * well, although this attribute need not be provided in such cases.
+ * MOZ_RAII: Applies to all classes. Any class with this annotation is
+ * assumed to be a RAII guard, which is expected to live on the stack in an
+ * automatic allocation. It is prohibited from being allocated in a
+ * temporary, static storage, or on the heap. This is a combination of
+ * MOZ_STACK_CLASS and MOZ_NON_TEMPORARY_CLASS. (An illustrative sketch
+ * follows this comment block.)
+ * MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS: Applies to all classes that
+ * are intended to prevent introducing static initializers. This attribute
+ * currently makes it a compile-time error to instantiate these classes
+ * anywhere other than at the global scope, or as a static member of a class.
+ * In non-debug mode, it also prohibits non-trivial constructors and
+ * destructors.
+ * MOZ_TRIVIAL_CTOR_DTOR: Applies to all classes that must have both a
+ * trivial or constexpr constructor and a trivial destructor. Setting this
+ * attribute on a class makes it a compile-time error for that class to get a
+ * non-trivial constructor or destructor for any reason.
+ * MOZ_ALLOW_TEMPORARY: Applies to constructors. This indicates that using
+ * the constructor is allowed in temporary expressions, if it would have
+ * otherwise been forbidden by the type being a MOZ_NON_TEMPORARY_CLASS.
+ * Useful for constructors like Maybe(Nothing).
+ * MOZ_HEAP_ALLOCATOR: Applies to any function. This indicates that the
+ * return value is allocated on the heap, and will as a result check such
+ * allocations during MOZ_STACK_CLASS and MOZ_NONHEAP_CLASS annotation
+ * checking.
+ * MOZ_IMPLICIT: Applies to constructors. Implicit conversion constructors
+ * are disallowed by default unless they are marked as MOZ_IMPLICIT. This
+ * attribute must be used for constructors which intend to provide implicit
+ * conversions.
+ * MOZ_IS_REFPTR: Applies to class declarations of ref pointers to mark them
+ * as such for use with static-analysis.
+ * A ref pointer is an object wrapping a pointer and automatically taking
+ * care of its refcounting upon construction/destruction/transfer of
+ * ownership.
+ * This annotation implies MOZ_IS_SMARTPTR_TO_REFCOUNTED.
+ * MOZ_IS_SMARTPTR_TO_REFCOUNTED: Applies to class declarations of smart
+ * pointers to ref counted classes to mark them as such for use with
+ * static-analysis.
+ * MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT: Applies to functions. Makes it a
+ * compile time error to pass arithmetic expressions on variables to the
+ * function.
+ * MOZ_OWNING_REF: Applies to declarations of pointers to reference counted
+ * types. This attribute tells the compiler that the raw pointer is a strong
+ * reference, where ownership through methods such as AddRef and Release is
+ * managed manually. This can make the compiler ignore these pointers when
+ * validating the usage of pointers otherwise.
+ *
+ * Example uses include owned pointers inside of unions, and pointers stored
+ * in POD types where using a smart pointer class would make the object
+ * non-POD.
+ * MOZ_NON_OWNING_REF: Applies to declarations of pointers to reference
+ * counted types. This attribute tells the compiler that the raw pointer is a
+ * weak reference, which is ensured to be valid by a guarantee that the
+ * reference will be nulled before the pointer becomes invalid. This can make
+ * the compiler ignore these pointers when validating the usage of pointers
+ * otherwise.
+ *
+ * Examples include an mOwner pointer, which is nulled by the owning class's
+ * destructor, and is null-checked before dereferencing.
+ * MOZ_UNSAFE_REF: Applies to declarations of pointers to reference counted
+ * types. Occasionally there are non-owning references which are valid, but
+ * do not take the form of a MOZ_NON_OWNING_REF. Their safety may be
+ * dependent on the behaviour of API consumers. The string argument passed
+ * to this macro documents the safety conditions. This can make the compiler
+ * ignore these pointers when validating the usage of pointers elsewhere.
+ *
+ * Examples include an nsAtom* member which is known at compile time to point
+ * to a static atom which is valid throughout the lifetime of the program, or
+ * an API which stores a pointer, but doesn't take ownership over it, instead
+ * requiring the API consumer to correctly null the value before it becomes
+ * invalid.
+ *
+ * Use of this annotation is discouraged when a strong reference or one of
+ * the above two annotations can be used instead.
+ * MOZ_NO_ADDREF_RELEASE_ON_RETURN: Applies to function declarations. Makes
+ * it a compile time error to call AddRef or Release on the return value of a
+ * function. This is intended to be used with operator->() of our smart
+ * pointer classes to ensure that the refcount of an object wrapped in a
+ * smart pointer is not manipulated directly.
+ * MOZ_NEEDS_NO_VTABLE_TYPE: Applies to template class declarations. Makes it
+ * a compile time error to instantiate this template with a type parameter
+ * which has a VTable.
+ * MOZ_NON_MEMMOVABLE: Applies to class declarations for types that are not
+ * safe to be moved in memory using memmove().
+ * MOZ_NEEDS_MEMMOVABLE_TYPE: Applies to template class declarations where
+ * the template arguments are required to be safe to move in memory using
+ * memmove(). Passing MOZ_NON_MEMMOVABLE types to these templates is a
+ * compile time error.
+ * MOZ_NEEDS_MEMMOVABLE_MEMBERS: Applies to class declarations where each
+ * member must be safe to move in memory using memmove(). MOZ_NON_MEMMOVABLE
+ * types used in members of these classes are compile time errors.
+ * MOZ_NO_DANGLING_ON_TEMPORARIES: Applies to method declarations which
+ * return a pointer that is freed when the destructor of the class is called.
+ * This prevents these methods from being called on temporaries of the class,
+ * reducing risks of use-after-free.
+ * This attribute cannot be applied to && methods.
+ * In some cases, adding a deleted &&-qualified overload is too restrictive
+ * as this method should still be callable as a non-escaping argument to
+ * another function. This annotation can be used in those cases.
+ * MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS: Applies to template class
+ * declarations where an instance of the template should be considered, for
+ * static analysis purposes, to inherit any type annotations (such as
+ * MOZ_STACK_CLASS) from its template arguments.
+ * MOZ_INIT_OUTSIDE_CTOR: Applies to class member declarations. Occasionally
+ * there are class members that are not initialized in the constructor, but
+ * logic elsewhere in the class ensures they are initialized prior to use.
+ * Using this attribute on a member disables the check that this member must
+ * be initialized in constructors via list-initialization, in the constructor
+ * body, or via functions called from the constructor body.
+ * MOZ_IS_CLASS_INIT: Applies to class method declarations. Occasionally the
+ * constructor doesn't initialize all of the member variables and another
+ * function is used to initialize the rest. This marker is used to make the
+ * static analysis tool aware that the marked function is part of the
+ * initialization process and to include the marked function in the scan
+ * mechanism that determines which member variables still remain
+ * uninitialized.
+ * MOZ_NON_PARAM: Applies to types. Makes it a compile-time error to use the
+ * type as a function parameter except via pointer or reference.
+ * MOZ_NON_AUTOABLE: Applies to class declarations. Makes it a compile time
+ * error to use `auto` in place of this type in variable declarations. This
+ * is intended to be used with types which are intended to be implicitly
+ * constructed into other types before being assigned to variables.
+ * MOZ_REQUIRED_BASE_METHOD: Applies to virtual class method declarations.
+ * Sometimes derived classes override methods that need to be called by their
+ * overridden counterparts. This marker indicates that the marked method must
+ * be called by the method that it overrides.
+ * MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG: Applies to method
+ * declarations. Callers of the annotated method must return from that
+ * function within the calling block using an explicit `return` statement if
+ * the "this" value for the call is a parameter of the caller. Only calls to
+ * Constructors, references to local and member variables, and calls to
+ * functions or methods marked as MOZ_MAY_CALL_AFTER_MUST_RETURN may be made
+ * after the MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG call.
+ * MOZ_MAY_CALL_AFTER_MUST_RETURN: Applies to function or method
+ * declarations. Calls to these methods may be made in functions after calls
+ * to a MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG method.
+ * MOZ_LIFETIME_BOUND: Applies to method declarations.
+ * The result of calling these functions on temporaries may not be returned
+ * as a reference or bound to a reference variable.
+ * MOZ_UNANNOTATED/MOZ_ANNOTATED: Applies to Mutexes/Monitors and variations
+ * on them.
+ * MOZ_UNANNOTATED indicates that the Mutex/Monitor/etc hasn't been examined
+ * and annotated using macros from mfbt/ThreadSafety --
+ * MOZ_GUARDED_BY()/REQUIRES()/etc. MOZ_ANNOTATED is used in rare cases to
+ * indicate that it has been looked at, but it did not need any
+ * MOZ_GUARDED_BY()/REQUIRES()/etc (and thus static analysis knows it can
+ * ignore this Mutex/Monitor/etc).
+ */
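+/*
+ * An illustrative sketch of two annotations from the guide above (the
+ * functions and classes are hypothetical, not part of the original header;
+ * Element, RefPtr and Mutex stand in for typical Gecko types):
+ *
+ *   // MOZ_CAN_RUN_SCRIPT: the caller is annotated too, and the refcounted
+ *   // argument is held strongly for the duration of the call.
+ *   MOZ_CAN_RUN_SCRIPT void FireClickEvent(Element* aTarget);
+ *
+ *   MOZ_CAN_RUN_SCRIPT void HandleInput(Element* aRawTarget) {
+ *     RefPtr<Element> target = aRawTarget;  // strong ref across script
+ *     FireClickEvent(target);
+ *   }
+ *
+ *   // MOZ_RAII: instances may only be automatic (stack) variables;
+ *   // |new AutoLock(...)| or a bare temporary |AutoLock(m)| is rejected.
+ *   class MOZ_RAII AutoLock {
+ *    public:
+ *     explicit AutoLock(Mutex& aMutex) : mMutex(aMutex) { mMutex.Lock(); }
+ *     ~AutoLock() { mMutex.Unlock(); }
+ *
+ *    private:
+ *     Mutex& mMutex;
+ *   };
+ */
+
+// gcc emits a nuisance warning -Wignored-attributes because attributes do
+// not affect mangled names, and therefore template arguments do not
+// propagate their attributes. It is rare that this would affect anything in
+// practice, and most compilers are silent about it. Similarly, -Wattributes
+// complains about attributes being ignored during template instantiation.
+//
+// Be conservative and only suppress the warning when running in a
+// configuration where it would be emitted, namely when compiling with the
+// XGILL_PLUGIN for the rooting hazard analysis (which runs under gcc.) If we
+// end up wanting these attributes in general GCC builds, change this to
+// something like
+//
+// #if defined(__GNUC__) && ! defined(__clang__)
+//
+#  ifdef XGILL_PLUGIN
+#    pragma GCC diagnostic ignored "-Wignored-attributes"
+#    pragma GCC diagnostic ignored "-Wattributes"
+#  endif
+
+#  if defined(MOZ_CLANG_PLUGIN) || defined(XGILL_PLUGIN)
+#    define MOZ_CAN_RUN_SCRIPT __attribute__((annotate("moz_can_run_script")))
+#    define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION \
+      __attribute__((annotate("moz_can_run_script"))) \
+          __attribute__((annotate("moz_can_run_script_for_definition")))
+#    define MOZ_CAN_RUN_SCRIPT_BOUNDARY \
+      __attribute__((annotate("moz_can_run_script_boundary")))
+#    define MOZ_MUST_OVERRIDE __attribute__((annotate("moz_must_override")))
+#    define MOZ_STATIC_CLASS __attribute__((annotate("moz_global_class")))
+#    define MOZ_STATIC_LOCAL_CLASS \
+      __attribute__((annotate("moz_static_local_class"))) \
+          __attribute__((annotate("moz_trivial_dtor")))
+#    define MOZ_STACK_CLASS __attribute__((annotate("moz_stack_class")))
+#    define MOZ_NONHEAP_CLASS __attribute__((annotate("moz_nonheap_class")))
+#    define MOZ_HEAP_CLASS __attribute__((annotate("moz_heap_class")))
+#    define MOZ_NON_TEMPORARY_CLASS \
+      __attribute__((annotate("moz_non_temporary_class")))
+#    define MOZ_TEMPORARY_CLASS __attribute__((annotate("moz_temporary_class")))
+#    define MOZ_TRIVIAL_CTOR_DTOR \
+      __attribute__((annotate("moz_trivial_ctor_dtor")))
+#    define MOZ_ALLOW_TEMPORARY __attribute__((annotate("moz_allow_temporary")))
+#    ifdef DEBUG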
+/* in debug builds, these classes do have non-trivial constructors. */
+#      define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS \
+        __attribute__((annotate("moz_global_class")))
+#    else
+#      define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS \
+        __attribute__((annotate("moz_global_class"))) MOZ_TRIVIAL_CTOR_DTOR
+#    endif
+#    define MOZ_IMPLICIT __attribute__((annotate("moz_implicit")))
+#    define MOZ_IS_SMARTPTR_TO_REFCOUNTED \
+      __attribute__((annotate("moz_is_smartptr_to_refcounted")))
+#    define MOZ_IS_REFPTR MOZ_IS_SMARTPTR_TO_REFCOUNTED
+#    define MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT \
+      __attribute__((annotate("moz_no_arith_expr_in_arg")))
+#    define MOZ_OWNING_REF __attribute__((annotate("moz_owning_ref")))
+#    define MOZ_NON_OWNING_REF __attribute__((annotate("moz_non_owning_ref")))
+#    define MOZ_UNSAFE_REF(reason) __attribute__((annotate("moz_unsafe_ref")))
+#    define MOZ_NO_ADDREF_RELEASE_ON_RETURN \
+      __attribute__((annotate("moz_no_addref_release_on_return")))
+#    define MOZ_NEEDS_NO_VTABLE_TYPE \
+      __attribute__((annotate("moz_needs_no_vtable_type")))
+#    define MOZ_NON_MEMMOVABLE __attribute__((annotate("moz_non_memmovable")))
+#    define MOZ_NEEDS_MEMMOVABLE_TYPE \
+      __attribute__((annotate("moz_needs_memmovable_type")))
+#    define MOZ_NEEDS_MEMMOVABLE_MEMBERS \
+      __attribute__((annotate("moz_needs_memmovable_members")))
+#    define MOZ_NO_DANGLING_ON_TEMPORARIES \
+      __attribute__((annotate("moz_no_dangling_on_temporaries")))
+#    define MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS \
+      __attribute__(( \
+          annotate("moz_inherit_type_annotations_from_template_args")))
+#    define MOZ_NON_AUTOABLE __attribute__((annotate("moz_non_autoable")))
+#    define MOZ_INIT_OUTSIDE_CTOR
+#    define MOZ_IS_CLASS_INIT
+#    define MOZ_NON_PARAM __attribute__((annotate("moz_non_param")))
+#    define MOZ_REQUIRED_BASE_METHOD \
+      __attribute__((annotate("moz_required_base_method")))
+#    define MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG \
+      __attribute__((annotate("moz_must_return_from_caller_if_this_is_arg")))
+#    define MOZ_MAY_CALL_AFTER_MUST_RETURN \
+      __attribute__((annotate("moz_may_call_after_must_return")))
+#    define MOZ_LIFETIME_BOUND __attribute__((annotate("moz_lifetime_bound")))
+#    define MOZ_KNOWN_LIVE __attribute__((annotate("moz_known_live")))
+#    ifndef XGILL_PLUGIN
+#      define MOZ_UNANNOTATED __attribute__((annotate("moz_unannotated")))
+#      define MOZ_ANNOTATED __attribute__((annotate("moz_annotated")))
+#    else
+#      define MOZ_UNANNOTATED /* nothing */
+#      define MOZ_ANNOTATED /* nothing */
+#    endif
+
+/*
+ * It turns out that clang doesn't like void func() __attribute__ {} without
+ * a warning, so use pragmas to disable the warning.
+ */
+#    ifdef __clang__
+#      define MOZ_HEAP_ALLOCATOR \
+        _Pragma("clang diagnostic push") \
+            _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+                __attribute__((annotate("moz_heap_allocator"))) \
+                    _Pragma("clang diagnostic pop")
+#    else
+#      define MOZ_HEAP_ALLOCATOR __attribute__((annotate("moz_heap_allocator")))
+#    endif
+#  else
+#    define MOZ_CAN_RUN_SCRIPT /* nothing */
+#    define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION /* nothing */
+#    define MOZ_CAN_RUN_SCRIPT_BOUNDARY /* nothing */
+#    define MOZ_MUST_OVERRIDE /* nothing */
+#    define MOZ_STATIC_CLASS /* nothing */
+#    define MOZ_STATIC_LOCAL_CLASS /* nothing */
+#    define MOZ_STACK_CLASS /* nothing */
+#    define MOZ_NONHEAP_CLASS /* nothing */
+#    define MOZ_HEAP_CLASS /* nothing */
+#    define MOZ_NON_TEMPORARY_CLASS /* nothing */
+#    define MOZ_TEMPORARY_CLASS /* nothing */
+#    define MOZ_TRIVIAL_CTOR_DTOR /* nothing */
+#    define MOZ_ALLOW_TEMPORARY /* nothing */
+#    define MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS /* nothing */
+#    define MOZ_IMPLICIT /* nothing */
+#    define MOZ_IS_SMARTPTR_TO_REFCOUNTED /* nothing */
+#    define MOZ_IS_REFPTR /* nothing */
+#    define MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT /* nothing */
+#    define MOZ_HEAP_ALLOCATOR /* nothing */
+#    define MOZ_OWNING_REF /* nothing */
+#    define MOZ_NON_OWNING_REF /* nothing */
+#    define MOZ_UNSAFE_REF(reason) /* nothing */
+#    define MOZ_NO_ADDREF_RELEASE_ON_RETURN /* nothing */
+#    define MOZ_NEEDS_NO_VTABLE_TYPE /* nothing */
+#    define MOZ_NON_MEMMOVABLE /* nothing */
+#    define MOZ_NEEDS_MEMMOVABLE_TYPE /* nothing */
+#    define MOZ_NEEDS_MEMMOVABLE_MEMBERS /* nothing */
+#    define MOZ_NO_DANGLING_ON_TEMPORARIES /* nothing */
+#    define MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS /* nothing */
+#    define MOZ_INIT_OUTSIDE_CTOR /* nothing */
+#    define MOZ_IS_CLASS_INIT /* nothing */
+#    define MOZ_NON_PARAM /* nothing */
+#    define MOZ_NON_AUTOABLE /* nothing */
+#    define MOZ_REQUIRED_BASE_METHOD /* nothing */
+#    define MOZ_MUST_RETURN_FROM_CALLER_IF_THIS_IS_ARG /* nothing */
+#    define MOZ_MAY_CALL_AFTER_MUST_RETURN /* nothing */
+#    define MOZ_LIFETIME_BOUND /* nothing */
+#    define MOZ_KNOWN_LIVE /* nothing */
+#    define MOZ_UNANNOTATED /* nothing */
+#    define MOZ_ANNOTATED /* nothing */
+#  endif /* defined(MOZ_CLANG_PLUGIN) || defined(XGILL_PLUGIN) */
+
+#  define MOZ_RAII MOZ_NON_TEMPORARY_CLASS MOZ_STACK_CLASS
+
+// XGILL_PLUGIN is used for the GC rooting hazard analysis, which compiles
+// with gcc. gcc has different rules governing __attribute__((...)) placement,
+// so some attributes will error out when used in the source code where clang
+// expects them to be. Remove the problematic annotations when needed.
+//
+// The placement of c++11 [[...]] attributes is more flexible and defined by
+// a spec, so it would be nice to switch to those for the problematic cases.
+// Unfortunately, the official spec provides *no* way to annotate a lambda
+// function, which is one source of the difficulty here. It appears that this
+// will be fixed in c++23: https://github.com/cplusplus/papers/issues/882
+
+#  ifdef XGILL_PLUGIN
+
+#    undef MOZ_MUST_OVERRIDE
+#    undef MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION
+#    undef MOZ_CAN_RUN_SCRIPT
+#    undef MOZ_CAN_RUN_SCRIPT_BOUNDARY
+#    define MOZ_MUST_OVERRIDE /* nothing */
+#    define MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION /* nothing */
+#    define MOZ_CAN_RUN_SCRIPT /* nothing */
+#    define MOZ_CAN_RUN_SCRIPT_BOUNDARY /* nothing */
+
+#  endif
+
+#endif /* __cplusplus */
+
+/**
+ * Printf style formats. MOZ_FORMAT_PRINTF and MOZ_FORMAT_WPRINTF can be used
+ * to annotate a function or method that is "printf/wprintf-like"; this will
+ * let (some) compilers check that the arguments match the template string.
+ *
+ * This macro takes two arguments. The first argument is the argument number
+ * of the template string. The second argument is the argument number of the
+ * '...' argument holding the arguments.
+ *
+ * Argument numbers start at 1. Note that the implicit "this" argument of a
+ * non-static member function counts as an argument.
+ *
+ * So, for a simple case like:
+ *   void print_something (int whatever, const char *fmt, ...);
+ * The corresponding annotation would be
+ *   MOZ_FORMAT_PRINTF(2, 3)
+ * However, if "print_something" were a non-static member function, then the
+ * annotation would be:
+ *   MOZ_FORMAT_PRINTF(3, 4)
+ *
+ * The second argument should be 0 for vprintf-like functions; that is, those
+ * taking a va_list argument.
+ *
+ * Note that the checking is limited to standards-conforming printf-likes,
+ * and in particular this should not be used for PR_snprintf and friends,
+ * which are "printf-like" but which assign different meanings to the various
+ * formats.
+ *
+ * MinGW requires special handling due to different format specifiers on
+ * different platforms. The macro __MINGW_PRINTF_FORMAT maps to either
+ * gnu_printf or ms_printf depending on where we are compiling to avoid
+ * warnings on format specifiers that are legal.
+ *
+ * At time of writing MinGW has no wide equivalent to __MINGW_PRINTF_FORMAT;
+ * therefore __MINGW_WPRINTF_FORMAT has been implemented following the same
+ * pattern seen in MinGW's source.
+ */
+#ifdef __MINGW32__
+#  define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck) \
+    __attribute__((format(__MINGW_PRINTF_FORMAT, stringIndex, firstToCheck)))
+#  ifndef __MINGW_WPRINTF_FORMAT
+#    if defined(__clang__)
+#      define __MINGW_WPRINTF_FORMAT wprintf
+#    elif defined(_UCRT) || __USE_MINGW_ANSI_STDIO
+#      define __MINGW_WPRINTF_FORMAT gnu_wprintf
+#    else
+#      define __MINGW_WPRINTF_FORMAT ms_wprintf
+#    endif
+#  endif
+#  define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck) \
+    __attribute__((format(__MINGW_WPRINTF_FORMAT, stringIndex, firstToCheck)))
+#elif __GNUC__ || __clang__
+#  define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck) \
+    __attribute__((format(printf, stringIndex, firstToCheck)))
+#  define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck) \
+    __attribute__((format(wprintf, stringIndex, firstToCheck)))
+#else
+#  define MOZ_FORMAT_PRINTF(stringIndex, firstToCheck)
+#  define MOZ_FORMAT_WPRINTF(stringIndex, firstToCheck)
+#endif
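+/*
+ * An illustrative sketch (hypothetical logging functions, not part of the
+ * original header), showing the free-function and va_list cases described
+ * above:
+ *
+ *   void LogMessage(int aLevel, const char* aFmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+ *   void VLogMessage(const char* aFmt, va_list aArgs) MOZ_FORMAT_PRINTF(1, 0);
+ */
+
+/**
+ * To manually declare an XPCOM ABI-compatible virtual function, the
+ * following macros can be used to handle the non-standard ABI used on
+ * Windows for COM compatibility.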
+ * E.g.:
+ *
+ *   virtual ReturnType MOZ_XPCOM_ABI foo();
+ */
+#if defined(XP_WIN)
+#  define MOZ_XPCOM_ABI __stdcall
+#else
+#  define MOZ_XPCOM_ABI
+#endif
+
+/**
+ * MSVC / clang-cl don't optimize empty bases correctly unless we explicitly
+ * tell them to; see:
+ *
+ * https://stackoverflow.com/questions/12701469/why-is-the-empty-base-class-optimization-ebo-is-not-working-in-msvc
+ * https://devblogs.microsoft.com/cppblog/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+ */
+#if defined(_MSC_VER)
+#  define MOZ_EMPTY_BASES __declspec(empty_bases)
+#else
+#  define MOZ_EMPTY_BASES
+#endif
+
+#endif /* mozilla_Attributes_h */
diff --git a/mfbt/BinarySearch.h b/mfbt/BinarySearch.h
new file mode 100644
index 0000000000..e5de93a259
--- /dev/null
+++ b/mfbt/BinarySearch.h
@@ -0,0 +1,249 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BinarySearch_h
+#define mozilla_BinarySearch_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/CompactPair.h"
+
+#include <stddef.h>
+
+namespace mozilla {
+
+/*
+ * The BinarySearch() algorithm searches the given container |aContainer| over
+ * the sorted index range [aBegin, aEnd) for an index |i| where
+ * |aContainer[i] == aTarget|.
+ * If such an index |i| is found, BinarySearch returns |true| and the index is
+ * returned via the outparam |aMatchOrInsertionPoint|. If no index is found,
+ * BinarySearch returns |false| and the outparam returns the first index in
+ * [aBegin, aEnd] where |aTarget| can be inserted to maintain sorted order.
+ *
+ * Example:
+ *
+ *   Vector<int> sortedInts = ...
+ *
+ *   size_t match;
+ *   if (BinarySearch(sortedInts, 0, sortedInts.length(), 13, &match)) {
+ *     printf("found 13 at %lu\n", match);
+ *   }
+ *
+ * The BinarySearchIf() version behaves similarly, but takes |aComparator|, a
+ * functor to compare the values with, instead of a value to find.
+ * That functor should take one argument - the value to compare - and return
+ * an |int| with the comparison result:
+ *
+ *   * 0, if the argument is equal to,
+ *   * less than 0, if the argument is greater than,
+ *   * greater than 0, if the argument is less than
+ *
+ * the value.
+ *
+ * Example:
+ *
+ *   struct Comparator {
+ *     int operator()(int aVal) const {
+ *       if (mTarget < aVal) { return -1; }
+ *       if (mTarget > aVal) { return 1; }
+ *       return 0;
+ *     }
+ *     explicit Comparator(int aTarget) : mTarget(aTarget) {}
+ *     const int mTarget;
+ *   };
+ *
+ *   Vector<int> sortedInts = ...
+ *
+ *   size_t match;
+ *   if (BinarySearchIf(sortedInts, 0, sortedInts.length(), Comparator(13),
+ *                      &match)) {
+ *     printf("found 13 at %lu\n", match);
+ *   }
+ */
+
+template <typename Container, typename Comparator>
+bool BinarySearchIf(const Container& aContainer, size_t aBegin, size_t aEnd,
+                    const Comparator& aCompare,
+                    size_t* aMatchOrInsertionPoint) {
+  MOZ_ASSERT(aBegin <= aEnd);
+
+  size_t low = aBegin;
+  size_t high = aEnd;
+  while (high != low) {
+    size_t middle = low + (high - low) / 2;
+
+    // Allow any intermediate type so long as it provides a suitable ordering
+    // relation.
+    const int result = aCompare(aContainer[middle]);
+
+    if (result == 0) {
+      *aMatchOrInsertionPoint = middle;
+      return true;
+    }
+
+    if (result < 0) {
+      high = middle;
+    } else {
+      low = middle + 1;
+    }
+  }
+
+  *aMatchOrInsertionPoint = low;
+  return false;
+}
+
+namespace detail {
+
+template <typename T>
+class BinarySearchDefaultComparator {
+ public:
+  explicit BinarySearchDefaultComparator(const T& aTarget)
+      : mTarget(aTarget) {}
+
+  template <typename U>
+  int operator()(const U& aVal) const {
+    if (mTarget == aVal) {
+      return 0;
+    }
+
+    if (mTarget < aVal) {
+      return -1;
+    }
+
+    return 1;
+  }
+
+ private:
+  const T& mTarget;
+};
+
+}  // namespace detail
+
+template <typename Container, typename T>
+bool BinarySearch(const Container& aContainer, size_t aBegin, size_t aEnd,
+                  T aTarget, size_t* aMatchOrInsertionPoint) {
+  return BinarySearchIf(aContainer, aBegin, aEnd,
+                        detail::BinarySearchDefaultComparator<T>(aTarget),
+                        aMatchOrInsertionPoint);
+}
+
+/*
+ * LowerBound(), UpperBound(), and EqualRange() are equivalent to
+ * std::lower_bound(), std::upper_bound(), and std::equal_range()
+ * respectively.
+ *
+ * LowerBound() returns an index pointing to the first element in the range
+ * in which each element is considered *not less than* the given value passed
+ * via |aCompare|, or the length of |aContainer| if no such element is found.
+ *
+ * UpperBound() returns an index pointing to the first element in the range
+ * in which each element is considered *greater than* the given value passed
+ * via |aCompare|, or the length of |aContainer| if no such element is found.
+ *
+ * EqualRange() returns a range [first, second) containing all elements
+ * considered equivalent to the given value via |aCompare|. If you need
+ * either the first or last index of the range, LowerBound() or UpperBound(),
+ * which is slightly faster than EqualRange(), should suffice.
+ *
+ * Example (another example is given in TestBinarySearch.cpp):
+ *
+ *   Vector<const char*> sortedStrings = ...
+ *
+ *   struct Comparator {
+ *     const nsACString& mStr;
+ *     explicit Comparator(const nsACString& aStr) : mStr(aStr) {}
+ *     int32_t operator()(const char* aVal) const {
+ *       return Compare(mStr, nsDependentCString(aVal));
+ *     }
+ *   };
+ *
+ *   auto bounds = EqualRange(sortedStrings, 0, sortedStrings.length(),
+ *                            Comparator("needle I'm looking for"_ns));
+ *   printf("Found the range [%zd %zd)\n", bounds.first(), bounds.second());
+ */
+template <typename Container, typename Comparator>
+size_t LowerBound(const Container& aContainer, size_t aBegin, size_t aEnd,
+                  const Comparator& aCompare) {
+  MOZ_ASSERT(aBegin <= aEnd);
+
+  size_t low = aBegin;
+  size_t high = aEnd;
+  while (high != low) {
+    size_t middle = low + (high - low) / 2;
+
+    // Allow any intermediate type so long as it provides a suitable ordering
+    // relation.
+    const int result = aCompare(aContainer[middle]);
+
+    // The range returned by LowerBound does include elements equivalent to
+    // the given value, i.e. elements for which aCompare(element) == 0.
+    if (result <= 0) {
+      high = middle;
+    } else {
+      low = middle + 1;
+    }
+  }
+
+  return low;
+}
+
+template <typename Container, typename Comparator>
+size_t UpperBound(const Container& aContainer, size_t aBegin, size_t aEnd,
+                  const Comparator& aCompare) {
+  MOZ_ASSERT(aBegin <= aEnd);
+
+  size_t low = aBegin;
+  size_t high = aEnd;
+  while (high != low) {
+    size_t middle = low + (high - low) / 2;
+
+    // Allow any intermediate type so long as it provides a suitable ordering
+    // relation.
+    const int result = aCompare(aContainer[middle]);
+
+    // The range returned by UpperBound does NOT include elements equivalent
+    // to the given value, i.e. elements for which aCompare(element) == 0.
+    if (result < 0) {
+      high = middle;
+    } else {
+      low = middle + 1;
+    }
+  }
+
+  return high;
+}
+
+template <typename Container, typename Comparator>
+CompactPair<size_t, size_t> EqualRange(const Container& aContainer,
+                                       size_t aBegin, size_t aEnd,
+                                       const Comparator& aCompare) {
+  MOZ_ASSERT(aBegin <= aEnd);
+
+  size_t low = aBegin;
+  size_t high = aEnd;
+  while (high != low) {
+    size_t middle = low + (high - low) / 2;
+
+    // Allow any intermediate type so long as it provides a suitable ordering
+    // relation.
+    const int result = aCompare(aContainer[middle]);
+
+    if (result < 0) {
+      high = middle;
+    } else if (result > 0) {
+      low = middle + 1;
+    } else {
+      return MakeCompactPair(
+          LowerBound(aContainer, low, middle, aCompare),
+          UpperBound(aContainer, middle + 1, high, aCompare));
+    }
+  }
+
+  return MakeCompactPair(low, high);
+}
+
+}  // namespace mozilla
+
+#endif  // mozilla_BinarySearch_h
diff --git a/mfbt/BitSet.h b/mfbt/BitSet.h
new file mode 100644
index 0000000000..7c03fb87ce
--- /dev/null
+++ b/mfbt/BitSet.h
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BitSet_h
+#define mozilla_BitSet_h
+
+#include "mozilla/Array.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Span.h"
+
+namespace mozilla {
+
+/**
+ * An object like std::bitset but which provides access to the underlying
+ * storage.
+ *
+ * The limited API is due to expedience only; feel free to flesh out any
+ * std::bitset-like members.
+ */
+template <size_t N, typename Word = size_t>
+class BitSet {
+  static_assert(std::is_unsigned_v<Word>,
+                "The Word type must be an unsigned integral type");
+
+ private:
+  static constexpr size_t kBitsPerWord = 8 * sizeof(Word);
+  static constexpr size_t kNumWords = (N + kBitsPerWord - 1) / kBitsPerWord;
+  static constexpr size_t kPaddingBits = (kNumWords * kBitsPerWord) - N;
+  static constexpr Word kPaddingMask = Word(-1) >> kPaddingBits;
+
+  // The zeroth bit in the bitset is the least significant bit of mStorage[0].
+  Array<Word, kNumWords> mStorage;
+
+  constexpr void ResetPaddingBits() {
+    if constexpr (kPaddingBits != 0) {
+      mStorage[kNumWords - 1] &= kPaddingMask;
+    }
+  }
+
+ public:
+  class Reference {
+   public:
+    Reference(BitSet& aBitSet, size_t aPos)
+        : mBitSet(aBitSet), mPos(aPos) {}
+
+    Reference& operator=(bool aValue) {
+      auto bit = Word(1) << (mPos % kBitsPerWord);
+      auto& word = mBitSet.mStorage[mPos / kBitsPerWord];
+      word = (word & ~bit) | (aValue ? bit : 0);
+      return *this;
+    }
+
+    MOZ_IMPLICIT operator bool() const { return mBitSet.Test(mPos); }
+
+   private:
+    BitSet& mBitSet;
+    size_t mPos;
+  };
+
+  constexpr BitSet() : mStorage() {}
+
+  BitSet(const BitSet& aOther) { *this = aOther; }
+
+  BitSet& operator=(const BitSet& aOther) {
+    PodCopy(mStorage.begin(), aOther.mStorage.begin(), kNumWords);
+    return *this;
+  }
+
+  explicit BitSet(Span<Word, kNumWords> aStorage) {
+    PodCopy(mStorage.begin(), aStorage.Elements(), kNumWords);
+  }
+
+  static constexpr size_t Size() { return N; }
+
+  constexpr bool Test(size_t aPos) const {
+    MOZ_ASSERT(aPos < N);
+    return mStorage[aPos / kBitsPerWord] & (Word(1) << (aPos % kBitsPerWord));
+  }
+
+  constexpr bool IsEmpty() const {
+    for (const Word& word : mStorage) {
+      if (word) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  explicit constexpr operator bool() { return !IsEmpty(); }
+
+  constexpr bool operator[](size_t aPos) const { return Test(aPos); }
+
+  Reference operator[](size_t aPos) {
+    MOZ_ASSERT(aPos < N);
+    return {*this, aPos};
+  }
+
+  BitSet operator|(const BitSet& aOther) {
+    BitSet result = *this;
+    result |= aOther;
+    return result;
+  }
+
+  BitSet& operator|=(const BitSet& aOther) {
+    for (size_t i = 0; i < ArrayLength(mStorage); i++) {
+      mStorage[i] |= aOther.mStorage[i];
+    }
+    return *this;
+  }
+
+  BitSet operator~() const {
+    BitSet result = *this;
+    result.Flip();
+    return result;
+  }
+
+  BitSet& operator&=(const BitSet& aOther) {
+    for (size_t i = 0; i < ArrayLength(mStorage); i++) {
+      mStorage[i] &= aOther.mStorage[i];
+    }
+    return *this;
+  }
+
+  BitSet operator&(const BitSet& aOther) const {
+    BitSet result = *this;
+    result &= aOther;
+    return result;
+  }
+
+  bool operator==(const BitSet& aOther) const {
+    return mStorage == aOther.mStorage;
+  }
+
+  size_t Count() const {
+    size_t count = 0;
+
+    for (const Word& word : mStorage) {
+      if constexpr (kBitsPerWord > 32) {
+        count += CountPopulation64(word);
+      } else {
+        count += CountPopulation32(word);
+      }
+    }
+
+    return count;
+  }
+
+  // Set all bits to false.
+  void ResetAll() { PodArrayZero(mStorage); }
+
+  // Set all bits to true.
+  void SetAll() {
+    memset(mStorage.begin(), 0xff, kNumWords * sizeof(Word));
+    ResetPaddingBits();
+  }
+
+  void Flip() {
+    for (Word& word : mStorage) {
+      word = ~word;
+    }
+
+    ResetPaddingBits();
+  }
+
+  Span<Word, kNumWords> Storage() { return mStorage; }
+
+  Span<const Word, kNumWords> Storage() const { return mStorage; }
+};
+
+}  // namespace mozilla
+
+#endif  // mozilla_BitSet_h
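+
+/*
+ * An illustrative sketch of the API above (the variable names are
+ * hypothetical, not part of the original header):
+ *
+ *   mozilla::BitSet<70> live;       // 70 bits, stored in two 64-bit words
+ *   live[3] = true;                 // assignment goes through Reference
+ *   MOZ_ASSERT(live.Test(3));
+ *   MOZ_ASSERT(live.Count() == 1);
+ *   live.Flip();                    // padding bits stay cleared
+ *   MOZ_ASSERT(live.Count() == 69);
+ */
diff --git a/mfbt/BloomFilter.h b/mfbt/BloomFilter.h
new file mode 100644
index 0000000000..08882c4d63
--- /dev/null
+++ b/mfbt/BloomFilter.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A counting Bloom filter implementation. This allows consumers to
+ * do fast probabilistic "is item X in set Y?" testing which will
+ * never answer "no" when the correct answer is "yes" (but might
+ * incorrectly answer "yes" when the correct answer is "no").
+ */
+
+#ifndef mozilla_BloomFilter_h
+#define mozilla_BloomFilter_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Likely.h"
+
+#include <stdint.h>
+#include <string.h>
+
+namespace mozilla {
+
+/*
+ * This class implements a classic Bloom filter as described at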
+ * <http://en.wikipedia.org/wiki/Bloom_filter>. This allows quick
+ * probabilistic answers to the question "is object X in set Y?" where the
+ * contents of Y might not be time-invariant. The probabilistic nature of the
+ * test means that sometimes the answer will be "yes" when it should be "no".
+ * If the answer is "no", then X is guaranteed not to be in Y.
+ *
+ * The filter is parametrized on KeySize, which is the size of the key
+ * generated by each of the hash functions used by the filter, in bits, and
+ * the type of object T being added and removed. T must implement a
+ * |uint32_t hash() const| method which returns a uint32_t hash key that will
+ * be used to generate the two separate hash functions for the Bloom filter.
+ * This hash key MUST be well-distributed for good results! KeySize is not
+ * allowed to be larger than 16.
+ *
+ * The filter uses exactly 2**KeySize bits (2**(KeySize-3) bytes) of memory.
+ * From now on we will refer to the memory used by the filter as M.
+ *
+ * The expected rate of incorrect "yes" answers depends on M and on the
+ * number N of objects in set Y. As long as N is small compared to M, the
+ * rate of such answers is expected to be approximately 4*(N/M)**2 for this
+ * filter. In practice, if Y has a few hundred elements then using a KeySize
+ * of 12 gives a reasonably low incorrect answer rate. A KeySize of 12 has
+ * the additional benefit of using exactly one page for the filter in typical
+ * hardware configurations.
+ */
+template <unsigned KeySize, class T>
+class BitBloomFilter {
+  /*
+   * A Bloom filter with single-bit slots. For now we assume that having two
+   * hash functions is enough, but we may revisit that decision later.
+   *
+   * The filter uses an array with 2**KeySize entries.
+   *
+   * Assuming a well-distributed hash function, a Bloom filter with array
+   * size M containing N elements and using k hash functions has expected
+   * false positive rate exactly
+   *
+   * $  (1 - (1 - 1/M)^{kN})^k  $
+   *
+   * because each array slot has a
+   *
+   * $  (1 - 1/M)^{kN}  $
+   *
+   * chance of being 0, and the expected false positive rate is the
+   * probability that all of the k hash functions will hit a nonzero slot.
+   *
+   * For reasonable assumptions (M large, kN large, which should both hold if
+   * we're worried about false positives) about M and kN this becomes
+   * approximately
+   *
+   * $$  (1 - \exp(-kN/M))^k  $$
+   *
+   * For our special case of k == 2, that's $(1 - \exp(-2N/M))^2$, or in
+   * other words
+   *
+   * $$  N/M = -0.5 * \ln(1 - \sqrt(r))  $$
+   *
+   * where r is the false positive rate. This can be used to compute the
+   * desired KeySize for a given load N and false positive rate r.
+   *
+   * If N/M is assumed small, then the false positive rate can further be
+   * approximated as 4*N^2/M^2. So increasing KeySize by 1, which doubles M,
+   * reduces the false positive rate by about a factor of 4, and a false
+   * positive rate of 1% corresponds to about M/N == 20.
+   *
+   * What this means in practice is that for a few hundred keys using a
+   * KeySize of 12 gives false positive rates on the order of 0.25-4%.
+   *
+   * Similarly, using a KeySize of 10 would lead to a 4% false positive rate
+   * for N == 100 and to quite bad false positive rates for larger N.
+   */
+ public:
+  BitBloomFilter() {
+    static_assert(KeySize >= 3, "KeySize too small");
+    static_assert(KeySize <= kKeyShift, "KeySize too big");
+
+    // XXX: Should we have a custom operator new using calloc instead and
+    // require that we're allocated via the operator?
+    clear();
+  }
+
+  /*
+   * Clear the filter. This should be done before reusing it.
+   */
+  void clear();
+
+  /*
+   * Add an item to the filter.
+   */
+  void add(const T* aValue);
+
+  /*
+   * Check whether the filter might contain an item. This can sometimes
+   * return true even if the item is not in the filter, but will never return
+   * false for items that are actually in the filter.
+   */
+  bool mightContain(const T* aValue) const;
+
+  /*
+   * Methods for add/contain when we already have a hash computed
+   */
+  void add(uint32_t aHash);
+  bool mightContain(uint32_t aHash) const;
+
+ private:
+  static const size_t kArraySize = (1 << (KeySize - 3));
+  static const uint32_t kKeyMask = (1 << KeySize) - 1;
+  static const uint32_t kKeyShift = 16;
+
+  static uint32_t hash1(uint32_t aHash) { return aHash & kKeyMask; }
+  static uint32_t hash2(uint32_t aHash) {
+    return (aHash >> kKeyShift) & kKeyMask;
+  }
+
+  bool getSlot(uint32_t aHash) const {
+    uint32_t index = aHash / 8;
+    uint8_t shift = aHash % 8;
+    uint8_t mask = 1 << shift;
+    return !!(mBits[index] & mask);
+  }
+
+  void setSlot(uint32_t aHash) {
+    uint32_t index = aHash / 8;
+    uint8_t shift = aHash % 8;
+    uint8_t bit = 1 << shift;
+    mBits[index] |= bit;
+  }
+
+  bool getFirstSlot(uint32_t aHash) const { return getSlot(hash1(aHash)); }
+  bool getSecondSlot(uint32_t aHash) const { return getSlot(hash2(aHash)); }
+
+  void setFirstSlot(uint32_t aHash) { setSlot(hash1(aHash)); }
+  void setSecondSlot(uint32_t aHash) { setSlot(hash2(aHash)); }
+
+  uint8_t mBits[kArraySize];
+};
+
+template <unsigned KeySize, class T>
+inline void BitBloomFilter<KeySize, T>::clear() {
+  memset(mBits, 0, kArraySize);
+}
+
+template <unsigned KeySize, class T>
+inline void BitBloomFilter<KeySize, T>::add(uint32_t aHash) {
+  setFirstSlot(aHash);
+  setSecondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void BitBloomFilter<KeySize, T>::add(const T* aValue) {
+  uint32_t hash = aValue->hash();
+  return add(hash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool BitBloomFilter<KeySize, T>::mightContain(
+    uint32_t aHash) const {
+  // Check that all the slots for this hash contain something
+  return getFirstSlot(aHash) && getSecondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool BitBloomFilter<KeySize, T>::mightContain(
+    const T* aValue) const {
+  uint32_t hash = aValue->hash();
+  return mightContain(hash);
+}
+
+/*
+ * This class implements a counting Bloom filter as described at
+ * <http://en.wikipedia.org/wiki/Bloom_filter#Counting_filters>, with
+ * 8-bit counters.
+ *
+ * Compared to `BitBloomFilter`, this class supports the 'remove' operation.
+ *
+ * The filter uses exactly 2**KeySize bytes of memory.
+ *
+ * Other characteristics are the same as BitBloomFilter.
+ */
+template <unsigned KeySize, class T>
+class CountingBloomFilter {
+ public:
+  CountingBloomFilter() {
+    static_assert(KeySize <= kKeyShift, "KeySize too big");
+
+    clear();
+  }
+
+  /*
+   * Clear the filter. This should be done before reusing it, because just
+   * removing all items doesn't clear counters that hit the upper bound.
+   */
+  void clear();
+
+  /*
+   * Add an item to the filter.
+   */
+  void add(const T* aValue);
+
+  /*
+   * Remove an item from the filter.
+   */
+  void remove(const T* aValue);
+
+  /*
+   * Check whether the filter might contain an item. This can sometimes
+   * return true even if the item is not in the filter, but will never return
+   * false for items that are actually in the filter.
+   */
+  bool mightContain(const T* aValue) const;
+
+  /*
+   * Methods for add/remove/contain when we already have a hash computed
+   */
+  void add(uint32_t aHash);
+  void remove(uint32_t aHash);
+  bool mightContain(uint32_t aHash) const;
+
+ private:
+  static const size_t kArraySize = (1 << KeySize);
+  static const uint32_t kKeyMask = (1 << KeySize) - 1;
+  static const uint32_t kKeyShift = 16;
+
+  static uint32_t hash1(uint32_t aHash) { return aHash & kKeyMask; }
+  static uint32_t hash2(uint32_t aHash) {
+    return (aHash >> kKeyShift) & kKeyMask;
+  }
+
+  uint8_t& firstSlot(uint32_t aHash) { return mCounters[hash1(aHash)]; }
+  uint8_t& secondSlot(uint32_t aHash) { return mCounters[hash2(aHash)]; }
+
+  const uint8_t& firstSlot(uint32_t aHash) const {
+    return mCounters[hash1(aHash)];
+  }
+  const uint8_t& secondSlot(uint32_t aHash) const {
+    return mCounters[hash2(aHash)];
+  }
+
+  static bool full(const uint8_t& aSlot) { return aSlot == UINT8_MAX; }
+
+  uint8_t mCounters[kArraySize];
+};
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::clear() {
+  memset(mCounters, 0, kArraySize);
+}
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::add(uint32_t aHash) {
+  uint8_t& slot1 = firstSlot(aHash);
+  if (MOZ_LIKELY(!full(slot1))) {
+    ++slot1;
+  }
+  uint8_t& slot2 = secondSlot(aHash);
+  if (MOZ_LIKELY(!full(slot2))) {
+    ++slot2;
+  }
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void CountingBloomFilter<KeySize, T>::add(const T* aValue) {
+  uint32_t hash = aValue->hash();
+  return add(hash);
+}
+
+template <unsigned KeySize, class T>
+inline void CountingBloomFilter<KeySize, T>::remove(uint32_t aHash) {
+  // If the slots are full, we don't know whether we bumped them to be
+  // there when we added or not, so just leave them full.
+  uint8_t& slot1 = firstSlot(aHash);
+  if (MOZ_LIKELY(!full(slot1))) {
+    --slot1;
+  }
+  uint8_t& slot2 = secondSlot(aHash);
+  if (MOZ_LIKELY(!full(slot2))) {
+    --slot2;
+  }
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE void CountingBloomFilter<KeySize, T>::remove(
+    const T* aValue) {
+  uint32_t hash = aValue->hash();
+  remove(hash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool CountingBloomFilter<KeySize, T>::mightContain(
+    uint32_t aHash) const {
+  // Check that all the slots for this hash contain something
+  return firstSlot(aHash) && secondSlot(aHash);
+}
+
+template <unsigned KeySize, class T>
+MOZ_ALWAYS_INLINE bool CountingBloomFilter<KeySize, T>::mightContain(
+    const T* aValue) const {
+  uint32_t hash = aValue->hash();
+  return mightContain(hash);
+}
+
+}  // namespace mozilla
+
+#endif /* mozilla_BloomFilter_h */
diff --git a/mfbt/Buffer.h b/mfbt/Buffer.h
new file mode 100644
index 0000000000..c4e0a4be92
--- /dev/null
+++ b/mfbt/Buffer.h
@@ -0,0 +1,197 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Buffer_h
+#define mozilla_Buffer_h
+
+#include <algorithm>
+#include <iterator>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Span.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+namespace mozilla {
+
+/**
+ * A move-only type that wraps a mozilla::UniquePtr<T[]> and the length of
+ * the T[].
+ *
+ * Unlike mozilla::Array, the length is a run-time property.
+ * Unlike mozilla::Vector and nsTArray, it does not have a capacity and the
+ * associated growth functionality.
+ * Unlike mozilla::Span, mozilla::Buffer owns the allocation it points to.
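+ *
+ * An illustrative sketch of the fallible construction path described below
+ * (|someSpan| is a hypothetical Span<const uint8_t>, not part of this
+ * header):
+ *
+ *   mozilla::Maybe<mozilla::Buffer<uint8_t>> copy =
+ *       mozilla::Buffer<uint8_t>::CopyFrom(someSpan);
+ *   if (copy.isNothing()) {
+ *     return;  // allocation failed
+ *   }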
+ */
+template <typename T>
+class Buffer final {
+ private:
+  mozilla::UniquePtr<T[]> mData;
+  size_t mLength;
+
+ public:
+  Buffer(const Buffer& aOther) = delete;
+  Buffer& operator=(const Buffer& aOther) = delete;
+
+  /**
+   * Construct a zero-length Buffer (without actually pointing to a heap
+   * allocation).
+   */
+  Buffer() : mData(nullptr), mLength(0) {}
+
+  /**
+   * Construct from raw parts.
+   *
+   * aLength must not be greater than the actual length of the buffer pointed
+   * to by aData.
+   */
+  Buffer(mozilla::UniquePtr<T[]>&& aData, size_t aLength)
+      : mData(std::move(aData)), mLength(aLength) {}
+
+  /**
+   * Move constructor. Sets the moved-from Buffer to the zero-length
+   * state.
+   */
+  Buffer(Buffer&& aOther)
+      : mData(std::move(aOther.mData)), mLength(aOther.mLength) {
+    aOther.mLength = 0;
+  }
+
+  /**
+   * Move assignment. Sets the moved-from Buffer to the zero-length
+   * state.
+   */
+  Buffer& operator=(Buffer&& aOther) {
+    mData = std::move(aOther.mData);
+    mLength = aOther.mLength;
+    aOther.mLength = 0;
+    return *this;
+  }
+
+  /**
+   * Construct by copying the elements of a Span.
+   *
+   * Allocates the internal buffer infallibly. Use CopyFrom for fallible
+   * allocation.
+   */
+  explicit Buffer(mozilla::Span<const T> aSpan)
+      : mData(mozilla::MakeUniqueForOverwrite<T[]>(aSpan.Length())),
+        mLength(aSpan.Length()) {
+    std::copy(aSpan.cbegin(), aSpan.cend(), mData.get());
+  }
+
+  /**
+   * Create a new Buffer by copying the elements of a Span.
+   *
+   * Allocates the internal buffer fallibly.
+   */
+  static mozilla::Maybe<Buffer<T>> CopyFrom(mozilla::Span<const T> aSpan) {
+    if (aSpan.IsEmpty()) {
+      return Some(Buffer());
+    }
+
+    auto data = mozilla::MakeUniqueForOverwriteFallible<T[]>(aSpan.Length());
+    if (!data) {
+      return mozilla::Nothing();
+    }
+    std::copy(aSpan.cbegin(), aSpan.cend(), data.get());
+    return mozilla::Some(Buffer(std::move(data), aSpan.Length()));
+  }
+
+  /**
+   * Construct a buffer of requested length.
+   *
+   * The contents will be initialized or uninitialized according
+   * to the behavior of mozilla::MakeUnique<T[]>(aLength) for T.
+   *
+   * Allocates the internal buffer infallibly. Use Alloc for fallible
+   * allocation.
+   */
+  explicit Buffer(size_t aLength)
+      : mData(mozilla::MakeUnique<T[]>(aLength)), mLength(aLength) {}
+
+  /**
+   * Create a new Buffer with an internal buffer of requested length.
+   *
+   * The contents will be initialized or uninitialized according to the
+   * behavior of mozilla::MakeUnique<T[]>(aLength) for T.
+   *
+   * Allocates the internal buffer fallibly.
+   */
+  static mozilla::Maybe<Buffer<T>> Alloc(size_t aLength) {
+    auto data = mozilla::MakeUniqueFallible<T[]>(aLength);
+    if (!data) {
+      return mozilla::Nothing();
+    }
+    return mozilla::Some(Buffer(std::move(data), aLength));
+  }
+
+  /**
+   * Create a new Buffer with an internal buffer of requested length.
+   *
+   * This uses MakeUniqueForOverwriteFallible, so the contents will be
+   * default-initialized (i.e. left uninitialized for trivial T).
+   *
+   * Allocates the internal buffer fallibly.
+   */
+  static Maybe<Buffer<T>> AllocForOverwrite(size_t aLength) {
+    auto data = MakeUniqueForOverwriteFallible<T[]>(aLength);
+    if (!data) {
+      return Nothing();
+    }
+    return Some(Buffer(std::move(data), aLength));
+  }
+
+  auto AsSpan() const { return mozilla::Span<const T>{mData.get(), mLength}; }
+  auto AsWritableSpan() { return mozilla::Span<T>{mData.get(), mLength}; }
+  operator mozilla::Span<const T>() const { return AsSpan(); }
+  operator mozilla::Span<T>() { return AsWritableSpan(); }
+
+  /**
+   * Guarantees a non-null and aligned pointer
+   * even for the zero-length case.
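+   * (The guarantee is inherited from mozilla::Span: Elements() below simply
+   * returns AsWritableSpan().Elements().)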
+   */
+  T* Elements() { return AsWritableSpan().Elements(); }
+  size_t Length() const { return mLength; }
+
+  T& operator[](size_t aIndex) {
+    MOZ_ASSERT(aIndex < mLength);
+    return mData.get()[aIndex];
+  }
+
+  const T& operator[](size_t aIndex) const {
+    MOZ_ASSERT(aIndex < mLength);
+    return mData.get()[aIndex];
+  }
+
+  typedef T* iterator;
+  typedef const T* const_iterator;
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+  // Methods for range-based for loops.
+  iterator begin() { return mData.get(); }
+  const_iterator begin() const { return mData.get(); }
+  const_iterator cbegin() const { return begin(); }
+  iterator end() { return mData.get() + mLength; }
+  const_iterator end() const { return mData.get() + mLength; }
+  const_iterator cend() const { return end(); }
+
+  // Methods for reverse iterating.
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  const_reverse_iterator crbegin() const { return rbegin(); }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+  const_reverse_iterator crend() const { return rend(); }
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_Buffer_h */
diff --git a/mfbt/BufferList.h b/mfbt/BufferList.h
new file mode 100644
index 0000000000..ca63d7af8e
--- /dev/null
+++ b/mfbt/BufferList.h
@@ -0,0 +1,598 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BufferList_h
+#define mozilla_BufferList_h
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Vector.h"
+
+// BufferList represents a sequence of buffers of data. A BufferList can choose
+// to own its buffers or not. The class handles writing to the buffers,
+// iterating over them, and reading data out. Unlike SegmentedVector, the
+// buffers may be of unequal size. Like SegmentedVector, BufferList is a nice
+// way to avoid large contiguous allocations (which can trigger OOMs).
+
+class InfallibleAllocPolicy;
+
+namespace mozilla {
+
+template <typename AllocPolicy>
+class BufferList : private AllocPolicy {
+  // Each buffer in a BufferList has a size and a capacity. The first mSize
+  // bytes are initialized and the remaining |mCapacity - mSize| bytes are
+  // free.
+  struct Segment {
+    char* mData;
+    size_t mSize;
+    size_t mCapacity;
+
+    Segment(char* aData, size_t aSize, size_t aCapacity)
+        : mData(aData), mSize(aSize), mCapacity(aCapacity) {}
+
+    Segment(const Segment&) = delete;
+    Segment& operator=(const Segment&) = delete;
+
+    Segment(Segment&&) = default;
+    Segment& operator=(Segment&&) = default;
+
+    char* Start() const { return mData; }
+    char* End() const { return mData + mSize; }
+  };
+
+  template <typename OtherAllocPolicy>
+  friend class BufferList;
+
+ public:
+  // For the convenience of callers, all segments are required to be a multiple
+  // of 8 bytes in capacity. Also, every buffer except the last one is required
+  // to be full (i.e., size == capacity).
+  // Therefore, a byte at offset N within
+  // the BufferList and stored in memory at an address A will satisfy
+  // (N % Align == A % Align) if Align == 2, 4, or 8.
+  static const size_t kSegmentAlignment = 8;
+
+  // Allocate a BufferList. The BufferList will free all its buffers when it is
+  // destroyed. If an infallible allocator is used, an initial buffer of size
+  // aInitialSize and capacity aInitialCapacity is allocated automatically.
+  // This data will be contiguous and can be accessed via |Start()|. If a
+  // fallible alloc policy is used, aInitialSize must be 0, and the fallible
+  // |Init()| method may be called instead. Subsequent buffers will be
+  // allocated with capacity aStandardCapacity.
+  BufferList(size_t aInitialSize, size_t aInitialCapacity,
+             size_t aStandardCapacity, AllocPolicy aAP = AllocPolicy())
+      : AllocPolicy(aAP),
+        mOwning(true),
+        mSegments(aAP),
+        mSize(0),
+        mStandardCapacity(aStandardCapacity) {
+    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
+    MOZ_ASSERT(aStandardCapacity % kSegmentAlignment == 0);
+
+    if (aInitialCapacity) {
+      MOZ_ASSERT((aInitialSize == 0 ||
+                  std::is_same_v<AllocPolicy, InfallibleAllocPolicy>),
+                 "BufferList may only be constructed with an initial size when "
+                 "using an infallible alloc policy");
+
+      AllocateSegment(aInitialSize, aInitialCapacity);
+    }
+  }
+
+  BufferList(const BufferList& aOther) = delete;
+
+  BufferList(BufferList&& aOther)
+      : mOwning(aOther.mOwning),
+        mSegments(std::move(aOther.mSegments)),
+        mSize(aOther.mSize),
+        mStandardCapacity(aOther.mStandardCapacity) {
+    aOther.mSegments.clear();
+    aOther.mSize = 0;
+  }
+
+  BufferList& operator=(const BufferList& aOther) = delete;
+
+  BufferList& operator=(BufferList&& aOther) {
+    Clear();
+
+    mOwning = aOther.mOwning;
+    mSegments = std::move(aOther.mSegments);
+    mSize = aOther.mSize;
+    aOther.mSegments.clear();
+    aOther.mSize = 0;
+    return *this;
+  }
+
+  ~BufferList() { Clear(); }
+
+  // Initializes the BufferList with a segment of the given size and capacity.
+  // May only be called once, before any segments have been allocated.
+  bool Init(size_t aInitialSize, size_t aInitialCapacity) {
+    MOZ_ASSERT(mSegments.empty());
+    MOZ_ASSERT(aInitialCapacity != 0);
+    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
+
+    return AllocateSegment(aInitialSize, aInitialCapacity);
+  }
+
+  bool CopyFrom(const BufferList& aOther) {
+    MOZ_ASSERT(mOwning);
+
+    Clear();
+
+    // We don't make an exact copy of aOther. Instead, we create a single
+    // segment with enough space to hold all the data in aOther.
+    if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) &
+                                ~(kSegmentAlignment - 1))) {
+      return false;
+    }
+
+    size_t offset = 0;
+    for (const Segment& segment : aOther.mSegments) {
+      memcpy(Start() + offset, segment.mData, segment.mSize);
+      offset += segment.mSize;
+    }
+    MOZ_ASSERT(offset == mSize);
+
+    return true;
+  }
+
+  // Returns the sum of the sizes of all the buffers.
+  size_t Size() const { return mSize; }
+
+  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
+    size_t size = mSegments.sizeOfExcludingThis(aMallocSizeOf);
+    for (Segment& segment : mSegments) {
+      size += aMallocSizeOf(segment.Start());
+    }
+    return size;
+  }
+
+  void Clear() {
+    if (mOwning) {
+      for (Segment& segment : mSegments) {
+        this->free_(segment.mData, segment.mCapacity);
+      }
+    }
+    mSegments.clear();
+
+    mSize = 0;
+  }
+
+  // Iterates over bytes in the segments. You can advance it by as many bytes
+  // as you choose.
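+  //
+  // An illustrative sketch of draining a BufferList segment by segment
+  // (|list| and |ProcessBytes| are hypothetical, not part of this header):
+  //
+  //   auto iter = list.Iter();
+  //   while (!iter.Done()) {
+  //     ProcessBytes(iter.Data(), iter.RemainingInSegment());
+  //     iter.Advance(list, iter.RemainingInSegment());
+  //   }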
+  class IterImpl {
+    // Invariants:
+    //   (0) mSegment <= bufferList.mSegments.length()
+    //   (1) mData <= mDataEnd
+    //   (2) If mSegment is not the last segment, mData < mDataEnd
+    uintptr_t mSegment{0};
+    char* mData{nullptr};
+    char* mDataEnd{nullptr};
+    size_t mAbsoluteOffset{0};
+
+    friend class BufferList;
+
+   public:
+    explicit IterImpl(const BufferList& aBuffers) {
+      if (!aBuffers.mSegments.empty()) {
+        mData = aBuffers.mSegments[0].Start();
+        mDataEnd = aBuffers.mSegments[0].End();
+      }
+    }
+
+    // Returns a pointer to the raw data. It is valid to access up to
+    // RemainingInSegment bytes of this buffer.
+    char* Data() const {
+      MOZ_RELEASE_ASSERT(!Done());
+      return mData;
+    }
+
+    bool operator==(const IterImpl& other) const {
+      return mAbsoluteOffset == other.mAbsoluteOffset;
+    }
+    bool operator!=(const IterImpl& other) const { return !(*this == other); }
+
+    // Returns true if the memory in the range [Data(), Data() + aBytes) is all
+    // part of one contiguous buffer.
+    bool HasRoomFor(size_t aBytes) const {
+      return RemainingInSegment() >= aBytes;
+    }
+
+    // Returns the largest value aBytes for which HasRoomFor(aBytes) will be
+    // true.
+    size_t RemainingInSegment() const {
+      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
+      return mDataEnd - mData;
+    }
+
+    // Returns true if there are at least aBytes entries remaining in the
+    // BufferList after this iterator.
+    bool HasBytesAvailable(const BufferList& aBuffers, size_t aBytes) const {
+      return TotalBytesAvailable(aBuffers) >= aBytes;
+    }
+
+    // Returns the largest value `aBytes` for which HasBytesAvailable(aBytes)
+    // will be true.
+    size_t TotalBytesAvailable(const BufferList& aBuffers) const {
+      return aBuffers.mSize - mAbsoluteOffset;
+    }
+
+    // Advances the iterator by aBytes bytes. aBytes must be at most
+    // RemainingInSegment(). If advancing by aBytes takes the iterator to the
+    // end of a buffer, it will be moved to the beginning of the next buffer
+    // unless it is the last buffer.
+    void Advance(const BufferList& aBuffers, size_t aBytes) {
+      const Segment& segment = aBuffers.mSegments[mSegment];
+      MOZ_RELEASE_ASSERT(segment.Start() <= mData);
+      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
+      MOZ_RELEASE_ASSERT(mDataEnd == segment.End());
+
+      MOZ_RELEASE_ASSERT(HasRoomFor(aBytes));
+      mData += aBytes;
+      mAbsoluteOffset += aBytes;
+
+      if (mData == mDataEnd && mSegment + 1 < aBuffers.mSegments.length()) {
+        mSegment++;
+        const Segment& nextSegment = aBuffers.mSegments[mSegment];
+        mData = nextSegment.Start();
+        mDataEnd = nextSegment.End();
+        MOZ_RELEASE_ASSERT(mData < mDataEnd);
+      }
+    }
+
+    // Advance the iterator by aBytes, possibly crossing segments. This
+    // function returns false if it runs out of buffers to advance through.
+    // Otherwise it returns true.
+    bool AdvanceAcrossSegments(const BufferList& aBuffers, size_t aBytes) {
+      // If we don't need to cross segments, we can directly use `Advance` to
+      // get to our destination.
+      if (MOZ_LIKELY(aBytes <= RemainingInSegment())) {
+        Advance(aBuffers, aBytes);
+        return true;
+      }
+
+      // Check if we have enough bytes to scan this far forward.
+      if (!HasBytesAvailable(aBuffers, aBytes)) {
+        return false;
+      }
+
+      // Compare the distance to our target offset from the end of the
+      // BufferList to the distance from the start of our next segment.
+      // Depending on which is closer, we'll advance either forwards or
+      // backwards.
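+      //
+      // For instance (an illustrative worked example): in a 100-byte
+      // BufferList, advancing from absolute offset 10 by 85 bytes targets
+      // offset 95, which is only 5 bytes from the end, so scanning backwards
+      // from the last segment typically touches fewer segments than walking
+      // forwards from here.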
+      size_t targetOffset = mAbsoluteOffset + aBytes;
+      size_t fromEnd = aBuffers.mSize - targetOffset;
+      if (aBytes - RemainingInSegment() < fromEnd) {
+        // Advance through the buffer list until we reach the desired absolute
+        // offset.
+        while (mAbsoluteOffset < targetOffset) {
+          Advance(aBuffers, std::min(targetOffset - mAbsoluteOffset,
+                                     RemainingInSegment()));
+        }
+        MOZ_ASSERT(mAbsoluteOffset == targetOffset);
+        return true;
+      }
+
+      // Scan starting from the end of the BufferList: we advance
+      // backwards from the final segment until we find the segment to end in.
+      //
+      // If we end on a segment boundary, make sure to place the cursor at the
+      // beginning of the next segment.
+      mSegment = aBuffers.mSegments.length() - 1;
+      while (fromEnd > aBuffers.mSegments[mSegment].mSize) {
+        fromEnd -= aBuffers.mSegments[mSegment].mSize;
+        mSegment--;
+      }
+      mDataEnd = aBuffers.mSegments[mSegment].End();
+      mData = mDataEnd - fromEnd;
+      mAbsoluteOffset = targetOffset;
+      MOZ_ASSERT_IF(Done(), mSegment == aBuffers.mSegments.length() - 1);
+      MOZ_ASSERT_IF(Done(), mAbsoluteOffset == aBuffers.mSize);
+      return true;
+    }
+
+    // Returns true when the iterator reaches the end of the BufferList.
+    bool Done() const { return mData == mDataEnd; }
+
+    // The absolute offset of this iterator within the BufferList.
+    size_t AbsoluteOffset() const { return mAbsoluteOffset; }
+
+   private:
+    bool IsIn(const BufferList& aBuffers) const {
+      return mSegment < aBuffers.mSegments.length() &&
+             mData >= aBuffers.mSegments[mSegment].mData &&
+             mData < aBuffers.mSegments[mSegment].End();
+    }
+  };
+
+  // Special convenience method that returns Iter().Data().
+  char* Start() {
+    MOZ_RELEASE_ASSERT(!mSegments.empty());
+    return mSegments[0].mData;
+  }
+  const char* Start() const { return mSegments[0].mData; }
+
+  IterImpl Iter() const { return IterImpl(*this); }
+
+  // Copies aSize bytes from aData into the BufferList. The storage for these
+  // bytes may be split across multiple buffers. Size() is increased by aSize.
+  [[nodiscard]] inline bool WriteBytes(const char* aData, size_t aSize);
+
+  // Allocates a buffer of at most |aMaxSize| bytes and, if successful, returns
+  // that buffer, and places its size in |aSize|. If unsuccessful, returns null
+  // and leaves |aSize| undefined.
+  inline char* AllocateBytes(size_t aMaxSize, size_t* aSize);
+
+  // Copies a possibly non-contiguous byte range starting at aIter into
+  // aData. aIter is advanced by aSize bytes. Returns false if it runs out of
+  // data before aSize.
+  inline bool ReadBytes(IterImpl& aIter, char* aData, size_t aSize) const;
+
+  // Return a new BufferList that shares storage with this BufferList. The new
+  // BufferList is read-only. It allows iteration over aSize bytes starting at
+  // aIter. Borrow can fail, in which case *aSuccess will be false upon
+  // return. The borrowed BufferList can use a different AllocPolicy than the
+  // original one. However, it is not responsible for freeing buffers, so the
+  // AllocPolicy is only used for the buffer vector.
+  template <typename BorrowingAllocPolicy>
+  BufferList<BorrowingAllocPolicy> Borrow(
+      IterImpl& aIter, size_t aSize, bool* aSuccess,
+      BorrowingAllocPolicy aAP = BorrowingAllocPolicy()) const;
+
+  // Return a new BufferList and move storage from this BufferList to it. The
+  // new BufferList owns the buffers. Move can fail, in which case *aSuccess
+  // will be false upon return. The new BufferList can use a different
+  // AllocPolicy than the original one.
+  // The new OtherAllocPolicy is responsible for freeing buffers, so the
+  // OtherAllocPolicy must use a freeing method compatible with the original
+  // one.
+  template <typename OtherAllocPolicy>
+  BufferList<OtherAllocPolicy> MoveFallible(
+      bool* aSuccess, OtherAllocPolicy aAP = OtherAllocPolicy());
+
+  // Return the number of bytes from 'start' to 'end', two iterators within
+  // this BufferList.
+  size_t RangeLength(const IterImpl& start, const IterImpl& end) const {
+    MOZ_ASSERT(start.IsIn(*this) && end.IsIn(*this));
+    return end.mAbsoluteOffset - start.mAbsoluteOffset;
+  }
+
+  // This takes ownership of the data
+  void* WriteBytesZeroCopy(char* aData, size_t aSize, size_t aCapacity) {
+    MOZ_ASSERT(aCapacity != 0);
+    MOZ_ASSERT(aSize <= aCapacity);
+    MOZ_ASSERT(mOwning);
+
+    if (!mSegments.append(Segment(aData, aSize, aCapacity))) {
+      this->free_(aData, aCapacity);
+      return nullptr;
+    }
+    mSize += aSize;
+    return aData;
+  }
+
+  // Truncate this BufferList at the given iterator location, discarding all
+  // data after this point. After this call, all other iterators will be
+  // invalidated, and the passed-in iterator will be "Done".
+  //
+  // Returns the number of bytes discarded by this truncation.
+  size_t Truncate(IterImpl& aIter);
+
+ private:
+  explicit BufferList(AllocPolicy aAP)
+      : AllocPolicy(aAP), mOwning(false), mSize(0), mStandardCapacity(0) {}
+
+  char* AllocateSegment(size_t aSize, size_t aCapacity) {
+    MOZ_RELEASE_ASSERT(mOwning);
+    MOZ_ASSERT(aCapacity != 0);
+    MOZ_ASSERT(aSize <= aCapacity);
+
+    char* data = this->template pod_malloc<char>(aCapacity);
+    if (!data) {
+      return nullptr;
+    }
+    if (!mSegments.append(Segment(data, aSize, aCapacity))) {
+      this->free_(data, aCapacity);
+      return nullptr;
+    }
+    mSize += aSize;
+    return data;
+  }
+
+  void AssertConsistentSize() const {
+#ifdef DEBUG
+    size_t realSize = 0;
+    for (const auto& segment : mSegments) {
+      realSize += segment.mSize;
+    }
+    MOZ_ASSERT(realSize == mSize, "cached size value is inconsistent!");
+#endif
+  }
+
+  bool mOwning;
+  Vector<Segment, 1, AllocPolicy> mSegments;
+  size_t mSize;
+  size_t mStandardCapacity;
+};
+
+template <typename AllocPolicy>
+[[nodiscard]] bool BufferList<AllocPolicy>::WriteBytes(const char* aData,
+                                                       size_t aSize) {
+  MOZ_RELEASE_ASSERT(mOwning);
+  MOZ_RELEASE_ASSERT(mStandardCapacity);
+
+  size_t copied = 0;
+  while (copied < aSize) {
+    size_t toCopy;
+    char* data = AllocateBytes(aSize - copied, &toCopy);
+    if (!data) {
+      return false;
+    }
+    memcpy(data, aData + copied, toCopy);
+    copied += toCopy;
+  }
+
+  return true;
+}
+
+template <typename AllocPolicy>
+char* BufferList<AllocPolicy>::AllocateBytes(size_t aMaxSize, size_t* aSize) {
+  MOZ_RELEASE_ASSERT(mOwning);
+  MOZ_RELEASE_ASSERT(mStandardCapacity);
+
+  if (!mSegments.empty()) {
+    Segment& lastSegment = mSegments.back();
+
+    size_t capacity = lastSegment.mCapacity - lastSegment.mSize;
+    if (capacity) {
+      size_t size = std::min(aMaxSize, capacity);
+      char* data = lastSegment.mData + lastSegment.mSize;
+
+      lastSegment.mSize += size;
+      mSize += size;
+
+      *aSize = size;
+      return data;
+    }
+  }
+
+  size_t size = std::min(aMaxSize, mStandardCapacity);
+  char* data = AllocateSegment(size, mStandardCapacity);
+  if (data) {
+    *aSize = size;
+  }
+  return data;
+}
+
+template <typename AllocPolicy>
+bool BufferList<AllocPolicy>::ReadBytes(IterImpl& aIter, char* aData,
+                                        size_t aSize) const {
+  size_t copied = 0;
+  size_t remaining = aSize;
+  while (remaining) {
+    size_t toCopy = std::min(aIter.RemainingInSegment(), remaining);
+    if (!toCopy) {
+      // We've run out of data in the last segment.
+      return false;
+    }
+    memcpy(aData + copied, aIter.Data(), toCopy);
+    copied += toCopy;
+    remaining -= toCopy;
+
+    aIter.Advance(*this, toCopy);
+  }
+
+  return true;
+}
+
+template <typename AllocPolicy>
+template <typename BorrowingAllocPolicy>
+BufferList<BorrowingAllocPolicy> BufferList<AllocPolicy>::Borrow(
+    IterImpl& aIter, size_t aSize, bool* aSuccess,
+    BorrowingAllocPolicy aAP) const {
+  BufferList<BorrowingAllocPolicy> result(aAP);
+
+  size_t size = aSize;
+  while (size) {
+    size_t toAdvance = std::min(size, aIter.RemainingInSegment());
+
+    if (!toAdvance || !result.mSegments.append(
+                          typename BufferList<BorrowingAllocPolicy>::Segment(
+                              aIter.mData, toAdvance, toAdvance))) {
+      *aSuccess = false;
+      return result;
+    }
+    aIter.Advance(*this, toAdvance);
+    size -= toAdvance;
+  }
+
+  result.mSize = aSize;
+  *aSuccess = true;
+  return result;
+}
+
+template <typename AllocPolicy>
+template <typename OtherAllocPolicy>
+BufferList<OtherAllocPolicy> BufferList<AllocPolicy>::MoveFallible(
+    bool* aSuccess, OtherAllocPolicy aAP) {
+  BufferList<OtherAllocPolicy> result(0, 0, mStandardCapacity, aAP);
+
+  IterImpl iter = Iter();
+  while (!iter.Done()) {
+    size_t toAdvance = iter.RemainingInSegment();
+
+    if (!toAdvance ||
+        !result.mSegments.append(
+            typename BufferList<OtherAllocPolicy>::Segment(
+                iter.mData, toAdvance, toAdvance))) {
+      *aSuccess = false;
+      result.mSegments.clear();
+      return result;
+    }
+    iter.Advance(*this, toAdvance);
+  }
+
+  result.mSize = mSize;
+  mSegments.clear();
+  mSize = 0;
+  *aSuccess = true;
+  return result;
+}
+
+template <typename AllocPolicy>
+size_t BufferList<AllocPolicy>::Truncate(IterImpl& aIter) {
+  MOZ_ASSERT(aIter.IsIn(*this) || aIter.Done());
+  if (aIter.Done()) {
+    return 0;
+  }
+
+  size_t prevSize = mSize;
+
+  // Remove any segments after the iterator's current segment.
+  while (mSegments.length() > aIter.mSegment + 1) {
+    Segment& toFree = mSegments.back();
+    mSize -= toFree.mSize;
+    if (mOwning) {
+      this->free_(toFree.mData, toFree.mCapacity);
+    }
+    mSegments.popBack();
+  }
+
+  // The last segment is now aIter's current segment. Truncate or remove it.
+  Segment& seg = mSegments.back();
+  MOZ_ASSERT(aIter.mDataEnd == seg.End());
+  mSize -= aIter.RemainingInSegment();
+  seg.mSize -= aIter.RemainingInSegment();
+  if (!seg.mSize) {
+    if (mOwning) {
+      this->free_(seg.mData, seg.mCapacity);
+    }
+    mSegments.popBack();
+  }
+
+  // Correct `aIter` to point to the new end of the BufferList.
+  if (mSegments.empty()) {
+    MOZ_ASSERT(mSize == 0);
+    aIter.mSegment = 0;
+    aIter.mData = aIter.mDataEnd = nullptr;
+  } else {
+    aIter.mSegment = mSegments.length() - 1;
+    aIter.mData = aIter.mDataEnd = mSegments.back().End();
+  }
+  MOZ_ASSERT(aIter.Done());
+
+  AssertConsistentSize();
+  return prevSize - mSize;
+}
+
+}  // namespace mozilla
+
+#endif /* mozilla_BufferList_h */
diff --git a/mfbt/Casting.h b/mfbt/Casting.h
new file mode 100644
index 0000000000..c3341887ac
--- /dev/null
+++ b/mfbt/Casting.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Cast operations to supplement the built-in casting operations. */
+
+#ifndef mozilla_Casting_h
+#define mozilla_Casting_h
+
+#include "mozilla/Assertions.h"
+
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <type_traits>
+
+namespace mozilla {
+
+/**
+ * Sets the outparam value of type |To| with the same underlying bit pattern of
+ * |aFrom|.
+ *
+ * |To| and |From| must be types of the same size; be careful of cross-platform
+ * size differences, or this might fail to compile on some but not all
+ * platforms.
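+ *
+ * For example (an illustrative sketch; the 8-byte double/uint64_t pairing is
+ * an assumption of this snippet, not a guarantee of the header):
+ *
+ *   uint64_t bits;
+ *   BitwiseCast(3.5, &bits);  // copies the double's bit pattern into bits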
+ *
+ * There is also a variant that returns the value directly. In most cases, the
+ * two variants should be identical. However, in the specific case of x86
+ * chips, the behavior differs: returning floating-point values directly is
+ * done through the x87 stack, and x87 loads and stores turn signaling NaNs
+ * into quiet NaNs... silently. Returning floating-point values via outparam,
+ * however, is done entirely within the SSE registers when SSE2 floating-point
+ * is enabled in the compiler, which has the semantics-preserving behavior you
+ * would expect.
+ *
+ * If preserving the distinction between signaling NaNs and quiet NaNs is
+ * important to you, you should use the outparam version. In all other cases,
+ * you should use the direct return version.
+ */
+template <typename To, typename From>
+inline void BitwiseCast(const From aFrom, To* aResult) {
+  static_assert(sizeof(From) == sizeof(To),
+                "To and From must have the same size");
+
+  // We could maybe downgrade these to std::is_trivially_copyable, but the
+  // various STLs we use don't all provide it.
+  static_assert(std::is_trivial<From>::value,
+                "shouldn't bitwise-copy a type having non-trivial "
+                "initialization");
+  static_assert(std::is_trivial<To>::value,
+                "shouldn't bitwise-copy a type having non-trivial "
+                "initialization");
+
+  std::memcpy(static_cast<void*>(aResult), static_cast<const void*>(&aFrom),
+              sizeof(From));
+}
+
+template <typename To, typename From>
+inline To BitwiseCast(const From aFrom) {
+  To temp;
+  BitwiseCast(aFrom, &temp);
+  return temp;
+}
+
+namespace detail {
+
+template <typename T>
+constexpr int64_t safe_integer() {
+  static_assert(std::is_floating_point_v<T>);
+  return std::pow(2, std::numeric_limits<T>::digits);
+}
+
+template <typename T>
+constexpr uint64_t safe_integer_unsigned() {
+  static_assert(std::is_floating_point_v<T>);
+  return std::pow(2, std::numeric_limits<T>::digits);
+}
+
+// This is working around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81676,
+// fixed in gcc-10
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+template <typename In, typename Out>
+bool IsInBounds(In aIn) {
+  constexpr bool inSigned = std::is_signed_v<In>;
+  constexpr bool outSigned = std::is_signed_v<Out>;
+  constexpr bool bothSigned = inSigned && outSigned;
+  constexpr bool bothUnsigned = !inSigned && !outSigned;
+  constexpr bool inFloat = std::is_floating_point_v<In>;
+  constexpr bool outFloat = std::is_floating_point_v<Out>;
+  constexpr bool bothFloat = inFloat && outFloat;
+  constexpr bool noneFloat = !inFloat && !outFloat;
+  constexpr Out outMax = std::numeric_limits<Out>::max();
+  constexpr Out outMin = std::numeric_limits<Out>::lowest();
+
+  // This selects the widest of two types, and is used to cast throughout.
+  using select_widest =
+      std::conditional_t<(sizeof(In) > sizeof(Out)), In, Out>;
+
+  if constexpr (bothFloat) {
+    if (aIn > select_widest(outMax) || aIn < select_widest(outMin)) {
+      return false;
+    }
+  }
+  // Normal casting applies, the floating point number is floored.
+  if constexpr (inFloat && !outFloat) {
+    static_assert(sizeof(aIn) <= sizeof(int64_t));
+    // Check if the input floating point is larger than the output bounds.
+    // This catches situations where the input is a float larger than the
+    // max of the output type.
+    if (aIn < static_cast<In>(outMin) || aIn > static_cast<In>(outMax)) {
+      return false;
+    }
+    // At this point we know that the input can be converted to an integer.
+    // Check if it's larger than the bounds of the target integer.
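+    // (Illustrative: static_cast<float>(INT32_MAX) rounds up to
+    // 2147483648.0f, which passes the floating-point comparison above; only
+    // the integer comparison below rejects it.)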
+    if (outSigned) {
+      int64_t asInteger = static_cast<int64_t>(aIn);
+      if (asInteger < outMin || asInteger > outMax) {
+        return false;
+      }
+    } else {
+      uint64_t asInteger = static_cast<uint64_t>(aIn);
+      if (asInteger > outMax) {
+        return false;
+      }
+    }
+  }
+
+  // Checks if the integer is representable exactly as a floating point value
+  // of a specific width.
+  if constexpr (!inFloat && outFloat) {
+    if constexpr (inSigned) {
+      if (aIn < -safe_integer<Out>() || aIn > safe_integer<Out>()) {
+        return false;
+      }
+    } else {
+      if (aIn >= safe_integer_unsigned<Out>()) {
+        return false;
+      }
+    }
+  }
+
+  if constexpr (noneFloat) {
+    if constexpr (bothUnsigned) {
+      if (aIn > select_widest(outMax)) {
+        return false;
+      }
+    }
+    if constexpr (bothSigned) {
+      if (aIn > select_widest(outMax) || aIn < select_widest(outMin)) {
+        return false;
+      }
+    }
+    if constexpr (inSigned && !outSigned) {
+      if (aIn < 0 || std::make_unsigned_t<In>(aIn) > outMax) {
+        return false;
+      }
+    }
+    if constexpr (!inSigned && outSigned) {
+      if (aIn > select_widest(outMax)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+#pragma GCC diagnostic pop
+
+}  // namespace detail
+
+/**
+ * Cast a value of type |From| to a value of type |To|, asserting that the cast
+ * will be a safe cast per C++ (that is, that |aFrom| is in the range of values
+ * permitted for the type |To|).
+ * In particular, this will fail if an integer cannot be represented exactly
+ * as a floating point value, because it's too large.
+ */
+template <typename To, typename From>
+inline To AssertedCast(const From aFrom) {
+  static_assert(std::is_arithmetic_v<From> && std::is_arithmetic_v<To>);
+  MOZ_ASSERT((detail::IsInBounds<From, To>(aFrom)));
+  return static_cast<To>(aFrom);
+}
+
+/**
+ * Cast a value of numeric type |From| to a value of numeric type |To|, release
+ * asserting that the cast will be a safe cast per C++ (that is, that |aFrom|
+ * is in the range of values permitted for the type |To|).
+ * In particular, this will fail if an integer cannot be represented exactly
+ * as a floating point value, because it's too large.
+ */
+template <typename To, typename From>
+inline To ReleaseAssertedCast(const From aFrom) {
+  static_assert(std::is_arithmetic_v<From> && std::is_arithmetic_v<To>);
+  MOZ_RELEASE_ASSERT((detail::IsInBounds<From, To>(aFrom)));
+  return static_cast<To>(aFrom);
+}
+
+}  // namespace mozilla
+
+#endif /* mozilla_Casting_h */
diff --git a/mfbt/ChaosMode.cpp b/mfbt/ChaosMode.cpp
new file mode 100644
index 0000000000..d090e8a37e
--- /dev/null
+++ b/mfbt/ChaosMode.cpp
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ChaosMode.h"
+
+namespace mozilla {
+
+namespace detail {
+
+Atomic<uint32_t> gChaosModeCounter(0);
+ChaosFeature gChaosFeatures = None;
+
+} /* namespace detail */
+} /* namespace mozilla */
diff --git a/mfbt/ChaosMode.h b/mfbt/ChaosMode.h
new file mode 100644
index 0000000000..faf7acddf3
--- /dev/null
+++ b/mfbt/ChaosMode.h
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ChaosMode_h
+#define mozilla_ChaosMode_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumSet.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+namespace mozilla {
+
+enum ChaosFeature {
+  None = 0x0,
+  // Altering thread scheduling.
+  ThreadScheduling = 0x1,
+  // Altering network request scheduling.
+  NetworkScheduling = 0x2,
+  // Altering timer scheduling.
+  TimerScheduling = 0x4,
+  // Read and write less-than-requested amounts.
+  IOAmounts = 0x8,
+  // Iterate over hash tables in random order.
+  HashTableIteration = 0x10,
+  // Randomly refuse to use cached version of image (when allowed by spec).
+  ImageCache = 0x20,
+  // Delay dispatching threads to encourage dispatched tasks to run.
+  TaskDispatching = 0x40,
+  // Delay task running to encourage sending threads to run.
+  TaskRunning = 0x80,
+  Any = 0xffffffff,
+};
+
+namespace detail {
+extern MFBT_DATA Atomic<uint32_t> gChaosModeCounter;
+extern MFBT_DATA ChaosFeature gChaosFeatures;
+}  // namespace detail
+
+/**
+ * When "chaos mode" is activated, code that makes implicitly nondeterministic
+ * choices is encouraged to make random and extreme choices, to test more
+ * code paths and uncover bugs.
+ */
+class ChaosMode {
+ public:
+  static void SetChaosFeature(ChaosFeature aChaosFeature) {
+    detail::gChaosFeatures = aChaosFeature;
+  }
+
+  static bool isActive(ChaosFeature aFeature) {
+    if (detail::gChaosModeCounter > 0) {
+      return true;
+    }
+    return detail::gChaosFeatures & aFeature;
+  }
+
+  /**
+   * Increase the chaos mode activation level. An equivalent number of
+   * calls to leaveChaosMode must be made in order to restore the original
+   * chaos mode state. If the activation level is nonzero all chaos mode
+   * features are activated.
+   */
+  static void enterChaosMode() { detail::gChaosModeCounter++; }
+
+  /**
+   * Decrease the chaos mode activation level. See enterChaosMode().
+   */
+  static void leaveChaosMode() {
+    MOZ_ASSERT(detail::gChaosModeCounter > 0);
+    detail::gChaosModeCounter--;
+  }
+
+  /**
+   * Returns a somewhat (but not uniformly) random uint32_t < aBound.
+   * Not to be used for anything except ChaosMode, since it's not very random.
+   */
+  static uint32_t randomUint32LessThan(uint32_t aBound) {
+    MOZ_ASSERT(aBound != 0);
+    return uint32_t(rand()) % aBound;
+  }
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_ChaosMode_h */
diff --git a/mfbt/Char16.h b/mfbt/Char16.h
new file mode 100644
index 0000000000..7856880830
--- /dev/null
+++ b/mfbt/Char16.h
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implements a UTF-16 character type. */
+
+#ifndef mozilla_Char16_h
+#define mozilla_Char16_h
+
+#ifdef __cplusplus
+
+/*
+ * C++11 introduces a char16_t type and support for UTF-16 string and character
+ * literals. C++11's char16_t is a distinct builtin type. Technically, char16_t
+ * is a 16-bit code unit of a Unicode code point, not a "character".
+ */
+
+#  ifdef WIN32
+#    define MOZ_USE_CHAR16_WRAPPER
+#    include <cstdint>
+#    include "mozilla/Attributes.h"
+/**
+ * The Win32 API extensively uses wchar_t, which per spec is represented by a
+ * builtin type separate from char16_t. That is not the case for MSVC prior to
+ * MSVC 2015, but other compilers follow the spec. We want to mix wchar_t and
+ * char16_t on Windows builds.
+ * This class is supposed to make it easier. It
+ * stores a char16_t const pointer, but provides implicit casts for wchar_t as
+ * well. On other platforms, we simply use
+ * |typedef const char16_t* char16ptr_t|. Here, we want to make the class as
+ * similar to this typedef as possible, including providing some casts that
+ * are allowed by the typedef.
+ */
+class char16ptr_t {
+ private:
+  const char16_t* mPtr;
+  static_assert(sizeof(char16_t) == sizeof(wchar_t),
+                "char16_t and wchar_t sizes differ");
+
+ public:
+  constexpr MOZ_IMPLICIT char16ptr_t(const char16_t* aPtr) : mPtr(aPtr) {}
+  MOZ_IMPLICIT char16ptr_t(const wchar_t* aPtr)
+      : mPtr(reinterpret_cast<const char16_t*>(aPtr)) {}
+
+  /* Without this, nullptr assignment would be ambiguous. */
+  constexpr MOZ_IMPLICIT char16ptr_t(decltype(nullptr)) : mPtr(nullptr) {}
+
+  constexpr operator const char16_t*() const { return mPtr; }
+  operator const wchar_t*() const {
+    return reinterpret_cast<const wchar_t*>(mPtr);
+  }
+
+  operator wchar_t*() {
+    return const_cast<wchar_t*>(reinterpret_cast<const wchar_t*>(mPtr));
+  }
+
+  constexpr operator const void*() const { return mPtr; }
+  constexpr explicit operator bool() const { return mPtr != nullptr; }
+
+  explicit operator int() const { return reinterpret_cast<intptr_t>(mPtr); }
+  explicit operator unsigned int() const {
+    return reinterpret_cast<uintptr_t>(mPtr);
+  }
+  explicit operator long() const { return reinterpret_cast<intptr_t>(mPtr); }
+  explicit operator unsigned long() const {
+    return reinterpret_cast<uintptr_t>(mPtr);
+  }
+  explicit operator long long() const {
+    return reinterpret_cast<intptr_t>(mPtr);
+  }
+  explicit operator unsigned long long() const {
+    return reinterpret_cast<uintptr_t>(mPtr);
+  }
+
+  /**
+   * Some Windows API calls accept BYTE* but require that data actually be
+   * WCHAR*. Supporting this requires explicit operators to support the
+   * requisite explicit casts.
+   */
+  explicit operator const char*() const {
+    return reinterpret_cast<const char*>(mPtr);
+  }
+  explicit operator const unsigned char*() const {
+    return reinterpret_cast<const unsigned char*>(mPtr);
+  }
+  explicit operator unsigned char*() const {
+    return const_cast<unsigned char*>(
+        reinterpret_cast<const unsigned char*>(mPtr));
+  }
+  explicit operator void*() const { return const_cast<char16_t*>(mPtr); }
+
+  /* Some operators used on pointers. */
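+
+  // An illustrative sketch of the mixing these conversions enable (not part
+  // of the header's contract):
+  //
+  //   char16ptr_t p = L"wide";  // implicit conversion from wchar_t*
+  //   const wchar_t* w = p;     // and back again
+  //   char16_t c = p[0];        // pointer-style indexing via operator[]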
+  char16_t operator[](size_t aIndex) const { return mPtr[aIndex]; }
+  bool operator==(const char16ptr_t& aOther) const {
+    return mPtr == aOther.mPtr;
+  }
+  bool operator==(decltype(nullptr)) const { return mPtr == nullptr; }
+  bool operator!=(const char16ptr_t& aOther) const {
+    return mPtr != aOther.mPtr;
+  }
+  bool operator!=(decltype(nullptr)) const { return mPtr != nullptr; }
+  char16ptr_t operator+(int aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  char16ptr_t operator+(unsigned int aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  char16ptr_t operator+(long aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  char16ptr_t operator+(unsigned long aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  char16ptr_t operator+(long long aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  char16ptr_t operator+(unsigned long long aValue) const {
+    return char16ptr_t(mPtr + aValue);
+  }
+  ptrdiff_t operator-(const char16ptr_t& aOther) const {
+    return mPtr - aOther.mPtr;
+  }
+};
+
+inline decltype((char*)0 - (char*)0) operator-(const char16_t* aX,
+                                               const char16ptr_t aY) {
+  return aX - static_cast<const char16_t*>(aY);
+}
+
+#  else
+
+typedef const char16_t* char16ptr_t;
+
+#  endif
+
+static_assert(sizeof(char16_t) == 2, "Is char16_t type 16 bits?");
+static_assert(char16_t(-1) > char16_t(0), "Is char16_t type unsigned?");
+static_assert(sizeof(u'A') == 2, "Is unicode char literal 16 bits?");
+static_assert(sizeof(u""[0]) == 2, "Is unicode string char 16 bits?");
+
+#endif
+
+#endif /* mozilla_Char16_h */
diff --git a/mfbt/CheckedInt.h b/mfbt/CheckedInt.h
new file mode 100644
index 0000000000..d784376d8c
--- /dev/null
+++ b/mfbt/CheckedInt.h
@@ -0,0 +1,804 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Provides checked integers, detecting integer overflow and divide-by-0. */
+
+#ifndef mozilla_CheckedInt_h
+#define mozilla_CheckedInt_h
+
+#include <stdint.h>
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/IntegerTypeTraits.h"
+#include <limits>
+#include <type_traits>
+
+#define MOZILLA_CHECKEDINT_COMPARABLE_VERSION(major, minor, patch) \
+  (major << 16 | minor << 8 | patch)
+
+// Probe for builtin math overflow support. Disabled for 32-bit builds for now
+// since "gcc -m32" claims to support these but its implementation is buggy.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82274
+// Also disabled for clang before version 7 (resp. Xcode clang 10.0.1): while
+// clang 5 and 6 have a working __builtin_add_overflow, it is not constexpr.
+#if defined(HAVE_64BIT_BUILD)
+#  if defined(__has_builtin) &&                                        \
+      (!defined(__clang_major__) ||                                    \
+       (!defined(__apple_build_version__) && __clang_major__ >= 7) ||  \
+       (defined(__apple_build_version__) &&                            \
+        MOZILLA_CHECKEDINT_COMPARABLE_VERSION(                         \
+            __clang_major__, __clang_minor__, __clang_patchlevel__) >= \
+            MOZILLA_CHECKEDINT_COMPARABLE_VERSION(10, 0, 1)))
+#    define MOZ_HAS_BUILTIN_OP_OVERFLOW (__has_builtin(__builtin_add_overflow))
+#  elif defined(__GNUC__)
+// (clang also defines __GNUC__ but it supports __has_builtin since at least
+// v3.1 (released in 2012) so it won't get here.)
+#    define MOZ_HAS_BUILTIN_OP_OVERFLOW (__GNUC__ >= 5)
+#  else
+#    define MOZ_HAS_BUILTIN_OP_OVERFLOW (0)
+#  endif
+#else
+#  define MOZ_HAS_BUILTIN_OP_OVERFLOW (0)
+#endif
+
+#undef MOZILLA_CHECKEDINT_COMPARABLE_VERSION
+
+namespace mozilla {
+
+template <typename T>
+class CheckedInt;
+
+namespace detail {
+
+/*
+ * Step 1: manually record supported types
+ *
+ * What's nontrivial here is that there are different families of integer
+ * types: basic integer types and stdint types. It is merrily undefined which
+ * types from one family may be just typedefs for a type from another family.
+ *
+ * For example, on GCC 4.6, aside from the basic integer types, the only other
+ * type that isn't just a typedef for one of them is int8_t.
+ */
+
+struct UnsupportedType {};
+
+template <typename IntegerType>
+struct IsSupportedPass2 {
+  static const bool value = false;
+};
+
+template <typename IntegerType>
+struct IsSupported {
+  static const bool value = IsSupportedPass2<IntegerType>::value;
+};
+
+template <>
+struct IsSupported<int8_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint8_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<int16_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint16_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<int32_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint32_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<int64_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupported<uint64_t> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<char> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<signed char> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned char> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<short> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned short> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<int> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned int> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<long> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned long> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<long long> {
+  static const bool value = true;
+};
+
+template <>
+struct IsSupportedPass2<unsigned long long> {
+  static const bool value = true;
+};
+
+/*
+ * Step 2: Implement the actual validity checks.
+ *
+ * Ideas taken from IntegerLib, code different.
+ */
+
+template <typename IntegerType, size_t Size = sizeof(IntegerType)>
+struct TwiceBiggerType {
+  typedef typename detail::StdintTypeForSizeAndSignedness<
+      sizeof(IntegerType) * 2, std::is_signed_v<IntegerType>>::Type Type;
+};
+
+template <typename IntegerType>
+struct TwiceBiggerType<IntegerType, 8> {
+  typedef UnsupportedType Type;
+};
+
+template <typename T>
+constexpr bool HasSignBit(T aX) {
+  // In C++, right bit shifts on negative values are undefined by the
+  // standard. Notice that signed-to-unsigned conversions are always
+  // well-defined in the standard, as the value is congruent modulo 2**n,
+  // as expected. By contrast, unsigned-to-signed is only well-defined if
+  // the value is representable.
+  return bool(std::make_unsigned_t<T>(aX) >> PositionOfSignBit<T>::value);
+}
+
+// Bitwise ops may return a larger type, so it's good to use this inline
+// helper guaranteeing that the result is really of type T.
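+// For instance, |~aX| on an int8_t operand yields an int due to integer
+// promotion; BinaryComplement narrows the result back to int8_t.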
+template <typename T>
+constexpr T BinaryComplement(T aX) {
+  return ~aX;
+}
+
+template <typename T, typename U, bool IsTSigned = std::is_signed_v<T>,
+          bool IsUSigned = std::is_signed_v<U>>
+struct DoesRangeContainRange {};
+
+template <typename T, typename U, bool Signedness>
+struct DoesRangeContainRange<T, U, Signedness, Signedness> {
+  static const bool value = sizeof(T) >= sizeof(U);
+};
+
+template <typename T, typename U>
+struct DoesRangeContainRange<T, U, true, false> {
+  static const bool value = sizeof(T) > sizeof(U);
+};
+
+template <typename T, typename U>
+struct DoesRangeContainRange<T, U, false, true> {
+  static const bool value = false;
+};
+
+template <typename T, typename U, bool IsTSigned = std::is_signed_v<T>,
+          bool IsUSigned = std::is_signed_v<U>,
+          bool DoesTRangeContainURange = DoesRangeContainRange<T, U>::value>
+struct IsInRangeImpl {};
+
+template <typename T, typename U, bool IsTSigned, bool IsUSigned>
+struct IsInRangeImpl<T, U, IsTSigned, IsUSigned, true> {
+  static constexpr bool run(U) { return true; }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, true, true, false> {
+  static constexpr bool run(U aX) {
+    return aX <= std::numeric_limits<T>::max() &&
+           aX >= std::numeric_limits<T>::min();
+  }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, false, false, false> {
+  static constexpr bool run(U aX) {
+    return aX <= std::numeric_limits<T>::max();
+  }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, true, false, false> {
+  static constexpr bool run(U aX) {
+    return sizeof(T) > sizeof(U) || aX <= U(std::numeric_limits<T>::max());
+  }
+};
+
+template <typename T, typename U>
+struct IsInRangeImpl<T, U, false, true, false> {
+  static constexpr bool run(U aX) {
+    return sizeof(T) >= sizeof(U)
+               ? aX >= 0
+               : aX >= 0 && aX <= U(std::numeric_limits<T>::max());
+  }
+};
+
+template <typename T, typename U>
+constexpr bool IsInRange(U aX) {
+  return IsInRangeImpl<T, U>::run(aX);
+}
+
+template <typename T>
+constexpr bool IsAddValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+  T dummy;
+  return !__builtin_add_overflow(aX, aY, &dummy);
+#else
+  // Addition is valid if the sign of aX+aY is equal to either that of aX or
+  // that of aY. Since the value of aX+aY is undefined if we have a signed
+  // type, we compute it using the unsigned type of the same size. Beware!
+  // These bitwise operations can return a larger integer type, if T was a
+  // small type like int8_t, so we explicitly cast to T.
+
+  std::make_unsigned_t<T> ux = aX;
+  std::make_unsigned_t<T> uy = aY;
+  std::make_unsigned_t<T> result = ux + uy;
+  return std::is_signed_v<T>
+             ? HasSignBit(BinaryComplement(T((result ^ aX) & (result ^ aY))))
+             : BinaryComplement(aX) >= aY;
+#endif
+}
+
+template <typename T>
+constexpr bool IsSubValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+  T dummy;
+  return !__builtin_sub_overflow(aX, aY, &dummy);
+#else
+  // Subtraction is valid if either aX and aY have same sign, or aX-aY and aX
+  // have same sign. Since the value of aX-aY is undefined if we have a signed
+  // type, we compute it using the unsigned type of the same size.
+  std::make_unsigned_t<T> ux = aX;
+  std::make_unsigned_t<T> uy = aY;
+  std::make_unsigned_t<T> result = ux - uy;
+
+  return std::is_signed_v<T>
+             ? HasSignBit(BinaryComplement(T((result ^ aX) & (aX ^ aY))))
+             : aX >= aY;
+#endif
+}
+
+template <typename T, bool IsTSigned = std::is_signed_v<T>,
+          bool TwiceBiggerTypeIsSupported =
+              IsSupported<typename TwiceBiggerType<T>::Type>::value>
+struct IsMulValidImpl {};
+
+template <typename T, bool IsTSigned>
+struct IsMulValidImpl<T, IsTSigned, true> {
+  static constexpr bool run(T aX, T aY) {
+    typedef typename TwiceBiggerType<T>::Type TwiceBiggerType;
+    TwiceBiggerType product = TwiceBiggerType(aX) * TwiceBiggerType(aY);
+    return IsInRange<T>(product);
+  }
+};
+
+template <typename T>
+struct IsMulValidImpl<T, true, false> {
+  static constexpr bool run(T aX, T aY) {
+    const T max = std::numeric_limits<T>::max();
+    const T min = std::numeric_limits<T>::min();
+
+    if (aX == 0 || aY == 0) {
+      return true;
+    }
+    if (aX > 0) {
+      return aY > 0 ? aX <= max / aY : aY >= min / aX;
+    }
+
+    // If we reach this point, we know that aX < 0.
+    return aY > 0 ?
+               aX >= min / aY : aY >= max / aX;
+  }
+};
+
+template <typename T>
+struct IsMulValidImpl<T, false, false> {
+  static constexpr bool run(T aX, T aY) {
+    return aY == 0 || aX <= std::numeric_limits<T>::max() / aY;
+  }
+};
+
+template <typename T>
+constexpr bool IsMulValid(T aX, T aY) {
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+  T dummy;
+  return !__builtin_mul_overflow(aX, aY, &dummy);
+#else
+  return IsMulValidImpl<T>::run(aX, aY);
+#endif
+}
+
+template <typename T>
+constexpr bool IsDivValid(T aX, T aY) {
+  // Keep in mind that in the signed case, min/-1 is invalid because
+  // abs(min)>max.
+  return aY != 0 && !(std::is_signed_v<T> &&
+                      aX == std::numeric_limits<T>::min() && aY == T(-1));
+}
+
+template <typename T, bool IsSigned = std::is_signed_v<T>>
+struct IsModValidImpl;
+
+template <typename T>
+constexpr bool IsModValid(T aX, T aY) {
+  return IsModValidImpl<T>::run(aX, aY);
+}
+
+/*
+ * Mod is pretty simple.
+ * For now, let's just use the ANSI C definition:
+ * If aX or aY are negative, the results are implementation defined.
+ *   Consider these invalid.
+ * Undefined for aY=0.
+ * The result will never exceed either aX or aY.
+ *
+ * Checking that aX>=0 is a warning when T is unsigned.
+ */
+
+template <typename T>
+struct IsModValidImpl<T, false> {
+  static constexpr bool run(T aX, T aY) { return aY >= 1; }
+};
+
+template <typename T>
+struct IsModValidImpl<T, true> {
+  static constexpr bool run(T aX, T aY) {
+    if (aX < 0) {
+      return false;
+    }
+    return aY >= 1;
+  }
+};
+
+template <typename T, bool IsSigned = std::is_signed_v<T>>
+struct NegateImpl;
+
+template <typename T>
+struct NegateImpl<T, false> {
+  static constexpr CheckedInt<T> negate(const CheckedInt<T>& aVal) {
+    // Handle negation separately for signed/unsigned, for simpler code and to
+    // avoid an MSVC warning negating an unsigned value.
+    static_assert(detail::IsInRange<T>(0), "Integer type can't represent 0");
+    return CheckedInt<T>(T(0), aVal.isValid() && aVal.mValue == 0);
+  }
+};
+
+template <typename T>
+struct NegateImpl<T, true> {
+  static constexpr CheckedInt<T> negate(const CheckedInt<T>& aVal) {
+    // Watch out for the min-value, which (with twos-complement) can't be
+    // negated as -min-value is then (max-value + 1).
+    if (!aVal.isValid() || aVal.mValue == std::numeric_limits<T>::min()) {
+      return CheckedInt<T>(aVal.mValue, false);
+    }
+    /* For some T, arithmetic ops automatically promote to a wider type, so
+     * explicitly do the narrowing cast here. The narrowing cast is valid
+     * because we did the check for min value above. */
+    return CheckedInt<T>(T(-aVal.mValue), true);
+  }
+};
+
+}  // namespace detail
+
+/*
+ * Step 3: Now define the CheckedInt class.
+ */
+
+/**
+ * @class CheckedInt
+ * @brief Integer wrapper class checking for integer overflow and other errors
+ * @param T the integer type to wrap. Can be any type among the following:
+ *          - any basic integer type such as |int|
+ *          - any stdint type such as |int8_t|
+ *
+ * This class implements guarded integer arithmetic. Do a computation, then
+ * check that isValid() returns true; if so, you have a guarantee that no
+ * problem, such as integer overflow, happened during this computation, and
+ * you can call value() to get the plain integer value.
+ *
+ * The arithmetic operators in this class are guaranteed not to raise a signal
+ * (e.g. in case of a division by zero).
+ *
+ * For example, suppose that you want to implement a function that computes
+ * (aX+aY)/aZ, that doesn't crash if aZ==0, and that reports on error (divide
+ * by zero or integer overflow).
+ * You could code it as follows:
+   @code
+   bool computeXPlusYOverZ(int aX, int aY, int aZ, int* aResult)
+   {
+     CheckedInt<int32_t> checkedResult = (CheckedInt<int32_t>(aX) + aY) / aZ;
+     if (checkedResult.isValid()) {
+       *aResult = checkedResult.value();
+       return true;
+     } else {
+       return false;
+     }
+   }
+   @endcode
+ *
+ * Implicit conversion from plain integers to checked integers is allowed. The
+ * plain integer is checked to be in range before being cast to the
+ * destination type. This means that the following lines all compile, and the
+ * resulting CheckedInts are correctly detected as valid or invalid:
+ * @code
+   // 1 is of type int, is found to be in range for uint8_t, x is valid
+   CheckedInt<uint8_t> x(1);
+   // -1 is of type int, is found not to be in range for uint8_t, x is invalid
+   CheckedInt<uint8_t> x(-1);
+   // -1 is of type int, is found to be in range for int8_t, x is valid
+   CheckedInt<int8_t> x(-1);
+   // 1000 is of type int16_t, is found not to be in range for int8_t,
+   // x is invalid
+   CheckedInt<int8_t> x(int16_t(1000));
+   // 3123456789 is of type uint32_t, is found not to be in range for int32_t,
+   // x is invalid
+   CheckedInt<int32_t> x(uint32_t(3123456789));
+ * @endcode
+ * Implicit conversion from
+ * checked integers to plain integers is not allowed. As shown in the
+ * above example, to get the value of a checked integer as a normal integer,
+ * call value().
+ *
+ * Arithmetic operations between checked and plain integers are allowed; the
+ * result type is the type of the checked integer.
+ *
+ * Checked integers of different types cannot be used in the same arithmetic
+ * expression.
+ *
+ * There are convenience typedefs for all stdint types, of the following form
+ * (these are just 2 examples):
+   @code
+   typedef CheckedInt<int32_t> CheckedInt32;
+   typedef CheckedInt<uint16_t> CheckedUint16;
+   @endcode
+ */
+template <typename T>
+class CheckedInt {
+ protected:
+  T mValue;
+  bool mIsValid;
+
+  template <typename U>
+  constexpr CheckedInt(U aValue, bool aIsValid)
+      : mValue(aValue), mIsValid(aIsValid) {
+    static_assert(std::is_same_v<T, U>,
+                  "this constructor must accept only T values");
+    static_assert(detail::IsSupported<T>::value,
+                  "This type is not supported by CheckedInt");
+  }
+
+  friend struct detail::NegateImpl<T>;
+
+ public:
+  /**
+   * Constructs a checked integer with given @a value. The checked integer is
+   * initialized as valid or invalid depending on whether the @a value
+   * is in range.
+   *
+   * This constructor is not explicit. Instead, the type of its argument is a
+   * separate template parameter, ensuring that no conversion is performed
+   * before this constructor is actually called. As explained in the above
+   * documentation for class CheckedInt, this constructor checks that its
+   * argument is valid.
+   */
+  template <typename U>
+  MOZ_IMPLICIT MOZ_NO_ARITHMETIC_EXPR_IN_ARGUMENT constexpr CheckedInt(
+      U aValue)
+      : mValue(T(aValue)), mIsValid(detail::IsInRange<T>(aValue)) {
+    static_assert(
+        detail::IsSupported<T>::value && detail::IsSupported<U>::value,
+        "This type is not supported by CheckedInt");
+  }
+
+  template <typename U>
+  friend class CheckedInt;
+
+  template <typename U>
+  constexpr CheckedInt<U> toChecked() const {
+    CheckedInt<U> ret(mValue);
+    ret.mIsValid = ret.mIsValid && mIsValid;
+    return ret;
+  }
+
+  /** Constructs a valid checked integer with initial value 0 */
+  constexpr CheckedInt() : mValue(T(0)), mIsValid(true) {
+    static_assert(detail::IsSupported<T>::value,
+                  "This type is not supported by CheckedInt");
+    static_assert(detail::IsInRange<T>(0), "Integer type can't represent 0");
+  }
+
+  /** @returns the actual value */
+  constexpr T value() const {
+    MOZ_DIAGNOSTIC_ASSERT(
+        mIsValid,
+        "Invalid checked integer (division by zero or integer overflow)");
+    return mValue;
+  }
+
+  /**
+   * @returns true if the checked integer is valid, i.e. is not the result
+   * of an invalid operation or of an operation involving an invalid checked
+   * integer
+   */
+  constexpr bool isValid() const { return mIsValid; }
+
+  template <typename U>
+  friend constexpr CheckedInt<U> operator+(const CheckedInt<U>& aLhs,
+                                           const CheckedInt<U>& aRhs);
+  template <typename U>
+  constexpr CheckedInt& operator+=(U aRhs);
+  constexpr CheckedInt& operator+=(const CheckedInt<T>& aRhs);
+
+  template <typename U>
+  friend constexpr CheckedInt<U> operator-(const CheckedInt<U>& aLhs,
+                                           const CheckedInt<U>& aRhs);
+  template <typename U>
+  constexpr CheckedInt& operator-=(U aRhs);
+  constexpr CheckedInt& operator-=(const CheckedInt<T>& aRhs);
+
+  template <typename U>
+  friend constexpr CheckedInt<U> operator*(const CheckedInt<U>& aLhs,
+                                           const CheckedInt<U>& aRhs);
+  template <typename U>
+  constexpr CheckedInt& operator*=(U aRhs);
+  constexpr CheckedInt& operator*=(const CheckedInt<T>& aRhs);
+
+  template <typename U>
+  friend constexpr CheckedInt<U> operator/(const CheckedInt<U>& aLhs,
+                                           const CheckedInt<U>& aRhs);
+  template <typename U>
+  constexpr CheckedInt& operator/=(U aRhs);
+  constexpr CheckedInt& operator/=(const CheckedInt<T>& aRhs);
+
+  template <typename U>
+  friend constexpr CheckedInt<U> operator%(const CheckedInt<U>& aLhs,
+                                           const CheckedInt<U>& aRhs);
+  template <typename U>
+  constexpr CheckedInt& operator%=(U aRhs);
+  constexpr CheckedInt& operator%=(const CheckedInt<T>& aRhs);
+
+  constexpr CheckedInt operator-() const {
+    return detail::NegateImpl<T>::negate(*this);
+  }
+
+  /**
+   * @returns true if the left and right hand sides are valid
+   * and have the same value.
+   *
+   * Note that these semantics are the reason why we don't offer
+   * an operator!=. Indeed, we'd want to have a!=b be equivalent to !(a==b)
+   * but that would mean that whenever a or b is invalid, a!=b
+   * is always true, which would be very confusing.
+   *
+   * For similar reasons, operators <, >, <=, >= would be very tricky to
+   * specify, so we just avoid offering them.
+   *
+   * Notice that these == semantics are made more reasonable by these facts:
+   *  1. a==b implies equality at the raw data level
+   *     (the converse is false, as a==b is never true among invalids)
+   *  2. This is similar to the behavior of IEEE floats, where a==b
+   *     means that a and b have the same value *and* neither is NaN.
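+   *
+   * An illustrative consequence: two invalid results never compare equal,
+   * even when they hold the same raw value:
+   * @code
+   CheckedInt<uint8_t> a(-1);  // invalid: -1 not in range for uint8_t
+   CheckedInt<uint8_t> b(-1);  // invalid as well
+   bool same = (a == b);       // false: invalids never compare equal
+   * @endcode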
+   */
+  constexpr bool operator==(const CheckedInt& aOther) const {
+    return mIsValid && aOther.mIsValid && mValue == aOther.mValue;
+  }
+
+  /** prefix ++ */
+  constexpr CheckedInt& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  /** postfix ++ */
+  constexpr CheckedInt operator++(int) {
+    CheckedInt tmp = *this;
+    *this += 1;
+    return tmp;
+  }
+
+  /** prefix -- */
+  constexpr CheckedInt& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  /** postfix -- */
+  constexpr CheckedInt operator--(int) {
+    CheckedInt tmp = *this;
+    *this -= 1;
+    return tmp;
+  }
+
+ private:
+  /**
+   * The !=, <, <=, >, >= operators are disabled:
+   * see the comment on operator==.
+   */
+  template <typename U>
+  bool operator!=(U aOther) const = delete;
+  template <typename U>
+  bool operator<(U aOther) const = delete;
+  template <typename U>
+  bool operator<=(U aOther) const = delete;
+  template <typename U>
+  bool operator>(U aOther) const = delete;
+  template <typename U>
+  bool operator>=(U aOther) const = delete;
+};
+
+#define MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(NAME, OP)                      \
+  template <typename T>                                                     \
+  constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs,            \
+                                      const CheckedInt<T>& aRhs) {          \
+    if (!detail::Is##NAME##Valid(aLhs.mValue, aRhs.mValue)) {               \
+      static_assert(detail::IsInRange<T>(0),                                \
+                    "Integer type can't represent 0");                      \
+      return CheckedInt<T>(T(0), false);                                    \
+    }                                                                       \
+    /* For some T, arithmetic ops automatically promote to a wider type, so \
+     * explicitly do the narrowing cast here. The narrowing cast is valid   \
+     * because we did the "Is##NAME##Valid" check above. */                 \
+    return CheckedInt<T>(T(aLhs.mValue OP aRhs.mValue),                     \
+                         aLhs.mIsValid && aRhs.mIsValid);                   \
+  }
+
+#if MOZ_HAS_BUILTIN_OP_OVERFLOW
+#  define MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(NAME, OP, FUN)       \
+    template <typename T>                                            \
+    constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs,   \
+                                        const CheckedInt<T>& aRhs) { \
+      auto result = T{};                                             \
+      if (FUN(aLhs.mValue, aRhs.mValue, &result)) {                  \
+        static_assert(detail::IsInRange<T>(0),                       \
+                      "Integer type can't represent 0");             \
+        return CheckedInt<T>(T(0), false);                           \
+      }                                                              \
+      return CheckedInt<T>(result, aLhs.mIsValid && aRhs.mIsValid);  \
+    }
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Add, +, __builtin_add_overflow)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Sub, -, __builtin_sub_overflow)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2(Mul, *, __builtin_mul_overflow)
+#  undef MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR2
+#else
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Add, +)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Sub, -)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Mul, *)
+#endif
+
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Div, /)
+MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR(Mod, %)
+#undef MOZ_CHECKEDINT_BASIC_BINARY_OPERATOR
+
+// Implement castToCheckedInt<T>(x), making sure that
+// - it allows x to be either a CheckedInt<T> or any integer type
+//   that can be cast to T
+// - if x is already a CheckedInt<T>, we just return a reference to it,
+//   instead of copying it (optimization)
+
+namespace detail {
+
+template <typename T, typename U>
+struct CastToCheckedIntImpl {
+  typedef CheckedInt<T> ReturnType;
+  static constexpr CheckedInt<T> run(U aU) { return aU; }
+};
+
+template <typename T>
+struct CastToCheckedIntImpl<T, CheckedInt<T>> {
+  typedef const CheckedInt<T>& ReturnType;
+  static constexpr const CheckedInt<T>& run(const CheckedInt<T>& aU) {
+    return aU;
+  }
+};
+
+}  // namespace detail
+
+template <typename T, typename U>
+constexpr typename detail::CastToCheckedIntImpl<T, U>::ReturnType
+castToCheckedInt(U aU) {
+  static_assert(detail::IsSupported<T>::value && detail::IsSupported<U>::value,
+                "This type is not supported by CheckedInt");
+  return detail::CastToCheckedIntImpl<T, U>::run(aU);
+}
+
+#define MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(OP, COMPOUND_OP)        \
+  template <typename T>                                                    \
+  template <typename U>                                                    \
+  constexpr CheckedInt<T>& CheckedInt<T>::operator COMPOUND_OP(U aRhs) {   \
+    *this = *this OP castToCheckedInt<T>(aRhs);                            \
+    return *this;                                                          \
+  }                                                                        \
+  template <typename T>                                                    \
+  constexpr CheckedInt<T>& CheckedInt<T>::operator COMPOUND_OP(            \
+      const CheckedInt<T>& aRhs) {                                         \
+    *this = *this OP aRhs;                                                 \
+    return *this;                                                          \
+  }                                                                        \
+  template <typename T, typename U>                                        \
+  constexpr CheckedInt<T> operator OP(const CheckedInt<T>& aLhs, U aRhs) { \
+    return aLhs OP castToCheckedInt<T>(aRhs);                              \
+  }                                                                        \
+  template <typename T, typename U>                                        \
+  constexpr CheckedInt<T> operator OP(U aLhs, const CheckedInt<T>& aRhs) { \
+    return castToCheckedInt<T>(aLhs) OP aRhs;                              \
+  }
+
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(+, +=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(*, *=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(-, -=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(/, /=)
+MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS(%, %=)
+
+#undef MOZ_CHECKEDINT_CONVENIENCE_BINARY_OPERATORS
+
+template <typename T, typename U>
+constexpr bool operator==(const CheckedInt<T>& aLhs, U aRhs) {
+  return aLhs == castToCheckedInt<T>(aRhs);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(U aLhs, const CheckedInt<T>& aRhs) {
+  return castToCheckedInt<T>(aLhs) == aRhs;
+}
+
+// Convenience typedefs.
+typedef CheckedInt<int8_t> CheckedInt8;
+typedef CheckedInt<uint8_t> CheckedUint8;
+typedef CheckedInt<int16_t> CheckedInt16;
+typedef CheckedInt<uint16_t> CheckedUint16;
+typedef CheckedInt<int32_t> CheckedInt32;
+typedef CheckedInt<uint32_t> CheckedUint32;
+typedef CheckedInt<int64_t> CheckedInt64;
+typedef CheckedInt<uint64_t> CheckedUint64;
+
+}  // namespace mozilla
+
+#endif /* mozilla_CheckedInt_h */
diff --git a/mfbt/CompactPair.h b/mfbt/CompactPair.h
new file mode 100644
index 0000000000..fa810dc0af
--- /dev/null
+++ b/mfbt/CompactPair.h
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A class holding a pair of objects that tries to conserve storage space. */
+
+#ifndef mozilla_CompactPair_h
+#define mozilla_CompactPair_h
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+namespace detail {
+
+enum StorageType { AsBase, AsMember };
+
+// Conserve storage using the Empty Base Optimization -- the fact that empty
+// base classes don't take up space -- when one or the other class is
+// stateless and can be used as a base class.
+//
+// The extra conditions on storage for B are necessary so that CompactPairHelper
+// won't ambiguously inherit from either A or B, such that one or the other base
+// class would be inaccessible.
+template <typename A, typename B,
+          detail::StorageType =
+              std::is_empty_v<A> ? detail::AsBase : detail::AsMember,
+          detail::StorageType = std::is_empty_v<B> &&
+                                        !std::is_base_of<A, B>::value &&
+                                        !std::is_base_of<B, A>::value
+                                    ? detail::AsBase
+                                    : detail::AsMember>
+struct CompactPairHelper;
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, detail::AsMember, detail::AsMember> {
+ protected:
+  template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+            std::size_t... BIndexes>
+  constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+                              std::tuple<BArgs...>& aBTuple,
+                              std::index_sequence<AIndexes...>,
+                              std::index_sequence<BIndexes...>)
+      : mFirstA(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+        mSecondB(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...)
+  {}
+
+ public:
+  template <typename AArg, typename BArg>
+  constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+      : mFirstA(std::forward<AArg>(aA)), mSecondB(std::forward<BArg>(aB)) {}
+
+  constexpr A& first() { return mFirstA; }
+  constexpr const A& first() const { return mFirstA; }
+  constexpr B& second() { return mSecondB; }
+  constexpr const B& second() const { return mSecondB; }
+
+  void swap(CompactPairHelper& aOther) {
+    std::swap(mFirstA, aOther.mFirstA);
+    std::swap(mSecondB, aOther.mSecondB);
+  }
+
+ private:
+  A mFirstA;
+  B mSecondB;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, detail::AsMember, detail::AsBase> : private B {
+ protected:
+  template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+            std::size_t... BIndexes>
+  constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+                              std::tuple<BArgs...>& aBTuple,
+                              std::index_sequence<AIndexes...>,
+                              std::index_sequence<BIndexes...>)
+      : B(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...),
+        mFirstA(std::forward<AArgs>(std::get<AIndexes>(aATuple))...) {}
+
+ public:
+  template <typename AArg, typename BArg>
+  constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+      : B(std::forward<BArg>(aB)), mFirstA(std::forward<AArg>(aA)) {}
+
+  constexpr A& first() { return mFirstA; }
+  constexpr const A& first() const { return mFirstA; }
+  constexpr B& second() { return *this; }
+  constexpr const B& second() const { return *this; }
+
+  void swap(CompactPairHelper& aOther) {
+    std::swap(mFirstA, aOther.mFirstA);
+    std::swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+  }
+
+ private:
+  A mFirstA;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, detail::AsBase, detail::AsMember> : private A {
+ protected:
+  template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+            std::size_t... BIndexes>
+  constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+                              std::tuple<BArgs...>& aBTuple,
+                              std::index_sequence<AIndexes...>,
+                              std::index_sequence<BIndexes...>)
+      : A(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+        mSecondB(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...) {}
+
+ public:
+  template <typename AArg, typename BArg>
+  constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+      : A(std::forward<AArg>(aA)), mSecondB(std::forward<BArg>(aB)) {}
+
+  constexpr A& first() { return *this; }
+  constexpr const A& first() const { return *this; }
+  constexpr B& second() { return mSecondB; }
+  constexpr const B& second() const { return mSecondB; }
+
+  void swap(CompactPairHelper& aOther) {
+    std::swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+    std::swap(mSecondB, aOther.mSecondB);
+  }
+
+ private:
+  B mSecondB;
+};
+
+template <typename A, typename B>
+struct CompactPairHelper<A, B, detail::AsBase, detail::AsBase> : private A,
+                                                                 private B {
+ protected:
+  template <typename... AArgs, std::size_t... AIndexes, typename... BArgs,
+            std::size_t... BIndexes>
+  constexpr CompactPairHelper(std::tuple<AArgs...>& aATuple,
+                              std::tuple<BArgs...>& aBTuple,
+                              std::index_sequence<AIndexes...>,
+                              std::index_sequence<BIndexes...>)
+      : A(std::forward<AArgs>(std::get<AIndexes>(aATuple))...),
+        B(std::forward<BArgs>(std::get<BIndexes>(aBTuple))...) {}
+
+ public:
+  template <typename AArg, typename BArg>
+  constexpr CompactPairHelper(AArg&& aA, BArg&& aB)
+      : A(std::forward<AArg>(aA)), B(std::forward<BArg>(aB)) {}
+
+  constexpr A& first() { return static_cast<A&>(*this); }
+  constexpr const A& first() const { return static_cast<const A&>(*this); }
+  constexpr B& second() { return static_cast<B&>(*this); }
+  constexpr const B& second() const { return static_cast<const B&>(*this); }
+
+  void swap(CompactPairHelper& aOther) {
+    std::swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+    std::swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+  }
+};
+
+}  // namespace detail
+
+/**
+ * CompactPair is the logical concatenation of an instance of A with an
+ * instance of B. Space is conserved when possible. Neither A nor B may be a
+ * final class.
+ *
+ * In general, if space conservation is not critical, it is preferable to use
+ * std::pair.
+ *
+ * It's typically clearer to have individual A and B member fields. Except if
+ * you want the space-conserving qualities of CompactPair, you're probably
+ * better off not using this!
+ *
+ * No guarantees are provided about the memory layout of A and B, the order of
+ * initialization or destruction of A and B, and so on.
(This is approximately
+ * required to optimize space usage.) The first/second names are merely
+ * conceptual!
+ */
+template <typename A, typename B>
+struct CompactPair : private detail::CompactPairHelper<A, B> {
+  typedef typename detail::CompactPairHelper<A, B> Base;
+
+  using Base::Base;
+
+  template <typename... AArgs, typename... BArgs>
+  constexpr CompactPair(std::piecewise_construct_t, std::tuple<AArgs...> aFirst,
+                        std::tuple<BArgs...> aSecond)
+      : Base(aFirst, aSecond, std::index_sequence_for<AArgs...>(),
+             std::index_sequence_for<BArgs...>()) {}
+
+  CompactPair(CompactPair&& aOther) = default;
+  CompactPair(const CompactPair& aOther) = default;
+
+  CompactPair& operator=(CompactPair&& aOther) = default;
+  CompactPair& operator=(const CompactPair& aOther) = default;
+
+  /** The A instance. */
+  using Base::first;
+  /** The B instance. */
+  using Base::second;
+
+  /** Swap this pair with another pair. */
+  void swap(CompactPair& aOther) { Base::swap(aOther); }
+};
+
+/**
+ * MakeCompactPair allows you to construct a CompactPair instance using type
+ * inference. A call like this:
+ *
+ *   MakeCompactPair(Foo(), Bar())
+ *
+ * will return a CompactPair<Foo, Bar>.
+ */
+template <typename A, typename B>
+CompactPair<std::remove_cv_t<std::remove_reference_t<A>>,
+            std::remove_cv_t<std::remove_reference_t<B>>>
+MakeCompactPair(A&& aA, B&& aB) {
+  return CompactPair<std::remove_cv_t<std::remove_reference_t<A>>,
+                     std::remove_cv_t<std::remove_reference_t<B>>>(
+      std::forward<A>(aA), std::forward<B>(aB));
+}
+
+/**
+ * CompactPair equality comparison
+ */
+template <typename A, typename B>
+bool operator==(const CompactPair<A, B>& aLhs, const CompactPair<A, B>& aRhs) {
+  return aLhs.first() == aRhs.first() && aLhs.second() == aRhs.second();
+}
+
+}  // namespace mozilla
+
+namespace std {
+
+template <typename A, typename B>
+void swap(mozilla::CompactPair<A, B>& aX, mozilla::CompactPair<A, B>& aY) {
+  aX.swap(aY);
+}
+
+}  // namespace std
+
+#endif /* mozilla_CompactPair_h */
diff --git a/mfbt/Compiler.h b/mfbt/Compiler.h
new file mode 100644
index 0000000000..2c7dcc7c59
--- /dev/null
+++ b/mfbt/Compiler.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various compiler checks. */
+
+#ifndef mozilla_Compiler_h
+#define mozilla_Compiler_h
+
+#define MOZ_IS_GCC 0
+
+#if !defined(__clang__) && defined(__GNUC__)
+
+#  undef MOZ_IS_GCC
+#  define MOZ_IS_GCC 1
+/*
+ * These macros should simplify gcc version checking. For example, to check
+ * for gcc 4.7.1 or later, check `#if MOZ_GCC_VERSION_AT_LEAST(4, 7, 1)`.
+ */
+#  define MOZ_GCC_VERSION_AT_LEAST(major, minor, patchlevel)            \
+    ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
+     ((major)*10000 + (minor)*100 + (patchlevel)))
+#  define MOZ_GCC_VERSION_AT_MOST(major, minor, patchlevel)             \
+    ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) <= \
+     ((major)*10000 + (minor)*100 + (patchlevel)))
+#  if !MOZ_GCC_VERSION_AT_LEAST(6, 1, 0)
+#    error "mfbt (and Gecko) require at least gcc 6.1 to build."
+#  endif
+
+#endif
+
+#endif /* mozilla_Compiler_h */
diff --git a/mfbt/Compression.cpp b/mfbt/Compression.cpp
new file mode 100644
index 0000000000..b0c3db6980
--- /dev/null
+++ b/mfbt/Compression.cpp
@@ -0,0 +1,182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +#include "mozilla/Compression.h" +#include "mozilla/CheckedInt.h" + +// Without including , MSVC 2015 complains about e.g. the impossibility +// to convert `const void* const` to `void*` when calling memchr from +// corecrt_memory.h. +#include + +#include "lz4/lz4.h" +#include "lz4/lz4frame.h" + +using namespace mozilla; +using namespace mozilla::Compression; + +/* Our wrappers */ + +size_t LZ4::compress(const char* aSource, size_t aInputSize, char* aDest) { + CheckedInt inputSizeChecked = aInputSize; + MOZ_ASSERT(inputSizeChecked.isValid()); + return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(), + LZ4_compressBound(inputSizeChecked.value())); +} + +size_t LZ4::compressLimitedOutput(const char* aSource, size_t aInputSize, + char* aDest, size_t aMaxOutputSize) { + CheckedInt inputSizeChecked = aInputSize; + MOZ_ASSERT(inputSizeChecked.isValid()); + CheckedInt maxOutputSizeChecked = aMaxOutputSize; + MOZ_ASSERT(maxOutputSizeChecked.isValid()); + return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(), + maxOutputSizeChecked.value()); +} + +bool LZ4::decompress(const char* aSource, size_t aInputSize, char* aDest, + size_t aMaxOutputSize, size_t* aOutputSize) { + CheckedInt maxOutputSizeChecked = aMaxOutputSize; + MOZ_ASSERT(maxOutputSizeChecked.isValid()); + CheckedInt inputSizeChecked = aInputSize; + MOZ_ASSERT(inputSizeChecked.isValid()); + + int ret = LZ4_decompress_safe(aSource, aDest, inputSizeChecked.value(), + maxOutputSizeChecked.value()); + if (ret >= 0) { + *aOutputSize = ret; + return true; + } + + *aOutputSize = 0; + return false; +} + +bool LZ4::decompressPartial(const char* aSource, size_t aInputSize, char* aDest, + size_t aMaxOutputSize, size_t* aOutputSize) { + CheckedInt maxOutputSizeChecked = aMaxOutputSize; + MOZ_ASSERT(maxOutputSizeChecked.isValid()); + CheckedInt inputSizeChecked = aInputSize; + MOZ_ASSERT(inputSizeChecked.isValid()); + + int ret = LZ4_decompress_safe_partial( + aSource, aDest, inputSizeChecked.value(), maxOutputSizeChecked.value(), + maxOutputSizeChecked.value()); + if (ret >= 0) { + *aOutputSize = ret; + return true; + } + + *aOutputSize = 0; + return false; +} + +LZ4FrameCompressionContext::LZ4FrameCompressionContext(int aCompressionLevel, + size_t aMaxSrcSize, + bool aChecksum, + bool aStableSrc) + : mContext(nullptr), + mCompressionLevel(aCompressionLevel), + mGenerateChecksum(aChecksum), + mStableSrc(aStableSrc), + mMaxSrcSize(aMaxSrcSize), + mWriteBufLen(0) { + LZ4F_contentChecksum_t checksum = + mGenerateChecksum ? LZ4F_contentChecksumEnabled : LZ4F_noContentChecksum; + LZ4F_preferences_t prefs = { + { + LZ4F_max256KB, + LZ4F_blockLinked, + checksum, + }, + mCompressionLevel, + }; + mWriteBufLen = LZ4F_compressBound(mMaxSrcSize, &prefs); + LZ4F_errorCode_t err = LZ4F_createCompressionContext(&mContext, LZ4F_VERSION); + MOZ_RELEASE_ASSERT(!LZ4F_isError(err)); +} + +LZ4FrameCompressionContext::~LZ4FrameCompressionContext() { + LZ4F_freeCompressionContext(mContext); +} + +Result, size_t> LZ4FrameCompressionContext::BeginCompressing( + Span aWriteBuffer) { + mWriteBuffer = aWriteBuffer; + LZ4F_contentChecksum_t checksum = + mGenerateChecksum ? 
LZ4F_contentChecksumEnabled : LZ4F_noContentChecksum; + LZ4F_preferences_t prefs = { + { + LZ4F_max256KB, + LZ4F_blockLinked, + checksum, + }, + mCompressionLevel, + }; + size_t headerSize = LZ4F_compressBegin(mContext, mWriteBuffer.Elements(), + mWriteBufLen, &prefs); + if (LZ4F_isError(headerSize)) { + return Err(headerSize); + } + + return Span{static_cast(mWriteBuffer.Elements()), headerSize}; +} + +Result, size_t> +LZ4FrameCompressionContext::ContinueCompressing(Span aInput) { + LZ4F_compressOptions_t opts = {}; + opts.stableSrc = (uint32_t)mStableSrc; + size_t outputSize = + LZ4F_compressUpdate(mContext, mWriteBuffer.Elements(), mWriteBufLen, + aInput.Elements(), aInput.Length(), &opts); + if (LZ4F_isError(outputSize)) { + return Err(outputSize); + } + + return Span{static_cast(mWriteBuffer.Elements()), outputSize}; +} + +Result, size_t> LZ4FrameCompressionContext::EndCompressing() { + size_t outputSize = + LZ4F_compressEnd(mContext, mWriteBuffer.Elements(), mWriteBufLen, + /* options */ nullptr); + if (LZ4F_isError(outputSize)) { + return Err(outputSize); + } + + return Span{static_cast(mWriteBuffer.Elements()), outputSize}; +} + +LZ4FrameDecompressionContext::LZ4FrameDecompressionContext(bool aStableDest) + : mContext(nullptr), mStableDest(aStableDest) { + LZ4F_errorCode_t err = + LZ4F_createDecompressionContext(&mContext, LZ4F_VERSION); + MOZ_RELEASE_ASSERT(!LZ4F_isError(err)); +} + +LZ4FrameDecompressionContext::~LZ4FrameDecompressionContext() { + LZ4F_freeDecompressionContext(mContext); +} + +Result +LZ4FrameDecompressionContext::Decompress(Span aOutput, + Span aInput) { + LZ4F_decompressOptions_t opts = {}; + opts.stableDst = (uint32_t)mStableDest; + size_t outBytes = aOutput.Length(); + size_t inBytes = aInput.Length(); + size_t result = LZ4F_decompress(mContext, aOutput.Elements(), &outBytes, + aInput.Elements(), &inBytes, &opts); + if (LZ4F_isError(result)) { + return Err(result); + } + + LZ4FrameDecompressionResult decompressionResult = {}; + decompressionResult.mFinished = !result; + decompressionResult.mSizeRead = inBytes; + decompressionResult.mSizeWritten = outBytes; + return decompressionResult; +} diff --git a/mfbt/Compression.h b/mfbt/Compression.h new file mode 100644 index 0000000000..d9f787c0b4 --- /dev/null +++ b/mfbt/Compression.h @@ -0,0 +1,218 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Various simple compression/decompression functions. */ + +#ifndef mozilla_Compression_h_ +#define mozilla_Compression_h_ + +#include "mozilla/Assertions.h" +#include "mozilla/Types.h" +#include "mozilla/ResultVariant.h" +#include "mozilla/Span.h" +#include "mozilla/UniquePtr.h" + +struct LZ4F_cctx_s; // compression context +struct LZ4F_dctx_s; // decompression context + +namespace mozilla { +namespace Compression { + +/** + * LZ4 is a very fast byte-wise compression algorithm. + * + * Compared to Google's Snappy it is faster to compress and decompress and + * generally produces output of about the same size. + * + * Compared to zlib it compresses at about 10x the speed, decompresses at about + * 4x the speed and produces output of about 1.5x the size. + */ + +class LZ4 { + public: + /** + * Compresses |aInputSize| bytes from |aSource| into |aDest|. 
Destination + * buffer must be already allocated, and must be sized to handle worst cases + * situations (input data not compressible). Worst case size evaluation is + * provided by function maxCompressedSize() + * + * @param aInputSize is the input size. Max supported value is ~1.9GB + * @return the number of bytes written in buffer |aDest| + */ + static MFBT_API size_t compress(const char* aSource, size_t aInputSize, + char* aDest); + + /** + * Compress |aInputSize| bytes from |aSource| into an output buffer + * |aDest| of maximum size |aMaxOutputSize|. If it cannot achieve it, + * compression will stop, and result of the function will be zero, + * |aDest| will still be written to, but since the number of input + * bytes consumed is not returned the result is not usable. + * + * This function never writes outside of provided output buffer. + * + * @param aInputSize is the input size. Max supported value is ~1.9GB + * @param aMaxOutputSize is the size of the destination buffer (which must + * be already allocated) + * @return the number of bytes written in buffer |aDest| or 0 if the + * compression fails + */ + static MFBT_API size_t compressLimitedOutput(const char* aSource, + size_t aInputSize, char* aDest, + size_t aMaxOutputSize); + + /** + * If the source stream is malformed, the function will stop decoding + * and return false. + * + * This function never writes beyond aDest + aMaxOutputSize, and is + * therefore protected against malicious data packets. + * + * Note: Destination buffer must be already allocated. This version is + * slightly slower than the decompress without the aMaxOutputSize. + * + * @param aInputSize is the length of the input compressed data + * @param aMaxOutputSize is the size of the destination buffer (which must be + * already allocated) + * @param aOutputSize the actual number of bytes decoded in the destination + * buffer (necessarily <= aMaxOutputSize) + * @return true on success, false on failure + */ + [[nodiscard]] static MFBT_API bool decompress(const char* aSource, + size_t aInputSize, char* aDest, + size_t aMaxOutputSize, + size_t* aOutputSize); + + /** + * If the source stream is malformed, the function will stop decoding + * and return false. + * + * This function never writes beyond aDest + aMaxOutputSize, and is + * therefore protected against malicious data packets. It also ignores + * unconsumed input upon reaching aMaxOutputSize and can therefore be used + * for partial decompression. + * + * Note: Destination buffer must be already allocated. This version is + * slightly slower than the decompress without the aMaxOutputSize. + * + * @param aInputSize is the length of the input compressed data + * @param aMaxOutputSize is the size of the destination buffer (which must be + * already allocated) + * @param aOutputSize the actual number of bytes decoded in the destination + * buffer (necessarily <= aMaxOutputSize) + * @return true on success, false on failure + */ + [[nodiscard]] static MFBT_API bool decompressPartial(const char* aSource, + size_t aInputSize, + char* aDest, + size_t aMaxOutputSize, + size_t* aOutputSize); + + /* + * Provides the maximum size that LZ4 may output in a "worst case" + * scenario (input data not compressible) primarily useful for memory + * allocation of output buffer. + * note : this function is limited by "int" range (2^31-1) + * + * @param aInputSize is the input size. 
Max supported value is ~1.9GB + * @return maximum output size in a "worst case" scenario + */ + static inline size_t maxCompressedSize(size_t aInputSize) { + size_t max = (aInputSize + (aInputSize / 255) + 16); + MOZ_ASSERT(max > aInputSize); + return max; + } +}; + +/** + * Context for LZ4 Frame-based streaming compression. Use this if you + * want to incrementally compress something or if you want to compress + * something such that another application can read it. + */ +class LZ4FrameCompressionContext final { + public: + MFBT_API LZ4FrameCompressionContext(int aCompressionLevel, size_t aMaxSrcSize, + bool aChecksum, bool aStableSrc = false); + + MFBT_API ~LZ4FrameCompressionContext(); + + size_t GetRequiredWriteBufferLength() { return mWriteBufLen; } + + /** + * Begin streaming frame-based compression. + * + * @return a Result with a Span containing the frame header, or an lz4 error + * code (size_t). + */ + MFBT_API Result, size_t> BeginCompressing( + Span aWriteBuffer); + + /** + * Continue streaming frame-based compression with the provided input. + * + * @param aInput input buffer to be compressed. + * @return a Result with a Span containing compressed output, or an lz4 error + * code (size_t). + */ + MFBT_API Result, size_t> ContinueCompressing( + Span aInput); + + /** + * Finalize streaming frame-based compression with the provided input. + * + * @return a Result with a Span containing compressed output and the frame + * footer, or an lz4 error code (size_t). + */ + MFBT_API Result, size_t> EndCompressing(); + + private: + LZ4F_cctx_s* mContext; + int mCompressionLevel; + bool mGenerateChecksum; + bool mStableSrc; + size_t mMaxSrcSize; + size_t mWriteBufLen; + Span mWriteBuffer; +}; + +struct LZ4FrameDecompressionResult { + size_t mSizeRead; + size_t mSizeWritten; + bool mFinished; +}; + +/** + * Context for LZ4 Frame-based streaming decompression. Use this if you + * want to decompress something compressed by LZ4FrameCompressionContext + * or by another application. + */ +class LZ4FrameDecompressionContext final { + public: + explicit MFBT_API LZ4FrameDecompressionContext(bool aStableDest = false); + MFBT_API ~LZ4FrameDecompressionContext(); + + /** + * Decompress a buffer/part of a buffer compressed with + * LZ4FrameCompressionContext or another application. + * + * @param aOutput output buffer to be write results into. + * @param aInput input buffer to be decompressed. + * @return a Result with information on bytes read/written and whether we + * completely decompressed the input into the output, or an lz4 error code + * (size_t). + */ + MFBT_API Result Decompress( + Span aOutput, Span aInput); + + private: + LZ4F_dctx_s* mContext; + bool mStableDest; +}; + +} /* namespace Compression */ +} /* namespace mozilla */ + +#endif /* mozilla_Compression_h_ */ diff --git a/mfbt/DbgMacro.h b/mfbt/DbgMacro.h new file mode 100644 index 0000000000..3247b993c0 --- /dev/null +++ b/mfbt/DbgMacro.h @@ -0,0 +1,206 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef mozilla_DbgMacro_h +#define mozilla_DbgMacro_h + +/* a MOZ_DBG macro that outputs a wrapped value to stderr then returns it */ + +#include "mozilla/MacroForEach.h" +#include "mozilla/Span.h" + +#include +#include + +template +class nsTSubstring; + +#ifdef ANDROID +# include +#endif + +namespace mozilla { + +namespace detail { + +// Predicate to check whether T can be inserted into an ostream. +template () + << std::declval())> +std::true_type supports_os_test(const T&); +std::false_type supports_os_test(...); + +template +using supports_os = decltype(supports_os_test(std::declval())); + +} // namespace detail + +// Helper function to write a value to an ostream. +// +// This handles pointer values where the type being pointed to supports being +// inserted into an ostream, and we write out the value being pointed to in +// addition to the pointer value. +template +auto DebugValue(std::ostream& aOut, T* aValue) + -> std::enable_if_t::value, std::ostream&> { + if (aValue) { + aOut << *aValue << " @ " << aValue; + } else { + aOut << "null"; + } + return aOut; +} + +// Helper function to write a value to an ostream. +// +// This handles all pointer types that cannot be dereferenced and inserted into +// an ostream. +template +auto DebugValue(std::ostream& aOut, T* aValue) + -> std::enable_if_t::value, + std::ostream&> { + return aOut << aValue; +} + +// Helper function to write a value to an ostream. +// +// This handles XPCOM string types. +template +auto DebugValue(std::ostream& aOut, const T& aValue) + -> std::enable_if_t, T>::value || + std::is_base_of, T>::value, + std::ostream&> { + return aOut << '"' << aValue << '"'; +} + +// Helper function to write a value to an ostream. +// +// This handles all other types. +template +auto DebugValue(std::ostream& aOut, const T& aValue) + -> std::enable_if_t, T>::value && + !std::is_base_of, T>::value, + std::ostream&> { + return aOut << aValue; +} + +namespace detail { + +// Helper function template for MOZ_DBG. +template +auto&& MozDbg(const char* aFile, int aLine, const char* aExpression, + T&& aValue) { + std::ostringstream s; + s << "[MozDbg] [" << aFile << ':' << aLine << "] " << aExpression << " = "; + mozilla::DebugValue(s, std::forward(aValue)); + s << '\n'; +#ifdef ANDROID + __android_log_print(ANDROID_LOG_INFO, "Gecko", "%s", s.str().c_str()); +#else + fputs(s.str().c_str(), stderr); +#endif + return std::forward(aValue); +} + +} // namespace detail + +} // namespace mozilla + +template +std::ostream& operator<<(std::ostream& aOut, + const mozilla::Span& aSpan) { + aOut << '['; + if (!aSpan.IsEmpty()) { + aOut << aSpan[0]; + for (size_t i = 1; i < aSpan.Length(); ++i) { + aOut << ", " << aSpan[i]; + } + } + return aOut << ']'; +} + +// Don't define this for char[], since operator<<(ostream&, char*) is already +// defined. +template ::value>> +std::ostream& operator<<(std::ostream& aOut, const T (&aArray)[N]) { + return aOut << mozilla::Span(aArray); +} + +// MOZ_DBG is a macro like the Rust dbg!() macro -- it will print out the +// expression passed to it to stderr and then return the value. It is not +// available in MOZILLA_OFFICIAL builds, so you shouldn't land any uses of it in +// the tree. +// +// It should work for any type T that has an operator<<(std::ostream&, const T&) +// defined for it. 
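+//
+// A quick sketch of the intended use (the file name and line number in the
+// output are illustrative):
+//
+//   int x = MOZ_DBG(2 + 3);  // prints "[MozDbg] [Foo.cpp:10] 2 + 3 = 5"
+//                            // to stderr and initializes x to 5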
+// +// Note 1: Using MOZ_DBG may cause copies to be made of temporary values: +// +// struct A { +// A(int); +// A(const A&); +// +// int x; +// }; +// +// void f(A); +// +// f(A{1}); // may (and, in C++17, will) elide the creation of a temporary +// // for A{1} and instead initialize the function argument +// // directly using the A(int) constructor +// +// f(MOZ_DBG(A{1})); // will create and return a temporary for A{1}, which +// // then will be passed to the A(const A&) copy +// // constructor to initialize f's argument +// +// Note 2: MOZ_DBG cannot be used to wrap a prvalue that is being used to +// initialize an object if its type has no move constructor: +// +// struct B { +// B() = default; +// B(B&&) = delete; +// }; +// +// B b1 = B(); // fine, initializes b1 directly +// +// B b2 = MOZ_DBG(B()); // compile error: MOZ_DBG needs to materialize a +// // temporary for B() so it can be passed to +// // operator<<, but that temporary is returned from +// // MOZ_DBG as an rvalue reference and so wants to +// // invoke B's move constructor to initialize b2 +#ifndef MOZILLA_OFFICIAL +# define MOZ_DBG(...) \ + mozilla::detail::MozDbg(__FILE__, __LINE__, #__VA_ARGS__, __VA_ARGS__) +#endif + +// Helper macro for MOZ_DEFINE_DBG. +#define MOZ_DBG_FIELD(name_) << #name_ << " = " << aValue.name_ + +// Macro to define an operator<<(ostream&) for a struct or class that displays +// the type name and the values of the specified member variables. Must be +// called inside the struct or class. +// +// For example: +// +// struct Point { +// float x; +// float y; +// +// MOZ_DEFINE_DBG(Point, x, y) +// }; +// +// generates an operator<< that outputs strings like +// "Point { x = 1.0, y = 2.0 }". +#define MOZ_DEFINE_DBG(type_, ...) \ + friend std::ostream& operator<<(std::ostream& aOut, const type_& aValue) { \ + return aOut << #type_ \ + << (MOZ_ARG_COUNT(__VA_ARGS__) == 0 ? "" : " { ") \ + MOZ_FOR_EACH_SEPARATED(MOZ_DBG_FIELD, (<< ", "), (), \ + (__VA_ARGS__)) \ + << (MOZ_ARG_COUNT(__VA_ARGS__) == 0 ? "" : " }"); \ + } + +#endif // mozilla_DbgMacro_h diff --git a/mfbt/DebugOnly.h b/mfbt/DebugOnly.h new file mode 100644 index 0000000000..0441685735 --- /dev/null +++ b/mfbt/DebugOnly.h @@ -0,0 +1,102 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Provides DebugOnly, a type for variables used only in debug builds (i.e. by + * assertions). + */ + +#ifndef mozilla_DebugOnly_h +#define mozilla_DebugOnly_h + +#include "mozilla/Attributes.h" + +#include + +namespace mozilla { + +/** + * DebugOnly contains a value of type T, but only in debug builds. In release + * builds, it does not contain a value. This helper is intended to be used with + * MOZ_ASSERT()-style macros, allowing one to write: + * + * DebugOnly check = func(); + * MOZ_ASSERT(check); + * + * more concisely than declaring |check| conditional on #ifdef DEBUG. + * + * DebugOnly instances can only be coerced to T in debug builds. In release + * builds they don't have a value, so type coercion is not well defined. + * + * NOTE: DebugOnly instances still take up one byte of space, plus padding, even + * in optimized, non-DEBUG builds (see bug 1253094 comment 37 for more info). 
+ * For this reason the class is MOZ_STACK_CLASS to prevent consumers using + * DebugOnly for struct/class members and unwittingly inflating the size of + * their objects in release builds. + */ +template +class MOZ_STACK_CLASS DebugOnly { + public: +#ifdef DEBUG + T value; + + DebugOnly() = default; + MOZ_IMPLICIT DebugOnly(T&& aOther) : value(std::move(aOther)) {} + MOZ_IMPLICIT DebugOnly(const T& aOther) : value(aOther) {} + DebugOnly(const DebugOnly& aOther) : value(aOther.value) {} + DebugOnly& operator=(const T& aRhs) { + value = aRhs; + return *this; + } + DebugOnly& operator=(T&& aRhs) { + value = std::move(aRhs); + return *this; + } + + void operator++(int) { value++; } + void operator--(int) { value--; } + + // Do not define operator+=(), etc. here. These will coerce via the + // implicit cast and built-in operators. Defining explicit methods here + // will create ambiguity the compiler can't deal with. + + T* operator&() { return &value; } + + operator T&() { return value; } + operator const T&() const { return value; } + + T& operator->() { return value; } + const T& operator->() const { return value; } + + const T& inspect() const { return value; } + +#else + DebugOnly() = default; + MOZ_IMPLICIT DebugOnly(const T&) {} + DebugOnly(const DebugOnly&) {} + DebugOnly& operator=(const T&) { return *this; } + MOZ_IMPLICIT DebugOnly(T&&) {} + DebugOnly& operator=(T&&) { return *this; } + void operator++(int) {} + void operator--(int) {} + DebugOnly& operator+=(const T&) { return *this; } + DebugOnly& operator-=(const T&) { return *this; } + DebugOnly& operator&=(const T&) { return *this; } + DebugOnly& operator|=(const T&) { return *this; } + DebugOnly& operator^=(const T&) { return *this; } +#endif + + /* + * DebugOnly must always have a user-defined destructor or else it will + * generate "unused variable" warnings, exactly what it's intended + * to avoid! + */ + ~DebugOnly() {} +}; + +} // namespace mozilla + +#endif /* mozilla_DebugOnly_h */ diff --git a/mfbt/DefineEnum.h b/mfbt/DefineEnum.h new file mode 100644 index 0000000000..afcff10e52 --- /dev/null +++ b/mfbt/DefineEnum.h @@ -0,0 +1,156 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Poor man's reflection for enumerations. */ + +#ifndef mozilla_DefineEnum_h +#define mozilla_DefineEnum_h + +#include // for size_t + +#include "mozilla/MacroArgs.h" // for MOZ_ARG_COUNT +#include "mozilla/MacroForEach.h" // for MOZ_FOR_EACH + +/** + * MOZ_UNWRAP_ARGS is a helper macro that unwraps a list of comma-separated + * items enclosed in parentheses, to yield just the items. + * + * Usage: |MOZ_UNWRAP_ARGS foo| (note the absence of parentheses in the + * invocation), where |foo| is a parenthesis-enclosed list. + * For exampe if |foo| is |(3, 4, 5)|, then the expansion is just |3, 4, 5|. + */ +#define MOZ_UNWRAP_ARGS(...) __VA_ARGS__ + +/** + * MOZ_DEFINE_ENUM(aEnumName, aEnumerators) is a macro that allows + * simultaneously defining an enumeration named |aEnumName|, and a constant + * that stores the number of enumerators it has. + * + * The motivation is to allow the enumeration to evolve over time without + * either having to manually keep such a constant up to date, or having to + * add a special "sentinel" enumerator for this purpose. 
(While adding a + * "sentinel" enumerator is trivial, it causes headaches with "switch" + * statements. We often try to write "switch" statements whose cases exhaust + * the enumerators and don't have a "default" case, so that if a new + * enumerator is added and we forget to handle it in the "switch", the + * compiler points it out. But this means we need to explicitly handle the + * sentinel in every "switch".) + * + * |aEnumerators| is expected to be a comma-separated list of enumerators, + * enclosed in parentheses. The enumerators may NOT have associated + * initializers (an attempt to have one will result in a compiler error). + * This ensures that the enumerator values are in the range [0, N), where N + * is the number of enumerators. + * + * The list of enumerators cannot contain a trailing comma. This is a + * limitation of MOZ_FOR_EACH, which we use in the implementation; if + * MOZ_FOR_EACH supported trailing commas, we could too. + * + * The generated constant has the name "k" + |aEnumName| + "Count", and type + * "size_t". The enumeration and the constant are both defined in the scope + * in which the macro is invoked. + * + * For convenience, a constant of the enumeration type named + * "kHighest" + |aEnumName| is also defined, whose value is the highest + * valid enumerator, assuming the enumerators have contiguous values starting + * from 0. + * + * Invocation of the macro may be followed by a semicolon, if one prefers a + * more declaration-like syntax. + * + * Example invocation: + * MOZ_DEFINE_ENUM(MyEnum, (Foo, Bar, Baz)); + * + * This expands to: + * enum MyEnum { Foo, Bar, Baz }; + * constexpr size_t kMyEnumCount = 3; + * constexpr MyEnum kHighestMyEnum = MyEnum(kMyEnumCount - 1); + * // some static_asserts to ensure the values are in the range [0, 3) + * + * The macro also has several variants: + * + * - A |_CLASS| variant, which generates an |enum class| instead of + * a plain enum. + * + * - A |_WITH_BASE| variant which generates an enum with a specified + * underlying ("base") type, which is provided as an additional + * argument in second position. + * + * - An |_AT_CLASS_SCOPE| variant, designed for enumerations defined + * at class scope. For these, the generated constants are static, + * and have names prefixed with "s" instead of "k" as per + * naming convention. + * + * (and combinations of these). + */ + +/* + * A helper macro for asserting that an enumerator does not have an initializer. + * + * The static_assert and the comparison are just scaffolding; the important + * part is forming the expression |aEnumName::aEnumeratorDecl|. + * + * If |aEnumeratorDecl| is just the enumerator name without an identifier, + * this expression compiles fine. However, if |aEnumeratorDecl| includes an + * initializer, as in |eEnumerator = initializer|, then this will fail to + * compile in expression context, since |eEnumerator| is not an lvalue. + * + * (The static_assert itself should always pass in the absence of the above + * error, since turning on a bit can only increase an integer value. It just + * provides a place to put the expression we want to form.) 
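+ *
+ * As a sketch of the mechanism (illustrative): given
+ *
+ *   MOZ_DEFINE_ENUM(MyEnum, (Foo, Bar));
+ *
+ * the check forms |int(MyEnum::Foo)| and |int(MyEnum::Bar)|, which compile,
+ * whereas
+ *
+ *   MOZ_DEFINE_ENUM(MyEnum, (Foo = 1, Bar));
+ *
+ * would form |int(MyEnum::Foo = 1)|, which fails to compile because the
+ * enumerator is not an lvalue that can be assigned to in expression context.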
+ */ + +#define MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER(aEnumName, aEnumeratorDecl) \ + static_assert( \ + int(aEnumName::aEnumeratorDecl) <= \ + (int(aEnumName::aEnumeratorDecl) | 1), \ + "MOZ_DEFINE_ENUM does not allow enumerators to have initializers"); + +#define MOZ_DEFINE_ENUM_IMPL(aEnumName, aClassSpec, aBaseSpec, aEnumerators) \ + enum aClassSpec aEnumName aBaseSpec{MOZ_UNWRAP_ARGS aEnumerators}; \ + constexpr size_t k##aEnumName##Count = MOZ_ARG_COUNT aEnumerators; \ + constexpr aEnumName kHighest##aEnumName = \ + aEnumName(k##aEnumName##Count - 1); \ + MOZ_FOR_EACH(MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER, (aEnumName, ), \ + aEnumerators) + +#define MOZ_DEFINE_ENUM(aEnumName, aEnumerators) \ + MOZ_DEFINE_ENUM_IMPL(aEnumName, , , aEnumerators) + +#define MOZ_DEFINE_ENUM_WITH_BASE(aEnumName, aBaseName, aEnumerators) \ + MOZ_DEFINE_ENUM_IMPL(aEnumName, , : aBaseName, aEnumerators) + +#define MOZ_DEFINE_ENUM_CLASS(aEnumName, aEnumerators) \ + MOZ_DEFINE_ENUM_IMPL(aEnumName, class, , aEnumerators) + +#define MOZ_DEFINE_ENUM_CLASS_WITH_BASE(aEnumName, aBaseName, aEnumerators) \ + MOZ_DEFINE_ENUM_IMPL(aEnumName, class, : aBaseName, aEnumerators) + +#define MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, aClassSpec, aBaseSpec, \ + aEnumerators) \ + enum aClassSpec aEnumName aBaseSpec{MOZ_UNWRAP_ARGS aEnumerators}; \ + constexpr static size_t s##aEnumName##Count = MOZ_ARG_COUNT aEnumerators; \ + constexpr static aEnumName sHighest##aEnumName = \ + aEnumName(s##aEnumName##Count - 1); \ + MOZ_FOR_EACH(MOZ_ASSERT_ENUMERATOR_HAS_NO_INITIALIZER, (aEnumName, ), \ + aEnumerators) + +#define MOZ_DEFINE_ENUM_AT_CLASS_SCOPE(aEnumName, aEnumerators) \ + MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, , , aEnumerators) + +#define MOZ_DEFINE_ENUM_WITH_BASE_AT_CLASS_SCOPE(aEnumName, aBaseName, \ + aEnumerators) \ + MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, , : aBaseName, aEnumerators) + +#define MOZ_DEFINE_ENUM_CLASS_AT_CLASS_SCOPE(aEnumName, aEnumerators) \ + MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, class, , aEnumerators) + +#define MOZ_DEFINE_ENUM_CLASS_WITH_BASE_AT_CLASS_SCOPE(aEnumName, aBaseName, \ + aEnumerators) \ + MOZ_DEFINE_ENUM_AT_CLASS_SCOPE_IMPL(aEnumName, class, \ + : aBaseName, aEnumerators) + +#endif // mozilla_DefineEnum_h diff --git a/mfbt/DoublyLinkedList.h b/mfbt/DoublyLinkedList.h new file mode 100644 index 0000000000..df178440d2 --- /dev/null +++ b/mfbt/DoublyLinkedList.h @@ -0,0 +1,578 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** A doubly-linked list with flexible next/prev naming. */ + +#ifndef mozilla_DoublyLinkedList_h +#define mozilla_DoublyLinkedList_h + +#include +#include +#include +#include + +#include "mozilla/Assertions.h" + +/** + * Where mozilla::LinkedList strives for ease of use above all other + * considerations, mozilla::DoublyLinkedList strives for flexibility. The + * following are things that can be done with mozilla::DoublyLinkedList that + * cannot be done with mozilla::LinkedList: + * + * * Arbitrary next/prev placement and naming. With the tools provided here, + * the next and previous pointers can be at the end of the structure, in a + * sub-structure, stored with a tag, in a union, wherever, as long as you + * can look them up and set them on demand. 
+ * * Can be used without deriving from a new base and, thus, does not require + * use of constructors. + * + * Example: + * + * class Observer : public DoublyLinkedListElement + * { + * public: + * void observe(char* aTopic) { ... } + * }; + * + * class ObserverContainer + * { + * private: + * DoublyLinkedList mList; + * + * public: + * void addObserver(Observer* aObserver) + * { + * // Will assert if |aObserver| is part of another list. + * mList.pushBack(aObserver); + * } + * + * void removeObserver(Observer* aObserver) + * { + * // Will assert if |aObserver| is not part of |list|. + * mList.remove(aObserver); + * } + * + * void notifyObservers(char* aTopic) + * { + * for (Observer* o : mList) { + * o->observe(aTopic); + * } + * } + * }; + */ + +namespace mozilla { + +/** + * Deriving from this will allow T to be inserted into and removed from a + * DoublyLinkedList. + */ +template +class DoublyLinkedListElement { + template + friend class DoublyLinkedList; + friend T; + T* mNext; + T* mPrev; + + public: + DoublyLinkedListElement() : mNext(nullptr), mPrev(nullptr) {} +}; + +/** + * Provides access to a DoublyLinkedListElement within T. + * + * The default implementation of this template works for types that derive + * from DoublyLinkedListElement, but one can specialize for their class so + * that some appropriate DoublyLinkedListElement reference is returned. + * + * For more complex cases (multiple DoublyLinkedListElements, for example), + * one can define their own trait class and use that as ElementAccess for + * DoublyLinkedList. See TestDoublyLinkedList.cpp for an example. + */ +template +struct GetDoublyLinkedListElement { + static_assert(std::is_base_of, T>::value, + "You need your own specialization of GetDoublyLinkedListElement" + " or use a separate Trait."); + static DoublyLinkedListElement& Get(T* aThis) { return *aThis; } +}; + +/** + * A doubly linked list. |T| is the type of element stored in this list. |T| + * must contain or have access to unique next and previous element pointers. + * The template argument |ElementAccess| provides code to tell this list how to + * get a reference to a DoublyLinkedListElement that may reside anywhere. + */ +template > +class DoublyLinkedList final { + T* mHead; + T* mTail; + + /** + * Checks that either the list is empty and both mHead and mTail are nullptr + * or the list has entries and both mHead and mTail are non-null. + */ + bool isStateValid() const { return (mHead != nullptr) == (mTail != nullptr); } + + bool ElementNotInList(T* aElm) { + if (!ElementAccess::Get(aElm).mNext && !ElementAccess::Get(aElm).mPrev) { + // Both mNext and mPrev being NULL can mean two things: + // - the element is not in the list. + // - the element is the first and only element in the list. + // So check for the latter. + return mHead != aElm; + } + return false; + } + + public: + DoublyLinkedList() : mHead(nullptr), mTail(nullptr) {} + + class Iterator final { + T* mCurrent; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + Iterator() : mCurrent(nullptr) {} + explicit Iterator(T* aCurrent) : mCurrent(aCurrent) {} + + T& operator*() const { return *mCurrent; } + T* operator->() const { return mCurrent; } + + Iterator& operator++() { + mCurrent = mCurrent ? 
ElementAccess::Get(mCurrent).mNext : nullptr; + return *this; + } + + Iterator operator++(int) { + Iterator result = *this; + ++(*this); + return result; + } + + Iterator& operator--() { + mCurrent = ElementAccess::Get(mCurrent).mPrev; + return *this; + } + + Iterator operator--(int) { + Iterator result = *this; + --(*this); + return result; + } + + bool operator!=(const Iterator& aOther) const { + return mCurrent != aOther.mCurrent; + } + + bool operator==(const Iterator& aOther) const { + return mCurrent == aOther.mCurrent; + } + + explicit operator bool() const { return mCurrent; } + }; + + Iterator begin() { return Iterator(mHead); } + const Iterator begin() const { return Iterator(mHead); } + const Iterator cbegin() const { return Iterator(mHead); } + + Iterator end() { return Iterator(); } + const Iterator end() const { return Iterator(); } + const Iterator cend() const { return Iterator(); } + + /** + * Returns true if the list contains no elements. + */ + bool isEmpty() const { + MOZ_ASSERT(isStateValid()); + return mHead == nullptr; + } + + /** + * Inserts aElm into the list at the head position. |aElm| must not already + * be in a list. + */ + void pushFront(T* aElm) { + MOZ_ASSERT(aElm); + MOZ_ASSERT(ElementNotInList(aElm)); + MOZ_ASSERT(isStateValid()); + + ElementAccess::Get(aElm).mNext = mHead; + if (mHead) { + MOZ_ASSERT(!ElementAccess::Get(mHead).mPrev); + ElementAccess::Get(mHead).mPrev = aElm; + } + + mHead = aElm; + if (!mTail) { + mTail = aElm; + } + } + + /** + * Remove the head of the list and return it. Calling this on an empty list + * will assert. + */ + T* popFront() { + MOZ_ASSERT(!isEmpty()); + MOZ_ASSERT(isStateValid()); + + T* result = mHead; + mHead = result ? ElementAccess::Get(result).mNext : nullptr; + if (mHead) { + ElementAccess::Get(mHead).mPrev = nullptr; + } + + if (mTail == result) { + mTail = nullptr; + } + + if (result) { + ElementAccess::Get(result).mNext = nullptr; + ElementAccess::Get(result).mPrev = nullptr; + } + + return result; + } + + /** + * Inserts aElm into the list at the tail position. |aElm| must not already + * be in a list. + */ + void pushBack(T* aElm) { + MOZ_ASSERT(aElm); + MOZ_ASSERT(ElementNotInList(aElm)); + MOZ_ASSERT(isStateValid()); + + ElementAccess::Get(aElm).mNext = nullptr; + ElementAccess::Get(aElm).mPrev = mTail; + if (mTail) { + MOZ_ASSERT(!ElementAccess::Get(mTail).mNext); + ElementAccess::Get(mTail).mNext = aElm; + } + + mTail = aElm; + if (!mHead) { + mHead = aElm; + } + } + + /** + * Remove the tail of the list and return it. Calling this on an empty list + * will assert. + */ + T* popBack() { + MOZ_ASSERT(!isEmpty()); + MOZ_ASSERT(isStateValid()); + + T* result = mTail; + mTail = result ? ElementAccess::Get(result).mPrev : nullptr; + if (mTail) { + ElementAccess::Get(mTail).mNext = nullptr; + } + + if (mHead == result) { + mHead = nullptr; + } + + if (result) { + ElementAccess::Get(result).mNext = nullptr; + ElementAccess::Get(result).mPrev = nullptr; + } + + return result; + } + + /** + * Insert the given |aElm| *before* |aIter|. 
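+   *
+   * For example (a usage sketch; |a|, |b| and |c| are Observer instances as
+   * in the class comment above):
+   *
+   *   DoublyLinkedList<Observer> list;
+   *   list.pushBack(&a);
+   *   list.pushBack(&c);
+   *   list.insertBefore(++list.begin(), &b);  // list is now: a, b, c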
+ */ + void insertBefore(const Iterator& aIter, T* aElm) { + MOZ_ASSERT(aElm); + MOZ_ASSERT(ElementNotInList(aElm)); + MOZ_ASSERT(isStateValid()); + + if (!aIter) { + return pushBack(aElm); + } else if (aIter == begin()) { + return pushFront(aElm); + } + + T* after = &(*aIter); + T* before = ElementAccess::Get(after).mPrev; + MOZ_ASSERT(before); + + ElementAccess::Get(before).mNext = aElm; + ElementAccess::Get(aElm).mPrev = before; + ElementAccess::Get(aElm).mNext = after; + ElementAccess::Get(after).mPrev = aElm; + } + + /** + * Removes the given element from the list. The element must be in this list. + */ + void remove(T* aElm) { + MOZ_ASSERT(aElm); + MOZ_ASSERT(ElementAccess::Get(aElm).mNext || + ElementAccess::Get(aElm).mPrev || + (aElm == mHead && aElm == mTail), + "Attempted to remove element not in this list"); + + if (T* prev = ElementAccess::Get(aElm).mPrev) { + ElementAccess::Get(prev).mNext = ElementAccess::Get(aElm).mNext; + } else { + MOZ_ASSERT(mHead == aElm); + mHead = ElementAccess::Get(aElm).mNext; + } + + if (T* next = ElementAccess::Get(aElm).mNext) { + ElementAccess::Get(next).mPrev = ElementAccess::Get(aElm).mPrev; + } else { + MOZ_ASSERT(mTail == aElm); + mTail = ElementAccess::Get(aElm).mPrev; + } + + ElementAccess::Get(aElm).mNext = nullptr; + ElementAccess::Get(aElm).mPrev = nullptr; + } + + /** + * Returns an iterator referencing the first found element whose value matches + * the given element according to operator==. + */ + Iterator find(const T& aElm) { return std::find(begin(), end(), aElm); } + + /** + * Returns whether the given element is in the list. Note that this uses + * T::operator==, not pointer comparison. + */ + bool contains(const T& aElm) { return find(aElm) != Iterator(); } + + /** + * Returns whether the given element might be in the list. Note that this + * assumes the element is either in the list or not in the list, and ignores + * the case where the element might be in another list in order to make the + * check fast. + */ + bool ElementProbablyInList(T* aElm) { + if (isEmpty()) { + return false; + } + return !ElementNotInList(aElm); + } +}; + +/** + * @brief Double linked list that allows insertion/removal during iteration. + * + * This class uses the mozilla::DoublyLinkedList internally and keeps + * track of created iterator instances by putting them on a simple list on stack + * (compare nsTAutoObserverArray). + * This allows insertion or removal operations to adjust iterators and therefore + * keeping them valid during iteration. + */ +template > +class SafeDoublyLinkedList { + public: + /** + * @brief Iterator class for SafeDoublyLinkedList. + * + * The iterator contains two iterators of the underlying list: + * - mCurrent points to the current list element of the iterator. + * - mNext points to the next element of the list. + * + * When removing an element from the list, mCurrent and mNext may + * be adjusted: + * - If mCurrent is the element to be deleted, it is set to empty. mNext can + * still be used to advance to the next element. + * - If mNext is the element to be deleted, it is set to its next element + * (or to empty if mNext is the last element of the list). 
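+ *
+ * A sketch of that adjustment (illustrative; |a| and |b| are the first two
+ * list elements):
+ *
+ *   auto it = list.begin();  // mCurrent -> a, mNext -> b
+ *   list.remove(&*it);       // a is removed: mCurrent becomes empty...
+ *   ++it;                    // ...and advancing via mNext lands on b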
+ */ + class SafeIterator { + using BaseIterator = typename DoublyLinkedList::Iterator; + friend class SafeDoublyLinkedList; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + + SafeIterator() = default; + SafeIterator(SafeIterator const& aOther) + : SafeIterator(aOther.mCurrent, aOther.mList) {} + + SafeIterator(BaseIterator aBaseIter, + SafeDoublyLinkedList* aList) + : mCurrent(aBaseIter), + mNext(aBaseIter ? ++aBaseIter : BaseIterator()), + mList(aList) { + if (mList) { + mNextIterator = mList->mIter; + mList->mIter = this; + } + } + ~SafeIterator() { + if (mList) { + MOZ_ASSERT(mList->mIter == this, + "Iterators must currently be destroyed in opposite order " + "from the construction order. It is suggested that you " + "simply put them on the stack"); + mList->mIter = mNextIterator; + } + } + + SafeIterator& operator++() { + mCurrent = mNext; + if (mNext) { + ++mNext; + } + return *this; + } + + pointer operator->() { return &*mCurrent; } + const_pointer operator->() const { return &*mCurrent; } + reference operator*() { return *mCurrent; } + const_reference operator*() const { return *mCurrent; } + + pointer current() { return mCurrent ? &*mCurrent : nullptr; } + const_pointer current() const { return mCurrent ? &*mCurrent : nullptr; } + + explicit operator bool() const { return bool(mCurrent); } + bool operator==(SafeIterator const& other) const { + return mCurrent == other.mCurrent; + } + bool operator!=(SafeIterator const& other) const { + return mCurrent != other.mCurrent; + } + + BaseIterator& next() { return mNext; } // mainly needed for unittests. + private: + /** + * Base list iterator pointing to the current list element of the iteration. + * If element mCurrent points to gets removed, the iterator will be set to + * empty. mNext keeps the iterator valid. + */ + BaseIterator mCurrent{nullptr}; + /** + * Base list iterator pointing to the next list element of the iteration. + * If element mCurrent points to gets removed, mNext is still valid. + * If element mNext points to gets removed, mNext advances, keeping this + * iterator valid. + */ + BaseIterator mNext{nullptr}; + + /** + * Next element in the stack-allocated list of iterators stored in the + * SafeLinkedList object. 
+ */ + SafeIterator* mNextIterator{nullptr}; + SafeDoublyLinkedList* mList{nullptr}; + + void setNext(T* aElm) { mNext = BaseIterator(aElm); } + void setCurrent(T* aElm) { mCurrent = BaseIterator(aElm); } + }; + + private: + using BaseListType = DoublyLinkedList; + friend class SafeIterator; + + public: + SafeDoublyLinkedList() = default; + + bool isEmpty() const { return mList.isEmpty(); } + bool contains(T* aElm) { + for (auto iter = mList.begin(); iter != mList.end(); ++iter) { + if (&*iter == aElm) { + return true; + } + } + return false; + } + + SafeIterator begin() { return SafeIterator(mList.begin(), this); } + SafeIterator begin() const { return SafeIterator(mList.begin(), this); } + SafeIterator cbegin() const { return begin(); } + + SafeIterator end() { return SafeIterator(); } + SafeIterator end() const { return SafeIterator(); } + SafeIterator cend() const { return SafeIterator(); } + + void pushFront(T* aElm) { mList.pushFront(aElm); } + + void pushBack(T* aElm) { + mList.pushBack(aElm); + auto* iter = mIter; + while (iter) { + if (!iter->mNext) { + iter->setNext(aElm); + } + iter = iter->mNextIterator; + } + } + + T* popFront() { + T* firstElm = mList.popFront(); + auto* iter = mIter; + while (iter) { + if (iter->current() == firstElm) { + iter->setCurrent(nullptr); + } + iter = iter->mNextIterator; + } + + return firstElm; + } + + T* popBack() { + T* lastElm = mList.popBack(); + auto* iter = mIter; + while (iter) { + if (iter->current() == lastElm) { + iter->setCurrent(nullptr); + } else if (iter->mNext && &*(iter->mNext) == lastElm) { + iter->setNext(nullptr); + } + iter = iter->mNextIterator; + } + + return lastElm; + } + + void remove(T* aElm) { + if (!mList.ElementProbablyInList(aElm)) { + return; + } + auto* iter = mIter; + while (iter) { + if (iter->mNext && &*(iter->mNext) == aElm) { + ++(iter->mNext); + } + if (iter->current() == aElm) { + iter->setCurrent(nullptr); + } + iter = iter->mNextIterator; + } + + mList.remove(aElm); + } + + private: + BaseListType mList; + SafeIterator* mIter{nullptr}; +}; + +} // namespace mozilla + +#endif // mozilla_DoublyLinkedList_h diff --git a/mfbt/EndianUtils.h b/mfbt/EndianUtils.h new file mode 100644 index 0000000000..b6f3e2c315 --- /dev/null +++ b/mfbt/EndianUtils.h @@ -0,0 +1,611 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Functions for reading and writing integers in various endiannesses. */ + +/* + * The classes LittleEndian and BigEndian expose static methods for + * reading and writing 16-, 32-, and 64-bit signed and unsigned integers + * in their respective endianness. The addresses read from or written + * to may be misaligned (although misaligned accesses may incur + * architecture-specific performance costs). The naming scheme is: + * + * {Little,Big}Endian::{read,write}{Uint,Int} + * + * For instance, LittleEndian::readInt32 will read a 32-bit signed + * integer from memory in little endian format. Similarly, + * BigEndian::writeUint16 will write a 16-bit unsigned integer to memory + * in big-endian format. + * + * The class NativeEndian exposes methods for conversion of existing + * data to and from the native endianness. These methods are intended + * for cases where data needs to be transferred, serialized, etc. 
+ * swap{To,From}{Little,Big}Endian byteswap a single value if necessary.
+ * Bulk conversion functions are also provided which optimize the
+ * no-conversion-needed case:
+ *
+ * - copyAndSwap{To,From}{Little,Big}Endian;
+ * - swap{To,From}{Little,Big}EndianInPlace.
+ *
+ * The *From* variants are intended to be used for reading data and the
+ * *To* variants for writing data.
+ *
+ * Methods on NativeEndian work with integer data of any type.
+ * Floating-point data is not supported.
+ *
+ * For clarity in networking code, "Network" may be used as a synonym
+ * for "Big" in any of the above methods or class names.
+ *
+ * As an example, reading a file format header whose fields are stored
+ * in big-endian format might look like:
+ *
+ *   class ExampleHeader
+ *   {
+ *    private:
+ *     uint32_t mMagic;
+ *     uint32_t mLength;
+ *     uint32_t mTotalRecords;
+ *     uint64_t mChecksum;
+ *
+ *    public:
+ *     ExampleHeader(const void* data)
+ *     {
+ *       const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ *       mMagic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *       mLength = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *       mTotalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *       mChecksum = BigEndian::readUint64(ptr);
+ *     }
+ *     ...
+ *   };
+ */
+
+#ifndef mozilla_EndianUtils_h
+#define mozilla_EndianUtils_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/DebugOnly.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#if defined(_MSC_VER)
+#  include <stdlib.h>
+#  pragma intrinsic(_byteswap_ushort)
+#  pragma intrinsic(_byteswap_ulong)
+#  pragma intrinsic(_byteswap_uint64)
+#endif
+
+/*
+ * Our supported compilers provide architecture-independent macros for this.
+ * Yes, there are more than two values for __BYTE_ORDER__.
+ */
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+    defined(__ORDER_BIG_ENDIAN__)
+#  if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#    define MOZ_LITTLE_ENDIAN() 1
+#    define MOZ_BIG_ENDIAN() 0
+#  elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#    define MOZ_LITTLE_ENDIAN() 0
+#    define MOZ_BIG_ENDIAN() 1
+#  else
+#    error "Can't handle mixed-endian architectures"
+#  endif
+#else
+#  error "Don't know how to determine endianness"
+#endif
+
+#if defined(__clang__)
+#  if __has_builtin(__builtin_bswap16)
+#    define MOZ_HAVE_BUILTIN_BYTESWAP16 __builtin_bswap16
+#  endif
+#elif defined(__GNUC__)
+#  define MOZ_HAVE_BUILTIN_BYTESWAP16 __builtin_bswap16
+#elif defined(_MSC_VER)
+#  define MOZ_HAVE_BUILTIN_BYTESWAP16 _byteswap_ushort
+#endif
+
+namespace mozilla {
+
+namespace detail {
+
+/*
+ * We need wrappers here because free functions with default template
+ * arguments and/or partial specialization of function templates are not
+ * supported by all the compilers we use.
+ */
+template <typename T, size_t Size = sizeof(T)>
+struct Swapper;
+
+template <typename T>
+struct Swapper<T, 2> {
+  static T swap(T aValue) {
+#if defined(MOZ_HAVE_BUILTIN_BYTESWAP16)
+    return MOZ_HAVE_BUILTIN_BYTESWAP16(aValue);
+#else
+    return T(((aValue & 0x00ff) << 8) | ((aValue & 0xff00) >> 8));
+#endif
+  }
+};
+
+template <typename T>
+struct Swapper<T, 4> {
+  static T swap(T aValue) {
+#if defined(__clang__) || defined(__GNUC__)
+    return T(__builtin_bswap32(aValue));
+#elif defined(_MSC_VER)
+    return T(_byteswap_ulong(aValue));
+#else
+    return T(((aValue & 0x000000ffU) << 24) | ((aValue & 0x0000ff00U) << 8) |
+             ((aValue & 0x00ff0000U) >> 8) | ((aValue & 0xff000000U) >> 24));
+#endif
+  }
+};
+
+template <typename T>
+struct Swapper<T, 8> {
+  static inline T swap(T aValue) {
+#if defined(__clang__) || defined(__GNUC__)
+    return T(__builtin_bswap64(aValue));
+#elif defined(_MSC_VER)
+    return T(_byteswap_uint64(aValue));
+#else
+    return T(((aValue & 0x00000000000000ffULL) << 56) |
+             ((aValue & 0x000000000000ff00ULL) << 40) |
+             ((aValue & 0x0000000000ff0000ULL) << 24) |
+             ((aValue & 0x00000000ff000000ULL) << 8) |
+             ((aValue & 0x000000ff00000000ULL) >> 8) |
+             ((aValue & 0x0000ff0000000000ULL) >> 24) |
+             ((aValue & 0x00ff000000000000ULL) >> 40) |
+             ((aValue & 0xff00000000000000ULL) >> 56));
+#endif
+  }
+};
+
+enum Endianness { Little, Big };
+
+#if MOZ_BIG_ENDIAN()
+#  define MOZ_NATIVE_ENDIANNESS detail::Big
+#else
+#  define MOZ_NATIVE_ENDIANNESS detail::Little
+#endif
+
+class EndianUtils {
+  /**
+   * Assert that the memory regions [aDest, aDest+aCount) and
+   * [aSrc, aSrc+aCount) do not overlap. aCount is given in bytes.
+   */
+  static void assertNoOverlap(const void* aDest, const void* aSrc,
+                              size_t aCount) {
+    DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(aDest);
+    DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+    MOZ_ASSERT(
+        (byteDestPtr <= byteSrcPtr && byteDestPtr + aCount <= byteSrcPtr) ||
+        (byteSrcPtr <= byteDestPtr && byteSrcPtr + aCount <= byteDestPtr));
+  }
+
+  template <typename T>
+  static void assertAligned(T* aPtr) {
+    MOZ_ASSERT((uintptr_t(aPtr) % sizeof(T)) == 0, "Unaligned pointer!");
+  }
+
+ protected:
+  /**
+   * Return |aValue| converted from SourceEndian encoding to DestEndian
+   * encoding.
+   */
+  template <Endianness SourceEndian, Endianness DestEndian, typename T>
+  static inline T maybeSwap(T aValue) {
+    if (SourceEndian == DestEndian) {
+      return aValue;
+    }
+    return Swapper<T>::swap(aValue);
+  }
+
+  /**
+   * Convert |aCount| elements at |aPtr| from SourceEndian encoding to
+   * DestEndian encoding.
+   */
+  template <Endianness SourceEndian, Endianness DestEndian, typename T>
+  static inline void maybeSwapInPlace(T* aPtr, size_t aCount) {
+    assertAligned(aPtr);
+
+    if (SourceEndian == DestEndian) {
+      return;
+    }
+    for (size_t i = 0; i < aCount; i++) {
+      aPtr[i] = Swapper<T>::swap(aPtr[i]);
+    }
+  }
+
+  /**
+   * Write |aCount| elements to the unaligned address |aDest| in DestEndian
+   * format, using elements found at |aSrc| in SourceEndian format.
+   */
+  template <Endianness SourceEndian, Endianness DestEndian, typename T>
+  static void copyAndSwapTo(void* aDest, const T* aSrc, size_t aCount) {
+    assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+    assertAligned(aSrc);
+
+    if (SourceEndian == DestEndian) {
+      memcpy(aDest, aSrc, aCount * sizeof(T));
+      return;
+    }
+
+    uint8_t* byteDestPtr = static_cast<uint8_t*>(aDest);
+    for (size_t i = 0; i < aCount; ++i) {
+      union {
+        T mVal;
+        uint8_t mBuffer[sizeof(T)];
+      } u;
+      u.mVal = maybeSwap<SourceEndian, DestEndian>(aSrc[i]);
+      memcpy(byteDestPtr, u.mBuffer, sizeof(T));
+      byteDestPtr += sizeof(T);
+    }
+  }
+
+  /**
+   * Write |aCount| elements to |aDest| in DestEndian format, using elements
+   * found at the unaligned address |aSrc| in SourceEndian format.
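+   *
+   * The call shape, for illustration (a sketch; these protected helpers are
+   * normally reached through NativeEndian and friends, and |wireBytes| is a
+   * hypothetical source buffer):
+   *
+   *   uint32_t values[4];
+   *   copyAndSwapFrom<Big, MOZ_NATIVE_ENDIANNESS>(values, wireBytes, 4);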
+   */
+  template <Endianness SourceEndian, Endianness DestEndian, typename T>
+  static void copyAndSwapFrom(T* aDest, const void* aSrc, size_t aCount) {
+    assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+    assertAligned(aDest);
+
+    if (SourceEndian == DestEndian) {
+      memcpy(aDest, aSrc, aCount * sizeof(T));
+      return;
+    }
+
+    const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+    for (size_t i = 0; i < aCount; ++i) {
+      union {
+        T mVal;
+        uint8_t mBuffer[sizeof(T)];
+      } u;
+      memcpy(u.mBuffer, byteSrcPtr, sizeof(T));
+      aDest[i] = maybeSwap<SourceEndian, DestEndian>(u.mVal);
+      byteSrcPtr += sizeof(T);
+    }
+  }
+};
+
+template <Endianness ThisEndian>
+class Endian : private EndianUtils {
+ protected:
+  /** Read a uint16_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static uint16_t readUint16(const void* aPtr) {
+    return read<uint16_t>(aPtr);
+  }
+
+  /** Read a uint32_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static uint32_t readUint32(const void* aPtr) {
+    return read<uint32_t>(aPtr);
+  }
+
+  /** Read a uint64_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static uint64_t readUint64(const void* aPtr) {
+    return read<uint64_t>(aPtr);
+  }
+
+  /** Read a uintptr_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static uintptr_t readUintptr(const void* aPtr) {
+    return read<uintptr_t>(aPtr);
+  }
+
+  /** Read an int16_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static int16_t readInt16(const void* aPtr) {
+    return read<int16_t>(aPtr);
+  }
+
+  /** Read an int32_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static int32_t readInt32(const void* aPtr) {
+    return read<int32_t>(aPtr);
+  }
+
+  /** Read an int64_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static int64_t readInt64(const void* aPtr) {
+    return read<int64_t>(aPtr);
+  }
+
+  /** Read an intptr_t in ThisEndian endianness from |aPtr| and return it. */
+  [[nodiscard]] static intptr_t readIntptr(const void* aPtr) {
+    return read<intptr_t>(aPtr);
+  }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint16(void* aPtr, uint16_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint32(void* aPtr, uint32_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint64(void* aPtr, uint64_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUintptr(void* aPtr, uintptr_t aValue) {
+    write(aPtr, aValue);
+  }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt16(void* aPtr, int16_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt32(void* aPtr, int32_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt64(void* aPtr, int64_t aValue) { write(aPtr, aValue); }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeIntptr(void* aPtr, intptr_t aValue) { write(aPtr, aValue); }
+
+  /*
+   * Converts a value of type T to little-endian format.
+   *
+   * This function is intended for cases where you have data in your
+   * native-endian format and you need it to appear in little-endian
+   * format for transmission.
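+   *
+   * For instance (an illustrative sketch, not code from this header):
+   *
+   *   uint32_t length = 500;
+   *   uint32_t leLength = NativeEndian::swapToLittleEndian(length);
+   *   // |leLength| can now be copied byte-for-byte into a little-endian
+   *   // wire format.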
+   */
+  template <typename T>
+  [[nodiscard]] static T swapToLittleEndian(T aValue) {
+    return maybeSwap<ThisEndian, Little>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to little-endian format if ThisEndian is Big. |aSrc| as a typed
+   * pointer must be aligned; |aDest| need not be.
+   *
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template <typename T>
+  static void copyAndSwapToLittleEndian(void* aDest, const T* aSrc,
+                                        size_t aCount) {
+    copyAndSwapTo<ThisEndian, Little>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template <typename T>
+  static void swapToLittleEndianInPlace(T* aPtr, size_t aCount) {
+    maybeSwapInPlace<ThisEndian, Little>(aPtr, aCount);
+  }
+
+  /*
+   * Converts a value of type T to big-endian format.
+   */
+  template <typename T>
+  [[nodiscard]] static T swapToBigEndian(T aValue) {
+    return maybeSwap<ThisEndian, Big>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to big-endian format if ThisEndian is Little. |aSrc| as a typed
+   * pointer must be aligned; |aDest| need not be.
+   *
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template <typename T>
+  static void copyAndSwapToBigEndian(void* aDest, const T* aSrc,
+                                     size_t aCount) {
+    copyAndSwapTo<ThisEndian, Big>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template <typename T>
+  static void swapToBigEndianInPlace(T* aPtr, size_t aCount) {
+    maybeSwapInPlace<ThisEndian, Big>(aPtr, aCount);
+  }
+
+  /*
+   * Synonyms for the big-endian functions, for better readability
+   * in network code.
+   */
+
+  template <typename T>
+  [[nodiscard]] static T swapToNetworkOrder(T aValue) {
+    return swapToBigEndian(aValue);
+  }
+
+  template <typename T>
+  static void copyAndSwapToNetworkOrder(void* aDest, const T* aSrc,
+                                        size_t aCount) {
+    copyAndSwapToBigEndian(aDest, aSrc, aCount);
+  }
+
+  template <typename T>
+  static void swapToNetworkOrderInPlace(T* aPtr, size_t aCount) {
+    swapToBigEndianInPlace(aPtr, aCount);
+  }
+
+  /*
+   * Converts a value of type T from little-endian format.
+   */
+  template <typename T>
+  [[nodiscard]] static T swapFromLittleEndian(T aValue) {
+    return maybeSwap<Little, ThisEndian>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them from little-endian format if ThisEndian is Big. |aDest| as a typed
+   * pointer must be aligned; |aSrc| need not be.
+   *
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template <typename T>
+  static void copyAndSwapFromLittleEndian(T* aDest, const void* aSrc,
+                                          size_t aCount) {
+    copyAndSwapFrom<Little, ThisEndian>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template <typename T>
+  static void swapFromLittleEndianInPlace(T* aPtr, size_t aCount) {
+    maybeSwapInPlace<Little, ThisEndian>(aPtr, aCount);
+  }
+
+  /*
+   * Converts a value of type T from big-endian format.
+   */
+  template <typename T>
+  [[nodiscard]] static T swapFromBigEndian(T aValue) {
+    return maybeSwap<Big, ThisEndian>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them from big-endian format if ThisEndian is Little. |aDest| as a typed
+   * pointer must be aligned; |aSrc| need not be.
+   *
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template <typename T>
+  static void copyAndSwapFromBigEndian(T* aDest, const void* aSrc,
+                                       size_t aCount) {
+    copyAndSwapFrom<Big, ThisEndian>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template <typename T>
+  static void swapFromBigEndianInPlace(T* aPtr, size_t aCount) {
+    maybeSwapInPlace<Big, ThisEndian>(aPtr, aCount);
+  }
+
+  /*
+   * Synonyms for the big-endian functions, for better readability
+   * in network code.
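+   *
+   * For example (an illustrative sketch; |packet| is hypothetical):
+   *
+   *   uint16_t wirePort;
+   *   memcpy(&wirePort, packet, sizeof(wirePort));
+   *   uint16_t port = NativeEndian::swapFromNetworkOrder(wirePort);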
+   */
+  template <typename T>
+  [[nodiscard]] static T swapFromNetworkOrder(T aValue) {
+    return swapFromBigEndian(aValue);
+  }
+
+  template <typename T>
+  static void copyAndSwapFromNetworkOrder(T* aDest, const void* aSrc,
+                                          size_t aCount) {
+    copyAndSwapFromBigEndian(aDest, aSrc, aCount);
+  }
+
+  template <typename T>
+  static void swapFromNetworkOrderInPlace(T* aPtr, size_t aCount) {
+    swapFromBigEndianInPlace(aPtr, aCount);
+  }
+
+ private:
+  /**
+   * Read a value of type T, encoded in endianness ThisEndian from |aPtr|.
+   * Return that value encoded in native endianness.
+   */
+  template <typename T>
+  static T read(const void* aPtr) {
+    union {
+      T mVal;
+      uint8_t mBuffer[sizeof(T)];
+    } u;
+    memcpy(u.mBuffer, aPtr, sizeof(T));
+    return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.mVal);
+  }
+
+  /**
+   * Write a value of type T, in native endianness, to |aPtr|, in ThisEndian
+   * endianness.
+   */
+  template <typename T>
+  static void write(void* aPtr, T aValue) {
+    T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(aValue);
+    memcpy(aPtr, &tmp, sizeof(T));
+  }
+
+  Endian() = delete;
+  Endian(const Endian& aOther) = delete;
+  void operator=(const Endian& aOther) = delete;
+};
+
+template <Endianness ThisEndian>
+class EndianReadWrite : public Endian<ThisEndian> {
+ private:
+  typedef Endian<ThisEndian> super;
+
+ public:
+  using super::readInt16;
+  using super::readInt32;
+  using super::readInt64;
+  using super::readIntptr;
+  using super::readUint16;
+  using super::readUint32;
+  using super::readUint64;
+  using super::readUintptr;
+  using super::writeInt16;
+  using super::writeInt32;
+  using super::writeInt64;
+  using super::writeIntptr;
+  using super::writeUint16;
+  using super::writeUint32;
+  using super::writeUint64;
+  using super::writeUintptr;
+};
+
+} /* namespace detail */
+
+class LittleEndian final : public detail::EndianReadWrite<detail::Little> {};
+
+class BigEndian final : public detail::EndianReadWrite<detail::Big> {};
+
+typedef BigEndian NetworkEndian;
+
+class NativeEndian final : public detail::Endian<MOZ_NATIVE_ENDIANNESS> {
+ private:
+  typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
+
+ public:
+  /*
+   * These functions are intended for cases where you have data in your
+   * native-endian format and you need the data to appear in the appropriate
+   * endianness for transmission, serialization, etc.
+   */
+  using super::copyAndSwapToBigEndian;
+  using super::copyAndSwapToLittleEndian;
+  using super::copyAndSwapToNetworkOrder;
+  using super::swapToBigEndian;
+  using super::swapToBigEndianInPlace;
+  using super::swapToLittleEndian;
+  using super::swapToLittleEndianInPlace;
+  using super::swapToNetworkOrder;
+  using super::swapToNetworkOrderInPlace;
+
+  /*
+   * These functions are intended for cases where you have data in the
+   * given endianness (e.g. reading from disk or a file-format) and you
+   * need the data to appear in native-endian format for processing.
+   */
+  using super::copyAndSwapFromBigEndian;
+  using super::copyAndSwapFromLittleEndian;
+  using super::copyAndSwapFromNetworkOrder;
+  using super::swapFromBigEndian;
+  using super::swapFromBigEndianInPlace;
+  using super::swapFromLittleEndian;
+  using super::swapFromLittleEndianInPlace;
+  using super::swapFromNetworkOrder;
+  using super::swapFromNetworkOrderInPlace;
+};
+
+#undef MOZ_NATIVE_ENDIANNESS
+
+} /* namespace mozilla */
+
+#endif /* mozilla_EndianUtils_h */
diff --git a/mfbt/EnumSet.h b/mfbt/EnumSet.h
new file mode 100644
index 0000000000..f7765c6f5c
--- /dev/null
+++ b/mfbt/EnumSet.h
@@ -0,0 +1,340 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A set abstraction for enumeration values. */
+
+#ifndef mozilla_EnumSet_h
+#define mozilla_EnumSet_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <initializer_list>
+#include <type_traits>
+
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * EnumSet<T, U> is a set of values defined by an enumeration. It is
+ * implemented using a bit mask with the size of U for each value. It works
+ * both for enum and enum class types. EnumSet also works with U being a
+ * BitSet.
+ */
+template <typename T, typename Serialized = typename std::make_unsigned<
+                          typename std::underlying_type<T>::type>::type>
+class EnumSet {
+ public:
+  using valueType = T;
+  using serializedType = Serialized;
+
+  constexpr EnumSet() : mBitField() {}
+
+  constexpr MOZ_IMPLICIT EnumSet(T aEnum) : mBitField(bitFor(aEnum)) {}
+
+  constexpr EnumSet(T aEnum1, T aEnum2)
+      : mBitField(bitFor(aEnum1) | bitFor(aEnum2)) {}
+
+  constexpr EnumSet(T aEnum1, T aEnum2, T aEnum3)
+      : mBitField(bitFor(aEnum1) | bitFor(aEnum2) | bitFor(aEnum3)) {}
+
+  constexpr EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
+      : mBitField(bitFor(aEnum1) | bitFor(aEnum2) | bitFor(aEnum3) |
+                  bitFor(aEnum4)) {}
+
+  constexpr MOZ_IMPLICIT EnumSet(std::initializer_list<T> list) : mBitField() {
+    for (auto value : list) {
+      (*this) += value;
+    }
+  }
+
+#ifdef DEBUG
+  constexpr EnumSet(const EnumSet& aEnumSet) : mBitField(aEnumSet.mBitField) {}
+
+  constexpr EnumSet& operator=(const EnumSet& aEnumSet) {
+    mBitField = aEnumSet.mBitField;
+    incVersion();
+    return *this;
+  }
+#endif
+
+  /**
+   * Add an element
+   */
+  constexpr void operator+=(T aEnum) {
+    incVersion();
+    mBitField |= bitFor(aEnum);
+  }
+
+  /**
+   * Add an element
+   */
+  constexpr EnumSet operator+(T aEnum) const {
+    EnumSet result(*this);
+    result += aEnum;
+    return result;
+  }
+
+  /**
+   * Union
+   */
+  void operator+=(const EnumSet& aEnumSet) {
+    incVersion();
+    mBitField |= aEnumSet.mBitField;
+  }
+
+  /**
+   * Union
+   */
+  EnumSet operator+(const EnumSet& aEnumSet) const {
+    EnumSet result(*this);
+    result += aEnumSet;
+    return result;
+  }
+
+  /**
+   * Remove an element
+   */
+  void operator-=(T aEnum) {
+    incVersion();
+    mBitField &= ~(bitFor(aEnum));
+  }
+
+  /**
+   * Remove an element
+   */
+  EnumSet operator-(T aEnum) const {
+    EnumSet result(*this);
+    result -= aEnum;
+    return result;
+  }
+
+  /**
+   * Remove a set of elements
+   */
+  void operator-=(const EnumSet& aEnumSet) {
+    incVersion();
+    mBitField &= ~(aEnumSet.mBitField);
+  }
+
+  /**
+   * Remove a set of elements
+   */
+  EnumSet operator-(const EnumSet& aEnumSet) const {
+    EnumSet result(*this);
+    result -= aEnumSet;
+    return result;
+  }
+
+  /**
+   * Clear
+   */
+  void clear() {
+    incVersion();
+    mBitField = Serialized();
+  }
+
+  /**
+   * Intersection
+   */
+  void operator&=(const EnumSet& aEnumSet) {
+    incVersion();
+    mBitField &= aEnumSet.mBitField;
+  }
+
+  /**
+   * Intersection
+   */
+  EnumSet operator&(const EnumSet& aEnumSet) const {
+    EnumSet result(*this);
+    result &= aEnumSet;
+    return result;
+  }
+
+  /**
+   * Equality
+   */
+  bool operator==(const EnumSet& aEnumSet) const {
+    return mBitField == aEnumSet.mBitField;
+  }
+
+  /**
+   * Equality
+   */
+  bool operator==(T aEnum) const { return mBitField == bitFor(aEnum); }
+
+  /**
+   * Not equal
+   */
+  bool operator!=(const EnumSet& aEnumSet) const {
+    return !operator==(aEnumSet);
+  }
+
+  /**
+   * Not equal
+   */
+  bool operator!=(T aEnum) const { return !operator==(aEnum); }
+
+  /**
+   * Test if an element is contained in the set.
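+   *
+   * For example (an illustrative sketch):
+   *
+   *   enum class Color { Red, Green, Blue };
+   *   EnumSet<Color> colors(Color::Red, Color::Blue);
+   *   colors.contains(Color::Red);    // true
+   *   colors.contains(Color::Green);  // false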
+   */
+  bool contains(T aEnum) const {
+    return static_cast<bool>(mBitField & bitFor(aEnum));
+  }
+
+  /**
+   * Test if a set is contained in the set.
+   */
+  bool contains(const EnumSet& aEnumSet) const {
+    return (mBitField & aEnumSet.mBitField) == aEnumSet.mBitField;
+  }
+
+  /**
+   * Return the number of elements in the set.
+   */
+  size_t size() const {
+    if constexpr (std::is_unsigned_v<Serialized>) {
+      if constexpr (kMaxBits > 32) {
+        return CountPopulation64(mBitField);
+      } else {
+        return CountPopulation32(mBitField);
+      }
+    } else {
+      return mBitField.Count();
+    }
+  }
+
+  bool isEmpty() const {
+    if constexpr (std::is_unsigned_v<Serialized>) {
+      return mBitField == 0;
+    } else {
+      return mBitField.IsEmpty();
+    }
+  }
+
+  Serialized serialize() const { return mBitField; }
+
+  void deserialize(Serialized aValue) {
+    incVersion();
+    mBitField = aValue;
+  }
+
+  class ConstIterator {
+    const EnumSet* mSet;
+    uint32_t mPos;
+#ifdef DEBUG
+    uint64_t mVersion;
+#endif
+
+    void checkVersion() const {
+      // Check that the set has not been modified while being iterated.
+      MOZ_ASSERT_IF(mSet, mSet->mVersion == mVersion);
+    }
+
+   public:
+    ConstIterator(const EnumSet& aSet, uint32_t aPos)
+        : mSet(&aSet), mPos(aPos) {
+#ifdef DEBUG
+      mVersion = mSet->mVersion;
+#endif
+      MOZ_ASSERT(aPos <= kMaxBits);
+      if (aPos != kMaxBits && !mSet->contains(T(mPos))) {
+        ++*this;
+      }
+    }
+
+    ConstIterator(const ConstIterator& aOther)
+        : mSet(aOther.mSet), mPos(aOther.mPos) {
+#ifdef DEBUG
+      mVersion = aOther.mVersion;
+      checkVersion();
+#endif
+    }
+
+    ConstIterator(ConstIterator&& aOther)
+        : mSet(aOther.mSet), mPos(aOther.mPos) {
+#ifdef DEBUG
+      mVersion = aOther.mVersion;
+      checkVersion();
+#endif
+      aOther.mSet = nullptr;
+    }
+
+    ~ConstIterator() { checkVersion(); }
+
+    bool operator==(const ConstIterator& other) const {
+      MOZ_ASSERT(mSet == other.mSet);
+      checkVersion();
+      return mPos == other.mPos;
+    }
+
+    bool operator!=(const ConstIterator& other) const {
+      return !(*this == other);
+    }
+
+    T operator*() const {
+      MOZ_ASSERT(mSet);
+      MOZ_ASSERT(mPos < kMaxBits);
+      MOZ_ASSERT(mSet->contains(T(mPos)));
+      checkVersion();
+      return T(mPos);
+    }
+
+    ConstIterator& operator++() {
+      MOZ_ASSERT(mSet);
+      MOZ_ASSERT(mPos < kMaxBits);
+      checkVersion();
+      do {
+        mPos++;
+      } while (mPos < kMaxBits && !mSet->contains(T(mPos)));
+      return *this;
+    }
+  };
+
+  ConstIterator begin() const { return ConstIterator(*this, 0); }
+
+  ConstIterator end() const { return ConstIterator(*this, kMaxBits); }
+
+ private:
+  constexpr static Serialized bitFor(T aEnum) {
+    auto bitNumber = static_cast<size_t>(aEnum);
+    MOZ_DIAGNOSTIC_ASSERT(bitNumber < kMaxBits);
+    if constexpr (std::is_unsigned_v<Serialized>) {
+      return static_cast<Serialized>(Serialized{1} << bitNumber);
+    } else {
+      Serialized bitField;
+      bitField[bitNumber] = true;
+      return bitField;
+    }
+  }
+
+  constexpr void incVersion() {
+#ifdef DEBUG
+    mVersion++;
+#endif
+  }
+
+  static constexpr size_t MaxBits() {
+    if constexpr (std::is_unsigned_v<Serialized>) {
+      return sizeof(Serialized) * 8;
+    } else {
+      return Serialized::Size();
+    }
+  }
+
+  static constexpr size_t kMaxBits = MaxBits();
+
+  Serialized mBitField;
+
+#ifdef DEBUG
+  uint64_t mVersion = 0;
+#endif
+};
+
+} // namespace mozilla
+
+#endif /* mozilla_EnumSet_h */
diff --git a/mfbt/EnumTypeTraits.h b/mfbt/EnumTypeTraits.h
new file mode 100644
index 0000000000..528e1db8a7
--- /dev/null
+++ b/mfbt/EnumTypeTraits.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Type traits for enums. */
+
+#ifndef mozilla_EnumTypeTraits_h
+#define mozilla_EnumTypeTraits_h
+
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <size_t EnumSize, bool EnumSigned, size_t StorageSize,
+          bool StorageSigned>
+struct EnumFitsWithinHelper;
+
+// Signed enum, signed storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, true, StorageSize, true>
+    : public std::integral_constant<bool, (EnumSize <= StorageSize)> {};
+
+// Signed enum, unsigned storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, true, StorageSize, false>
+    : public std::integral_constant<bool, false> {};
+
+// Unsigned enum, signed storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, false, StorageSize, true>
+    : public std::integral_constant<bool, (EnumSize * 2 <= StorageSize)> {};
+
+// Unsigned enum, unsigned storage.
+template <size_t EnumSize, size_t StorageSize>
+struct EnumFitsWithinHelper<EnumSize, false, StorageSize, false>
+    : public std::integral_constant<bool, (EnumSize <= StorageSize)> {};
+
+} // namespace detail
+
+/*
+ * Type trait that determines whether the enum type T can fit within the
+ * integral type Storage without data loss. This trait should be used with
+ * caution with an enum type whose underlying type has not been explicitly
+ * specified: for such enums, the C++ implementation is free to choose a type
+ * no smaller than int whose range encompasses all possible values of the enum.
+ * So for an enum with only small non-negative values, the underlying type may
+ * be either int or unsigned int, depending on the whims of the implementation.
+ */
+template <typename T, typename Storage>
+struct EnumTypeFitsWithin
+    : public detail::EnumFitsWithinHelper<
+          sizeof(T),
+          std::is_signed<typename std::underlying_type<T>::type>::value,
+          sizeof(Storage), std::is_signed<Storage>::value> {
+  static_assert(std::is_enum<T>::value, "must provide an enum type");
+  static_assert(std::is_integral<Storage>::value,
+                "must provide an integral type");
+};
+
+/*
+ * Provides information about highest enum member value.
+ * Each specialization of struct MaxEnumValue should define
+ * "static constexpr unsigned int value".
+ *
+ * example:
+ *
+ *   enum ExampleEnum
+ *   {
+ *     CAT = 0,
+ *     DOG,
+ *     HAMSTER
+ *   };
+ *
+ *   template <>
+ *   struct MaxEnumValue<ExampleEnum>
+ *   {
+ *     static constexpr unsigned int value = static_cast<unsigned int>(HAMSTER);
+ *   };
+ */
+template <typename T>
+struct MaxEnumValue; // no need to define the primary template
+
+/**
+ * Get the underlying value of an enum, but typesafe.
+ *
+ * example:
+ *
+ *   enum class Pet : int16_t {
+ *     Cat,
+ *     Dog,
+ *     Fish
+ *   };
+ *   enum class Plant {
+ *     Flower,
+ *     Tree,
+ *     Vine
+ *   };
+ *   UnderlyingValue(Pet::Fish) -> int16_t(2)
+ *   UnderlyingValue(Plant::Tree) -> int(1)
+ */
+template <typename T>
+inline constexpr auto UnderlyingValue(const T v) {
+  static_assert(std::is_enum_v<T>);
+  return static_cast<typename std::underlying_type<T>::type>(v);
+}
+
+} // namespace mozilla
+
+#endif /* mozilla_EnumTypeTraits_h */
diff --git a/mfbt/EnumeratedArray.h b/mfbt/EnumeratedArray.h
new file mode 100644
index 0000000000..f6edff4875
--- /dev/null
+++ b/mfbt/EnumeratedArray.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* EnumeratedArray is like Array, but indexed by a typed enum. */
+
+#ifndef mozilla_EnumeratedArray_h
+#define mozilla_EnumeratedArray_h
+
+#include <utility>
+
+#include "mozilla/Array.h"
+
+namespace mozilla {
+
+/**
+ * EnumeratedArray is a fixed-size array container for use when an
+ * array is indexed by a specific enum class.
+ *
+ * This provides type safety by guarding at compile time against accidentally
+ * indexing such arrays with unrelated values. This also removes the need
+ * for manual casting when using a typed enum value to index arrays.
+ *
+ * Aside from the typing of indices, EnumeratedArray is similar to Array.
+ *
+ * Example:
+ *
+ *   enum class AnimalSpecies {
+ *     Cow,
+ *     Sheep,
+ *     Count
+ *   };
+ *
+ *   EnumeratedArray<AnimalSpecies, AnimalSpecies::Count, int> headCount;
+ *
+ *   headCount[AnimalSpecies::Cow] = 17;
+ *   headCount[AnimalSpecies::Sheep] = 30;
+ *
+ */
+template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+class EnumeratedArray {
+ public:
+  static const size_t kSize = size_t(SizeAsEnumValue);
+
+ private:
+  typedef Array<ValueType, kSize> ArrayType;
+
+  ArrayType mArray;
+
+ public:
+  EnumeratedArray() = default;
+
+  template <typename... Args>
+  MOZ_IMPLICIT constexpr EnumeratedArray(Args&&... aArgs)
+      : mArray{std::forward<Args>(aArgs)...} {}
+
+  ValueType& operator[](IndexType aIndex) { return mArray[size_t(aIndex)]; }
+
+  const ValueType& operator[](IndexType aIndex) const {
+    return mArray[size_t(aIndex)];
+  }
+
+  typedef typename ArrayType::iterator iterator;
+  typedef typename ArrayType::const_iterator const_iterator;
+  typedef typename ArrayType::reverse_iterator reverse_iterator;
+  typedef typename ArrayType::const_reverse_iterator const_reverse_iterator;
+
+  // Methods for range-based for loops.
+  iterator begin() { return mArray.begin(); }
+  const_iterator begin() const { return mArray.begin(); }
+  const_iterator cbegin() const { return mArray.cbegin(); }
+  iterator end() { return mArray.end(); }
+  const_iterator end() const { return mArray.end(); }
+  const_iterator cend() const { return mArray.cend(); }
+
+  // Methods for reverse iterating.
+  reverse_iterator rbegin() { return mArray.rbegin(); }
+  const_reverse_iterator rbegin() const { return mArray.rbegin(); }
+  const_reverse_iterator crbegin() const { return mArray.crbegin(); }
+  reverse_iterator rend() { return mArray.rend(); }
+  const_reverse_iterator rend() const { return mArray.rend(); }
+  const_reverse_iterator crend() const { return mArray.crend(); }
+};
+
+} // namespace mozilla
+
+#endif // mozilla_EnumeratedArray_h
diff --git a/mfbt/EnumeratedRange.h b/mfbt/EnumeratedRange.h
new file mode 100644
index 0000000000..74d9592392
--- /dev/null
+++ b/mfbt/EnumeratedRange.h
@@ -0,0 +1,206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Iterator over contiguous enum values */
+
+/*
+ * Implements generator functions that create a range to iterate over the
+ * values of a scoped or unscoped enum. Unlike IntegerRange, which can only
+ * function on the underlying integral type, the elements of the generated
+ * sequence will have the type of the enum in question.
+ *
+ * Note that the enum values should be contiguous in the iterated range;
+ * unfortunately there exists no way for EnumeratedRange to enforce this
+ * either dynamically or at compile time.
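+ *
+ * For example (an illustrative sketch; |Handle| is hypothetical):
+ *
+ *   enum class Direction { North, East, South, West };
+ *
+ *   // Visits North, East, and South; the end value is exclusive.
+ *   for (Direction d : MakeEnumeratedRange(Direction::North,
+ *                                          Direction::West)) {
+ *     Handle(d);
+ *   }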
+ */
+
+#ifndef mozilla_EnumeratedRange_h
+#define mozilla_EnumeratedRange_h
+
+#include <limits>
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ReverseIterator.h"
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename EnumTypeT>
+class EnumeratedIterator {
+ public:
+  typedef typename std::underlying_type<EnumTypeT>::type IntTypeT;
+
+  template <typename EnumType>
+  constexpr explicit EnumeratedIterator(EnumType aCurrent)
+      : mCurrent(aCurrent) {}
+
+  template <typename EnumType>
+  explicit EnumeratedIterator(const EnumeratedIterator<EnumType>& aOther)
+      : mCurrent(aOther.mCurrent) {}
+
+  EnumTypeT operator*() const { return mCurrent; }
+
+  /* Increment and decrement operators */
+
+  EnumeratedIterator& operator++() {
+    mCurrent = EnumTypeT(IntTypeT(mCurrent) + IntTypeT(1));
+    return *this;
+  }
+  EnumeratedIterator& operator--() {
+    mCurrent = EnumTypeT(IntTypeT(mCurrent) - IntTypeT(1));
+    return *this;
+  }
+  EnumeratedIterator operator++(int) {
+    auto ret = *this;
+    mCurrent = EnumTypeT(IntTypeT(mCurrent) + IntTypeT(1));
+    return ret;
+  }
+  EnumeratedIterator operator--(int) {
+    auto ret = *this;
+    mCurrent = EnumTypeT(IntTypeT(mCurrent) - IntTypeT(1));
+    return ret;
+  }
+
+  /* Comparison operators */
+
+  template <typename T>
+  friend bool operator==(const EnumeratedIterator<T>& aIter1,
+                         const EnumeratedIterator<T>& aIter2);
+  template <typename T>
+  friend bool operator!=(const EnumeratedIterator<T>& aIter1,
+                         const EnumeratedIterator<T>& aIter2);
+  template <typename T>
+  friend bool operator<(const EnumeratedIterator<T>& aIter1,
+                        const EnumeratedIterator<T>& aIter2);
+  template <typename T>
+  friend bool operator<=(const EnumeratedIterator<T>& aIter1,
+                         const EnumeratedIterator<T>& aIter2);
+  template <typename T>
+  friend bool operator>(const EnumeratedIterator<T>& aIter1,
+                        const EnumeratedIterator<T>& aIter2);
+  template <typename T>
+  friend bool operator>=(const EnumeratedIterator<T>& aIter1,
+                         const EnumeratedIterator<T>& aIter2);
+
+ private:
+  EnumTypeT mCurrent;
+};
+
+template <typename EnumTypeT>
+bool operator==(const EnumeratedIterator<EnumTypeT>& aIter1,
+                const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent == aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+bool operator!=(const EnumeratedIterator<EnumTypeT>& aIter1,
+                const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent != aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+bool operator<(const EnumeratedIterator<EnumTypeT>& aIter1,
+               const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent < aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+bool operator<=(const EnumeratedIterator<EnumTypeT>& aIter1,
+                const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent <= aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+bool operator>(const EnumeratedIterator<EnumTypeT>& aIter1,
+               const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent > aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+bool operator>=(const EnumeratedIterator<EnumTypeT>& aIter1,
+                const EnumeratedIterator<EnumTypeT>& aIter2) {
+  return aIter1.mCurrent >= aIter2.mCurrent;
+}
+
+template <typename EnumTypeT>
+class EnumeratedRange {
+ public:
+  typedef EnumeratedIterator<EnumTypeT> iterator;
+  typedef EnumeratedIterator<EnumTypeT> const_iterator;
+  typedef ReverseIterator<iterator> reverse_iterator;
+  typedef ReverseIterator<const_iterator> const_reverse_iterator;
+
+  template <typename EnumType>
+  constexpr EnumeratedRange(EnumType aBegin, EnumType aEnd)
+      : mBegin(aBegin), mEnd(aEnd) {}
+
+  iterator begin() const { return iterator(mBegin); }
+  const_iterator cbegin() const { return begin(); }
+  iterator end() const { return iterator(mEnd); }
+  const_iterator cend() const { return end(); }
+  reverse_iterator rbegin() const { return reverse_iterator(mEnd); }
+  const_reverse_iterator crbegin() const { return rbegin(); }
+  reverse_iterator rend() const { return reverse_iterator(mBegin); }
+  const_reverse_iterator crend() const { return rend(); }
+
+ private:
+  EnumTypeT mBegin;
+  EnumTypeT mEnd;
+};
+
+} // namespace detail
+
+#ifdef __GNUC__
+// Enums can have an unsigned underlying type, which makes some of the
+// comparisons below always true or always false. Temporarily disable
+// -Wtype-limits to avoid breaking -Werror builds.
+#  pragma GCC diagnostic push
+#  pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+
+// Create a range to iterate from aBegin to aEnd, exclusive.
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeEnumeratedRange(
+    EnumType aBegin, EnumType aEnd) {
+  MOZ_ASSERT(aBegin <= aEnd, "Cannot generate invalid, unbounded range!");
+  return detail::EnumeratedRange<EnumType>(aBegin, aEnd);
+}
+
+// Create a range to iterate from EnumType(0) to aEnd, exclusive. EnumType(0)
+// should exist, but note that there is no way for us to ensure that it does!
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeEnumeratedRange(
+    EnumType aEnd) {
+  return MakeEnumeratedRange(EnumType(0), aEnd);
+}
+
+// Create a range to iterate from aBegin to aEnd, inclusive.
+//
+// NOTE: This internally constructs a value that is one past `aEnd`, so the
+// enumeration needs to either have a fixed underlying type, or `aEnd + 1` must
+// be inside the range of the enumeration, in order to not be undefined
+// behavior.
+//
+// See bug 1614512.
+template <typename EnumType>
+constexpr detail::EnumeratedRange<EnumType> MakeInclusiveEnumeratedRange(
+    EnumType aBegin, EnumType aEnd) {
+  using EnumUnderlyingType = typename std::underlying_type_t<EnumType>;
+  const auto end = static_cast<EnumUnderlyingType>(aEnd);
+
+  MOZ_ASSERT(end != std::numeric_limits<EnumUnderlyingType>::max(),
+             "aEnd shouldn't overflow!");
+  return MakeEnumeratedRange(aBegin, static_cast<EnumType>(end + 1));
+}
+
+#ifdef __GNUC__
+#  pragma GCC diagnostic pop
+#endif
+
+} // namespace mozilla
+
+#endif // mozilla_EnumeratedRange_h
diff --git a/mfbt/FStream.h b/mfbt/FStream.h
new file mode 100644
index 0000000000..74f2d16595
--- /dev/null
+++ b/mfbt/FStream.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Similar to std::ifstream/ofstream, but takes char16ptr_t on Windows.
+// Until C++17, std functions can only take char* filenames, so Unicode
+// filenames were lost on Windows. To address this limitation, this wrapper
+// uses proprietary wchar_t* overloads on MSVC, and the
+// __gnu_cxx::stdio_filebuf extension on MinGW. Once we can use the C++17
+// filesystem API everywhere, we will be able to avoid this wrapper.
+
+#ifndef mozilla_FStream_h
+#define mozilla_FStream_h
+
+#include "mozilla/Char16.h"
+#include <fstream>
+#include <istream>
+#include <ostream>
+#if defined(__MINGW32__) && defined(__GLIBCXX__)
+#  include "mozilla/UniquePtr.h"
+#  include <fcntl.h>
+#  include <ext/stdio_filebuf.h>
+#endif
+
+namespace mozilla {
+
+#if defined(__MINGW32__) && defined(__GLIBCXX__)
+// MinGW does not support the wchar_t* overloads (an MSVC extension) until
+// C++17, so we have to implement widechar wrappers using a GNU extension.
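+//
+// Illustrative use (the path is a hypothetical example):
+//
+//   mozilla::IFStream in(u"C:\\data\\input.bin", std::ios::binary);
+//   if (in.is_open()) {
+//     // read from |in| as from any std::istream
+//   }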
+class IFStream : public std::istream {
+ public:
+  explicit IFStream(char16ptr_t filename, openmode mode = in);
+
+  std::filebuf* rdbuf() const { return mFileBuf.get(); }
+
+  bool is_open() const { return mFileBuf && mFileBuf->is_open(); }
+  void open(char16ptr_t filename, openmode mode = in);
+  void close() { mFileBuf && mFileBuf->close(); }
+
+ private:
+  UniquePtr<std::filebuf> mFileBuf;
+};
+
+inline IFStream::IFStream(char16ptr_t filename, openmode mode)
+    : std::istream(nullptr) {
+  open(filename, mode);
+}
+
+inline void IFStream::open(char16ptr_t filename, openmode mode) {
+  int fmode = _O_RDONLY;
+  if (mode & binary) {
+    fmode |= _O_BINARY;
+  } else {
+    fmode |= _O_TEXT;
+  }
+  int fd = _wopen(filename, fmode);
+  mFileBuf = MakeUnique<__gnu_cxx::stdio_filebuf<char>>(fd, mode);
+  std::istream::rdbuf(mFileBuf.get());
+}
+
+class OFStream : public std::ostream {
+ public:
+  explicit OFStream(char16ptr_t filename, openmode mode = out);
+
+  std::filebuf* rdbuf() const { return mFileBuf.get(); }
+
+  bool is_open() const { return mFileBuf && mFileBuf->is_open(); }
+  void open(char16ptr_t filename, openmode mode = out);
+  void close() { mFileBuf && mFileBuf->close(); }
+
+ private:
+  UniquePtr<std::filebuf> mFileBuf;
+};
+
+inline OFStream::OFStream(char16ptr_t filename, openmode mode)
+    : std::ostream(nullptr) {
+  open(filename, mode);
+}
+
+inline void OFStream::open(char16ptr_t filename, openmode mode) {
+  int fmode = _O_WRONLY;
+  if (mode & binary) {
+    fmode |= _O_BINARY;
+  } else {
+    fmode |= _O_TEXT;
+  }
+  if (mode & trunc) {
+    fmode |= _O_CREAT | _O_TRUNC;
+  }
+  int fd = _wopen(filename, fmode);
+  mFileBuf = MakeUnique<__gnu_cxx::stdio_filebuf<char>>(fd, mode);
+  std::ostream::rdbuf(mFileBuf.get());
+}
+
+#elif defined(XP_WIN)
+class IFStream : public std::ifstream {
+ public:
+  explicit IFStream(char16ptr_t filename, openmode mode = in)
+      : std::ifstream(filename, mode) {}
+
+  void open(char16ptr_t filename, openmode mode = in) {
+    std::ifstream::open(filename, mode);
+  }
+};
+
+class OFStream : public std::ofstream {
+ public:
+  explicit OFStream(char16ptr_t filename, openmode mode = out)
+      : std::ofstream(filename, mode) {}
+
+  void open(char16ptr_t filename, openmode mode = out) {
+    std::ofstream::open(filename, mode);
+  }
+};
+#else
+using IFStream = std::ifstream;
+using OFStream = std::ofstream;
+#endif
+
+} // namespace mozilla
+
+#endif /* mozilla_FStream_h */
diff --git a/mfbt/FastBernoulliTrial.h b/mfbt/FastBernoulliTrial.h
new file mode 100644
index 0000000000..d1c4f3b9fb
--- /dev/null
+++ b/mfbt/FastBernoulliTrial.h
@@ -0,0 +1,381 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_FastBernoulliTrial_h
+#define mozilla_FastBernoulliTrial_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <cmath>
+#include <stdint.h>
+
+namespace mozilla {
+
+/**
+ * class FastBernoulliTrial: Efficient sampling with uniform probability
+ *
+ * When gathering statistics about a program's behavior, we may be observing
+ * events that occur very frequently (e.g., function calls or memory
+ * allocations) and we may be gathering information that is somewhat expensive
+ * to produce (e.g., call stacks). Sampling all the events could have a
+ * significant impact on the program's performance.
+ * + * Why not just sample every N'th event? This technique is called "systematic + * sampling"; it's simple and efficient, and it's fine if we imagine a + * patternless stream of events. But what if we're sampling allocations, and the + * program happens to have a loop where each iteration does exactly N + * allocations? You would end up sampling the same allocation every time through + * the loop; the entire rest of the loop becomes invisible to your measurements! + * More generally, if each iteration does M allocations, and M and N have any + * common divisor at all, most allocation sites will never be sampled. If + * they're both even, say, the odd-numbered allocations disappear from your + * results. + * + * Ideally, we'd like each event to have some probability P of being sampled, + * independent of its neighbors and of its position in the sequence. This is + * called "Bernoulli sampling", and it doesn't suffer from any of the problems + * mentioned above. + * + * One disadvantage of Bernoulli sampling is that you can't be sure exactly how + * many samples you'll get: technically, it's possible that you might sample + * none of them, or all of them. But if the number of events N is large, these + * aren't likely outcomes; you can generally expect somewhere around P * N + * events to be sampled. + * + * The other disadvantage of Bernoulli sampling is that you have to generate a + * random number for every event, which can be slow. + * + * [significant pause] + * + * BUT NOT WITH THIS CLASS! FastBernoulliTrial lets you do true Bernoulli + * sampling, while generating a fresh random number only when we do decide to + * sample an event, not on every trial. When it decides not to sample, a call to + * |FastBernoulliTrial::trial| is nothing but decrementing a counter and + * comparing it to zero. So the lower your sampling probability is, the less + * overhead FastBernoulliTrial imposes. + * + * Probabilities of 0 and 1 are handled efficiently. (In neither case need we + * ever generate a random number at all.) + * + * The essential API: + * + * - FastBernoulliTrial(double P) + * Construct an instance that selects events with probability P. + * + * - FastBernoulliTrial::trial() + * Return true with probability P. Call this each time an event occurs, to + * decide whether to sample it or not. + * + * - FastBernoulliTrial::trial(size_t n) + * Equivalent to calling trial() |n| times, and returning true if any of those + * calls do. However, like trial, this runs in fast constant time. + * + * What is this good for? In some applications, some events are "bigger" than + * others. For example, large allocations are more significant than small + * allocations. Perhaps we'd like to imagine that we're drawing allocations + * from a stream of bytes, and performing a separate Bernoulli trial on every + * byte from the stream. We can accomplish this by calling |t.trial(S)| for + * the number of bytes S, and sampling the event if that returns true. + * + * Of course, this style of sampling needs to be paired with analysis and + * presentation that makes the size of the event apparent, lest trials with + * large values for |n| appear to be indistinguishable from those with small + * values for |n|. + */ +class FastBernoulliTrial { + /* + * This comment should just read, "Generate skip counts with a geometric + * distribution", and leave everyone to go look that up and see why it's the + * right thing to do, if they don't know already. + * + * BUT IF YOU'RE CURIOUS, COMMENTS ARE FREE... 
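+   *
+   * (As a concrete preview of the formula derived below, with illustrative
+   * numbers: for P = 0.5 and a uniform draw X = 0.2,
+   * std::floor(std::log(0.2) / std::log(0.5)) = std::floor(2.32...) = 2, so
+   * |trial| returns false twice before the next true.)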
+ * + * Instead of generating a fresh random number for every trial, we can + * randomly generate a count of how many times we should return false before + * the next time we return true. We call this a "skip count". Once we've + * returned true, we generate a fresh skip count, and begin counting down + * again. + * + * Here's an awesome fact: by exercising a little care in the way we generate + * skip counts, we can produce results indistinguishable from those we would + * get "rolling the dice" afresh for every trial. + * + * In short, skip counts in Bernoulli trials of probability P obey a geometric + * distribution. If a random variable X is uniformly distributed from [0..1), + * then std::floor(std::log(X) / std::log(1-P)) has the appropriate geometric + * distribution for the skip counts. + * + * Why that formula? + * + * Suppose we're to return |true| with some probability P, say, 0.3. Spread + * all possible futures along a line segment of length 1. In portion P of + * those cases, we'll return true on the next call to |trial|; the skip count + * is 0. For the remaining portion 1-P of cases, the skip count is 1 or more. + * + * skip: 0 1 or more + * |------------------^-----------------------------------------| + * portion: 0.3 0.7 + * P 1-P + * + * But the "1 or more" section of the line is subdivided the same way: *within + * that section*, in portion P the second call to |trial()| returns true, and + * in portion 1-P it returns false a second time; the skip count is two or + * more. So we return true on the second call in proportion 0.7 * 0.3, and + * skip at least the first two in proportion 0.7 * 0.7. + * + * skip: 0 1 2 or more + * |------------------^------------^----------------------------| + * portion: 0.3 0.7 * 0.3 0.7 * 0.7 + * P (1-P)*P (1-P)^2 + * + * We can continue to subdivide: + * + * skip >= 0: |------------------------------------------------- (1-P)^0 --| + * skip >= 1: | ------------------------------- (1-P)^1 --| + * skip >= 2: | ------------------ (1-P)^2 --| + * skip >= 3: | ^ ---------- (1-P)^3 --| + * skip >= 4: | . --- (1-P)^4 --| + * . + * ^X, see below + * + * In other words, the likelihood of the next n calls to |trial| returning + * false is (1-P)^n. The longer a run we require, the more the likelihood + * drops. Further calls may return false too, but this is the probability + * we'll skip at least n. + * + * This is interesting, because we can pick a point along this line segment + * and see which skip count's range it falls within; the point X above, for + * example, is within the ">= 2" range, but not within the ">= 3" range, so it + * designates a skip count of 2. So if we pick points on the line at random + * and use the skip counts they fall under, that will be indistinguishable + * from generating a fresh random number between 0 and 1 for each trial and + * comparing it to P. + * + * So to find the skip count for a point X, we must ask: To what whole power + * must we raise 1-P such that we include X, but the next power would exclude + * it? This is exactly std::floor(std::log(X) / std::log(1-P)). + * + * Our algorithm is then, simply: When constructed, compute an initial skip + * count. Return false from |trial| that many times, and then compute a new + * skip count. + * + * For a call to |trial(n)|, if the skip count is greater than n, return false + * and subtract n from the skip count. If the skip count is less than n, + * return true and compute a new skip count. 
Since each trial is independent, + * it doesn't matter by how much n overshoots the skip count; we can actually + * compute a new skip count at *any* time without affecting the distribution. + * This is really beautiful. + */ + public: + /** + * Construct a fast Bernoulli trial generator. Calls to |trial()| return true + * with probability |aProbability|. Use |aState0| and |aState1| to seed the + * random number generator; both may not be zero. + */ + FastBernoulliTrial(double aProbability, uint64_t aState0, uint64_t aState1) + : mProbability(0), + mInvLogNotProbability(0), + mGenerator(aState0, aState1), + mSkipCount(0) { + setProbability(aProbability); + } + + /** + * Return true with probability |mProbability|. Call this each time an event + * occurs, to decide whether to sample it or not. The lower |mProbability| is, + * the faster this function runs. + */ + bool trial() { + if (mSkipCount) { + mSkipCount--; + return false; + } + + return chooseSkipCount(); + } + + /** + * Equivalent to calling trial() |n| times, and returning true if any of those + * calls do. However, like trial, this runs in fast constant time. + * + * What is this good for? In some applications, some events are "bigger" than + * others. For example, large allocations are more significant than small + * allocations. Perhaps we'd like to imagine that we're drawing allocations + * from a stream of bytes, and performing a separate Bernoulli trial on every + * byte from the stream. We can accomplish this by calling |t.trial(S)| for + * the number of bytes S, and sampling the event if that returns true. + * + * Of course, this style of sampling needs to be paired with analysis and + * presentation that makes the "size" of the event apparent, lest trials with + * large values for |n| appear to be indistinguishable from those with small + * values for |n|, despite being potentially much more likely to be sampled. + */ + bool trial(size_t aCount) { + if (mSkipCount > aCount) { + mSkipCount -= aCount; + return false; + } + + return chooseSkipCount(); + } + + void setRandomState(uint64_t aState0, uint64_t aState1) { + mGenerator.setState(aState0, aState1); + } + + void setProbability(double aProbability) { + MOZ_ASSERT(0 <= aProbability && aProbability <= 1); + mProbability = aProbability; + if (0 < mProbability && mProbability < 1) { + /* + * Let's look carefully at how this calculation plays out in floating- + * point arithmetic. We'll assume IEEE, but the final C++ code we arrive + * at would still be fine if our numbers were mathematically perfect. So, + * while we've considered IEEE's edge cases, we haven't done anything that + * should be actively bad when using other representations. + * + * (In the below, read comparisons as exact mathematical comparisons: when + * we say something "equals 1", that means it's exactly equal to 1. We + * treat approximation using intervals with open boundaries: saying a + * value is in (0,1) doesn't specify how close to 0 or 1 the value gets. + * When we use closed boundaries like [2**-53, 1], we're careful to ensure + * the boundary values are actually representable.) + * + * - After the comparison above, we know mProbability is in (0,1). + * + * - The gaps below 1 are 2**-53, so that interval is (0, 1-2**-53]. + * + * - Because the floating-point gaps near 1 are wider than those near + * zero, there are many small positive doubles ε such that 1-ε rounds to + * exactly 1. However, 2**-53 can be represented exactly. So + * 1-mProbability is in [2**-53, 1]. 
+     *
+     * - log(1 - mProbability) is thus in (-37, 0].
+     *
+     *   That range includes zero, but when we use mInvLogNotProbability, it
+     *   would be helpful if we could trust that it's negative. So when log(1
+     *   - mProbability) is 0, we'll just set mProbability to 0, so that
+     *   mInvLogNotProbability is not used in chooseSkipCount.
+     *
+     * - How much of the range of mProbability does this cause us to ignore?
+     *   The only value for which log returns 0 is exactly 1; the slope of log
+     *   at 1 is 1, so for small ε such that 1 - ε != 1, log(1 - ε) is -ε,
+     *   never 0. The gaps near one are larger than the gaps near zero, so if
+     *   1 - ε wasn't 1, then -ε is representable. So if log(1 - mProbability)
+     *   isn't 0, then 1 - mProbability isn't 1, which means that mProbability
+     *   is at least 2**-53, as discussed earlier. This is a sampling
+     *   likelihood of roughly one in ten trillion, which is unlikely to be
+     *   distinguishable from zero in practice.
+     *
+     *   So by forbidding zero, we've tightened our range to (-37, -2**-53].
+     *
+     * - Finally, 1 / log(1 - mProbability) is in [-2**53, -1/37). This all
+     *   falls readily within the range of an IEEE double.
+     *
+     * ALL THAT HAVING BEEN SAID: here are the five lines of actual code:
+     */
+    double logNotProbability = std::log(1 - mProbability);
+    if (logNotProbability == 0.0)
+      mProbability = 0.0;
+    else
+      mInvLogNotProbability = 1 / logNotProbability;
+    }
+
+    chooseSkipCount();
+  }
+
+ private:
+  /* The likelihood that any given call to |trial| should return true. */
+  double mProbability;
+
+  /*
+   * The value of 1/std::log(1 - mProbability), cached for repeated use.
+   *
+   * If mProbability is exactly 0 or exactly 1, we don't use this value.
+   * Otherwise, we guarantee this value is in the range [-2**53, -1/37), i.e.
+   * definitely negative, as required by chooseSkipCount. See setProbability
+   * for the details.
+   */
+  double mInvLogNotProbability;
+
+  /* Our random number generator. */
+  non_crypto::XorShift128PlusRNG mGenerator;
+
+  /* The number of times |trial| should return false before next returning
+   * true. */
+  size_t mSkipCount;
+
+  /*
+   * Choose the next skip count. This also returns the value that |trial|
+   * should return, since we have to check for the extreme values for
+   * mProbability anyway, and |trial| should never return true at all when
+   * mProbability is 0.
+   */
+  bool chooseSkipCount() {
+    /*
+     * If the probability is 1.0, every call to |trial| returns true. Make
+     * sure mSkipCount is 0.
+     */
+    if (mProbability == 1.0) {
+      mSkipCount = 0;
+      return true;
+    }
+
+    /*
+     * If the probability is zero, |trial| never returns true. Don't bother
+     * us for a while.
+     */
+    if (mProbability == 0.0) {
+      mSkipCount = SIZE_MAX;
+      return false;
+    }
+
+    /*
+     * What sorts of values can this call to std::floor produce?
+     *
+     * Since mGenerator.nextDouble returns a value in [0, 1-2**-53], std::log
+     * returns a value in the range [-infinity, -2**-53], all negative. Since
+     * mInvLogNotProbability is negative (see its comments), the product is
+     * positive and possibly infinite. std::floor returns +infinity unchanged.
+     * So the result will always be positive.
+     *
+     * Converting a double to an integer that is out of range for that integer
+     * is undefined behavior, so we must clamp our result to SIZE_MAX, to
+     * ensure we get an acceptable value for mSkipCount.
+     *
+     * The clamp is written carefully. Note that if we had said:
+     *
+     *    if (skipCount > double(SIZE_MAX))
+     *      mSkipCount = SIZE_MAX;
+     *    else
+     *      mSkipCount = skipCount;
+     *
+     * that leads to undefined behavior on 64-bit machines: SIZE_MAX coerced
+     * to double can equal 2^64, so if skipCount equaled 2^64 converting it to
+     * size_t would induce undefined behavior.
+     *
+     * Jakob Olesen cleverly suggested flipping the sense of the comparison
+     * to skipCount < double(SIZE_MAX). The conversion will evaluate to 2^64
+     * or the double just below it: either way, skipCount is guaranteed to
+     * have a value that's safely convertible to size_t.
+     *
+     * (On 32-bit machines, all size_t values can be represented exactly in
+     * double, so all is well.)
+     */
+    double skipCount =
+        std::floor(std::log(mGenerator.nextDouble()) * mInvLogNotProbability);
+    if (skipCount < double(SIZE_MAX))
+      mSkipCount = skipCount;
+    else
+      mSkipCount = SIZE_MAX;
+
+    return true;
+  }
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_FastBernoulliTrial_h */
diff --git a/mfbt/FloatingPoint.cpp b/mfbt/FloatingPoint.cpp
new file mode 100644
index 0000000000..4d52ffaaf8
--- /dev/null
+++ b/mfbt/FloatingPoint.cpp
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of FloatingPoint functions */
+
+#include "mozilla/FloatingPoint.h"
+
+#include <cfloat>  // for FLT_MAX
+
+namespace mozilla {
+
+bool IsFloat32Representable(double aValue) {
+  // NaNs and infinities are representable.
+  if (!std::isfinite(aValue)) {
+    return true;
+  }
+
+  // If it exceeds finite |float| range, casting to |float| is always undefined
+  // behavior per C++11 [conv.double]p1 last sentence.
+  if (Abs(aValue) > FLT_MAX) {
+    return false;
+  }
+
+  // But if it's within finite range, then either it's 1) an exact value and so
+  // representable, or 2) it's "between two adjacent destination values" and
+  // safe to cast to "an implementation-defined choice of either of those
+  // values".
+  auto valueAsFloat = static_cast<float>(aValue);
+
+  // Per [conv.fpprom] this never changes value.
+  auto valueAsFloatAsDouble = static_cast<double>(valueAsFloat);
+
+  // Finally, in 1) exact representable value equals exact representable value,
+  // or 2) *changed* value does not equal original value, ergo unrepresentable.
+  return valueAsFloatAsDouble == aValue;
+}
+
+} /* namespace mozilla */
diff --git a/mfbt/FloatingPoint.h b/mfbt/FloatingPoint.h
new file mode 100644
index 0000000000..f4ae36257b
--- /dev/null
+++ b/mfbt/FloatingPoint.h
@@ -0,0 +1,606 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various predicates and operations on IEEE-754 floating point types.
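+ *
+ * For example, 0.5 is exactly representable as a float, so
+ * IsFloat32Representable(0.5) returns true, while 0.1 has no exact float
+ * representation, so IsFloat32Representable(0.1) returns false.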
diff --git a/mfbt/FloatingPoint.h b/mfbt/FloatingPoint.h
new file mode 100644
index 0000000000..f4ae36257b
--- /dev/null
+++ b/mfbt/FloatingPoint.h
@@ -0,0 +1,606 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Various predicates and operations on IEEE-754 floating point types. */
+
+#ifndef mozilla_FloatingPoint_h
+#define mozilla_FloatingPoint_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/Types.h"
+
+#include <algorithm>
+#include <climits>
+#include <cmath>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+namespace mozilla {
+
+/*
+ * It's reasonable to ask why we have this header at all. Don't isnan,
+ * copysign, the built-in comparison operators, and the like solve these
+ * problems? Unfortunately, they don't. We've found that various compilers
+ * (MSVC, MSVC when compiling with PGO, and GCC on OS X, at least) miscompile
+ * the standard methods in various situations, so we can't use them. Some of
+ * these compilers even have problems compiling seemingly reasonable bitwise
+ * algorithms! But with some care we've found algorithms that seem to not
+ * trigger those compiler bugs.
+ *
+ * For the aforementioned reasons, be very wary of making changes to any of
+ * these algorithms. If you must make changes, keep a careful eye out for
+ * compiler bustage, particularly PGO-specific bustage.
+ */
+
+namespace detail {
+
+/*
+ * These implementations assume float/double are 32/64-bit single/double
+ * format number types compatible with the IEEE-754 standard. C++ doesn't
+ * require this, but we required it in implementations of these algorithms
+ * that preceded this header, so we shouldn't break anything to continue
+ * doing so.
+ */
+template <typename T>
+struct FloatingPointTrait;
+
+template <>
+struct FloatingPointTrait<float> {
+ protected:
+  using Bits = uint32_t;
+
+  static constexpr unsigned kExponentWidth = 8;
+  static constexpr unsigned kSignificandWidth = 23;
+};
+
+template <>
+struct FloatingPointTrait<double> {
+ protected:
+  using Bits = uint64_t;
+
+  static constexpr unsigned kExponentWidth = 11;
+  static constexpr unsigned kSignificandWidth = 52;
+};
+
+}  // namespace detail
+
+/*
+ * This struct contains details regarding the encoding of floating-point
+ * numbers that can be useful for direct bit manipulation. As of now, the
+ * template parameter has to be float or double.
+ *
+ * The nested typedef |Bits| is the unsigned integral type with the same size
+ * as T: uint32_t for float and uint64_t for double (static assertions
+ * double-check these assumptions).
+ *
+ * kExponentBias is the offset that is subtracted from the exponent when
+ * computing the value, i.e. one plus the opposite of the minimum possible
+ * exponent.
+ * kExponentShift is the shift that one needs to apply to retrieve the
+ * exponent component of the value.
+ *
+ * kSignBit contains a bit mask. Bit-and-ing with this mask will result in
+ * obtaining the sign bit.
+ * kExponentBits contains the mask needed for obtaining the exponent bits and
+ * kSignificandBits contains the mask needed for obtaining the significand
+ * bits.
+ *
+ * Full details of how floating point number formats are encoded are beyond
+ * the scope of this comment. For more information, see
+ * http://en.wikipedia.org/wiki/IEEE_floating_point
+ * http://en.wikipedia.org/wiki/Floating_point#IEEE_754:_floating_point_in_modern_computers
+ */
+template <typename T>
+struct FloatingPoint final : private detail::FloatingPointTrait<T> {
+ private:
+  using Base = detail::FloatingPointTrait<T>;
+
+ public:
+  /**
+   * An unsigned integral type suitable for accessing the bitwise
+   * representation of T.
+   */
+  using Bits = typename Base::Bits;
+
+  static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
+
+  /** The bit-width of the exponent component of T. */
+  using Base::kExponentWidth;
+
+  /** The bit-width of the significand component of T. */
+  using Base::kSignificandWidth;
+
+  static_assert(1 + kExponentWidth + kSignificandWidth == CHAR_BIT * sizeof(T),
+                "sign bit plus bit widths should sum to overall bit width");
+
+  /**
+   * The exponent field in an IEEE-754 floating point number consists of bits
+   * encoding an unsigned number. The *actual* represented exponent (for all
+   * values finite and not denormal) is that value, minus a bias
+   * |kExponentBias| so that a useful range of numbers is represented.
+   */
+  static constexpr unsigned kExponentBias = (1U << (kExponentWidth - 1)) - 1;
+
+  /**
+   * The amount by which the bits of the exponent-field in an IEEE-754
+   * floating point number are shifted from the LSB of the floating point
+   * type.
+   */
+  static constexpr unsigned kExponentShift = kSignificandWidth;
+
+  /** The sign bit in the floating point representation. */
+  static constexpr Bits kSignBit = static_cast<Bits>(1)
+                                   << (CHAR_BIT * sizeof(Bits) - 1);
+
+  /** The exponent bits in the floating point representation. */
+  static constexpr Bits kExponentBits =
+      ((static_cast<Bits>(1) << kExponentWidth) - 1) << kSignificandWidth;
+
+  /** The significand bits in the floating point representation. */
+  static constexpr Bits kSignificandBits =
+      (static_cast<Bits>(1) << kSignificandWidth) - 1;
+
+  static_assert((kSignBit & kExponentBits) == 0,
+                "sign bit shouldn't overlap exponent bits");
+  static_assert((kSignBit & kSignificandBits) == 0,
+                "sign bit shouldn't overlap significand bits");
+  static_assert((kExponentBits & kSignificandBits) == 0,
+                "exponent bits shouldn't overlap significand bits");
+
+  static_assert((kSignBit | kExponentBits | kSignificandBits) == ~Bits(0),
+                "all bits accounted for");
+};
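
As an illustration of these masks, a sketch that decomposes a double into its
three fields (DumpBits is a hypothetical helper, not part of the header):

#include <cstdio>

#include "mozilla/Casting.h"        // mozilla::BitwiseCast
#include "mozilla/FloatingPoint.h"

using FP = mozilla::FloatingPoint<double>;

void DumpBits(double d) {
  auto bits = mozilla::BitwiseCast<FP::Bits>(d);
  std::printf("sign=%llu exponent=0x%llx significand=0x%llx\n",
              (unsigned long long)((bits & FP::kSignBit) >> 63),
              (unsigned long long)((bits & FP::kExponentBits) >>
                                   FP::kExponentShift),
              (unsigned long long)(bits & FP::kSignificandBits));
}
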
+
+/**
+ * Determines whether a float/double is negative or -0. It is an error
+ * to call this method on a float/double which is NaN.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsNegative(T aValue) {
+  MOZ_ASSERT(!std::isnan(aValue), "NaN does not have a sign");
+  return std::signbit(aValue);
+}
+
+/** Determines whether a float/double represents -0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsNegativeZero(T aValue) {
+  /* Only the sign bit is set if the value is -0. */
+  typedef FloatingPoint<T> Traits;
+  typedef typename Traits::Bits Bits;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return bits == Traits::kSignBit;
+}
+
+/** Determines whether a float/double represents +0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool IsPositiveZero(T aValue) {
+  /* All bits are zero if the value is +0. */
+  typedef FloatingPoint<T> Traits;
+  typedef typename Traits::Bits Bits;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return bits == 0;
+}
+
+/**
+ * Returns 0 if a float/double is NaN or infinite;
+ * otherwise, the float/double is returned.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T ToZeroIfNonfinite(T aValue) {
+  return std::isfinite(aValue) ? aValue : 0;
+}
+
+/**
+ * Returns the exponent portion of the float/double.
+ *
+ * Zero is not special-cased, so ExponentComponent(0.0) is
+ * -int_fast16_t(Traits::kExponentBias).
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE int_fast16_t ExponentComponent(T aValue) {
+  /*
+   * The exponent component of a float/double is an unsigned number, biased
+   * from its actual value. Subtract the bias to retrieve the actual
+   * exponent.
+   */
+  typedef FloatingPoint<T> Traits;
+  typedef typename Traits::Bits Bits;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return int_fast16_t((bits & Traits::kExponentBits) >>
+                      Traits::kExponentShift) -
+         int_fast16_t(Traits::kExponentBias);
+}
+
+/** Returns +Infinity. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T PositiveInfinity() {
+  /*
+   * Positive infinity has all exponent bits set, sign bit set to 0, and no
+   * significand.
+   */
+  typedef FloatingPoint<T> Traits;
+  return BitwiseCast<T>(Traits::kExponentBits);
+}
+
+/** Returns -Infinity. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T NegativeInfinity() {
+  /*
+   * Negative infinity has all exponent bits set, sign bit set to 1, and no
+   * significand.
+   */
+  typedef FloatingPoint<T> Traits;
+  return BitwiseCast<T>(Traits::kSignBit | Traits::kExponentBits);
+}
+
+/**
+ * Computes the bit pattern for an infinity with the specified sign bit.
+ */
+template <typename T, int SignBit>
+struct InfinityBits {
+  using Traits = FloatingPoint<T>;
+
+  static_assert(SignBit == 0 || SignBit == 1, "bad sign bit");
+  static constexpr typename Traits::Bits value =
+      (SignBit * Traits::kSignBit) | Traits::kExponentBits;
+};
+
+/**
+ * Computes the bit pattern for a NaN with the specified sign bit and
+ * significand bits.
+ */
+template <typename T, int SignBit,
+          typename FloatingPoint<T>::Bits Significand>
+struct SpecificNaNBits {
+  using Traits = FloatingPoint<T>;
+
+  static_assert(SignBit == 0 || SignBit == 1, "bad sign bit");
+  static_assert((Significand & ~Traits::kSignificandBits) == 0,
+                "significand must only have significand bits set");
+  static_assert(Significand & Traits::kSignificandBits,
+                "significand must be nonzero");
+
+  static constexpr typename Traits::Bits value =
+      (SignBit * Traits::kSignBit) | Traits::kExponentBits | Significand;
+};
+
+/**
+ * Constructs a NaN value with the specified sign bit and significand bits.
+ *
+ * There is also a variant that returns the value directly. In most cases,
+ * the two variants should be identical. However, in the specific case of x86
+ * chips, the behavior differs: returning floating-point values directly is
+ * done through the x87 stack, and x87 loads and stores turn signaling NaNs
+ * into quiet NaNs... silently. Returning floating-point values via outparam,
+ * however, is done entirely within the SSE registers when SSE2
+ * floating-point is enabled in the compiler, which has semantics-preserving
+ * behavior you would expect.
+ *
+ * If preserving the distinction between signaling NaNs and quiet NaNs is
+ * important to you, you should use the outparam version. In all other cases,
+ * you should use the direct return version.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE void SpecificNaN(
+    int signbit, typename FloatingPoint<T>::Bits significand, T* result) {
+  typedef FloatingPoint<T> Traits;
+  MOZ_ASSERT(signbit == 0 || signbit == 1);
+  MOZ_ASSERT((significand & ~Traits::kSignificandBits) == 0);
+  MOZ_ASSERT(significand & Traits::kSignificandBits);
+
+  BitwiseCast<T>(
+      (signbit ? Traits::kSignBit : 0) | Traits::kExponentBits | significand,
+      result);
+  MOZ_ASSERT(std::isnan(*result));
+}
+
+template <typename T>
+static MOZ_ALWAYS_INLINE T
+SpecificNaN(int signbit, typename FloatingPoint<T>::Bits significand) {
+  T t;
+  SpecificNaN(signbit, significand, &t);
+  return t;
+}
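
A usage sketch for the NaN and infinity constructors above (Demo is a
hypothetical function; 0x8000000000000 is just one valid significand, the
quiet-NaN bit of a double):

#include "mozilla/FloatingPoint.h"

#include <cassert>
#include <cmath>

void Demo() {
  // A NaN with the sign bit set and only the top significand bit set.
  double d = mozilla::SpecificNaN<double>(1, 0x8000000000000ULL);
  assert(std::isnan(d));

  double inf = mozilla::PositiveInfinity<double>();
  assert(std::isinf(inf) && inf > 0);
}
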
+
+/** Computes the smallest non-zero positive float/double value. */
+template <typename T>
+static MOZ_ALWAYS_INLINE T MinNumberValue() {
+  typedef FloatingPoint<T> Traits;
+  typedef typename Traits::Bits Bits;
+  return BitwiseCast<T>(Bits(1));
+}
+
+namespace detail {
+
+template <typename Float, typename SignedInteger>
+inline bool NumberEqualsSignedInteger(Float aValue, SignedInteger* aInteger) {
+  static_assert(std::is_same_v<Float, float> || std::is_same_v<Float, double>,
+                "Float must be an IEEE-754 floating point type");
+  static_assert(std::is_signed_v<SignedInteger>,
+                "this algorithm only works for signed types: a different one "
+                "will be required for unsigned types");
+  static_assert(sizeof(SignedInteger) >= sizeof(int),
+                "this function *might* require some finessing for signed "
+                "types subject to integral promotion before it can be used "
+                "on them");
+
+  MOZ_MAKE_MEM_UNDEFINED(aInteger, sizeof(*aInteger));
+
+  // NaNs and infinities are not integers.
+  if (!std::isfinite(aValue)) {
+    return false;
+  }
+
+  // Otherwise do direct comparisons against the minimum/maximum
+  // |SignedInteger| values that can be encoded in |Float|.
+
+  constexpr SignedInteger MaxIntValue =
+      std::numeric_limits<SignedInteger>::max();  // e.g. INT32_MAX
+  constexpr SignedInteger MinValue =
+      std::numeric_limits<SignedInteger>::min();  // e.g. INT32_MIN
+
+  static_assert(IsPowerOfTwo(Abs(MinValue)),
+                "MinValue should be a small power of two, thus exactly "
+                "representable in float/double both");
+
+  constexpr unsigned SignedIntegerWidth = CHAR_BIT * sizeof(SignedInteger);
+  constexpr unsigned ExponentShift = FloatingPoint<Float>::kExponentShift;
+
+  // Careful! |MaxIntValue| may not be the maximum |SignedInteger| value that
+  // can be encoded in |Float|. Its |SignedIntegerWidth - 1| bits of
+  // precision may exceed |Float|'s |ExponentShift + 1| bits of precision.
+  // If necessary, compute the maximum |SignedInteger| that fits in |Float|
+  // from IEEE-754 first principles. (|MinValue| doesn't have this problem
+  // because as a [relatively] small power of two it's always representable
+  // in |Float|.)
+
+  // Per C++11 [expr.const]p2, unevaluated subexpressions of logical AND/OR
+  // and conditional expressions *may* contain non-constant expressions,
+  // without making the enclosing expression not constexpr. MSVC implements
+  // this -- but it sometimes warns about undefined behavior in unevaluated
+  // subexpressions. This bites us if we initialize |MaxValue| the obvious
+  // way including an |uint64_t(1) << (SignedIntegerWidth - 2 -
+  // ExponentShift)| subexpression. Pull that shift-amount out and give it a
+  // not-too-huge value when it's in an unevaluated subexpression. 🙄
+  constexpr unsigned PrecisionExceededShiftAmount =
+      ExponentShift > SignedIntegerWidth - 1
+          ? 0
+          : SignedIntegerWidth - 2 - ExponentShift;
+
+  constexpr SignedInteger MaxValue =
+      ExponentShift > SignedIntegerWidth - 1
+          ? MaxIntValue
+          : SignedInteger((uint64_t(1) << (SignedIntegerWidth - 1)) -
+                          (uint64_t(1) << PrecisionExceededShiftAmount));
+
+  if (static_cast<Float>(MinValue) <= aValue &&
+      aValue <= static_cast<Float>(MaxValue)) {
+    auto possible = static_cast<SignedInteger>(aValue);
+    if (static_cast<Float>(possible) == aValue) {
+      *aInteger = possible;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+template <typename Float, typename SignedInteger>
+inline bool NumberIsSignedInteger(Float aValue, SignedInteger* aInteger) {
+  static_assert(std::is_same_v<Float, float> || std::is_same_v<Float, double>,
+                "Float must be an IEEE-754 floating point type");
+  static_assert(std::is_signed_v<SignedInteger>,
+                "this algorithm only works for signed types: a different one "
+                "will be required for unsigned types");
+  static_assert(sizeof(SignedInteger) >= sizeof(int),
+                "this function *might* require some finessing for signed "
+                "types subject to integral promotion before it can be used "
+                "on them");
+
+  MOZ_MAKE_MEM_UNDEFINED(aInteger, sizeof(*aInteger));
+
+  if (IsNegativeZero(aValue)) {
+    return false;
+  }
+
+  return NumberEqualsSignedInteger(aValue, aInteger);
+}
+
+}  // namespace detail
+
+/**
+ * If |aValue| is identical to some |int32_t| value, set |*aInt32| to that
+ * value and return true. Otherwise return false, leaving |*aInt32| in an
+ * indeterminate state.
+ *
+ * This method returns false for negative zero. If you want to consider -0 to
+ * be 0, use NumberEqualsInt32 below.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberIsInt32(T aValue, int32_t* aInt32) {
+  return detail::NumberIsSignedInteger(aValue, aInt32);
+}
+
+/**
+ * If |aValue| is identical to some |int64_t| value, set |*aInt64| to that
+ * value and return true. Otherwise return false, leaving |*aInt64| in an
+ * indeterminate state.
+ *
+ * This method returns false for negative zero. If you want to consider -0 to
+ * be 0, use NumberEqualsInt64 below.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberIsInt64(T aValue, int64_t* aInt64) {
+  return detail::NumberIsSignedInteger(aValue, aInt64);
+}
+
+/**
+ * If |aValue| is equal to some int32_t value (where -0 and +0 are considered
+ * equal), set |*aInt32| to that value and return true. Otherwise return
+ * false, leaving |*aInt32| in an indeterminate state.
+ *
+ * |NumberEqualsInt32(-0.0, ...)| will return true. To test whether a value
+ * can be losslessly converted to |int32_t| and back, use NumberIsInt32
+ * above.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberEqualsInt32(T aValue, int32_t* aInt32) {
+  return detail::NumberEqualsSignedInteger(aValue, aInt32);
+}
+
+/**
+ * If |aValue| is equal to some int64_t value (where -0 and +0 are considered
+ * equal), set |*aInt64| to that value and return true. Otherwise return
+ * false, leaving |*aInt64| in an indeterminate state.
+ *
+ * |NumberEqualsInt64(-0.0, ...)| will return true. To test whether a value
+ * can be losslessly converted to |int64_t| and back, use NumberIsInt64
+ * above.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool NumberEqualsInt64(T aValue, int64_t* aInt64) {
+  return detail::NumberEqualsSignedInteger(aValue, aInt64);
+}
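
Concretely, for Float = double and SignedInteger = int64_t, ExponentShift is
52, so MaxValue is 2**63 - 2**(64 - 2 - 52) = 2**63 - 2**10 =
9223372036854774784, the largest int64_t a double can hold exactly. A usage
sketch for the four wrappers (Demo is a hypothetical function):

#include "mozilla/FloatingPoint.h"

#include <cassert>
#include <cstdint>

void Demo() {
  int32_t i;
  assert(mozilla::NumberIsInt32(42.0, &i) && i == 42);
  assert(!mozilla::NumberIsInt32(-0.0, &i));               // -0 rejected
  assert(mozilla::NumberEqualsInt32(-0.0, &i) && i == 0);  // -0 equals 0 here
  assert(!mozilla::NumberIsInt32(0.5, &i));                // not an integer
}
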
+
+/**
+ * Computes a NaN value. Do not use this method if you depend upon a
+ * particular NaN value being returned.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE T UnspecifiedNaN() {
+  /*
+   * If we can use any quiet NaN, we might as well use the all-ones NaN,
+   * since it's cheap to materialize on common platforms (such as x64, where
+   * this value can be represented in a 32-bit signed immediate field,
+   * allowing it to be stored to memory in a single instruction).
+   */
+  typedef FloatingPoint<T> Traits;
+  return SpecificNaN<T>(1, Traits::kSignificandBits);
+}
+
+/**
+ * Compare two doubles for equality, *without* equating -0 to +0, and
+ * equating any NaN value to any other NaN value. (The normal equality
+ * operators equate -0 with +0, and they equate NaN to no other value.)
+ */
+template <typename T>
+static inline bool NumbersAreIdentical(T aValue1, T aValue2) {
+  using Bits = typename FloatingPoint<T>::Bits;
+  if (std::isnan(aValue1)) {
+    return std::isnan(aValue2);
+  }
+  return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
+}
+
+/**
+ * Compare two floating point values for bit-wise equality.
+ */
+template <typename T>
+static inline bool NumbersAreBitwiseIdentical(T aValue1, T aValue2) {
+  using Bits = typename FloatingPoint<T>::Bits;
+  return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
+}
+
+/**
+ * Return true iff |aValue1| and |aValue2| are equal (ignoring sign if both
+ * are zero) or both NaN.
+ */
+template <typename T>
+static inline bool EqualOrBothNaN(T aValue1, T aValue2) {
+  if (std::isnan(aValue1)) {
+    return std::isnan(aValue2);
+  }
+  return aValue1 == aValue2;
+}
+
+/**
+ * Return NaN if either |aValue1| or |aValue2| is NaN, or the minimum of
+ * |aValue1| and |aValue2| otherwise.
+ */
+template <typename T>
+static inline T NaNSafeMin(T aValue1, T aValue2) {
+  if (std::isnan(aValue1) || std::isnan(aValue2)) {
+    return UnspecifiedNaN<T>();
+  }
+  return std::min(aValue1, aValue2);
+}
+
+/**
+ * Return NaN if either |aValue1| or |aValue2| is NaN, or the maximum of
+ * |aValue1| and |aValue2| otherwise.
+ */
+template <typename T>
+static inline T NaNSafeMax(T aValue1, T aValue2) {
+  if (std::isnan(aValue1) || std::isnan(aValue2)) {
+    return UnspecifiedNaN<T>();
+  }
+  return std::max(aValue1, aValue2);
+}
+
+namespace detail {
+
+template <typename T>
+struct FuzzyEqualsEpsilon;
+
+template <>
+struct FuzzyEqualsEpsilon<float> {
+  // A number near 1e-5 that is exactly representable in a float.
+  static float value() { return 1.0f / (1 << 17); }
+};
+
+template <>
+struct FuzzyEqualsEpsilon<double> {
+  // A number near 1e-12 that is exactly representable in a double.
+  static double value() { return 1.0 / (1LL << 40); }
+};
+
+}  // namespace detail
+
+/**
+ * Compare two floating point values for equality, modulo rounding error.
+ * That is, the two values are considered equal if they are both not NaN and
+ * if they are less than or equal to aEpsilon apart. The default value of
+ * aEpsilon is near 1e-5.
+ *
+ * For most scenarios you will want to use FuzzyEqualsMultiplicative instead,
+ * as it is more reasonable over the entire range of floating point numbers.
+ * This additive version should only be used if you know the range of the
+ * numbers you are dealing with is bounded and stays around the same order of
+ * magnitude.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool FuzzyEqualsAdditive(
+    T aValue1, T aValue2,
+    T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value()) {
+  static_assert(std::is_floating_point_v<T>, "floating point type required");
+  return Abs(aValue1 - aValue2) <= aEpsilon;
+}
+
+/**
+ * Compare two floating point values for equality, allowing for rounding
+ * error relative to the magnitude of the values. That is, the two values
+ * are considered equal if they are both not NaN and they are less than or
+ * equal to some aEpsilon apart, where the aEpsilon is scaled by the smaller
+ * of the two argument values.
+ *
+ * In most cases you will want to use this rather than FuzzyEqualsAdditive,
+ * as this function effectively masks out differences in the bottom few bits
+ * of the floating point numbers being compared, regardless of what order of
+ * magnitude those numbers are at.
+ */
+template <typename T>
+static MOZ_ALWAYS_INLINE bool FuzzyEqualsMultiplicative(
+    T aValue1, T aValue2,
+    T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value()) {
+  static_assert(std::is_floating_point_v<T>, "floating point type required");
+  // can't use std::min because of bug 965340
+  T smaller = Abs(aValue1) < Abs(aValue2) ? Abs(aValue1) : Abs(aValue2);
+  return Abs(aValue1 - aValue2) <= aEpsilon * smaller;
+}
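
A sketch contrasting the two comparisons with their default epsilons (the
numeric values chosen here are illustrative):

#include "mozilla/FloatingPoint.h"

#include <cassert>

void Demo() {
  // Additive: an absolute tolerance, fine near magnitude 1.
  assert(mozilla::FuzzyEqualsAdditive(1.0, 1.0 + 1e-13));

  // Multiplicative: the tolerance scales with the smaller operand, so it
  // still behaves sensibly for large values where an absolute epsilon is
  // far too strict.
  assert(mozilla::FuzzyEqualsMultiplicative(1e13, 1e13 + 1.0));
  assert(!mozilla::FuzzyEqualsAdditive(1e13, 1e13 + 1.0));
}
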
+
+/**
+ * Returns true if |aValue| can be losslessly represented as an IEEE-754
+ * single precision number, false otherwise. All NaN values are considered
+ * representable (even though the bit patterns of double precision NaNs
+ * can't all be exactly represented in single precision).
+ */
+[[nodiscard]] extern MFBT_API bool IsFloat32Representable(double aValue);
+
+} /* namespace mozilla */
+
+#endif /* mozilla_FloatingPoint_h */
diff --git a/mfbt/FunctionRef.h b/mfbt/FunctionRef.h
new file mode 100644
index 0000000000..12c3b2f4ea
--- /dev/null
+++ b/mfbt/FunctionRef.h
@@ -0,0 +1,226 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A generic callable type that can be initialized from any compatible
+ * callable, suitable for use as a function argument for the duration of the
+ * function call (and no longer).
+ */
+
+#ifndef mozilla_FunctionRef_h
+#define mozilla_FunctionRef_h
+
+#include "mozilla/OperatorNewExtensions.h"  // mozilla::KnownNotNull, ::operator new
+
+#include <cstddef>      // std::nullptr_t
+#include <type_traits>  // std::{declval,integral_constant}, std::is_{convertible,same,void}_v, std::{enable_if,remove_reference,remove_cv}_t
+#include <utility>      // std::forward
+
+// This concept and its implementation are substantially inspired by
+// foonathan's prior art:
+//
+// https://foonathan.net/2017/01/function-ref-implementation/
+// https://github.com/foonathan/type_safe/blob/2017851053f8dd268372f1612865792c5c621570/include/type_safe/reference.hpp
+
+namespace mozilla {
+
+namespace detail {
+
+// Template helper to determine if |Returned| is a return type compatible
+// with |Required|: if the former converts to the latter, or if |Required| is
+// |void| and nothing is returned.
+template <typename Returned, typename Required>
+using CompatibleReturnType =
+    std::integral_constant<bool, std::is_void_v<Required> ||
+                                     std::is_convertible_v<Returned, Required>>;
+
+// Template helper to check if |Func| called with |Params| arguments returns
+// a type compatible with |Ret|.
+template <typename Func, typename Ret, typename... Params>
+using EnableMatchingFunction = std::enable_if_t<
+    CompatibleReturnType<
+        decltype(std::declval<Func&>()(std::declval<Params>()...)),
+        Ret>::value,
+    int>;
+
+struct MatchingFunctionPointerTag {};
+struct MatchingFunctorTag {};
+struct InvalidFunctorTag {};
+
+// Template helper to determine the proper way to store |Callable|: as
+// function pointer, as pointer to object, or unstorable.
+template <typename Callable, typename Ret, typename... Params>
+struct GetCallableTag {
+  // Match the case where |Callable| is a compatible function pointer or
+  // converts to one. (|+obj| invokes such a conversion.)
+ template + static MatchingFunctionPointerTag test( + int, T& obj, EnableMatchingFunction = 0); + + // Match the case where |Callable| is callable but can't be converted to a + // function pointer. (|short| is a worse match for 0 than |int|, causing the + // function pointer match to be preferred if both apply.) + template + static MatchingFunctorTag test(short, T& obj, + EnableMatchingFunction = 0); + + // Match all remaining cases. (Any other match is preferred to an ellipsis + // match.) + static InvalidFunctorTag test(...); + + using Type = decltype(test(0, std::declval())); +}; + +// If the callable is |nullptr|, |std::declval()| will be an +// error. Provide a specialization for |nullptr| that will fail substitution. +template +struct GetCallableTag {}; + +template +using EnableFunctionTag = std::enable_if_t< + std::is_same_v::Type, + Result>, + int>; + +} // namespace detail + +/** + * An efficient, type-erasing, non-owning reference to a callable. It is + * intended for use as the type of a function parameter that is not used after + * the function in question returns. + * + * This class does not own the callable, so in general it is unsafe to store a + * FunctionRef. + */ +template +class FunctionRef; + +template +class FunctionRef { + union Payload; + + // |FunctionRef| stores an adaptor function pointer, determined by the + // arguments passed to the constructor. That adaptor will perform the steps + // needed to invoke the callable passed at construction time. + using Adaptor = Ret (*)(const Payload& aPayload, Params... aParams); + + // If |FunctionRef|'s callable can be stored as a function pointer, that + // function pointer is stored after being cast to this *different* function + // pointer type. |mAdaptor| then casts back to the original type to call it. + // ([expr.reinterpret.cast]p6 guarantees that A->B->A function pointer casts + // produce the original function pointer value.) An outlandish signature is + // used to emphasize that the exact function pointer type doesn't matter. + using FuncPtr = Payload***** (*)(Payload*****); + + /** + * An adaptor function (used by this class's function call operator) that + * invokes the callable in |mPayload|, forwarding arguments and converting + * return type as needed. + */ + const Adaptor mAdaptor; + + /** Storage for the wrapped callable value. */ + union Payload { + // This arm is used if |FunctionRef| is passed a compatible function pointer + // or a lambda/callable that converts to a compatible function pointer. + FuncPtr mFuncPtr; + + // This arm is used if |FunctionRef| is passed some other callable or + // |nullptr|. + void* mObject; + } mPayload; + + template + static Ret CallFunctionPointer(const Payload& aPayload, + Params... aParams) noexcept { + auto func = reinterpret_cast(aPayload.mFuncPtr); + return static_cast(func(std::forward(aParams)...)); + } + + template + FunctionRef(detail::MatchingFunctionPointerTag, Ret2 (*aFuncPtr)(Params2...)) + : mAdaptor(&CallFunctionPointer) { + ::new (KnownNotNull, &mPayload.mFuncPtr) + FuncPtr(reinterpret_cast(aFuncPtr)); + } + + public: + /** + * Construct a |FunctionRef| that's like a null function pointer that can't be + * called. + */ + MOZ_IMPLICIT FunctionRef(std::nullptr_t) noexcept : mAdaptor(nullptr) { + // This is technically unnecessary, but it seems best to always initialize + // a union arm. 
+ ::new (KnownNotNull, &mPayload.mObject) void*(nullptr); + } + + FunctionRef() : FunctionRef(nullptr) {} + + /** + * Constructs a |FunctionRef| from an object callable with |Params| arguments, + * that returns a type convertible to |Ret|, where the callable isn't + * convertible to function pointer (often because it contains some internal + * state). For example: + * + * int x = 5; + * auto doSideEffect = [&x]{ x++; }; // state is captured reference to |x| + * FunctionRef f(doSideEffect); + */ + template < + typename Callable, + typename = detail::EnableFunctionTag, + typename std::enable_if_t>, + FunctionRef>>* = nullptr> + MOZ_IMPLICIT FunctionRef(Callable& aCallable) noexcept + : mAdaptor([](const Payload& aPayload, Params... aParams) { + auto& func = *static_cast(aPayload.mObject); + // Unable to use std::forward here due to llvm windows bug + // https://bugs.llvm.org/show_bug.cgi?id=28299 + // + // This prevents use of move-only arguments for functors and lambdas. + // Move only arguments can be used when using function pointers + return static_cast(func(static_cast(aParams)...)); + }) { + ::new (KnownNotNull, &mPayload.mObject) void*(&aCallable); + } + + /** + * Constructs a |FunctionRef| from an value callable with |Params| arguments, + * that returns a type convertible to |Ret|, where the callable is stateless + * and is (or is convertible to) a function pointer. For example: + * + * // Exact match + * double twice(double d) { return d * 2; } + * FunctionRef func1(&twice); + * + * // Compatible match + * float thrice(long double d) { return static_cast(d) * 3; } + * FunctionRef func2(&thrice); + * + * // Non-generic lambdas that don't capture anything have a conversion + * // function to the appropriate function pointer type. + * FunctionRef f([](long double){ return 'c'; }); + */ + template > + MOZ_IMPLICIT FunctionRef(const Callable& aCallable) noexcept + : FunctionRef(detail::MatchingFunctionPointerTag{}, +aCallable) {} + + /** Call the callable stored in this with the given arguments. */ + Ret operator()(Params... params) const { + return mAdaptor(mPayload, std::forward(params)...); + } + + /** Return true iff this wasn't created from |nullptr|. */ + explicit operator bool() const noexcept { return mAdaptor != nullptr; } +}; + +} /* namespace mozilla */ + +#endif /* mozilla_FunctionRef_h */ diff --git a/mfbt/FunctionTypeTraits.h b/mfbt/FunctionTypeTraits.h new file mode 100644 index 0000000000..83b3bc971a --- /dev/null +++ b/mfbt/FunctionTypeTraits.h @@ -0,0 +1,114 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_FunctionTypeTraits_h +#define mozilla_FunctionTypeTraits_h + +#include /* for size_t */ +#include + +namespace mozilla { + +// Main FunctionTypeTraits declaration, taking one template argument. +// +// Given a function type, FunctionTypeTraits will expose the following members: +// - ReturnType: Return type. +// - arity: Number of parameters (size_t). +// - ParameterType: Type of the Nth** parameter, 0-indexed. +// +// ** `ParameterType` with `N` >= `arity` is allowed and gives `void`. 
+//    This prevents compilation errors when trying to access a type outside
+//    of the function's parameters, which is useful for parameter checks,
+//    e.g.:
+//      template <typename F>
+//      auto foo(F&&)
+//          -> enable_if(FunctionTypeTraits<F>::arity == 1 &&
+//                       is_same<typename FunctionTypeTraits<
+//                                   F>::template ParameterType<0>,
+//                               int>::value,
+//                       void)
+//      {
+//        // This function will only be enabled if `F` takes one `int`.
+//        // Without the permissive ParameterType, it wouldn't even compile.
+//
+// Note: FunctionTypeTraits does not work with generic lambdas `[](auto&) {}`,
+// because parameter types cannot be known until an actual invocation when
+// types are inferred from the given arguments.
+template <typename T>
+struct FunctionTypeTraits;
+
+// Remove reference and pointer wrappers, if any.
+template <typename T>
+struct FunctionTypeTraits<T&> : public FunctionTypeTraits<T> {};
+template <typename T>
+struct FunctionTypeTraits<T&&> : public FunctionTypeTraits<T> {};
+template <typename T>
+struct FunctionTypeTraits<T*> : public FunctionTypeTraits<T> {};
+
+// Extract `operator()` function from callables (e.g. lambdas, std::function).
+template <typename T>
+struct FunctionTypeTraits
+    : public FunctionTypeTraits<decltype(&T::operator())> {};
+
+namespace detail {
+
+// If `safe`, retrieve the `N`th type from `As`, otherwise `void`.
+// See top description for reason.
+template <bool safe, size_t N, typename... As>
+struct TupleElementSafe;
+template <size_t N, typename... As>
+struct TupleElementSafe<true, N, As...> {
+  using Type = typename std::tuple_element<N, std::tuple<As...>>::type;
+};
+template <size_t N, typename... As>
+struct TupleElementSafe<false, N, As...> {
+  using Type = void;
+};
+
+template <typename R, typename... As>
+struct FunctionTypeTraitsHelper {
+  using ReturnType = R;
+  static constexpr size_t arity = sizeof...(As);
+  template <size_t N>
+  using ParameterType =
+      typename TupleElementSafe<(N < sizeof...(As)), N, As...>::Type;
+};
+
+}  // namespace detail
+
+// Specialization for free functions.
+template <typename R, typename... As>
+struct FunctionTypeTraits<R(As...)>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for non-const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (C::*)(As...)>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (C::*)(As...) const>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+#ifdef NS_HAVE_STDCALL
+// Specialization for __stdcall free functions.
+template <typename R, typename... As>
+struct FunctionTypeTraits<R(NS_STDCALL*)(As...)>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for __stdcall non-const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (NS_STDCALL C::*)(As...)>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+
+// Specialization for __stdcall const member functions.
+template <typename C, typename R, typename... As>
+struct FunctionTypeTraits<R (NS_STDCALL C::*)(As...) const>
+    : detail::FunctionTypeTraitsHelper<R, As...> {};
+#endif  // NS_HAVE_STDCALL
+
+}  // namespace mozilla
+
+#endif  // mozilla_FunctionTypeTraits_h
diff --git a/mfbt/Fuzzing.h b/mfbt/Fuzzing.h
new file mode 100644
index 0000000000..7435436615
--- /dev/null
+++ b/mfbt/Fuzzing.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +/* Additional definitions and implementation for fuzzing code */ + +#ifndef mozilla_Fuzzing_h +#define mozilla_Fuzzing_h + +#ifdef FUZZING_SNAPSHOT +# include "mozilla/fuzzing/NyxWrapper.h" + +# ifdef __cplusplus +# include "mozilla/fuzzing/Nyx.h" +# include "mozilla/ScopeExit.h" + +# define MOZ_FUZZING_NYX_RELEASE(id) \ + if (mozilla::fuzzing::Nyx::instance().is_enabled(id)) { \ + mozilla::fuzzing::Nyx::instance().release(); \ + } + +# define MOZ_FUZZING_NYX_GUARD(id) \ + auto nyxGuard = mozilla::MakeScopeExit([&] { \ + if (mozilla::fuzzing::Nyx::instance().is_enabled(id)) { \ + mozilla::fuzzing::Nyx::instance().release(); \ + } \ + }); +# endif + +# define MOZ_FUZZING_HANDLE_CRASH_EVENT2(aType, aReason) \ + do { \ + if (nyx_handle_event) { \ + nyx_handle_event(aType, __FILE__, __LINE__, aReason); \ + } \ + } while (false) + +# define MOZ_FUZZING_HANDLE_CRASH_EVENT4(aType, aFilename, aLine, aReason) \ + do { \ + if (nyx_handle_event) { \ + nyx_handle_event(aType, aFilename, aLine, aReason); \ + } \ + } while (false) + +# define MOZ_FUZZING_NYX_PRINT(aMsg) \ + do { \ + if (nyx_puts) { \ + nyx_puts(aMsg); \ + } else { \ + fprintf(stderr, aMsg); \ + } \ + } while (false) + +# define MOZ_FUZZING_NYX_PRINTF(aFormat, ...) \ + do { \ + if (nyx_puts) { \ + char msgbuf[2048]; \ + snprintf(msgbuf, sizeof(msgbuf) - 1, "" aFormat, __VA_ARGS__); \ + nyx_puts(msgbuf); \ + } else { \ + fprintf(stderr, aFormat, __VA_ARGS__); \ + } \ + } while (false) + +# ifdef FUZZ_DEBUG +# define MOZ_FUZZING_NYX_DEBUG(x) MOZ_FUZZING_NYX_PRINT(x) +# else +# define MOZ_FUZZING_NYX_DEBUG(x) +# endif +# define MOZ_FUZZING_NYX_ABORT(aMsg) \ + do { \ + MOZ_FUZZING_NYX_PRINT(aMsg); \ + MOZ_REALLY_CRASH(__LINE__); \ + } while (false); +#else +# define MOZ_FUZZING_NYX_RELEASE(id) +# define MOZ_FUZZING_NYX_GUARD(id) +# define MOZ_FUZZING_NYX_PRINT(aMsg) +# define MOZ_FUZZING_NYX_PRINTF(aFormat, ...) +# define MOZ_FUZZING_NYX_DEBUG(aMsg) +# define MOZ_FUZZING_NYX_ABORT(aMsg) +# define MOZ_FUZZING_HANDLE_CRASH_EVENT2(aType, aReason) \ + do { \ + } while (false) +# define MOZ_FUZZING_HANDLE_CRASH_EVENT4(aType, aFilename, aLine, aReason) \ + do { \ + } while (false) +#endif + +#endif /* mozilla_Fuzzing_h */ diff --git a/mfbt/HashFunctions.cpp b/mfbt/HashFunctions.cpp new file mode 100644 index 0000000000..4cb04e58a3 --- /dev/null +++ b/mfbt/HashFunctions.cpp @@ -0,0 +1,37 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Implementations of hash functions. */ + +#include "mozilla/HashFunctions.h" +#include "mozilla/Types.h" + +#include + +namespace mozilla { + +uint32_t HashBytes(const void* aBytes, size_t aLength) { + uint32_t hash = 0; + const char* b = reinterpret_cast(aBytes); + + /* Walk word by word. */ + size_t i = 0; + for (; i < aLength - (aLength % sizeof(size_t)); i += sizeof(size_t)) { + /* Do an explicitly unaligned load of the data. */ + size_t data; + memcpy(&data, b + i, sizeof(size_t)); + + hash = AddToHash(hash, data); + } + + /* Get the remaining bytes. 
*/ + for (; i < aLength; i++) { + hash = AddToHash(hash, b[i]); + } + return hash; +} + +} /* namespace mozilla */ diff --git a/mfbt/HashFunctions.h b/mfbt/HashFunctions.h new file mode 100644 index 0000000000..4b740a3db1 --- /dev/null +++ b/mfbt/HashFunctions.h @@ -0,0 +1,417 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Utilities for hashing. */ + +/* + * This file exports functions for hashing data down to a uint32_t (a.k.a. + * mozilla::HashNumber), including: + * + * - HashString Hash a char* or char16_t/wchar_t* of known or unknown + * length. + * + * - HashBytes Hash a byte array of known length. + * + * - HashGeneric Hash one or more values. Currently, we support uint32_t, + * types which can be implicitly cast to uint32_t, data + * pointers, and function pointers. + * + * - AddToHash Add one or more values to the given hash. This supports the + * same list of types as HashGeneric. + * + * + * You can chain these functions together to hash complex objects. For example: + * + * class ComplexObject + * { + * char* mStr; + * uint32_t mUint1, mUint2; + * void (*mCallbackFn)(); + * + * public: + * HashNumber hash() + * { + * HashNumber hash = HashString(mStr); + * hash = AddToHash(hash, mUint1, mUint2); + * return AddToHash(hash, mCallbackFn); + * } + * }; + * + * If you want to hash an nsAString or nsACString, use the HashString functions + * in nsHashKeys.h. + */ + +#ifndef mozilla_HashFunctions_h +#define mozilla_HashFunctions_h + +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/Char16.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Types.h" +#include "mozilla/WrappingOperations.h" + +#include +#include + +namespace mozilla { + +using HashNumber = uint32_t; +static const uint32_t kHashNumberBits = 32; + +/** + * The golden ratio as a 32-bit fixed-point value. + */ +static const HashNumber kGoldenRatioU32 = 0x9E3779B9U; + +/* + * Given a raw hash code, h, return a number that can be used to select a hash + * bucket. + * + * This function aims to produce as uniform an output distribution as possible, + * especially in the most significant (leftmost) bits, even though the input + * distribution may be highly nonrandom, given the constraints that this must + * be deterministic and quick to compute. + * + * Since the leftmost bits of the result are best, the hash bucket index is + * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent + * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask. + */ +constexpr HashNumber ScrambleHashCode(HashNumber h) { + /* + * Simply returning h would not cause any hash tables to produce wrong + * answers. But it can produce pathologically bad performance: The caller + * right-shifts the result, keeping only the highest bits. The high bits of + * hash codes are very often completely entropy-free. (So are the lowest + * bits.) + * + * So we use Fibonacci hashing, as described in Knuth, The Art of Computer + * Programming, 6.4. This mixes all the bits of the input hash code h. + * + * The value of goldenRatio is taken from the hex expansion of the golden + * ratio, which starts 1.9E3779B9.... 
This value is especially good if
+ * values with consecutive hash codes are stored in a hash table; see Knuth
+ * for details.
+ */
+  return mozilla::WrappingMultiply(h, kGoldenRatioU32);
+}
+
+namespace detail {
+
+MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+constexpr HashNumber RotateLeft5(HashNumber aValue) {
+  return (aValue << 5) | (aValue >> 27);
+}
+
+constexpr HashNumber AddU32ToHash(HashNumber aHash, uint32_t aValue) {
+  /*
+   * This is the meat of all our hash routines. This hash function is not
+   * particularly sophisticated, but it seems to work well for our mostly
+   * plain-text inputs. Implementation notes follow.
+   *
+   * Our use of the golden ratio here is arbitrary; we could pick almost any
+   * number which:
+   *
+   *   * is odd (because otherwise, all our hash values will be even)
+   *
+   *   * has a reasonably-even mix of 1's and 0's (consider the extreme case
+   *     where we multiply by 0x3 or 0xeffffff -- this will not produce good
+   *     mixing across all bits of the hash).
+   *
+   * The rotation length of 5 is also arbitrary, although an odd number is
+   * again preferable so our hash explores the whole universe of possible
+   * rotations.
+   *
+   * Finally, we multiply by the golden ratio *after* xor'ing, not before.
+   * Otherwise, if |aHash| is 0 (as it often is for the beginning of a
+   * message), the expression
+   *
+   *   mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash))
+   *   |xor|
+   *   aValue
+   *
+   * evaluates to |aValue|.
+   *
+   * (Number-theoretic aside: Because any odd number |m| is relatively prime
+   * to our modulus (2**32), the list
+   *
+   *    [x * m (mod 2**32) for 0 <= x < 2**32]
+   *
+   * has no duplicate elements. This means that multiplying by |m| does not
+   * cause us to skip any possible hash values.
+   *
+   * It's also nice if |m| has large-ish order mod 2**32 -- that is, if the
+   * smallest k such that m**k == 1 (mod 2**32) is large -- so we can safely
+   * multiply our hash value by |m| a few times without negating the
+   * multiplicative effect. Our golden ratio constant has order 2**29, which
+   * is more than enough for our purposes.)
+   */
+  return mozilla::WrappingMultiply(kGoldenRatioU32,
+                                   RotateLeft5(aHash) ^ aValue);
+}
+
+/**
+ * AddUintptrToHash takes sizeof(uintptr_t) as a template parameter.
+ */
+template <size_t PtrSize>
+constexpr HashNumber AddUintptrToHash(HashNumber aHash, uintptr_t aValue) {
+  return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
+}
+
+template <>
+inline HashNumber AddUintptrToHash<8>(HashNumber aHash, uintptr_t aValue) {
+  uint32_t v1 = static_cast<uint32_t>(aValue);
+  uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
+  return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
+}
+
+} /* namespace detail */
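
A sketch of the bucket-selection recipe from the ScrambleHashCode comment
above, taking the well-mixed high bits by right-shifting (BucketFor is a
hypothetical helper; log2Capacity must be in (0, 32]):

#include <cstdint>

#include "mozilla/HashFunctions.h"

// Map a raw hash code to one of 2**log2Capacity buckets using the high,
// well-mixed bits of the scrambled code.
uint32_t BucketFor(uint32_t rawHash, uint32_t log2Capacity) {
  return mozilla::ScrambleHashCode(rawHash) >> (32 - log2Capacity);
}
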
+
+/**
+ * AddToHash takes a hash and some values and returns a new hash based on
+ * the inputs.
+ *
+ * Currently, we support hashing uint32_t's, values which we can implicitly
+ * convert to uint32_t, data pointers, and function pointers.
+ */
+template <typename T, bool TypeIsNotIntegral = !std::is_integral_v<T>,
+          bool TypeIsNotEnum = !std::is_enum_v<T>,
+          std::enable_if_t<TypeIsNotIntegral && TypeIsNotEnum, int> = 0>
+[[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, T aA) {
+  /*
+   * Try to convert |A| to uint32_t implicitly. If this works, great. If
+   * not, we'll error out.
+   */
+  return detail::AddU32ToHash(aHash, aA);
+}
+
+template <typename A>
+[[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, A* aA) {
+  /*
+   * You might think this function should just take a void*. But then we'd
+   * only catch data pointers and couldn't handle function pointers.
+   */
+
+  static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
+
+  return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
+}
+
+// We use AddUintptrToHash() for hashing all integral types. 8-byte integral
+// types are treated the same as 64-bit pointers, and smaller integral types
+// are first implicitly converted to 32 bits and then passed to
+// AddUintptrToHash() to be hashed.
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
+[[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
+  return detail::AddUintptrToHash<sizeof(T)>(aHash, aA);
+}
+
+template <typename T, std::enable_if_t<std::is_enum_v<T>, int> = 0>
+[[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
+  // Hash using AddUintptrToHash with the underlying type of the enum type
+  using UnderlyingType = typename std::underlying_type<T>::type;
+  return detail::AddUintptrToHash<sizeof(UnderlyingType)>(
+      aHash, static_cast<UnderlyingType>(aA));
+}
+
+template <typename A, typename... Args>
+[[nodiscard]] HashNumber AddToHash(HashNumber aHash, A aArg, Args... aArgs) {
+  return AddToHash(AddToHash(aHash, aArg), aArgs...);
+}
+
+/**
+ * The HashGeneric class of functions let you hash one or more values.
+ *
+ * If you want to hash together two values x and y, calling HashGeneric(x, y)
+ * is much better than calling AddToHash(x, y), because AddToHash(x, y)
+ * assumes that x has already been hashed.
+ */
+template <typename... Args>
+[[nodiscard]] inline HashNumber HashGeneric(Args... aArgs) {
+  return AddToHash(0, aArgs...);
+}
+
+/**
+ * Hash successive |*aIter| until |!*aIter|, i.e. til null-termination.
+ *
+ * This function is *not* named HashString like the non-template overloads
+ * below. Some users define HashString overloads and pass inexactly-matching
+ * values to them -- but an inexactly-matching value would match this
+ * overload instead! We follow the general rule and don't mix and match
+ * template and regular overloads to avoid this.
+ *
+ * If you have the string's length, call HashStringKnownLength: it may be
+ * marginally faster.
+ */
+template <typename Iterator>
+[[nodiscard]] constexpr HashNumber HashStringUntilZero(Iterator aIter) {
+  HashNumber hash = 0;
+  for (; auto c = *aIter; ++aIter) {
+    hash = AddToHash(hash, c);
+  }
+  return hash;
+}
+
+/**
+ * Hash successive |aIter[i]| up to |i == aLength|.
+ */
+template <typename Iterator>
+[[nodiscard]] constexpr HashNumber HashStringKnownLength(Iterator aIter,
+                                                         size_t aLength) {
+  HashNumber hash = 0;
+  for (size_t i = 0; i < aLength; i++) {
+    hash = AddToHash(hash, aIter[i]);
+  }
+  return hash;
+}
+
+/**
+ * The HashString overloads below do just what you'd expect.
+ *
+ * These functions are non-template functions so that users can 1) overload
+ * them with their own types 2) in a way that allows implicit conversions to
+ * happen.
+ */
+[[nodiscard]] inline HashNumber HashString(const char* aStr) {
+  // Use the |const unsigned char*| version of the above so that all ordinary
+  // character data hashes identically.
+  return HashStringUntilZero(reinterpret_cast<const unsigned char*>(aStr));
+}
+
+[[nodiscard]] inline HashNumber HashString(const char* aStr, size_t aLength) {
+  // Delegate to the |const unsigned char*| version of the above to share
+  // template instantiations.
+  return HashStringKnownLength(reinterpret_cast<const unsigned char*>(aStr),
+                               aLength);
+}
+
+[[nodiscard]] inline HashNumber HashString(const unsigned char* aStr,
+                                           size_t aLength) {
+  return HashStringKnownLength(aStr, aLength);
+}
+
+[[nodiscard]] constexpr HashNumber HashString(const char16_t* aStr) {
+  return HashStringUntilZero(aStr);
+}
+
+[[nodiscard]] inline HashNumber HashString(const char16_t* aStr,
+                                           size_t aLength) {
+  return HashStringKnownLength(aStr, aLength);
+}
+
+/**
+ * HashString overloads for |wchar_t| on platforms where it isn't |char16_t|.
+ */
+template <typename WCharT, typename = typename std::enable_if<
+                               std::is_same<WCharT, wchar_t>::value &&
+                               !std::is_same<wchar_t, char16_t>::value>::type>
+[[nodiscard]] inline HashNumber HashString(const WCharT* aStr) {
+  return HashStringUntilZero(aStr);
+}
+
+template <typename WCharT, typename = typename std::enable_if<
+                               std::is_same<WCharT, wchar_t>::value &&
+                               !std::is_same<wchar_t, char16_t>::value>::type>
+[[nodiscard]] inline HashNumber HashString(const WCharT* aStr,
+                                           size_t aLength) {
+  return HashStringKnownLength(aStr, aLength);
+}
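
A short sketch chaining the string and value hashers (HashNameAndId is a
hypothetical helper):

#include <cstdint>

#include "mozilla/HashFunctions.h"

// Combine a C string and a numeric id into one hash code.
mozilla::HashNumber HashNameAndId(const char* aName, uint32_t aId) {
  mozilla::HashNumber h = mozilla::HashString(aName);
  return mozilla::AddToHash(h, aId);
}
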
+
+/**
+ * Hash some number of bytes.
+ *
+ * This hash walks word-by-word, rather than byte-by-byte, so you won't get
+ * the same result out of HashBytes as you would out of HashString.
+ */
+[[nodiscard]] extern MFBT_API HashNumber HashBytes(const void* bytes,
+                                                   size_t aLength);
+
+/**
+ * A pseudorandom function mapping 32-bit integers to 32-bit integers.
+ *
+ * This is for when you're feeding private data (like pointer values or
+ * credit card numbers) to a non-crypto hash function (like HashBytes) and
+ * then using the hash code for something that untrusted parties could
+ * observe (like a JS Map). Plug in a HashCodeScrambler before that last
+ * step to avoid leaking the private data.
+ *
+ * By itself, this does not prevent hash-flooding DoS attacks, because an
+ * attacker can still generate many values with exactly equal hash codes by
+ * attacking the non-crypto hash function alone. Equal hash codes will, of
+ * course, still be equal however much you scramble them.
+ *
+ * The algorithm is SipHash-1-3. See <https://131002.net/siphash/>.
+ */
+class HashCodeScrambler {
+  struct SipHasher;
+
+  uint64_t mK0, mK1;
+
+ public:
+  /** Creates a new scrambler with the given 128-bit key. */
+  constexpr HashCodeScrambler(uint64_t aK0, uint64_t aK1)
+      : mK0(aK0), mK1(aK1) {}
+
+  /**
+   * Scramble a hash code. Always produces the same result for the same
+   * combination of key and hash code.
+   */
+  HashNumber scramble(HashNumber aHashCode) const {
+    SipHasher hasher(mK0, mK1);
+    return HashNumber(hasher.sipHash(aHashCode));
+  }
+
+  static constexpr size_t offsetOfMK0() {
+    return offsetof(HashCodeScrambler, mK0);
+  }
+
+  static constexpr size_t offsetOfMK1() {
+    return offsetof(HashCodeScrambler, mK1);
+  }
+
+ private:
+  struct SipHasher {
+    SipHasher(uint64_t aK0, uint64_t aK1) {
+      // 1. Initialization.
+      mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
+      mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
+      mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
+      mV3 = aK1 ^ UINT64_C(0x7465646279746573);
+    }
+
+    uint64_t sipHash(uint64_t aM) {
+      // 2. Compression.
+      mV3 ^= aM;
+      sipRound();
+      mV0 ^= aM;
+
+      // 3. Finalization.
+ mV2 ^= 0xff; + for (int i = 0; i < 3; i++) sipRound(); + return mV0 ^ mV1 ^ mV2 ^ mV3; + } + + void sipRound() { + mV0 = WrappingAdd(mV0, mV1); + mV1 = RotateLeft(mV1, 13); + mV1 ^= mV0; + mV0 = RotateLeft(mV0, 32); + mV2 = WrappingAdd(mV2, mV3); + mV3 = RotateLeft(mV3, 16); + mV3 ^= mV2; + mV0 = WrappingAdd(mV0, mV3); + mV3 = RotateLeft(mV3, 21); + mV3 ^= mV0; + mV2 = WrappingAdd(mV2, mV1); + mV1 = RotateLeft(mV1, 17); + mV1 ^= mV2; + mV2 = RotateLeft(mV2, 32); + } + + uint64_t mV0, mV1, mV2, mV3; + }; +}; + +} /* namespace mozilla */ + +#endif /* mozilla_HashFunctions_h */ diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h new file mode 100644 index 0000000000..e5fadcf551 --- /dev/null +++ b/mfbt/HashTable.h @@ -0,0 +1,2275 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +//--------------------------------------------------------------------------- +// Overview +//--------------------------------------------------------------------------- +// +// This file defines HashMap and HashSet, hash tables that are +// fast and have a nice API. +// +// Both hash tables have two optional template parameters. +// +// - HashPolicy. This defines the operations for hashing and matching keys. The +// default HashPolicy is appropriate when both of the following two +// conditions are true. +// +// - The key type stored in the table (|Key| for |HashMap|, |T| +// for |HashSet|) is an integer, pointer, UniquePtr, float, or double. +// +// - The type used for lookups (|Lookup|) is the same as the key type. This +// is usually the case, but not always. +// +// There is also a |CStringHasher| policy for |char*| keys. If your keys +// don't match any of the above cases, you must provide your own hash policy; +// see the "Hash Policy" section below. +// +// - AllocPolicy. This defines how allocations are done by the table. +// +// - |MallocAllocPolicy| is the default and is usually appropriate; note that +// operations (such as insertions) that might cause allocations are +// fallible and must be checked for OOM. These checks are enforced by the +// use of [[nodiscard]]. +// +// - |InfallibleAllocPolicy| is another possibility; it allows the +// abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE(). +// +// Note that entry storage allocation is lazy, and not done until the first +// lookupForAdd(), put(), or putNew() is performed. +// +// See AllocPolicy.h for more details. +// +// Documentation on how to use HashMap and HashSet, including examples, is +// present within those classes. Search for "class HashMap" and "class +// HashSet". +// +// Both HashMap and HashSet are implemented on top of a third class, HashTable. +// You only need to look at HashTable if you want to understand the +// implementation. +// +// How does mozilla::HashTable (this file) compare with PLDHashTable (and its +// subclasses, such as nsTHashtable)? +// +// - mozilla::HashTable is a lot faster, largely because it uses templates +// throughout *and* inlines everything. PLDHashTable inlines operations much +// less aggressively, and also uses "virtual ops" for operations like hashing +// and matching entries that require function calls. 
+// +// - Correspondingly, mozilla::HashTable use is likely to increase executable +// size much more than PLDHashTable. +// +// - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap +// distinction. +// +// - mozilla::HashTable requires more explicit OOM checking. As mentioned +// above, the use of |InfallibleAllocPolicy| can simplify things. +// +// - mozilla::HashTable has a default capacity on creation of 32 and a minimum +// capacity of 4. PLDHashTable has a default capacity on creation of 8 and a +// minimum capacity of 8. + +#ifndef mozilla_HashTable_h +#define mozilla_HashTable_h + +#include +#include + +#include "mozilla/AllocPolicy.h" +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/Casting.h" +#include "mozilla/HashFunctions.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Maybe.h" +#include "mozilla/MemoryChecking.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/Opaque.h" +#include "mozilla/OperatorNewExtensions.h" +#include "mozilla/ReentrancyGuard.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/WrappingOperations.h" + +namespace mozilla { + +template +struct DefaultHasher; + +template +class HashMapEntry; + +namespace detail { + +template +class HashTableEntry; + +template +class HashTable; + +} // namespace detail + +// The "generation" of a hash table is an opaque value indicating the state of +// modification of the hash table through its lifetime. If the generation of +// a hash table compares equal at times T1 and T2, then lookups in the hash +// table, pointers to (or into) hash table entries, etc. at time T1 are valid +// at time T2. If the generation compares unequal, these computations are all +// invalid and must be performed again to be used. +// +// Generations are meaningfully comparable only with respect to a single hash +// table. It's always nonsensical to compare the generation of distinct hash +// tables H1 and H2. +using Generation = Opaque; + +//--------------------------------------------------------------------------- +// HashMap +//--------------------------------------------------------------------------- + +// HashMap is a fast hash-based map from keys to values. +// +// Template parameter requirements: +// - Key/Value: movable, destructible, assignable. +// - HashPolicy: see the "Hash Policy" section below. +// - AllocPolicy: see AllocPolicy.h. +// +// Note: +// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members +// called by HashMap must not call back into the same HashMap object. +// +template , + class AllocPolicy = MallocAllocPolicy> +class HashMap { + // -- Implementation details ----------------------------------------------- + + // HashMap is not copyable or assignable. 
+ HashMap(const HashMap& hm) = delete; + HashMap& operator=(const HashMap& hm) = delete; + + using TableEntry = HashMapEntry; + + struct MapHashPolicy : HashPolicy { + using Base = HashPolicy; + using KeyType = Key; + + static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); } + + static void setKey(TableEntry& aEntry, Key& aKey) { + HashPolicy::rekey(aEntry.mutableKey(), aKey); + } + }; + + using Impl = detail::HashTable; + Impl mImpl; + + friend class Impl::Enum; + + public: + using Lookup = typename HashPolicy::Lookup; + using Entry = TableEntry; + + // -- Initialization ------------------------------------------------------- + + explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(), + uint32_t aLen = Impl::sDefaultLen) + : mImpl(std::move(aAllocPolicy), aLen) {} + + explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {} + + // HashMap is movable. + HashMap(HashMap&& aRhs) = default; + HashMap& operator=(HashMap&& aRhs) = default; + + // -- Status and sizing ---------------------------------------------------- + + // The map's current generation. + Generation generation() const { return mImpl.generation(); } + + // Is the map empty? + bool empty() const { return mImpl.empty(); } + + // Number of keys/values in the map. + uint32_t count() const { return mImpl.count(); } + + // Number of key/value slots in the map. Note: resize will happen well before + // count() == capacity(). + uint32_t capacity() const { return mImpl.capacity(); } + + // The size of the map's entry storage, in bytes. If the keys/values contain + // pointers to other heap blocks, you must iterate over the map and measure + // them separately; hence the "shallow" prefix. + size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); + } + size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this) + + mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); + } + + // Attempt to minimize the capacity(). If the table is empty, this will free + // the empty storage and upon regrowth it will be given the minimum capacity. + void compact() { mImpl.compact(); } + + // Attempt to reserve enough space to fit at least |aLen| elements. This is + // total capacity, including elements already present. Does nothing if the + // map already has sufficient capacity. + [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); } + + // -- Lookups -------------------------------------------------------------- + + // Does the map contain a key/value matching |aLookup|? + bool has(const Lookup& aLookup) const { + return mImpl.lookup(aLookup).found(); + } + + // Return a Ptr indicating whether a key/value matching |aLookup| is + // present in the map. E.g.: + // + // using HM = HashMap; + // HM h; + // if (HM::Ptr p = h.lookup(3)) { + // assert(p->key() == 3); + // char val = p->value(); + // } + // + using Ptr = typename Impl::Ptr; + MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const { + return mImpl.lookup(aLookup); + } + + // Like lookup(), but does not assert if two threads call it at the same + // time. Only use this method when none of the threads will modify the map. + MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { + return mImpl.readonlyThreadsafeLookup(aLookup); + } + + // -- Insertions ----------------------------------------------------------- + + // Overwrite existing value with |aValue|, or add it if not present. Returns + // false on OOM. 
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) {
+    return put(aKey, std::forward<KeyInput>(aKey),
+               std::forward<ValueInput>(aValue));
+  }
+
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey,
+                         ValueInput&& aValue) {
+    AddPtr p = lookupForAdd(aLookup);
+    if (p) {
+      p->value() = std::forward<ValueInput>(aValue);
+      return true;
+    }
+    return add(p, std::forward<KeyInput>(aKey),
+               std::forward<ValueInput>(aValue));
+  }
+
+  // Like put(), but slightly faster. Must only be used when the given key is
+  // not already present. (In debug builds, assertions check this.)
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) {
+    return mImpl.putNew(aKey, std::forward<KeyInput>(aKey),
+                        std::forward<ValueInput>(aValue));
+  }
+
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey,
+                            ValueInput&& aValue) {
+    return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey),
+                        std::forward<ValueInput>(aValue));
+  }
+
+  // Like putNew(), but should be only used when the table is known to be big
+  // enough for the insertion, and hashing cannot fail. Typically this is used
+  // to populate an empty map with known-unique keys after reserving space with
+  // reserve(), e.g.
+  //
+  //   using HM = HashMap<int, char>;
+  //   HM h;
+  //   if (!h.reserve(3)) {
+  //     MOZ_CRASH("OOM");
+  //   }
+  //   h.putNewInfallible(1, 'a');  // unique key
+  //   h.putNewInfallible(2, 'b');  // unique key
+  //   h.putNewInfallible(3, 'c');  // unique key
+  //
+  template <typename KeyInput, typename ValueInput>
+  void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) {
+    mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey),
+                           std::forward<ValueInput>(aValue));
+  }
+
+  // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+  // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+  // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
+  //
+  //   using HM = HashMap<int, char>;
+  //   HM h;
+  //   HM::AddPtr p = h.lookupForAdd(3);
+  //   if (!p) {
+  //     if (!h.add(p, 3, 'a')) {
+  //       return false;
+  //     }
+  //   }
+  //   assert(p->key() == 3);
+  //   char val = p->value();
+  //
+  // N.B. The caller must ensure that no mutating hash table operations occur
+  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+  // key a second time, the caller may use the more efficient relookupOrAdd()
+  // method. This method reuses part of the hashing computation to more
+  // efficiently insert the key if it has not been added. For example, a
+  // mutation-handling version of the previous example:
+  //
+  //   HM::AddPtr p = h.lookupForAdd(3);
+  //   if (!p) {
+  //     call_that_may_mutate_h();
+  //     if (!h.relookupOrAdd(p, 3, 'a')) {
+  //       return false;
+  //     }
+  //   }
+  //   assert(p->key() == 3);
+  //   char val = p->value();
+  //
+  using AddPtr = typename Impl::AddPtr;
+  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+    return mImpl.lookupForAdd(aLookup);
+  }
+
+  // Add a key/value. Returns false on OOM.
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) {
+    return mImpl.add(aPtr, std::forward<KeyInput>(aKey),
+                     std::forward<ValueInput>(aValue));
+  }
+
+  // See the comment above lookupForAdd() for details.
+  template <typename KeyInput, typename ValueInput>
+  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey,
+                                   ValueInput&& aValue) {
+    return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey),
+                               std::forward<ValueInput>(aValue));
+  }
+
+  // -- Removal --------------------------------------------------------------
+
+  // Lookup and remove the key/value matching |aLookup|, if present.
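+  // E.g., with a hypothetical HashMap<int, char> h:
+  //
+  //   h.remove(3);  // a no-op if no entry with key 3 is present
+  //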
+  void remove(const Lookup& aLookup) {
+    if (Ptr p = lookup(aLookup)) {
+      remove(p);
+    }
+  }
+
+  // Remove a previously found key/value (assuming aPtr.found()). The map must
+  // not have been mutated in the interim.
+  void remove(Ptr aPtr) { mImpl.remove(aPtr); }
+
+  // Remove all keys/values without changing the capacity.
+  void clear() { mImpl.clear(); }
+
+  // Like clear() followed by compact().
+  void clearAndCompact() { mImpl.clearAndCompact(); }
+
+  // -- Rekeying -------------------------------------------------------------
+
+  // Infallibly rekey one entry, if necessary. Requires that template
+  // parameters Key and HashPolicy::Lookup are the same type.
+  void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) {
+    if (aOldKey != aNewKey) {
+      rekeyAs(aOldKey, aNewKey, aNewKey);
+    }
+  }
+
+  // Infallibly rekey one entry if present, and return whether that happened.
+  bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
+               const Key& aNewKey) {
+    if (Ptr p = lookup(aOldLookup)) {
+      mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
+      return true;
+    }
+    return false;
+  }
+
+  // -- Iteration --------------------------------------------------------------
+
+  // |iter()| returns an Iterator:
+  //
+  //   HashMap<int, char> h;
+  //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
+  //     char c = iter.get().value();
+  //   }
+  //
+  using Iterator = typename Impl::Iterator;
+  Iterator iter() const { return mImpl.iter(); }
+
+  // |modIter()| returns a ModIterator:
+  //
+  //   HashMap<int, char> h;
+  //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
+  //     if (iter.get().value() == 'l') {
+  //       iter.remove();
+  //     }
+  //   }
+  //
+  // Table resize may occur in ModIterator's destructor.
+  using ModIterator = typename Impl::ModIterator;
+  ModIterator modIter() { return mImpl.modIter(); }
+
+  // These are similar to Iterator/ModIterator/iter(), but use different
+  // terminology.
+  using Range = typename Impl::Range;
+  using Enum = typename Impl::Enum;
+  Range all() const { return mImpl.all(); }
+};
+
+//---------------------------------------------------------------------------
+// HashSet
+//---------------------------------------------------------------------------
+
+// HashSet is a fast hash-based set of values.
+//
+// Template parameter requirements:
+// - T: movable, destructible, assignable.
+// - HashPolicy: see the "Hash Policy" section below.
+// - AllocPolicy: see AllocPolicy.h
+//
+// Note:
+// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+//   HashSet must not call back into the same HashSet object.
+//
+template <class T, class HashPolicy = DefaultHasher<T>,
+          class AllocPolicy = MallocAllocPolicy>
+class HashSet {
+  // -- Implementation details -----------------------------------------------
+
+  // HashSet is not copyable or assignable.
+  HashSet(const HashSet& hs) = delete;
+  HashSet& operator=(const HashSet& hs) = delete;
+
+  struct SetHashPolicy : HashPolicy {
+    using Base = HashPolicy;
+    using KeyType = T;
+
+    static const KeyType& getKey(const T& aT) { return aT; }
+
+    static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
+  };
+
+  using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
+  Impl mImpl;
+
+  friend class Impl::Enum;
+
+ public:
+  using Lookup = typename HashPolicy::Lookup;
+  using Entry = T;
+
+  // -- Initialization -------------------------------------------------------
+
+  explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(),
+                   uint32_t aLen = Impl::sDefaultLen)
+      : mImpl(std::move(aAllocPolicy), aLen) {}
+
+  explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
+
+  // HashSet is movable.
+  HashSet(HashSet&& aRhs) = default;
+  HashSet& operator=(HashSet&& aRhs) = default;
+
+  // -- Status and sizing ----------------------------------------------------
+
+  // The set's current generation.
+  Generation generation() const { return mImpl.generation(); }
+
+  // Is the set empty?
+  bool empty() const { return mImpl.empty(); }
+
+  // Number of elements in the set.
+  uint32_t count() const { return mImpl.count(); }
+
+  // Number of element slots in the set. Note: resize will happen well before
+  // count() == capacity().
+  uint32_t capacity() const { return mImpl.capacity(); }
+
+  // The size of the set's entry storage, in bytes. If the elements contain
+  // pointers to other heap blocks, you must iterate over the set and measure
+  // them separately; hence the "shallow" prefix.
+  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+    return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+  }
+  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+    return aMallocSizeOf(this) +
+           mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
+  }
+
+  // Attempt to minimize the capacity(). If the table is empty, this will free
+  // the empty storage and upon regrowth it will be given the minimum capacity.
+  void compact() { mImpl.compact(); }
+
+  // Attempt to reserve enough space to fit at least |aLen| elements. This is
+  // total capacity, including elements already present. Does nothing if the
+  // map already has sufficient capacity.
+  [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
+
+  // -- Lookups --------------------------------------------------------------
+
+  // Does the set contain an element matching |aLookup|?
+  bool has(const Lookup& aLookup) const {
+    return mImpl.lookup(aLookup).found();
+  }
+
+  // Return a Ptr indicating whether an element matching |aLookup| is present
+  // in the set. E.g.:
+  //
+  //   using HS = HashSet<int>;
+  //   HS h;
+  //   if (HS::Ptr p = h.lookup(3)) {
+  //     assert(*p == 3);  // p acts like a pointer to int
+  //   }
+  //
+  using Ptr = typename Impl::Ptr;
+  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
+    return mImpl.lookup(aLookup);
+  }
+
+  // Like lookup(), but does not assert if two threads call it at the same
+  // time. Only use this method when none of the threads will modify the set.
+  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
+    return mImpl.readonlyThreadsafeLookup(aLookup);
+  }
+
+  // -- Insertions -----------------------------------------------------------
+
+  // Add |aU| if it is not present already. Returns false on OOM.
+  template <typename U>
+  [[nodiscard]] bool put(U&& aU) {
+    AddPtr p = lookupForAdd(aU);
+    return p ? true : add(p, std::forward<U>(aU));
+  }
+
+  // Like put(), but slightly faster. Must only be used when the given element
+  // is not already present. (In debug builds, assertions check this.)
+  template <typename U>
+  [[nodiscard]] bool putNew(U&& aU) {
+    return mImpl.putNew(aU, std::forward<U>(aU));
+  }
+
+  // Like the other putNew(), but for when |Lookup| is different to |T|.
+  template <typename U>
+  [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) {
+    return mImpl.putNew(aLookup, std::forward<U>(aU));
+  }
+
+  // Like putNew(), but should be only used when the table is known to be big
+  // enough for the insertion, and hashing cannot fail. Typically this is used
+  // to populate an empty set with known-unique elements after reserving space
+  // with reserve(), e.g.
+  //
+  //   using HS = HashSet<int>;
+  //   HS h;
+  //   if (!h.reserve(3)) {
+  //     MOZ_CRASH("OOM");
+  //   }
+  //   h.putNewInfallible(1);  // unique element
+  //   h.putNewInfallible(2);  // unique element
+  //   h.putNewInfallible(3);  // unique element
+  //
+  template <typename U>
+  void putNewInfallible(const Lookup& aLookup, U&& aU) {
+    mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
+  }
+
+  // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+  // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+  // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+  //
+  //   using HS = HashSet<int>;
+  //   HS h;
+  //   HS::AddPtr p = h.lookupForAdd(3);
+  //   if (!p) {
+  //     if (!h.add(p, 3)) {
+  //       return false;
+  //     }
+  //   }
+  //   assert(*p == 3);  // p acts like a pointer to int
+  //
+  // N.B. The caller must ensure that no mutating hash table operations occur
+  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+  // key a second time, the caller may use the more efficient relookupOrAdd()
+  // method. This method reuses part of the hashing computation to more
+  // efficiently insert the key if it has not been added. For example, a
+  // mutation-handling version of the previous example:
+  //
+  //   HS::AddPtr p = h.lookupForAdd(3);
+  //   if (!p) {
+  //     call_that_may_mutate_h();
+  //     if (!h.relookupOrAdd(p, 3, 3)) {
+  //       return false;
+  //     }
+  //   }
+  //   assert(*p == 3);
+  //
+  // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+  // entry |t|, where the caller ensures match(l,t).
+  using AddPtr = typename Impl::AddPtr;
+  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+    return mImpl.lookupForAdd(aLookup);
+  }
+
+  // Add an element. Returns false on OOM.
+  template <typename U>
+  [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) {
+    return mImpl.add(aPtr, std::forward<U>(aU));
+  }
+
+  // See the comment above lookupForAdd() for details.
+  template <typename U>
+  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
+                                   U&& aU) {
+    return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
+  }
+
+  // -- Removal --------------------------------------------------------------
+
+  // Lookup and remove the element matching |aLookup|, if present.
+  void remove(const Lookup& aLookup) {
+    if (Ptr p = lookup(aLookup)) {
+      remove(p);
+    }
+  }
+
+  // Remove a previously found element (assuming aPtr.found()). The set must
+  // not have been mutated in the interim.
+  void remove(Ptr aPtr) { mImpl.remove(aPtr); }
+
+  // Remove all keys/values without changing the capacity.
+  void clear() { mImpl.clear(); }
+
+  // Like clear() followed by compact().
+  void clearAndCompact() { mImpl.clearAndCompact(); }
+
+  // -- Rekeying -------------------------------------------------------------
+
+  // Infallibly rekey one entry, if present. Requires that template parameters
+  // T and HashPolicy::Lookup are the same type.
+  void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) {
+    if (aOldValue != aNewValue) {
+      rekeyAs(aOldValue, aNewValue, aNewValue);
+    }
+  }
+
+  // Infallibly rekey one entry if present, and return whether that happened.
+  bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
+               const T& aNewValue) {
+    if (Ptr p = lookup(aOldLookup)) {
+      mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
+      return true;
+    }
+    return false;
+  }
+
+  // Infallibly replace the current key at |aPtr| with an equivalent key.
+  // Specifically, both HashPolicy::hash and HashPolicy::match must return
+  // identical results for the new and old key when applied against all
+  // possible matching values.
+  void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) {
+    MOZ_ASSERT(aPtr.found());
+    MOZ_ASSERT(*aPtr != aNewValue);
+    MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup));
+    MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup));
+    const_cast<T&>(*aPtr) = aNewValue;
+    MOZ_ASSERT(*lookup(aLookup) == aNewValue);
+  }
+  void replaceKey(Ptr aPtr, const T& aNewValue) {
+    replaceKey(aPtr, aNewValue, aNewValue);
+  }
+
+  // -- Iteration --------------------------------------------------------------
+
+  // |iter()| returns an Iterator:
+  //
+  //   HashSet<int> h;
+  //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
+  //     int i = iter.get();
+  //   }
+  //
+  using Iterator = typename Impl::Iterator;
+  Iterator iter() const { return mImpl.iter(); }
+
+  // |modIter()| returns a ModIterator:
+  //
+  //   HashSet<int> h;
+  //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
+  //     if (iter.get() == 42) {
+  //       iter.remove();
+  //     }
+  //   }
+  //
+  // Table resize may occur in ModIterator's destructor.
+  using ModIterator = typename Impl::ModIterator;
+  ModIterator modIter() { return mImpl.modIter(); }
+
+  // These are similar to Iterator/ModIterator/iter(), but use different
+  // terminology.
+  using Range = typename Impl::Range;
+  using Enum = typename Impl::Enum;
+  Range all() const { return mImpl.all(); }
+};
+
+//---------------------------------------------------------------------------
+// Hash Policy
+//---------------------------------------------------------------------------
+
+// A hash policy |HP| for a hash table with key-type |Key| must provide:
+//
+//  - a type |HP::Lookup| to use to lookup table entries;
+//
+//  - a static member function |HP::hash| that hashes lookup values:
+//
+//      static mozilla::HashNumber hash(const Lookup&);
+//
+//  - a static member function |HP::match| that tests equality of key and
+//    lookup values:
+//
+//      static bool match(const Key&, const Lookup&);
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to lookup and store. If a Lookup value |l| is not equal
+// to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
+// true. E.g.:
+//
+//   mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l);
+//   if (!p) {
+//     assert(HP::match(k, l));  // must hold
+//     h.add(p, k);
+//   }
+
+// A pointer hashing policy that uses HashGeneric() to create good hashes for
+// pointers. Note that we don't shift out the lowest k bits because we don't
+// want to assume anything about the alignment of the pointers.
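+//
+// E.g. (sketch, with a hypothetical type Foo): HashSet<Foo*,
+// PointerHasher<Foo*>> behaves the same as HashSet<Foo*>, because
+// DefaultHasher<T*> below simply derives from PointerHasher<T*>.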
+template <typename Key>
+struct PointerHasher {
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    size_t word = reinterpret_cast<size_t>(aLookup);
+    return HashGeneric(word);
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return aKey == aLookup;
+  }
+
+  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// The default hash policy, which only works with integers.
+template <class Key, typename>
+struct DefaultHasher {
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    // Just convert the integer to a HashNumber and use that as is. (This
+    // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is
+    // subsequently called on the value to improve the distribution.
+    return aLookup;
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    // Use builtin or overloaded operator==.
+    return aKey == aLookup;
+  }
+
+  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// A DefaultHasher specialization for enums.
+template <class T>
+struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> {
+  using Key = T;
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    // Use builtin or overloaded operator==.
+    return aKey == static_cast<Key>(aLookup);
+  }
+
+  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+// A DefaultHasher specialization for pointers.
+template <class T>
+struct DefaultHasher<T*> : PointerHasher<T*> {};
+
+// A DefaultHasher specialization for mozilla::UniquePtr.
+template <class T, class D>
+struct DefaultHasher<UniquePtr<T, D>> {
+  using Key = UniquePtr<T, D>;
+  using Lookup = Key;
+  using PtrHasher = PointerHasher<T*>;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    return PtrHasher::hash(aLookup.get());
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return PtrHasher::match(aKey.get(), aLookup.get());
+  }
+
+  static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) {
+    aKey = std::move(aNewKey);
+  }
+};
+
+// A DefaultHasher specialization for doubles.
+template <>
+struct DefaultHasher<double> {
+  using Key = double;
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    // Just xor the high bits with the low bits, and then treat the bits of the
+    // result as a uint32_t.
+    static_assert(sizeof(HashNumber) == 4,
+                  "subsequent code assumes a four-byte hash");
+    uint64_t u = BitwiseCast<uint64_t>(aLookup);
+    return HashNumber(u ^ (u >> 32));
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
+  }
+};
+
+// A DefaultHasher specialization for floats.
+template <>
+struct DefaultHasher<float> {
+  using Key = float;
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    // Just use the value as if its bits form an integer. ScrambleHashCode() is
+    // subsequently called on the value to improve the distribution.
+    static_assert(sizeof(HashNumber) == 4,
+                  "subsequent code assumes a four-byte hash");
+    return HashNumber(BitwiseCast<uint32_t>(aLookup));
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
+  }
+};
+
+// A hash policy for C strings.
+struct CStringHasher {
+  using Key = const char*;
+  using Lookup = const char*;
+
+  static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return strcmp(aKey, aLookup) == 0;
+  }
+};
+
+//---------------------------------------------------------------------------
+// Fallible Hashing Interface
+//---------------------------------------------------------------------------
+
+// Most of the time generating a hash code is infallible, but sometimes it is
+// necessary to generate hash codes on demand in a way that can fail. Specialize
+// this class for your own hash policy to provide fallible hashing.
+//
+// This is used by MovableCellHasher to handle the fact that generating a unique
+// ID for cell pointer may fail due to OOM.
+//
+// The default implementations of these methods delegate to the usual HashPolicy
+// implementation and always succeed.
+template <typename HashPolicy>
+struct FallibleHashMethods {
+  // Return true if a hashcode is already available for its argument, and
+  // sets |aHashOut|. Once this succeeds for a specific argument it
+  // must continue to do so.
+  //
+  // Return false if a hashcode is not already available. This implies that any
+  // lookup must fail, as the hash code would have to have been successfully
+  // created on insertion.
+  template <typename Lookup>
+  static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
+    *aHashOut = HashPolicy::hash(aLookup);
+    return true;
+  }
+
+  // Fallible method to ensure a hashcode exists for its argument and create one
+  // if not. Sets |aHashOut| to the hashcode and returns true on success. Returns
+  // false on error, e.g. out of memory.
+  template <typename Lookup>
+  static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
+    *aHashOut = HashPolicy::hash(aLookup);
+    return true;
+  }
+};
+
+template <typename HashPolicy, typename Lookup>
+static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
+  return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash(
+      std::forward<Lookup>(aLookup), aHashOut);
+}
+
+template <typename HashPolicy, typename Lookup>
+static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) {
+  return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
+      std::forward<Lookup>(aLookup), aHashOut);
+}
+
+//---------------------------------------------------------------------------
+// Implementation Details (HashMapEntry, HashTableEntry, HashTable)
+//---------------------------------------------------------------------------
+
+// Both HashMap and HashSet are implemented by a single HashTable that is even
+// more heavily parameterized than the other two. This leaves HashTable gnarly
+// and extremely coupled to HashMap and HashSet; thus code should not use
+// HashTable directly.
+
+template <class Key, class Value>
+class HashMapEntry {
+  Key key_;
+  Value value_;
+
+  template <class, class, class>
+  friend class detail::HashTable;
+  template <class>
+  friend class detail::HashTableEntry;
+  template <class, class, class, class>
+  friend class HashMap;
+
+ public:
+  template <typename KeyInput, typename ValueInput>
+  HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
+      : key_(std::forward<KeyInput>(aKey)),
+        value_(std::forward<ValueInput>(aValue)) {}
+
+  HashMapEntry(HashMapEntry&& aRhs) = default;
+  HashMapEntry& operator=(HashMapEntry&& aRhs) = default;
+
+  using KeyType = Key;
+  using ValueType = Value;
+
+  const Key& key() const { return key_; }
+
+  // Use this method with caution! If the key is changed such that its hash
+  // value also changes, the map will be left in an invalid state.
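+  // When the hash would change, use HashMap's rekeyIfMoved()/rekeyAs() (or
+  // rekeyFront() during enumeration) instead; those reinsert the entry and
+  // keep the table consistent.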
+  Key& mutableKey() { return key_; }
+
+  const Value& value() const { return value_; }
+  Value& value() { return value_; }
+
+ private:
+  HashMapEntry(const HashMapEntry&) = delete;
+  void operator=(const HashMapEntry&) = delete;
+};
+
+namespace detail {
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+template <typename T>
+class EntrySlot;
+
+template <typename T>
+class HashTableEntry {
+ private:
+  using NonConstT = std::remove_const_t<T>;
+
+  // Instead of having a hash table entry store that looks like this:
+  //
+  // +--------+--------+--------+--------+
+  // | entry0 | entry1 |  ....  | entryN |
+  // +--------+--------+--------+--------+
+  //
+  // where the entries contained their cached hash code, we're going to lay out
+  // the entry store thusly:
+  //
+  // +-------+-------+-------+-------+--------+--------+--------+--------+
+  // | hash0 | hash1 |  ...  | hashN | entry0 | entry1 |  ....  | entryN |
+  // +-------+-------+-------+-------+--------+--------+--------+--------+
+  //
+  // with all the cached hashes prior to the actual entries themselves.
+  //
+  // We do this because implementing the first strategy requires us to make
+  // HashTableEntry look roughly like:
+  //
+  //   template <typename T>
+  //   class HashTableEntry {
+  //     HashNumber mKeyHash;
+  //     T mValue;
+  //   };
+  //
+  // The problem with this setup is that, depending on the layout of `T`, there
+  // may be platform ABI-mandated padding between `mKeyHash` and the first
+  // member of `T`. This ABI-mandated padding is wasted space, and can be
+  // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms.
+  // In such cases, we're throwing away a quarter of our entry store on padding,
+  // which is undesirable.
+  //
+  // The second layout above, namely:
+  //
+  // +-------+-------+-------+-------+--------+--------+--------+--------+
+  // | hash0 | hash1 |  ...  | hashN | entry0 | entry1 |  ....  | entryN |
+  // +-------+-------+-------+-------+--------+--------+--------+--------+
+  //
+  // means there is no wasted space between the hashes themselves, and no wasted
+  // space between the entries themselves. However, we would also like there to
+  // be no gap between the last hash and the first entry. The memory allocator
+  // guarantees the alignment of the start of the hashes. The use of a
+  // power-of-two capacity of at least 4 guarantees that the alignment of the
+  // *end* of the hash array is no less than the alignment of the start.
+  // Finally, the static_asserts here guarantee that the entries themselves
+  // don't need to be any more aligned than the alignment of the entry store
+  // itself.
+  //
+  // This assertion is safe for 32-bit builds because on both Windows and Linux
+  // (including Android), the minimum alignment for allocations larger than 8
+  // bytes is 8 bytes, and the actual data for entries in our entry store is
+  // guaranteed to have that alignment as well, thanks to the power-of-two
+  // number of cached hash values stored prior to the entry data.
+
+  // The allocation policy must allocate a table with at least this much
+  // alignment.
+  static constexpr size_t kMinimumAlignment = 8;
+
+  static_assert(alignof(HashNumber) <= kMinimumAlignment,
+                "[N*2 hashes, N*2 T values] allocation's alignment must be "
+                "enough to align each hash");
+  static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber),
+                "subsequent N*2 T values must not require more than an even "
+                "number of HashNumbers provides");
+
+  static const HashNumber sFreeKey = 0;
+  static const HashNumber sRemovedKey = 1;
+  static const HashNumber sCollisionBit = 1;
+
+  alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];
+
+ private:
+  template <class, class, class>
+  friend class HashTable;
+  template <typename>
+  friend class EntrySlot;
+
+  // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+  // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
+  // through |void*|. Placing the latter cast in these separate functions
+  // breaks the chain such that affected GCC versions no longer warn/error.
+  void* rawValuePtr() { return mValueData; }
+
+  static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
+
+  HashTableEntry(const HashTableEntry&) = delete;
+  void operator=(const HashTableEntry&) = delete;
+
+  NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
+
+  void destroyStoredT() {
+    NonConstT* ptr = valuePtr();
+    ptr->~T();
+    MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
+  }
+
+ public:
+  HashTableEntry() = default;
+
+  ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); }
+
+  void destroy() { destroyStoredT(); }
+
+  void swap(HashTableEntry* aOther, bool aIsLive) {
+    // This allows types to use Argument-Dependent-Lookup, and thus use a custom
+    // std::swap, which is needed by types like JS::Heap and such.
+    using std::swap;
+
+    if (this == aOther) {
+      return;
+    }
+    if (aIsLive) {
+      swap(*valuePtr(), *aOther->valuePtr());
+    } else {
+      *aOther->valuePtr() = std::move(*valuePtr());
+      destroy();
+    }
+  }
+
+  T& get() { return *valuePtr(); }
+
+  NonConstT& getMutable() { return *valuePtr(); }
+};
+
+// A slot represents a cached hash value and its associated entry stored
+// in the hash table. These two things are not stored in contiguous memory.
+template <typename T>
+class EntrySlot {
+  using NonConstT = std::remove_const_t<T>;
+
+  using Entry = HashTableEntry<T>;
+
+  Entry* mEntry;
+  HashNumber* mKeyHash;
+
+  template <class, class, class>
+  friend class HashTable;
+
+  EntrySlot(Entry* aEntry, HashNumber* aKeyHash)
+      : mEntry(aEntry), mKeyHash(aKeyHash) {}
+
+ public:
+  static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; }
+
+  EntrySlot(const EntrySlot&) = default;
+  EntrySlot(EntrySlot&& aOther) = default;
+
+  EntrySlot& operator=(const EntrySlot&) = default;
+  EntrySlot& operator=(EntrySlot&&) = default;
+
+  bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }
+
+  bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }
+
+  EntrySlot& operator++() {
+    ++mEntry;
+    ++mKeyHash;
+    return *this;
+  }
+
+  void destroy() { mEntry->destroy(); }
+
+  void swap(EntrySlot& aOther) {
+    mEntry->swap(aOther.mEntry, aOther.isLive());
+    std::swap(*mKeyHash, *aOther.mKeyHash);
+  }
+
+  T& get() const { return mEntry->get(); }
+
+  NonConstT& getMutable() { return mEntry->getMutable(); }
+
+  bool isFree() const { return *mKeyHash == Entry::sFreeKey; }
+
+  void clearLive() {
+    MOZ_ASSERT(isLive());
+    *mKeyHash = Entry::sFreeKey;
+    mEntry->destroyStoredT();
+  }
+
+  void clear() {
+    if (isLive()) {
+      mEntry->destroyStoredT();
+    }
+    MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry));
+    *mKeyHash = Entry::sFreeKey;
+  }
+
+  bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; }
+
+  void removeLive() {
+    MOZ_ASSERT(isLive());
+    *mKeyHash = Entry::sRemovedKey;
+    mEntry->destroyStoredT();
+  }
+
+  bool isLive() const { return isLiveHash(*mKeyHash); }
+
+  void setCollision() {
+    MOZ_ASSERT(isLive());
+    *mKeyHash |= Entry::sCollisionBit;
+  }
+  void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; }
+  bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; }
+  bool matchHash(HashNumber hn) {
+    return (*mKeyHash & ~Entry::sCollisionBit) == hn;
+  }
+  HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; }
+
+  template <typename... Args>
+  void setLive(HashNumber aHashNumber, Args&&... aArgs) {
+    MOZ_ASSERT(!isLive());
+    *mKeyHash = aHashNumber;
+    new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...);
+    MOZ_ASSERT(isLive());
+  }
+
+  Entry* toEntry() const { return mEntry; }
+};
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy {
+  friend class mozilla::ReentrancyGuard;
+
+  using NonConstT = std::remove_const_t<T>;
+  using Key = typename HashPolicy::KeyType;
+  using Lookup = typename HashPolicy::Lookup;
+
+ public:
+  using Entry = HashTableEntry<T>;
+  using Slot = EntrySlot<T>;
+
+  template <typename F>
+  static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) {
+    auto hashes = reinterpret_cast<HashNumber*>(aTable);
+    auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]);
+    Slot slot(entries, hashes);
+    for (size_t i = 0; i < size_t(aCapacity); ++i) {
+      f(slot);
+      ++slot;
+    }
+  }
+
+  // A nullable pointer to a hash table element. A Ptr |p| can be tested
+  // either explicitly |if (p.found()) p->...| or using boolean conversion
+  // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+  // table operations unless |generation()| is tested.
+  class Ptr {
+    friend class HashTable;
+
+    Slot mSlot;
+#ifdef DEBUG
+    const HashTable* mTable;
+    Generation mGeneration;
+#endif
+
+   protected:
+    Ptr(Slot aSlot, const HashTable& aTable)
+        : mSlot(aSlot)
+#ifdef DEBUG
+          ,
+          mTable(&aTable),
+          mGeneration(aTable.generation())
+#endif
+    {
+    }
+
+    // This constructor is used only by AddPtr() within lookupForAdd().
+    explicit Ptr(const HashTable& aTable)
+        : mSlot(nullptr, nullptr)
+#ifdef DEBUG
+          ,
+          mTable(&aTable),
+          mGeneration(aTable.generation())
+#endif
+    {
+    }
+
+    bool isValid() const { return !!mSlot.toEntry(); }
+
+   public:
+    Ptr()
+        : mSlot(nullptr, nullptr)
+#ifdef DEBUG
+          ,
+          mTable(nullptr),
+          mGeneration(0)
+#endif
+    {
+    }
+
+    bool found() const {
+      if (!isValid()) {
+        return false;
+      }
+#ifdef DEBUG
+      MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+      return mSlot.isLive();
+    }
+
+    explicit operator bool() const { return found(); }
+
+    bool operator==(const Ptr& aRhs) const {
+      MOZ_ASSERT(found() && aRhs.found());
+      return mSlot == aRhs.mSlot;
+    }
+
+    bool operator!=(const Ptr& aRhs) const {
+#ifdef DEBUG
+      MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+      return !(*this == aRhs);
+    }
+
+    T& operator*() const {
+#ifdef DEBUG
+      MOZ_ASSERT(found());
+      MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+      return mSlot.get();
+    }
+
+    T* operator->() const {
+#ifdef DEBUG
+      MOZ_ASSERT(found());
+      MOZ_ASSERT(mGeneration == mTable->generation());
+#endif
+      return &mSlot.get();
+    }
+  };
+
+  // A Ptr that can be used to add a key after a failed lookup.
+  class AddPtr : public Ptr {
+    friend class HashTable;
+
+    HashNumber mKeyHash;
+#ifdef DEBUG
+    uint64_t mMutationCount;
+#endif
+
+    AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
+        : Ptr(aSlot, aTable),
+          mKeyHash(aHashNumber)
+#ifdef DEBUG
+          ,
+          mMutationCount(aTable.mMutationCount)
+#endif
+    {
+    }
+
+    // This constructor is used when lookupForAdd() is performed on a table
+    // lacking entry storage; it leaves mSlot null but initializes everything
+    // else.
+    AddPtr(const HashTable& aTable, HashNumber aHashNumber)
+        : Ptr(aTable),
+          mKeyHash(aHashNumber)
+#ifdef DEBUG
+          ,
+          mMutationCount(aTable.mMutationCount)
+#endif
+    {
+      MOZ_ASSERT(isLive());
+    }
+
+    bool isLive() const { return isLiveHash(mKeyHash); }
+
+   public:
+    AddPtr() : mKeyHash(0) {}
+  };
+
+  // A hash table iterator that (mostly) doesn't allow table modifications.
+  // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
+  // hash table operation unless the |generation()| is tested.
+  class Iterator {
+    void moveToNextLiveEntry() {
+      while (++mCur < mEnd && !mCur.isLive()) {
+        continue;
+      }
+    }
+
+   protected:
+    friend class HashTable;
+
+    explicit Iterator(const HashTable& aTable)
+        : mCur(aTable.slotForIndex(0)),
+          mEnd(aTable.slotForIndex(aTable.capacity()))
+#ifdef DEBUG
+          ,
+          mTable(aTable),
+          mMutationCount(aTable.mMutationCount),
+          mGeneration(aTable.generation()),
+          mValidEntry(true)
+#endif
+    {
+      if (!done() && !mCur.isLive()) {
+        moveToNextLiveEntry();
+      }
+    }
+
+    Slot mCur;
+    Slot mEnd;
+#ifdef DEBUG
+    const HashTable& mTable;
+    uint64_t mMutationCount;
+    Generation mGeneration;
+    bool mValidEntry;
+#endif
+
+   public:
+    bool done() const {
+      MOZ_ASSERT(mGeneration == mTable.generation());
+      MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+      return mCur == mEnd;
+    }
+
+    T& get() const {
+      MOZ_ASSERT(!done());
+      MOZ_ASSERT(mValidEntry);
+      MOZ_ASSERT(mGeneration == mTable.generation());
+      MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+      return mCur.get();
+    }
+
+    void next() {
+      MOZ_ASSERT(!done());
+      MOZ_ASSERT(mGeneration == mTable.generation());
+      MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
+      moveToNextLiveEntry();
+#ifdef DEBUG
+      mValidEntry = true;
+#endif
+    }
+  };
+
+  // A hash table iterator that permits modification, removal and rekeying.
+  // Since rehashing when elements were removed during enumeration would be
+  // bad, it is postponed until the ModIterator is destructed. Since the
+  // ModIterator's destructor touches the hash table, the user must ensure
+  // that the hash table is still alive when the destructor runs.
+  class ModIterator : public Iterator {
+    friend class HashTable;
+
+    HashTable& mTable;
+    bool mRekeyed;
+    bool mRemoved;
+
+    // ModIterator is movable but not copyable.
+    ModIterator(const ModIterator&) = delete;
+    void operator=(const ModIterator&) = delete;
+
+   protected:
+    explicit ModIterator(HashTable& aTable)
+        : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {}
+
+   public:
+    MOZ_IMPLICIT ModIterator(ModIterator&& aOther)
+        : Iterator(aOther),
+          mTable(aOther.mTable),
+          mRekeyed(aOther.mRekeyed),
+          mRemoved(aOther.mRemoved) {
+      aOther.mRekeyed = false;
+      aOther.mRemoved = false;
+    }
+
+    // Removes the current element from the table, leaving |get()|
+    // invalid until the next call to |next()|.
+    void remove() {
+      mTable.remove(this->mCur);
+      mRemoved = true;
+#ifdef DEBUG
+      this->mValidEntry = false;
+      this->mMutationCount = mTable.mMutationCount;
+#endif
+    }
+
+    NonConstT& getMutable() {
+      MOZ_ASSERT(!this->done());
+      MOZ_ASSERT(this->mValidEntry);
+      MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
+      MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
+      return this->mCur.getMutable();
+    }
+
+    // Removes the current element and re-inserts it into the table with
+    // a new key at the new Lookup position. |get()| is invalid after
+    // this operation until the next call to |next()|.
+    void rekey(const Lookup& l, const Key& k) {
+      MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
+      Ptr p(this->mCur, mTable);
+      mTable.rekeyWithoutRehash(p, l, k);
+      mRekeyed = true;
+#ifdef DEBUG
+      this->mValidEntry = false;
+      this->mMutationCount = mTable.mMutationCount;
+#endif
+    }
+
+    void rekey(const Key& k) { rekey(k, k); }
+
+    // Potentially rehashes the table.
+    ~ModIterator() {
+      if (mRekeyed) {
+        mTable.mGen++;
+        mTable.infallibleRehashIfOverloaded();
+      }
+
+      if (mRemoved) {
+        mTable.compact();
+      }
+    }
+  };
+
+  // Range is similar to Iterator, but uses different terminology.
+  class Range {
+    friend class HashTable;
+
+    Iterator mIter;
+
+   protected:
+    explicit Range(const HashTable& table) : mIter(table) {}
+
+   public:
+    bool empty() const { return mIter.done(); }
+
+    T& front() const { return mIter.get(); }
+
+    void popFront() { return mIter.next(); }
+  };
+
+  // Enum is similar to ModIterator, but uses different terminology.
+  class Enum {
+    ModIterator mIter;
+
+    // Enum is movable but not copyable.
+    Enum(const Enum&) = delete;
+    void operator=(const Enum&) = delete;
+
+   public:
+    template <typename Map>
+    explicit Enum(Map& map) : mIter(map.mImpl) {}
+
+    MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {}
+
+    bool empty() const { return mIter.done(); }
+
+    T& front() const { return mIter.get(); }
+
+    void popFront() { return mIter.next(); }
+
+    void removeFront() { mIter.remove(); }
+
+    NonConstT& mutableFront() { return mIter.getMutable(); }
+
+    void rekeyFront(const Lookup& aLookup, const Key& aKey) {
+      mIter.rekey(aLookup, aKey);
+    }
+
+    void rekeyFront(const Key& aKey) { mIter.rekey(aKey); }
+  };
+
+  // HashTable is movable
+  HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); }
+  HashTable& operator=(HashTable&& aRhs) {
+    MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
+    if (mTable) {
+      destroyTable(*this, mTable, capacity());
+    }
+    AllocPolicy::operator=(std::move(aRhs));
+    moveFrom(aRhs);
+    return *this;
+  }
+
+ private:
+  void moveFrom(HashTable& aRhs) {
+    mGen = aRhs.mGen;
+    mHashShift = aRhs.mHashShift;
+    mTable = aRhs.mTable;
+    mEntryCount = aRhs.mEntryCount;
+    mRemovedCount = aRhs.mRemovedCount;
+#ifdef DEBUG
+    mMutationCount = aRhs.mMutationCount;
+    mEntered = aRhs.mEntered;
+#endif
+    aRhs.mTable = nullptr;
+    aRhs.clearAndCompact();
+  }
+
+  // HashTable is not copyable or assignable
+  HashTable(const HashTable&) = delete;
+  void operator=(const HashTable&) = delete;
+
+  static const uint32_t CAP_BITS = 30;
+
+ public:
+  uint64_t mGen : 56;       // entry storage generation number
+  uint64_t mHashShift : 8;  // multiplicative hash shift
+  char* mTable;             // entry storage
+  uint32_t mEntryCount;     // number of entries in mTable
+  uint32_t mRemovedCount;   // removed entry sentinels in mTable
+
+#ifdef DEBUG
+  uint64_t mMutationCount;
+  mutable bool mEntered;
+#endif
+
+  // The default initial capacity is 32 (enough to hold 16 elements), but it
+  // can be as low as 4.
+  static const uint32_t sDefaultLen = 16;
+  static const uint32_t sMinCapacity = 4;
+  // See the comments in HashTableEntry about this value.
+  static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions");
+  static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
+  static const uint32_t sMaxCapacity = 1u << CAP_BITS;
+
+  // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+  // math we implement it as a ratio of integers.
+  static const uint8_t sAlphaDenominator = 4;
+  static const uint8_t sMinAlphaNumerator = 1;  // min alpha: 1/4
+  static const uint8_t sMaxAlphaNumerator = 3;  // max alpha: 3/4
+
+  static const HashNumber sFreeKey = Entry::sFreeKey;
+  static const HashNumber sRemovedKey = Entry::sRemovedKey;
+  static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+  static uint32_t bestCapacity(uint32_t aLen) {
+    static_assert(
+        (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
+        "multiplication in numerator below could overflow");
+    static_assert(
+        sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
+        "numerator calculation below could potentially overflow");
+
+    // Callers should ensure this is true.
+    MOZ_ASSERT(aLen <= sMaxInit);
+
+    // Compute the smallest capacity allowing |aLen| elements to be
+    // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling
+    // integral division.)
+    uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) /
+                        sMaxAlphaNumerator;
+    capacity =
+        (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity);
+
+    MOZ_ASSERT(capacity >= aLen);
+    MOZ_ASSERT(capacity <= sMaxCapacity);
+
+    return capacity;
+  }
+
+  static uint32_t hashShift(uint32_t aLen) {
+    // Reject all lengths whose initial computed capacity would exceed
+    // sMaxCapacity. Round that maximum aLen down to the nearest power of two
+    // for speedier code.
+    if (MOZ_UNLIKELY(aLen > sMaxInit)) {
+      MOZ_CRASH("initial length is too large");
+    }
+
+    return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen));
+  }
+
+  static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
+
+  static HashNumber prepareHash(HashNumber aInputHash) {
+    HashNumber keyHash = ScrambleHashCode(aInputHash);
+
+    // Avoid reserved hash codes.
+    if (!isLiveHash(keyHash)) {
+      keyHash -= (sRemovedKey + 1);
+    }
+    return keyHash & ~sCollisionBit;
+  }
+
+  enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+
+  // Fake a struct that we're going to alloc. See the comments in
+  // HashTableEntry about how the table is laid out, and why it's safe.
+  struct FakeSlot {
+    unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)];
+  };
+
+  static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
+                           FailureBehavior aReportFailure = ReportFailure) {
+    FakeSlot* fake =
+        aReportFailure
+            ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity)
+            : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity);
+
+    MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==
+               0);
+
+    char* table = reinterpret_cast<char*>(fake);
+    if (table) {
+      forEachSlot(table, aCapacity, [&](Slot& slot) {
+        *slot.mKeyHash = sFreeKey;
+        new (KnownNotNull, slot.toEntry()) Entry();
+      });
+    }
+    return table;
+  }
+
+  static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable,
+                           uint32_t aCapacity) {
+    forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) {
+      if (slot.isLive()) {
+        slot.toEntry()->destroyStoredT();
+      }
+    });
+    freeTable(aAllocPolicy, aOldTable, aCapacity);
+  }
+
+  static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable,
+                        uint32_t aCapacity) {
+    FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable);
+    aAllocPolicy.free_(fake, aCapacity);
+  }
+
+ public:
+  HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
+      : AllocPolicy(std::move(aAllocPolicy)),
+        mGen(0),
+        mHashShift(hashShift(aLen)),
+        mTable(nullptr),
+        mEntryCount(0),
+        mRemovedCount(0)
+#ifdef DEBUG
+        ,
+        mMutationCount(0),
+        mEntered(false)
+#endif
+  {
+  }
+
+  explicit HashTable(AllocPolicy aAllocPolicy)
+      : HashTable(aAllocPolicy, sDefaultLen) {}
+
+  ~HashTable() {
+    if (mTable) {
+      destroyTable(*this, mTable, capacity());
+    }
+  }
+
+ private:
+  HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; }
+
+  struct DoubleHash {
+    HashNumber mHash2;
+    HashNumber mSizeMask;
+  };
+
+  DoubleHash hash2(HashNumber aCurKeyHash) const {
+    uint32_t sizeLog2 = kHashNumberBits - mHashShift;
+    DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1,
+                     (HashNumber(1) << sizeLog2) - 1};
+    return dh;
+  }
+
+  static HashNumber applyDoubleHash(HashNumber aHash1,
+                                    const DoubleHash& aDoubleHash) {
+    return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
+  }
+
+  static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
+    return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
+  }
+
+  enum LookupReason { ForNonAdd, ForAdd };
+
+  Slot slotForIndex(HashNumber aIndex) const {
+    auto hashes = reinterpret_cast<HashNumber*>(mTable);
+    auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]);
+    return Slot(&entries[aIndex], &hashes[aIndex]);
+  }
+
+  // Warning: in order for readonlyThreadsafeLookup() to be safe this
+  // function must not modify the table in any way when Reason==ForNonAdd.
+  template <LookupReason Reason>
+  MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
+                                HashNumber aKeyHash) const {
+    MOZ_ASSERT(isLiveHash(aKeyHash));
+    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+    MOZ_ASSERT(mTable);
+
+    // Compute the primary hash address.
+    HashNumber h1 = hash1(aKeyHash);
+    Slot slot = slotForIndex(h1);
+
+    // Miss: return space for a new entry.
+    if (slot.isFree()) {
+      return slot;
+    }
+
+    // Hit: return entry.
+    if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+      return slot;
+    }
+
+    // Collision: double hash.
+    DoubleHash dh = hash2(aKeyHash);
+
+    // Save the first removed entry pointer so we can recycle later.
+    Maybe<Slot> firstRemoved;
+
+    while (true) {
+      if (Reason == ForAdd && !firstRemoved) {
+        if (MOZ_UNLIKELY(slot.isRemoved())) {
+          firstRemoved.emplace(slot);
+        } else {
+          slot.setCollision();
+        }
+      }
+
+      h1 = applyDoubleHash(h1, dh);
+
+      slot = slotForIndex(h1);
+      if (slot.isFree()) {
+        return firstRemoved.refOr(slot);
+      }
+
+      if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+        return slot;
+      }
+    }
+  }
+
+  // This is a copy of lookup() hardcoded to the assumptions:
+  //   1. the lookup is for an add;
+  //   2. the key, whose |keyHash| has been passed, is not in the table.
+  Slot findNonLiveSlot(HashNumber aKeyHash) {
+    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
+    MOZ_ASSERT(mTable);
+
+    // We assume 'aKeyHash' has already been distributed.
+
+    // Compute the primary hash address.
+    HashNumber h1 = hash1(aKeyHash);
+    Slot slot = slotForIndex(h1);
+
+    // Miss: return space for a new entry.
+    if (!slot.isLive()) {
+      return slot;
+    }
+
+    // Collision: double hash.
+    DoubleHash dh = hash2(aKeyHash);
+
+    while (true) {
+      slot.setCollision();
+
+      h1 = applyDoubleHash(h1, dh);
+
+      slot = slotForIndex(h1);
+      if (!slot.isLive()) {
+        return slot;
+      }
+    }
+  }
+
+  enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+
+  RebuildStatus changeTableSize(
+      uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
+    MOZ_ASSERT(IsPowerOfTwo(newCapacity));
+    MOZ_ASSERT(!!mTable == !!capacity());
+
+    // Look, but don't touch, until we succeed in getting new entry store.
+    char* oldTable = mTable;
+    uint32_t oldCapacity = capacity();
+    uint32_t newLog2 = mozilla::CeilingLog2(newCapacity);
+
+    if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+      if (aReportFailure) {
+        this->reportAllocOverflow();
+      }
+      return RehashFailed;
+    }
+
+    char* newTable = createTable(*this, newCapacity, aReportFailure);
+    if (!newTable) {
+      return RehashFailed;
+    }
+
+    // We can't fail from here on, so update table parameters.
+    mHashShift = kHashNumberBits - newLog2;
+    mRemovedCount = 0;
+    mGen++;
+    mTable = newTable;
+
+    // Copy only live entries, leaving removed ones behind.
+    forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
+      if (slot.isLive()) {
+        HashNumber hn = slot.getKeyHash();
+        findNonLiveSlot(hn).setLive(
+            hn, std::move(const_cast<NonConstT&>(slot.get())));
+      }
+
+      slot.clear();
+    });
+
+    // All entries have been destroyed, no need to destroyTable.
+    freeTable(*this, oldTable, oldCapacity);
+    return Rehashed;
+  }
+
+  RebuildStatus rehashIfOverloaded(
+      FailureBehavior aReportFailure = ReportFailure) {
+    static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+                  "multiplication below could overflow");
+
+    // Note: if capacity() is zero, this will always succeed, which is
+    // what we want.
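+    // E.g. with capacity() == 32 and the maximum alpha of 3/4 defined above,
+    // the table counts as overloaded once mEntryCount + mRemovedCount
+    // reaches 32 * 3 / 4 == 24.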
+    bool overloaded = mEntryCount + mRemovedCount >=
+                      capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+
+    if (!overloaded) {
+      return NotOverloaded;
+    }
+
+    // Succeed if a quarter or more of all entries are removed. Note that this
+    // always succeeds if capacity() == 0 (i.e. entry storage has not been
+    // allocated), which is what we want, because it means changeTableSize()
+    // will allocate the requested capacity rather than doubling it.
+    bool manyRemoved = mRemovedCount >= (capacity() >> 2);
+    uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2;
+    return changeTableSize(newCapacity, aReportFailure);
+  }
+
+  void infallibleRehashIfOverloaded() {
+    if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
+      rehashTableInPlace();
+    }
+  }
+
+  void remove(Slot& aSlot) {
+    MOZ_ASSERT(mTable);
+
+    if (aSlot.hasCollision()) {
+      aSlot.removeLive();
+      mRemovedCount++;
+    } else {
+      aSlot.clearLive();
+    }
+    mEntryCount--;
+#ifdef DEBUG
+    mMutationCount++;
+#endif
+  }
+
+  void shrinkIfUnderloaded() {
+    static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+                  "multiplication below could overflow");
+    bool underloaded =
+        capacity() > sMinCapacity &&
+        mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;
+
+    if (underloaded) {
+      (void)changeTableSize(capacity() / 2, DontReportFailure);
+    }
+  }
+
+  // This is identical to changeTableSize(currentSize), but without requiring
+  // a second table. We do this by recycling the collision bits to tell us if
+  // the element is already inserted or still waiting to be inserted. Since
+  // already-inserted elements win any conflicts, we get the same table as we
+  // would have gotten through random insertion order.
+  void rehashTableInPlace() {
+    mRemovedCount = 0;
+    mGen++;
+    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
+    for (uint32_t i = 0; i < capacity();) {
+      Slot src = slotForIndex(i);
+
+      if (!src.isLive() || src.hasCollision()) {
+        ++i;
+        continue;
+      }
+
+      HashNumber keyHash = src.getKeyHash();
+      HashNumber h1 = hash1(keyHash);
+      DoubleHash dh = hash2(keyHash);
+      Slot tgt = slotForIndex(h1);
+      while (true) {
+        if (!tgt.hasCollision()) {
+          src.swap(tgt);
+          tgt.setCollision();
+          break;
+        }
+
+        h1 = applyDoubleHash(h1, dh);
+        tgt = slotForIndex(h1);
+      }
+    }
+
+    // TODO: this algorithm leaves collision bits on *all* elements, even if
+    // they are on no collision path. We have the option of setting the
+    // collision bits correctly on a subsequent pass or skipping the rehash
+    // unless we are totally filled with tombstones: benchmark to find out
+    // which approach is best.
+  }
+
+  // Prefer to use putNewInfallible; this function does not check
+  // invariants.
+  template <typename... Args>
+  void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) {
+    MOZ_ASSERT(mTable);
+
+    Slot slot = findNonLiveSlot(aKeyHash);
+
+    if (slot.isRemoved()) {
+      mRemovedCount--;
+      aKeyHash |= sCollisionBit;
+    }
+
+    slot.setLive(aKeyHash, std::forward<Args>(aArgs)...);
+    mEntryCount++;
+#ifdef DEBUG
+    mMutationCount++;
+#endif
+  }
+
+ public:
+  void clear() {
+    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
+    mRemovedCount = 0;
+    mEntryCount = 0;
+#ifdef DEBUG
+    mMutationCount++;
+#endif
+  }
+
+  // Resize the table down to the smallest capacity that doesn't overload the
+  // table. Since we call shrinkIfUnderloaded() on every remove, you only need
+  // to call this after a bulk removal of items done without calling remove().
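+  // (ModIterator already arranges for this: its destructor calls compact()
+  // when entries were removed through it; see ~ModIterator above.)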
+  void compact() {
+    if (empty()) {
+      // Free the entry storage.
+      freeTable(*this, mTable, capacity());
+      mGen++;
+      mHashShift = hashShift(0);  // gives minimum capacity on regrowth
+      mTable = nullptr;
+      mRemovedCount = 0;
+      return;
+    }
+
+    uint32_t bestCapacity = this->bestCapacity(mEntryCount);
+    MOZ_ASSERT(bestCapacity <= capacity());
+
+    if (bestCapacity < capacity()) {
+      (void)changeTableSize(bestCapacity, DontReportFailure);
+    }
+  }
+
+  void clearAndCompact() {
+    clear();
+    compact();
+  }
+
+  [[nodiscard]] bool reserve(uint32_t aLen) {
+    if (aLen == 0) {
+      return true;
+    }
+
+    if (MOZ_UNLIKELY(aLen > sMaxInit)) {
+      return false;
+    }
+
+    uint32_t bestCapacity = this->bestCapacity(aLen);
+    if (bestCapacity <= capacity()) {
+      return true;  // Capacity is already sufficient.
+    }
+
+    RebuildStatus status = changeTableSize(bestCapacity, ReportFailure);
+    MOZ_ASSERT(status != NotOverloaded);
+    return status != RehashFailed;
+  }
+
+  Iterator iter() const { return Iterator(*this); }
+
+  ModIterator modIter() { return ModIterator(*this); }
+
+  Range all() const { return Range(*this); }
+
+  bool empty() const { return mEntryCount == 0; }
+
+  uint32_t count() const { return mEntryCount; }
+
+  uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); }
+
+  uint32_t capacity() const { return mTable ? rawCapacity() : 0; }
+
+  Generation generation() const { return Generation(mGen); }
+
+  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+    return aMallocSizeOf(mTable);
+  }
+
+  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+    return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf);
+  }
+
+  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
+    if (empty()) {
+      return Ptr();
+    }
+
+    HashNumber inputHash;
+    if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) {
+      return Ptr();
+    }
+
+    HashNumber keyHash = prepareHash(inputHash);
+    return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this);
+  }
+
+  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
+    ReentrancyGuard g(*this);
+    return readonlyThreadsafeLookup(aLookup);
+  }
+
+  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
+    ReentrancyGuard g(*this);
+
+    HashNumber inputHash;
+    if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
+      return AddPtr();
+    }
+
+    HashNumber keyHash = prepareHash(inputHash);
+
+    if (!mTable) {
+      return AddPtr(*this, keyHash);
+    }
+
+    // Directly call the constructor in the return statement to avoid
+    // excess copying when building with Visual Studio 2017.
+    // See bug 1385181.
+    return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash);
+  }
+
+  template <typename... Args>
+  [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) {
+    ReentrancyGuard g(*this);
+    MOZ_ASSERT_IF(aPtr.isValid(), mTable);
+    MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
+    MOZ_ASSERT(!aPtr.found());
+    MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
+
+    // Check for error from ensureHash() here.
+    if (!aPtr.isLive()) {
+      return false;
+    }
+
+    MOZ_ASSERT(aPtr.mGeneration == generation());
+#ifdef DEBUG
+    MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
+#endif
+
+    if (!aPtr.isValid()) {
+      MOZ_ASSERT(!mTable && mEntryCount == 0);
+      uint32_t newCapacity = rawCapacity();
+      RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
+      MOZ_ASSERT(status != NotOverloaded);
+      if (status == RehashFailed) {
+        return false;
+      }
+      aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
+
+    } else if (aPtr.mSlot.isRemoved()) {
+      // Changing an entry from removed to live does not affect whether we are
+      // overloaded and can be handled separately.
+      if (!this->checkSimulatedOOM()) {
+        return false;
+      }
+      mRemovedCount--;
+      aPtr.mKeyHash |= sCollisionBit;
+
+    } else {
+      // Preserve the validity of |aPtr.mSlot|.
+      RebuildStatus status = rehashIfOverloaded();
+      if (status == RehashFailed) {
+        return false;
+      }
+      if (status == NotOverloaded && !this->checkSimulatedOOM()) {
+        return false;
+      }
+      if (status == Rehashed) {
+        aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
+      }
+    }
+
+    aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
+    mEntryCount++;
+#ifdef DEBUG
+    mMutationCount++;
+    aPtr.mGeneration = generation();
+    aPtr.mMutationCount = mMutationCount;
+#endif
+    return true;
+  }
+
+  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
+  // function must take care not to use |aLookup| after moving |aArgs|.
+  template <typename... Args>
+  void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) {
+    MOZ_ASSERT(!lookup(aLookup).found());
+    ReentrancyGuard g(*this);
+    HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
+    putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
+  }
+
+  // Note: |aLookup| may alias arguments in |aArgs|, so this function must take
+  // care not to use |aLookup| after moving |aArgs|.
+  template <typename... Args>
+  [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) {
+    MOZ_ASSERT(!lookup(aLookup).found());
+    ReentrancyGuard g(*this);
+    if (!this->checkSimulatedOOM()) {
+      return false;
+    }
+    HashNumber inputHash;
+    if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
+      return false;
+    }
+    HashNumber keyHash = prepareHash(inputHash);
+    if (rehashIfOverloaded() == RehashFailed) {
+      return false;
+    }
+    putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
+    return true;
+  }
+
+  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
+  // function must take care not to use |aLookup| after moving |aArgs|.
+  template <typename... Args>
+  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
+                                   Args&&... aArgs) {
+    // Check for error from ensureHash() here.
+    if (!aPtr.isLive()) {
+      return false;
+    }
+#ifdef DEBUG
+    aPtr.mGeneration = generation();
+    aPtr.mMutationCount = mMutationCount;
+#endif
+    if (mTable) {
+      ReentrancyGuard g(*this);
+      // Check that aLookup has not been destroyed.
+      MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash);
+      aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
+      if (aPtr.found()) {
+        return true;
+      }
+    } else {
+      // Clear aPtr so it's invalid; add() will allocate storage and redo the
+      // lookup.
+      aPtr.mSlot = Slot(nullptr, nullptr);
+    }
+    return add(aPtr, std::forward<Args>(aArgs)...);
+  }
+
+  void remove(Ptr aPtr) {
+    MOZ_ASSERT(mTable);
+    ReentrancyGuard g(*this);
+    MOZ_ASSERT(aPtr.found());
+    MOZ_ASSERT(aPtr.mGeneration == generation());
+    remove(aPtr.mSlot);
+    shrinkIfUnderloaded();
+  }
+
+  void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
+    MOZ_ASSERT(mTable);
+    ReentrancyGuard g(*this);
+    MOZ_ASSERT(aPtr.found());
+    MOZ_ASSERT(aPtr.mGeneration == generation());
+    typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
+    HashPolicy::setKey(t, const_cast<Key&>(aKey));
+    remove(aPtr.mSlot);
+    HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
+    putNewInfallibleInternal(keyHash, std::move(t));
+  }
+
+  void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
+    rekeyWithoutRehash(aPtr, aLookup, aKey);
+    infallibleRehashIfOverloaded();
+  }
+};
+
+}  // namespace detail
+}  // namespace mozilla
+
+#endif /* mozilla_HashTable_h */
diff --git a/mfbt/HelperMacros.h b/mfbt/HelperMacros.h
new file mode 100644
index 0000000000..883a16ec59
--- /dev/null
+++ b/mfbt/HelperMacros.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* MOZ_STRINGIFY Macros */
+
+#ifndef mozilla_HelperMacros_h
+#define mozilla_HelperMacros_h
+
+// Wraps x in quotes without expanding a macro name
+#define MOZ_STRINGIFY_NO_EXPANSION(x) #x
+
+// Wraps x in quotes; expanding x if it is a macro name
+#define MOZ_STRINGIFY(x) MOZ_STRINGIFY_NO_EXPANSION(x)
+
+#endif  // mozilla_HelperMacros_h
diff --git a/mfbt/InitializedOnce.h b/mfbt/InitializedOnce.h
new file mode 100644
index 0000000000..aac152df35
--- /dev/null
+++ b/mfbt/InitializedOnce.h
@@ -0,0 +1,247 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Class template for objects that can only be initialized once.
+
+#ifndef mozilla_mfbt_initializedonce_h__
+#define mozilla_mfbt_initializedonce_h__
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+enum struct InitWhen { InConstructorOnly, LazyAllowed };
+enum struct DestroyWhen { EarlyAllowed, InDestructorOnly };
+
+namespace ValueCheckPolicies {
+template <typename T>
+struct AllowAnyValue {
+  constexpr static bool Check(const T& /*aValue*/) { return true; }
+};
+
+template <typename T>
+struct ConvertsToTrue {
+  constexpr static bool Check(const T& aValue) {
+    return static_cast<bool>(aValue);
+  }
+};
+}  // namespace ValueCheckPolicies
+
+// A kind of mozilla::Maybe that can only be initialized and cleared once. It
+// cannot be re-initialized. This is more stateful than a const Maybe in
+// that it can be cleared, but much less stateful than a non-const Maybe
+// which could be reinitialized multiple times. Can only be used with const T
+// to ensure that the contents cannot be modified either.
+// TODO: Make constructors constexpr when Maybe's constructors are constexpr
+// (Bug 1601336).
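+//
+// A minimal usage sketch (Widget is a hypothetical type; the InitializedOnce
+// alias used below is defined near the end of this header):
+//
+//   class Owner {
+//     // Must be initialized in Owner's constructor; may be destroyed early.
+//     InitializedOnce<const RefPtr<Widget>> mWidget;
+//
+//    public:
+//     explicit Owner(RefPtr<Widget> aWidget) : mWidget{std::move(aWidget)} {}
+//
+//     void Shutdown() { mWidget.destroy(); }
+//
+//     Widget& WidgetRef() const { return **mWidget; }
+//   };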
+template class ValueCheckPolicy = + ValueCheckPolicies::AllowAnyValue> +class InitializedOnce final { + static_assert(std::is_const_v); + using MaybeType = Maybe>; + + public: + using ValueType = T; + + template + explicit constexpr InitializedOnce( + std::enable_if_t* = + nullptr) {} + + // note: aArg0 is named separately here to disallow calling this with no + // arguments. The default constructor should only be available conditionally + // and is declared above. + template + explicit constexpr InitializedOnce(Arg0&& aArg0, Args&&... aArgs) + : mMaybe{Some(std::remove_const_t{std::forward(aArg0), + std::forward(aArgs)...})} { + MOZ_ASSERT(ValueCheckPolicy::Check(*mMaybe)); + } + + InitializedOnce(const InitializedOnce&) = delete; + InitializedOnce(InitializedOnce&& aOther) : mMaybe{std::move(aOther.mMaybe)} { + static_assert(DestroyWhenVal == DestroyWhen::EarlyAllowed); +#ifdef DEBUG + aOther.mWasReset = true; +#endif + } + InitializedOnce& operator=(const InitializedOnce&) = delete; + InitializedOnce& operator=(InitializedOnce&& aOther) { + static_assert(InitWhenVal == InitWhen::LazyAllowed && + DestroyWhenVal == DestroyWhen::EarlyAllowed); + MOZ_ASSERT(!mWasReset); + MOZ_ASSERT(!mMaybe); + mMaybe.~MaybeType(); + new (&mMaybe) MaybeType{std::move(aOther.mMaybe)}; +#ifdef DEBUG + aOther.mWasReset = true; +#endif + return *this; + } + + template + constexpr std::enable_if_t init( + Args&&... aArgs) { + MOZ_ASSERT(mMaybe.isNothing()); + MOZ_ASSERT(!mWasReset); + mMaybe.emplace(std::remove_const_t{std::forward(aArgs)...}); + MOZ_ASSERT(ValueCheckPolicy::Check(*mMaybe)); + } + + constexpr explicit operator bool() const { return isSome(); } + constexpr bool isSome() const { return mMaybe.isSome(); } + constexpr bool isNothing() const { return mMaybe.isNothing(); } + + constexpr T& operator*() const { return *mMaybe; } + constexpr T* operator->() const { return mMaybe.operator->(); } + + constexpr T& ref() const { return mMaybe.ref(); } + + template + std::enable_if_t + destroy() { + MOZ_ASSERT(mMaybe.isSome()); + maybeDestroy(); + } + + template + std::enable_if_t + maybeDestroy() { + mMaybe.reset(); +#ifdef DEBUG + mWasReset = true; +#endif + } + + template + std::enable_if_t + release() { + MOZ_ASSERT(mMaybe.isSome()); + auto res = std::move(mMaybe.ref()); + destroy(); + return res; + } + + private: + MaybeType mMaybe; +#ifdef DEBUG + bool mWasReset = false; +#endif +}; + +template class ValueCheckPolicy> +class LazyInitializer { + public: + explicit LazyInitializer(InitializedOnce& aLazyInitialized) + : mLazyInitialized{aLazyInitialized} {} + + template + LazyInitializer& operator=(U&& aValue) { + mLazyInitialized.init(std::forward(aValue)); + return *this; + } + + LazyInitializer(const LazyInitializer&) = delete; + LazyInitializer& operator=(const LazyInitializer&) = delete; + + private: + InitializedOnce& + mLazyInitialized; +}; + +} // namespace detail + +// The following *InitializedOnce* template aliases allow to declare class +// member variables that can only be initialized once, but maybe destroyed +// earlier explicitly than in the containing classes destructor. +// The intention is to restrict the possible state transitions for member +// variables that can almost be const, but not quite. This may be particularly +// useful for classes with a lot of members. Uses in other contexts, e.g. as +// local variables, are possible, but probably seldom useful. They can only be +// instantiated with a const element type. 
Any misuses that cannot be detected
+// at compile time trigger a MOZ_ASSERT at runtime. Individually spelled out
+// assertions for these aspects are not necessary, which may improve the
+// readability of the code without impairing safety.
+//
+// The base variant InitializedOnce requires initialization in the
+// constructor, but allows early destruction using destroy(), and allows move
+// construction. It is similar to Maybe<const T> in some sense, but a
+// Maybe<const T> could be reinitialized arbitrarily. InitializedOnce
+// expresses the intent not to do this, and prohibits reinitialization.
+//
+// The Lazy* variants allow default construction, and can be initialized
+// lazily using init() in that case, but they cannot be reinitialized either.
+// They do not allow early destruction.
+//
+// The Lazy*EarlyDestructible variants allow lazy initialization, early
+// destruction, move construction and move assignment. This should be used
+// only when really required.
+//
+// The *NotNull variants only allow initialization with values that convert to
+// bool as true. They are named NotNull because the typical use case is with
+// (smart) pointer types, but any other type convertible to bool will also
+// work analogously.
+//
+// There is no variant combining detail::InitWhen::InConstructorOnly with
+// detail::DestroyWhen::InDestructorOnly because this would be equivalent to a
+// const member.
+//
+// For special cases, e.g. requiring custom value check policies,
+// detail::InitializedOnce might be instantiated directly, but be mindful when
+// doing this.
+
+template <typename T>
+using InitializedOnce =
+    detail::InitializedOnce<T, detail::InitWhen::InConstructorOnly,
+                            detail::DestroyWhen::EarlyAllowed>;
+
+template <typename T>
+using InitializedOnceNotNull =
+    detail::InitializedOnce<T, detail::InitWhen::InConstructorOnly,
+                            detail::DestroyWhen::EarlyAllowed,
+                            detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T>
+using LazyInitializedOnce =
+    detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+                            detail::DestroyWhen::InDestructorOnly>;
+
+template <typename T>
+using LazyInitializedOnceNotNull =
+    detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+                            detail::DestroyWhen::InDestructorOnly,
+                            detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T>
+using LazyInitializedOnceEarlyDestructible =
+    detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+                            detail::DestroyWhen::EarlyAllowed>;
+
+template <typename T>
+using LazyInitializedOnceNotNullEarlyDestructible =
+    detail::InitializedOnce<T, detail::InitWhen::LazyAllowed,
+                            detail::DestroyWhen::EarlyAllowed,
+                            detail::ValueCheckPolicies::ConvertsToTrue>;
+
+template <typename T, detail::InitWhen InitWhenVal,
+          detail::DestroyWhen DestroyWhenVal,
+          template <typename> class ValueCheckPolicy>
+auto do_Init(detail::InitializedOnce<T, InitWhenVal, DestroyWhenVal,
+                                     ValueCheckPolicy>& aLazyInitialized) {
+  return detail::LazyInitializer(aLazyInitialized);
+}
+
+}  // namespace mozilla
+
+#endif
diff --git a/mfbt/IntegerRange.h b/mfbt/IntegerRange.h
new file mode 100644
index 0000000000..4415031454
--- /dev/null
+++ b/mfbt/IntegerRange.h
@@ -0,0 +1,192 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Iterator over ranges of integers */
+
+#ifndef mozilla_IntegerRange_h
+#define mozilla_IntegerRange_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ReverseIterator.h"
+
+#include <iterator>
+#include <type_traits>
+
+namespace mozilla {
+
+namespace detail {
+
+template <typename IntTypeT>
+class IntegerIterator {
+ public:
+  // It is disputable whether these type definitions are correct, since
+  // operator* doesn't return a reference at all. Also, the iterator_category
+  // can be at most std::input_iterator_tag (rather than
+  // std::bidirectional_iterator_tag, as it might seem), because it is a
+  // stashing iterator.
See also, e.g., + // https://stackoverflow.com/questions/50909701/what-should-be-iterator-category-for-a-stashing-iterator + using value_type = const IntTypeT; + using pointer = const value_type*; + using reference = const value_type&; + using difference_type = std::make_signed_t; + using iterator_category = std::input_iterator_tag; + + template + explicit IntegerIterator(IntType aCurrent) : mCurrent(aCurrent) {} + + template + explicit IntegerIterator(const IntegerIterator& aOther) + : mCurrent(aOther.mCurrent) {} + + // This intentionally returns a value rather than a reference, to make + // mozilla::ReverseIterator work with it. Still, std::reverse_iterator cannot + // be used with IntegerIterator because it still is a "stashing iterator". See + // Bug 1175485. + IntTypeT operator*() const { return mCurrent; } + + /* Increment and decrement operators */ + + IntegerIterator& operator++() { + ++mCurrent; + return *this; + } + IntegerIterator& operator--() { + --mCurrent; + return *this; + } + IntegerIterator operator++(int) { + auto ret = *this; + ++mCurrent; + return ret; + } + IntegerIterator operator--(int) { + auto ret = *this; + --mCurrent; + return ret; + } + + /* Comparison operators */ + + template + friend bool operator==(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + template + friend bool operator!=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + template + friend bool operator<(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + template + friend bool operator<=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + template + friend bool operator>(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + template + friend bool operator>=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2); + + private: + IntTypeT mCurrent; +}; + +template +bool operator==(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent == aIter2.mCurrent; +} + +template +bool operator!=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent != aIter2.mCurrent; +} + +template +bool operator<(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent < aIter2.mCurrent; +} + +template +bool operator<=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent <= aIter2.mCurrent; +} + +template +bool operator>(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent > aIter2.mCurrent; +} + +template +bool operator>=(const IntegerIterator& aIter1, + const IntegerIterator& aIter2) { + return aIter1.mCurrent >= aIter2.mCurrent; +} + +template +class IntegerRange { + public: + typedef IntegerIterator iterator; + typedef IntegerIterator const_iterator; + typedef ReverseIterator> reverse_iterator; + typedef ReverseIterator> const_reverse_iterator; + + template + explicit IntegerRange(IntType aEnd) : mBegin(0), mEnd(aEnd) {} + + template + IntegerRange(IntType1 aBegin, IntType2 aEnd) : mBegin(aBegin), mEnd(aEnd) {} + + iterator begin() const { return iterator(mBegin); } + const_iterator cbegin() const { return begin(); } + iterator end() const { return iterator(mEnd); } + const_iterator cend() const { return end(); } + reverse_iterator rbegin() const { return reverse_iterator(iterator(mEnd)); } + const_reverse_iterator crbegin() const { return rbegin(); } + reverse_iterator rend() const { return reverse_iterator(iterator(mBegin)); } + const_reverse_iterator 
crend() const { return rend(); } + + private: + IntTypeT mBegin; + IntTypeT mEnd; +}; + +template > +struct GeqZero { + static bool isNonNegative(T t) { return t >= 0; } +}; + +template +struct GeqZero { + static bool isNonNegative(T t) { return true; } +}; + +} // namespace detail + +template +detail::IntegerRange IntegerRange(IntType aEnd) { + static_assert(std::is_integral_v, "value must be integral"); + MOZ_ASSERT(detail::GeqZero::isNonNegative(aEnd), + "Should never have negative value here"); + return detail::IntegerRange(aEnd); +} + +template +detail::IntegerRange IntegerRange(IntType1 aBegin, IntType2 aEnd) { + static_assert(std::is_integral_v && std::is_integral_v, + "values must both be integral"); + static_assert(std::is_signed_v == std::is_signed_v, + "signed/unsigned mismatch"); + MOZ_ASSERT(aEnd >= aBegin, "End value should be larger than begin value"); + return detail::IntegerRange(aBegin, aEnd); +} + +} // namespace mozilla + +#endif // mozilla_IntegerRange_h diff --git a/mfbt/IntegerTypeTraits.h b/mfbt/IntegerTypeTraits.h new file mode 100644 index 0000000000..33b51b9901 --- /dev/null +++ b/mfbt/IntegerTypeTraits.h @@ -0,0 +1,86 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_IntegerTypeTraits_h +#define mozilla_IntegerTypeTraits_h + +#include +#include +#include + +namespace mozilla { + +namespace detail { + +/** + * StdintTypeForSizeAndSignedness returns the stdint integer type + * of given size (can be 1, 2, 4 or 8) and given signedness + * (false means unsigned, true means signed). + */ +template +struct StdintTypeForSizeAndSignedness; + +template <> +struct StdintTypeForSizeAndSignedness<1, true> { + typedef int8_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<1, false> { + typedef uint8_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<2, true> { + typedef int16_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<2, false> { + typedef uint16_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<4, true> { + typedef int32_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<4, false> { + typedef uint32_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<8, true> { + typedef int64_t Type; +}; + +template <> +struct StdintTypeForSizeAndSignedness<8, false> { + typedef uint64_t Type; +}; + +} // namespace detail + +template +struct UnsignedStdintTypeForSize + : detail::StdintTypeForSizeAndSignedness {}; + +template +struct SignedStdintTypeForSize + : detail::StdintTypeForSizeAndSignedness {}; + +template +struct PositionOfSignBit { + static_assert(std::is_integral_v, + "PositionOfSignBit is only for integral types"); + // 8 here should be CHAR_BIT from limits.h, but the world has moved on. + static const size_t value = 8 * sizeof(IntegerType) - 1; +}; + +} // namespace mozilla + +#endif // mozilla_IntegerTypeTraits_h diff --git a/mfbt/JSONWriter.cpp b/mfbt/JSONWriter.cpp new file mode 100644 index 0000000000..144291ae6a --- /dev/null +++ b/mfbt/JSONWriter.cpp @@ -0,0 +1,47 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 
2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/JSONWriter.h" + +namespace mozilla { +namespace detail { + +// The chars with non-'___' entries in this table are those that can be +// represented with a two-char escape sequence. The value is the second char in +// the sequence, that which follows the initial backslash. +#define ___ 0 +const char gTwoCharEscapes[256] = { + /* 0 1 2 3 4 5 6 7 8 9 */ + /* 0+ */ ___, ___, ___, ___, ___, ___, ___, ___, 'b', 't', + /* 10+ */ 'n', ___, 'f', 'r', ___, ___, ___, ___, ___, ___, + /* 20+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 30+ */ ___, ___, ___, ___, '"', ___, ___, ___, ___, ___, + /* 40+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 50+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 60+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 70+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 80+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 90+ */ ___, ___, '\\', ___, ___, ___, ___, ___, ___, ___, + /* 100+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 110+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 120+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 130+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 140+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 150+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 160+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 170+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 180+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 190+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 200+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 210+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 220+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 230+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 240+ */ ___, ___, ___, ___, ___, ___, ___, ___, ___, ___, + /* 250+ */ ___, ___, ___, ___, ___, ___}; +#undef ___ + +} // namespace detail +} // namespace mozilla diff --git a/mfbt/JSONWriter.h b/mfbt/JSONWriter.h new file mode 100644 index 0000000000..f779ee9837 --- /dev/null +++ b/mfbt/JSONWriter.h @@ -0,0 +1,545 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* A JSON pretty-printer class. */ + +// A typical JSON-writing library requires you to first build up a data +// structure that represents a JSON object and then serialize it (to file, or +// somewhere else). This approach makes for a clean API, but building the data +// structure takes up memory. Sometimes that isn't desirable, such as when the +// JSON data is produced for memory reporting. +// +// The JSONWriter class instead allows JSON data to be written out +// incrementally without building up large data structures. +// +// The API is slightly uglier than you would see in a typical JSON-writing +// library, but still fairly easy to use. It's possible to generate invalid +// JSON with JSONWriter, but typically the most basic testing will identify any +// such problems. +// +// Similarly, there are no RAII facilities for automatically closing objects +// and arrays. 
These would be nice if you are generating all your code within +// nested functions, but in other cases you'd have to maintain an explicit +// stack of RAII objects and manually unwind it, which is no better than just +// calling "end" functions. Furthermore, the consequences of forgetting to +// close an object or array are obvious and, again, will be identified via +// basic testing, unlike other cases where RAII is typically used (e.g. smart +// pointers) and the consequences of defects are more subtle. +// +// Importantly, the class does solve the two hard problems of JSON +// pretty-printing, which are (a) correctly escaping strings, and (b) adding +// appropriate indentation and commas between items. +// +// By default, every property is placed on its own line. However, it is +// possible to request that objects and arrays be placed entirely on a single +// line, which can reduce output size significantly in some cases. +// +// Strings used (for property names and string property values) are |const +// char*| throughout, and can be ASCII or UTF-8. +// +// EXAMPLE +// ------- +// Assume that |MyWriteFunc| is a class that implements |JSONWriteFunc|. The +// following code: +// +// JSONWriter w(MakeUnique()); +// w.Start(); +// { +// w.NullProperty("null"); +// w.BoolProperty("bool", true); +// w.IntProperty("int", 1); +// w.StartArrayProperty("array"); +// { +// w.StringElement("string"); +// w.StartObjectElement(); +// { +// w.DoubleProperty("double", 3.4); +// w.StartArrayProperty("single-line array", w.SingleLineStyle); +// { +// w.IntElement(1); +// w.StartObjectElement(); // SingleLineStyle is inherited from +// w.EndObjectElement(); // above for this collection +// } +// w.EndArray(); +// } +// w.EndObjectElement(); +// } +// w.EndArrayProperty(); +// } +// w.End(); +// +// will produce pretty-printed output for the following JSON object: +// +// { +// "null": null, +// "bool": true, +// "int": 1, +// "array": [ +// "string", +// { +// "double": 3.4, +// "single-line array": [1, {}] +// } +// ] +// } +// +// The nesting in the example code is obviously optional, but can aid +// readability. + +#ifndef mozilla_JSONWriter_h +#define mozilla_JSONWriter_h + +#include "double-conversion/double-conversion.h" +#include "mozilla/Assertions.h" +#include "mozilla/IntegerPrintfMacros.h" +#include "mozilla/PodOperations.h" +#include "mozilla/Span.h" +#include "mozilla/Sprintf.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/Vector.h" + +#include + +namespace mozilla { + +// A quasi-functor for JSONWriter. We don't use a true functor because that +// requires templatizing JSONWriter, and the templatization seeps to lots of +// places we don't want it to. +class JSONWriteFunc { + public: + virtual void Write(const Span& aStr) = 0; + virtual ~JSONWriteFunc() = default; +}; + +// Ideally this would be within |EscapedString| but when compiling with GCC +// on Linux that caused link errors, whereas this formulation didn't. +namespace detail { +extern MFBT_DATA const char gTwoCharEscapes[256]; +} // namespace detail + +class JSONWriter { + // From http://www.ietf.org/rfc/rfc4627.txt: + // + // "All Unicode characters may be placed within the quotation marks except + // for the characters that must be escaped: quotation mark, reverse + // solidus, and the control characters (U+0000 through U+001F)." 
+  //
+  // This implementation uses two-char escape sequences where possible,
+  // namely:
+  //
+  //   \", \\, \b, \f, \n, \r, \t
+  //
+  // All control characters not in the above list are represented with a
+  // six-char escape sequence, e.g. '\u000b' (a.k.a. '\v').
+  //
+  class EscapedString {
+    // `mStringSpan` initially points at the user-provided string. If that
+    // string needs escaping, `mStringSpan` will point at `mOwnedStr` below.
+    Span<const char> mStringSpan;
+    // String storage in case escaping is actually needed, null otherwise.
+    UniquePtr<char[]> mOwnedStr;
+
+    void CheckInvariants() const {
+      // Either there was no escaping so `mOwnedStr` is null, or escaping was
+      // needed, in which case `mStringSpan` should point at `mOwnedStr`.
+      MOZ_ASSERT(!mOwnedStr || mStringSpan.data() == mOwnedStr.get());
+    }
+
+    static char hexDigitToAsciiChar(uint8_t u) {
+      u = u & 0xf;
+      return u < 10 ? '0' + u : 'a' + (u - 10);
+    }
+
+   public:
+    explicit EscapedString(const Span<const char>& aStr) : mStringSpan(aStr) {
+      // First, see if we need to modify the string.
+      size_t nExtra = 0;
+      for (const char& c : aStr) {
+        // ensure it can't be interpreted as negative
+        uint8_t u = static_cast<uint8_t>(c);
+        if (u == 0) {
+          // Null terminator within the span, assume we may have been given a
+          // span to a buffer that contains a null-terminated string in it.
+          // We need to truncate the Span so that it doesn't include this null
+          // terminator and anything past it; either we will return it as-is,
+          // or processing should stop there.
+          mStringSpan = mStringSpan.First(&c - mStringSpan.data());
+          break;
+        }
+        if (detail::gTwoCharEscapes[u]) {
+          nExtra += 1;
+        } else if (u <= 0x1f) {
+          nExtra += 5;
+        }
+      }
+
+      // Note: Don't use `aStr` anymore, as it could contain a null terminator;
+      // use the correctly-sized `mStringSpan` instead.
+
+      if (nExtra == 0) {
+        // No escapes needed. mStringSpan already points at the original
+        // string.
+        CheckInvariants();
+        return;
+      }
+
+      // Escapes are needed. We'll create a new string.
+      mOwnedStr = MakeUnique<char[]>(mStringSpan.Length() + nExtra);
+
+      size_t i = 0;
+      for (const char c : mStringSpan) {
+        // ensure it can't be interpreted as negative
+        uint8_t u = static_cast<uint8_t>(c);
+        MOZ_ASSERT(u != 0, "Null terminator should have been handled above");
+        if (detail::gTwoCharEscapes[u]) {
+          mOwnedStr[i++] = '\\';
+          mOwnedStr[i++] = detail::gTwoCharEscapes[u];
+        } else if (u <= 0x1f) {
+          mOwnedStr[i++] = '\\';
+          mOwnedStr[i++] = 'u';
+          mOwnedStr[i++] = '0';
+          mOwnedStr[i++] = '0';
+          mOwnedStr[i++] = hexDigitToAsciiChar((u & 0x00f0) >> 4);
+          mOwnedStr[i++] = hexDigitToAsciiChar(u & 0x000f);
+        } else {
+          mOwnedStr[i++] = u;
+        }
+      }
+      MOZ_ASSERT(i == mStringSpan.Length() + nExtra);
+      mStringSpan = Span(mOwnedStr.get(), i);
+      CheckInvariants();
+    }
+
+    explicit EscapedString(const char* aStr) = delete;
+
+    const Span<const char>& SpanRef() const { return mStringSpan; }
+  };
+
+ public:
+  // Collections (objects and arrays) are printed in a multi-line style by
+  // default. This can be changed to a single-line style if SingleLineStyle is
+  // specified. If a collection is printed in single-line style, every nested
+  // collection within it is also printed in single-line style, even if
+  // multi-line style is requested.
+  // If SingleLineStyle is set in the constructor, all JSON whitespace is
+  // eliminated, including spaces after colons and commas, for the most compact
+  // encoding possible.
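+  //
+  // For example (an illustrative sketch; w is a writer constructed with
+  // SingleLineStyle):
+  //
+  //   w.Start();
+  //   w.IntProperty("a", 1);
+  //   w.StartArrayProperty("b");
+  //   w.IntElement(2);
+  //   w.EndArray();
+  //   w.End();
+  //
+  // produces exactly: {"a":1,"b":[2]}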
+ enum CollectionStyle { + MultiLineStyle, // the default + SingleLineStyle + }; + + protected: + static constexpr Span scArrayBeginString = MakeStringSpan("["); + static constexpr Span scArrayEndString = MakeStringSpan("]"); + static constexpr Span scCommaString = MakeStringSpan(","); + static constexpr Span scEmptyString = MakeStringSpan(""); + static constexpr Span scFalseString = MakeStringSpan("false"); + static constexpr Span scNewLineString = MakeStringSpan("\n"); + static constexpr Span scNullString = MakeStringSpan("null"); + static constexpr Span scObjectBeginString = MakeStringSpan("{"); + static constexpr Span scObjectEndString = MakeStringSpan("}"); + static constexpr Span scPropertyBeginString = + MakeStringSpan("\""); + static constexpr Span scPropertyEndString = MakeStringSpan("\":"); + static constexpr Span scQuoteString = MakeStringSpan("\""); + static constexpr Span scSpaceString = MakeStringSpan(" "); + static constexpr Span scTopObjectBeginString = + MakeStringSpan("{"); + static constexpr Span scTopObjectEndString = MakeStringSpan("}"); + static constexpr Span scTrueString = MakeStringSpan("true"); + + JSONWriteFunc& mWriter; + const UniquePtr mMaybeOwnedWriter; + Vector mNeedComma; // do we need a comma at depth N? + Vector mNeedNewlines; // do we need newlines at depth N? + size_t mDepth; // the current nesting depth + + void Indent() { + for (size_t i = 0; i < mDepth; i++) { + mWriter.Write(scSpaceString); + } + } + + // Adds whatever is necessary (maybe a comma, and then a newline and + // whitespace) to separate an item (property or element) from what's come + // before. + void Separator() { + if (mNeedComma[mDepth]) { + mWriter.Write(scCommaString); + } + if (mDepth > 0 && mNeedNewlines[mDepth]) { + mWriter.Write(scNewLineString); + Indent(); + } else if (mNeedComma[mDepth] && mNeedNewlines[0]) { + mWriter.Write(scSpaceString); + } + } + + void PropertyNameAndColon(const Span& aName) { + mWriter.Write(scPropertyBeginString); + mWriter.Write(EscapedString(aName).SpanRef()); + mWriter.Write(scPropertyEndString); + if (mNeedNewlines[0]) { + mWriter.Write(scSpaceString); + } + } + + void Scalar(const Span& aMaybePropertyName, + const Span& aStringValue) { + Separator(); + if (!aMaybePropertyName.empty()) { + PropertyNameAndColon(aMaybePropertyName); + } + mWriter.Write(aStringValue); + mNeedComma[mDepth] = true; + } + + void QuotedScalar(const Span& aMaybePropertyName, + const Span& aStringValue) { + Separator(); + if (!aMaybePropertyName.empty()) { + PropertyNameAndColon(aMaybePropertyName); + } + mWriter.Write(scQuoteString); + mWriter.Write(aStringValue); + mWriter.Write(scQuoteString); + mNeedComma[mDepth] = true; + } + + void NewVectorEntries(bool aNeedNewLines) { + // If these tiny allocations OOM we might as well just crash because we + // must be in serious memory trouble. + MOZ_RELEASE_ASSERT(mNeedComma.resizeUninitialized(mDepth + 1)); + MOZ_RELEASE_ASSERT(mNeedNewlines.resizeUninitialized(mDepth + 1)); + mNeedComma[mDepth] = false; + mNeedNewlines[mDepth] = aNeedNewLines; + } + + void StartCollection(const Span& aMaybePropertyName, + const Span& aStartChar, + CollectionStyle aStyle = MultiLineStyle) { + Separator(); + if (!aMaybePropertyName.empty()) { + PropertyNameAndColon(aMaybePropertyName); + } + mWriter.Write(aStartChar); + mNeedComma[mDepth] = true; + mDepth++; + NewVectorEntries(mNeedNewlines[mDepth - 1] && aStyle == MultiLineStyle); + } + + // Adds the whitespace and closing char necessary to end a collection. 
+ void EndCollection(const Span& aEndChar) { + MOZ_ASSERT(mDepth > 0); + if (mNeedNewlines[mDepth]) { + mWriter.Write(scNewLineString); + mDepth--; + Indent(); + } else { + mDepth--; + } + mWriter.Write(aEndChar); + } + + public: + explicit JSONWriter(JSONWriteFunc& aWriter, + CollectionStyle aStyle = MultiLineStyle) + : mWriter(aWriter), mNeedComma(), mNeedNewlines(), mDepth(0) { + NewVectorEntries(aStyle == MultiLineStyle); + } + + explicit JSONWriter(UniquePtr aWriter, + CollectionStyle aStyle = MultiLineStyle) + : mWriter(*aWriter), + mMaybeOwnedWriter(std::move(aWriter)), + mNeedComma(), + mNeedNewlines(), + mDepth(0) { + MOZ_RELEASE_ASSERT( + mMaybeOwnedWriter, + "JSONWriter must be given a non-null UniquePtr"); + NewVectorEntries(aStyle == MultiLineStyle); + } + + // Returns the JSONWriteFunc passed in at creation, for temporary use. The + // JSONWriter object still owns the JSONWriteFunc. + JSONWriteFunc& WriteFunc() const { return mWriter; } + + // For all the following functions, the "Prints:" comment indicates what the + // basic output looks like. However, it doesn't indicate the whitespace and + // trailing commas, which are automatically added as required. + // + // All property names and string properties are escaped as necessary. + + // Prints: { + void Start(CollectionStyle aStyle = MultiLineStyle) { + StartCollection(scEmptyString, scTopObjectBeginString, aStyle); + } + + // Prints: } and final newline. + void End() { + EndCollection(scTopObjectEndString); + if (mNeedNewlines[mDepth]) { + mWriter.Write(scNewLineString); + } + } + + // Prints: "": null + void NullProperty(const Span& aName) { + Scalar(aName, scNullString); + } + + template + void NullProperty(const char (&aName)[N]) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + NullProperty(Span(aName, N)); + } + + // Prints: null + void NullElement() { NullProperty(scEmptyString); } + + // Prints: "": + void BoolProperty(const Span& aName, bool aBool) { + Scalar(aName, aBool ? scTrueString : scFalseString); + } + + template + void BoolProperty(const char (&aName)[N], bool aBool) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + BoolProperty(Span(aName, N), aBool); + } + + // Prints: + void BoolElement(bool aBool) { BoolProperty(scEmptyString, aBool); } + + // Prints: "": + void IntProperty(const Span& aName, int64_t aInt) { + char buf[64]; + int len = SprintfLiteral(buf, "%" PRId64, aInt); + MOZ_RELEASE_ASSERT(len > 0); + Scalar(aName, Span(buf, size_t(len))); + } + + template + void IntProperty(const char (&aName)[N], int64_t aInt) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + IntProperty(Span(aName, N), aInt); + } + + // Prints: + void IntElement(int64_t aInt) { IntProperty(scEmptyString, aInt); } + + // Prints: "": + void DoubleProperty(const Span& aName, double aDouble) { + static const size_t buflen = 64; + char buf[buflen]; + const double_conversion::DoubleToStringConverter& converter = + double_conversion::DoubleToStringConverter::EcmaScriptConverter(); + double_conversion::StringBuilder builder(buf, buflen); + converter.ToShortest(aDouble, &builder); + // TODO: The builder should know the length?! 
+ Scalar(aName, MakeStringSpan(builder.Finalize())); + } + + template + void DoubleProperty(const char (&aName)[N], double aDouble) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + DoubleProperty(Span(aName, N), aDouble); + } + + // Prints: + void DoubleElement(double aDouble) { DoubleProperty(scEmptyString, aDouble); } + + // Prints: "": "" + void StringProperty(const Span& aName, + const Span& aStr) { + QuotedScalar(aName, EscapedString(aStr).SpanRef()); + } + + template + void StringProperty(const char (&aName)[NN], const Span& aStr) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StringProperty(Span(aName, NN), aStr); + } + + template + void StringProperty(const Span& aName, const char (&aStr)[SN]) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StringProperty(aName, Span(aStr, SN)); + } + + template + void StringProperty(const char (&aName)[NN], const char (&aStr)[SN]) { + // Keep null terminators from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StringProperty(Span(aName, NN), Span(aStr, SN)); + } + + // Prints: "" + void StringElement(const Span& aStr) { + StringProperty(scEmptyString, aStr); + } + + template + void StringElement(const char (&aName)[N]) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StringElement(Span(aName, N)); + } + + // Prints: "": [ + void StartArrayProperty(const Span& aName, + CollectionStyle aStyle = MultiLineStyle) { + StartCollection(aName, scArrayBeginString, aStyle); + } + + template + void StartArrayProperty(const char (&aName)[N], + CollectionStyle aStyle = MultiLineStyle) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StartArrayProperty(Span(aName, N), aStyle); + } + + // Prints: [ + void StartArrayElement(CollectionStyle aStyle = MultiLineStyle) { + StartArrayProperty(scEmptyString, aStyle); + } + + // Prints: ] + void EndArray() { EndCollection(scArrayEndString); } + + // Prints: "": { + void StartObjectProperty(const Span& aName, + CollectionStyle aStyle = MultiLineStyle) { + StartCollection(aName, scObjectBeginString, aStyle); + } + + template + void StartObjectProperty(const char (&aName)[N], + CollectionStyle aStyle = MultiLineStyle) { + // Keep null terminator from literal strings, will be removed by + // EscapedString. This way C buffer arrays can be used as well. + StartObjectProperty(Span(aName, N), aStyle); + } + + // Prints: { + void StartObjectElement(CollectionStyle aStyle = MultiLineStyle) { + StartObjectProperty(scEmptyString, aStyle); + } + + // Prints: } + void EndObject() { EndCollection(scObjectEndString); } +}; + +} // namespace mozilla + +#endif /* mozilla_JSONWriter_h */ diff --git a/mfbt/JsRust.h b/mfbt/JsRust.h new file mode 100644 index 0000000000..ff622e33d4 --- /dev/null +++ b/mfbt/JsRust.h @@ -0,0 +1,21 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Checking for jsrust crate availability for linking. 
+ * For testing, define MOZ_PRETEND_NO_JSRUST to pretend + * that we don't have jsrust. + */ + +#ifndef mozilla_JsRust_h +#define mozilla_JsRust_h + +#if (defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API)) && \ + !defined(MOZ_PRETEND_NO_JSRUST) +# define MOZ_HAS_JSRUST() 1 +#else +# define MOZ_HAS_JSRUST() 0 +#endif + +#endif // mozilla_JsRust_h diff --git a/mfbt/Latin1.h b/mfbt/Latin1.h new file mode 100644 index 0000000000..a57d771b64 --- /dev/null +++ b/mfbt/Latin1.h @@ -0,0 +1,262 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Latin-1 operations (i.e. a byte is the corresponding code point). + * (Note: this is *not* the same as the encoding of windows-1252 or + * latin1 content on the web. In Web terms, this encoding + * corresponds to "isomorphic decode" / "isomorphic encoding" from + * the Infra Standard.) + */ + +#ifndef mozilla_Latin1_h +#define mozilla_Latin1_h + +#include + +#include "mozilla/JsRust.h" +#include "mozilla/Span.h" + +#if MOZ_HAS_JSRUST() +# include "encoding_rs_mem.h" +#endif + +namespace mozilla { + +namespace detail { + +// It's important for optimizations that Latin1ness checks +// and inflation/deflation function use the same short +// string limit. The limit is 16, because that's the shortest +// that inflates/deflates using SIMD. +constexpr size_t kShortStringLimitForInlinePaths = 16; + +template +class MakeUnsignedChar { + public: + using Type = std::make_unsigned_t; +}; + +template <> +class MakeUnsignedChar { + public: + using Type = char16_t; +}; + +template <> +class MakeUnsignedChar { + public: + using Type = char32_t; +}; + +} // namespace detail + +/** + * Returns true iff |aChar| is Latin-1 but not ASCII, i.e. in the range + * [0x80, 0xFF]. + */ +template +constexpr bool IsNonAsciiLatin1(Char aChar) { + using UnsignedChar = typename detail::MakeUnsignedChar::Type; + auto uc = static_cast(aChar); + return uc >= 0x80 && uc <= 0xFF; +} + +#if MOZ_HAS_JSRUST() + +/** + * Returns |true| iff |aString| contains only Latin1 characters, that is, + * characters in the range [U+0000, U+00FF]. + * + * @param aString a potentially-invalid UTF-16 string to scan + */ +inline bool IsUtf16Latin1(mozilla::Span aString) { + size_t length = aString.Length(); + const char16_t* ptr = aString.Elements(); + // For short strings, calling into Rust is a pessimization, and the SIMD + // code won't have a chance to kick in anyway. + // 16 is a bit larger than logically necessary for this function alone, + // but it's important that the limit here matches the limit used in + // LossyConvertUtf16toLatin1! + if (length < mozilla::detail::kShortStringLimitForInlinePaths) { + char16_t accu = 0; + for (size_t i = 0; i < length; i++) { + accu |= ptr[i]; + } + return accu < 0x100; + } + return encoding_mem_is_utf16_latin1(ptr, length); +} + +/** + * Returns |true| iff |aString| is valid UTF-8 containing only Latin-1 + * characters. + * + * If you know that the argument is always absolutely guaranteed to be valid + * UTF-8, use the faster UnsafeIsValidUtf8Latin1() instead. 
+ * + * @param aString potentially-invalid UTF-8 string to scan + */ +inline bool IsUtf8Latin1(mozilla::Span aString) { + return encoding_mem_is_utf8_latin1(aString.Elements(), aString.Length()); +} + +/** + * Returns |true| iff |aString|, which MUST be valid UTF-8, contains only + * Latin1 characters, that is, characters in the range [U+0000, U+00FF]. + * (If |aString| might not be valid UTF-8, use |IsUtf8Latin1| instead.) + * + * @param aString known-valid UTF-8 string to scan + */ +inline bool UnsafeIsValidUtf8Latin1(mozilla::Span aString) { + return encoding_mem_is_str_latin1(aString.Elements(), aString.Length()); +} + +/** + * Returns the index of first byte that starts an invalid byte + * sequence or a non-Latin1 byte sequence in a potentially-invalid UTF-8 + * string, or the length of the string if there are neither. + * + * If you know that the argument is always absolutely guaranteed to be valid + * UTF-8, use the faster UnsafeValidUtf8Lati1UpTo() instead. + * + * @param aString potentially-invalid UTF-8 string to scan + */ +inline size_t Utf8Latin1UpTo(mozilla::Span aString) { + return encoding_mem_utf8_latin1_up_to(aString.Elements(), aString.Length()); +} + +/** + * Returns the index of first byte that starts a non-Latin1 byte + * sequence in a known-valid UTF-8 string, or the length of the + * string if there are none. (If the string might not be valid + * UTF-8, use Utf8Latin1UpTo() instead.) + * + * @param aString known-valid UTF-8 string to scan + */ +inline size_t UnsafeValidUtf8Lati1UpTo(mozilla::Span aString) { + return encoding_mem_str_latin1_up_to(aString.Elements(), aString.Length()); +} + +/** + * If all the code points in the input are below U+0100, converts to Latin1, + * i.e. unsigned byte value is Unicode scalar value. If there are code points + * above U+00FF, produces unspecified garbage in a memory-safe way. The + * nature of the garbage must not be relied upon. + * + * The length of aDest must not be less than the length of aSource. + */ +inline void LossyConvertUtf16toLatin1(mozilla::Span aSource, + mozilla::Span aDest) { + const char16_t* srcPtr = aSource.Elements(); + size_t srcLen = aSource.Length(); + char* dstPtr = aDest.Elements(); + size_t dstLen = aDest.Length(); + // Avoid function call overhead when SIMD isn't used anyway + // If you change the length limit here, be sure to change + // IsUtf16Latin1 and IsAscii to match so that optimizations don't + // fail! + if (srcLen < mozilla::detail::kShortStringLimitForInlinePaths) { + MOZ_ASSERT(dstLen >= srcLen); + uint8_t* unsignedPtr = reinterpret_cast(dstPtr); + const char16_t* end = srcPtr + srcLen; + while (srcPtr < end) { + *unsignedPtr = static_cast(*srcPtr); + ++srcPtr; + ++unsignedPtr; + } + return; + } + encoding_mem_convert_utf16_to_latin1_lossy(srcPtr, srcLen, dstPtr, dstLen); +} + +/** + * If all the code points in the input are below U+0100, converts to Latin1, + * i.e. unsigned byte value is Unicode scalar value. If there are code points + * above U+00FF, produces unspecified garbage in a memory-safe way. The + * nature of the garbage must not be relied upon. + * + * Returns the number of code units written. + * + * The length of aDest must not be less than the length of aSource. 
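+ *
+ * For example, the five UTF-8 bytes of "café" (with é encoded as 0xC3 0xA9)
+ * deflate to the four Latin-1 bytes 0x63 0x61 0x66 0xE9, and 4 is returned.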
+ */ +inline size_t LossyConvertUtf8toLatin1(mozilla::Span aSource, + mozilla::Span aDest) { + return encoding_mem_convert_utf8_to_latin1_lossy( + aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length()); +} + +/** + * Converts each byte of |aSource|, interpreted as a Unicode scalar value + * having that unsigned value, to its UTF-8 representation in |aDest|. + * + * Returns the number of code units written. + * + * The length of aDest must be at least twice the length of aSource. + */ +inline size_t ConvertLatin1toUtf8(mozilla::Span aSource, + mozilla::Span aDest) { + return encoding_mem_convert_latin1_to_utf8( + aSource.Elements(), aSource.Length(), aDest.Elements(), aDest.Length()); +} + +/** + * Converts bytes whose unsigned value is interpreted as Unicode code point + * (i.e. U+0000 to U+00FF, inclusive) to UTF-8 with potentially insufficient + * output space. + * + * Returns the number of bytes read and the number of bytes written. + * + * If the output isn't large enough, not all input is consumed. + * + * The conversion is guaranteed to be complete if the length of aDest is + * at least the length of aSource times two. + * + * The output is always valid UTF-8 ending on scalar value boundary + * even in the case of partial conversion. + * + * The semantics of this function match the semantics of + * TextEncoder.encodeInto. + * https://encoding.spec.whatwg.org/#dom-textencoder-encodeinto + */ +inline std::tuple ConvertLatin1toUtf8Partial( + mozilla::Span aSource, mozilla::Span aDest) { + size_t srcLen = aSource.Length(); + size_t dstLen = aDest.Length(); + encoding_mem_convert_latin1_to_utf8_partial(aSource.Elements(), &srcLen, + aDest.Elements(), &dstLen); + return std::make_tuple(srcLen, dstLen); +} + +/** + * Converts Latin-1 code points (i.e. each byte is the identical code + * point) from |aSource| to UTF-16 code points in |aDest|. + * + * The length of aDest must not be less than the length of aSource. + */ +inline void ConvertLatin1toUtf16(mozilla::Span aSource, + mozilla::Span aDest) { + const char* srcPtr = aSource.Elements(); + size_t srcLen = aSource.Length(); + char16_t* dstPtr = aDest.Elements(); + size_t dstLen = aDest.Length(); + // Avoid function call overhead when SIMD isn't used anyway + if (srcLen < mozilla::detail::kShortStringLimitForInlinePaths) { + MOZ_ASSERT(dstLen >= srcLen); + const uint8_t* unsignedPtr = reinterpret_cast(srcPtr); + const uint8_t* end = unsignedPtr + srcLen; + while (unsignedPtr < end) { + *dstPtr = *unsignedPtr; + ++unsignedPtr; + ++dstPtr; + } + return; + } + encoding_mem_convert_latin1_to_utf16(srcPtr, srcLen, dstPtr, dstLen); +} + +#endif + +}; // namespace mozilla + +#endif // mozilla_Latin1_h diff --git a/mfbt/Likely.h b/mfbt/Likely.h new file mode 100644 index 0000000000..5b65e97241 --- /dev/null +++ b/mfbt/Likely.h @@ -0,0 +1,23 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * MOZ_LIKELY and MOZ_UNLIKELY macros to hint to the compiler how a + * boolean predicate should be branch-predicted. 
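+ *
+ * For example (a typical use; aPtr is a hypothetical pointer argument):
+ *
+ *   if (MOZ_UNLIKELY(!aPtr)) {
+ *     return false;
+ *   }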
+ */ + +#ifndef mozilla_Likely_h +#define mozilla_Likely_h + +#if defined(__clang__) || defined(__GNUC__) +# define MOZ_LIKELY(x) (__builtin_expect(!!(x), 1)) +# define MOZ_UNLIKELY(x) (__builtin_expect(!!(x), 0)) +#else +# define MOZ_LIKELY(x) (!!(x)) +# define MOZ_UNLIKELY(x) (!!(x)) +#endif + +#endif /* mozilla_Likely_h */ diff --git a/mfbt/LinkedList.h b/mfbt/LinkedList.h new file mode 100644 index 0000000000..850b8594c7 --- /dev/null +++ b/mfbt/LinkedList.h @@ -0,0 +1,748 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* A type-safe doubly-linked list class. */ + +/* + * The classes LinkedList and LinkedListElement together form a + * convenient, type-safe doubly-linked list implementation. + * + * The class T which will be inserted into the linked list must inherit from + * LinkedListElement. A given object may be in only one linked list at a + * time. + * + * A LinkedListElement automatically removes itself from the list upon + * destruction, and a LinkedList will fatally assert in debug builds if it's + * non-empty when it's destructed. + * + * For example, you might use LinkedList in a simple observer list class as + * follows. + * + * class Observer : public LinkedListElement + * { + * public: + * void observe(char* aTopic) { ... } + * }; + * + * class ObserverContainer + * { + * private: + * LinkedList list; + * + * public: + * void addObserver(Observer* aObserver) + * { + * // Will assert if |aObserver| is part of another list. + * list.insertBack(aObserver); + * } + * + * void removeObserver(Observer* aObserver) + * { + * // Will assert if |aObserver| is not part of some list. + * aObserver.remove(); + * // Or, will assert if |aObserver| is not part of |list| specifically. + * // aObserver.removeFrom(list); + * } + * + * void notifyObservers(char* aTopic) + * { + * for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext()) { + * o->observe(aTopic); + * } + * } + * }; + * + * Additionally, the class AutoCleanLinkedList is a LinkedList that will + * remove and delete each element still within itself upon destruction. Note + * that because each element is deleted, elements must have been allocated + * using |new|. + */ + +#ifndef mozilla_LinkedList_h +#define mozilla_LinkedList_h + +#include +#include + +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/RefPtr.h" + +#ifdef __cplusplus + +namespace mozilla { + +template +class LinkedListElement; + +namespace detail { + +/** + * LinkedList supports refcounted elements using this adapter class. Clients + * using LinkedList> will get a data structure that holds a strong + * reference to T as long as T is in the list. + */ +template +struct LinkedListElementTraits { + typedef T* RawType; + typedef const T* ConstRawType; + typedef T* ClientType; + typedef const T* ConstClientType; + + // These static methods are called when an element is added to or removed from + // a linked list. It can be used to keep track ownership in lists that are + // supposed to own their elements. If elements are transferred from one list + // to another, no enter or exit calls happen since the elements still belong + // to a list. 
+ static void enterList(LinkedListElement* elt) {} + static void exitList(LinkedListElement* elt) {} + + // This method is called when AutoCleanLinkedList cleans itself + // during destruction. It can be used to call delete on elements if + // the list is the sole owner. + static void cleanElement(LinkedListElement* elt) { delete elt->asT(); } +}; + +template +struct LinkedListElementTraits> { + typedef T* RawType; + typedef const T* ConstRawType; + typedef RefPtr ClientType; + typedef RefPtr ConstClientType; + + static void enterList(LinkedListElement>* elt) { + elt->asT()->AddRef(); + } + static void exitList(LinkedListElement>* elt) { + elt->asT()->Release(); + } + static void cleanElement(LinkedListElement>* elt) {} +}; + +} /* namespace detail */ + +template +class LinkedList; + +template +class LinkedListElement { + typedef typename detail::LinkedListElementTraits Traits; + typedef typename Traits::RawType RawType; + typedef typename Traits::ConstRawType ConstRawType; + typedef typename Traits::ClientType ClientType; + typedef typename Traits::ConstClientType ConstClientType; + + /* + * It's convenient that we return nullptr when getNext() or getPrevious() + * hits the end of the list, but doing so costs an extra word of storage in + * each linked list node (to keep track of whether |this| is the sentinel + * node) and a branch on this value in getNext/getPrevious. + * + * We could get rid of the extra word of storage by shoving the "is + * sentinel" bit into one of the pointers, although this would, of course, + * have performance implications of its own. + * + * But the goal here isn't to win an award for the fastest or slimmest + * linked list; rather, we want a *convenient* linked list. So we won't + * waste time guessing which micro-optimization strategy is best. + * + * + * Speaking of unnecessary work, it's worth addressing here why we wrote + * mozilla::LinkedList in the first place, instead of using stl::list. + * + * The key difference between mozilla::LinkedList and stl::list is that + * mozilla::LinkedList stores the mPrev/mNext pointers in the object itself, + * while stl::list stores the mPrev/mNext pointers in a list element which + * itself points to the object being stored. + * + * mozilla::LinkedList's approach makes it harder to store an object in more + * than one list. But the upside is that you can call next() / prev() / + * remove() directly on the object. With stl::list, you'd need to store a + * pointer to its iterator in the object in order to accomplish this. Not + * only would this waste space, but you'd have to remember to update that + * pointer every time you added or removed the object from a list. + * + * In-place, constant-time removal is a killer feature of doubly-linked + * lists, and supporting this painlessly was a key design criterion. + */ + + private: + LinkedListElement* mNext; + LinkedListElement* mPrev; + const bool mIsSentinel; + + public: + LinkedListElement() : mNext(this), mPrev(this), mIsSentinel(false) {} + + /* + * Moves |aOther| into |*this|. If |aOther| is already in a list, then + * |aOther| is removed from the list and replaced by |*this|. 
+ */ + LinkedListElement(LinkedListElement&& aOther) + : mIsSentinel(aOther.mIsSentinel) { + adjustLinkForMove(std::move(aOther)); + } + + LinkedListElement& operator=(LinkedListElement&& aOther) { + MOZ_ASSERT(mIsSentinel == aOther.mIsSentinel, "Mismatch NodeKind!"); + MOZ_ASSERT(!isInList(), + "Assigning to an element in a list messes up that list!"); + adjustLinkForMove(std::move(aOther)); + return *this; + } + + ~LinkedListElement() { + if (!mIsSentinel && isInList()) { + remove(); + } + } + + /* + * Get the next element in the list, or nullptr if this is the last element + * in the list. + */ + RawType getNext() { return mNext->asT(); } + ConstRawType getNext() const { return mNext->asT(); } + + /* + * Get the previous element in the list, or nullptr if this is the first + * element in the list. + */ + RawType getPrevious() { return mPrev->asT(); } + ConstRawType getPrevious() const { return mPrev->asT(); } + + /* + * Insert aElem after this element in the list. |this| must be part of a + * linked list when you call setNext(); otherwise, this method will assert. + */ + void setNext(RawType aElem) { + MOZ_ASSERT(isInList()); + setNextUnsafe(aElem); + } + + /* + * Insert aElem before this element in the list. |this| must be part of a + * linked list when you call setPrevious(); otherwise, this method will + * assert. + */ + void setPrevious(RawType aElem) { + MOZ_ASSERT(isInList()); + setPreviousUnsafe(aElem); + } + + /* + * Remove this element from the list which contains it. If this element is + * not currently part of a linked list, this method asserts. + */ + void remove() { + MOZ_ASSERT(isInList()); + + mPrev->mNext = mNext; + mNext->mPrev = mPrev; + mNext = this; + mPrev = this; + + Traits::exitList(this); + } + + /* + * Remove this element from the list containing it. Returns a pointer to the + * element that follows this element (before it was removed). This method + * asserts if the element does not belong to a list. Note: In a refcounted + * list, |this| may be destroyed. + */ + RawType removeAndGetNext() { + RawType r = getNext(); + remove(); + return r; + } + + /* + * Remove this element from the list containing it. Returns a pointer to the + * previous element in the containing list (before the removal). This method + * asserts if the element does not belong to a list. Note: In a refcounted + * list, |this| may be destroyed. + */ + RawType removeAndGetPrevious() { + RawType r = getPrevious(); + remove(); + return r; + } + + /* + * Identical to remove(), but also asserts in debug builds that this element + * is in aList. + */ + void removeFrom(const LinkedList& aList) { + aList.assertContains(asT()); + remove(); + } + + /* + * Return true if |this| part is of a linked list, and false otherwise. + */ + bool isInList() const { + MOZ_ASSERT((mNext == this) == (mPrev == this)); + return mNext != this; + } + + private: + friend class LinkedList; + friend struct detail::LinkedListElementTraits; + + enum class NodeKind { Normal, Sentinel }; + + explicit LinkedListElement(NodeKind nodeKind) + : mNext(this), mPrev(this), mIsSentinel(nodeKind == NodeKind::Sentinel) {} + + /* + * Return |this| cast to T* if we're a normal node, or return nullptr if + * we're a sentinel node. + */ + RawType asT() { return mIsSentinel ? nullptr : static_cast(this); } + ConstRawType asT() const { + return mIsSentinel ? nullptr : static_cast(this); + } + + /* + * Insert aElem after this element, but don't check that this element is in + * the list. This is called by LinkedList::insertFront(). 
+ */ + void setNextUnsafe(RawType aElem) { + LinkedListElement* listElem = static_cast(aElem); + MOZ_RELEASE_ASSERT(!listElem->isInList()); + + listElem->mNext = this->mNext; + listElem->mPrev = this; + this->mNext->mPrev = listElem; + this->mNext = listElem; + + Traits::enterList(aElem); + } + + /* + * Insert aElem before this element, but don't check that this element is in + * the list. This is called by LinkedList::insertBack(). + */ + void setPreviousUnsafe(RawType aElem) { + LinkedListElement* listElem = static_cast*>(aElem); + MOZ_RELEASE_ASSERT(!listElem->isInList()); + + listElem->mNext = this; + listElem->mPrev = this->mPrev; + this->mPrev->mNext = listElem; + this->mPrev = listElem; + + Traits::enterList(aElem); + } + + /* + * Transfers the elements [aBegin, aEnd) before the "this" list element. + */ + void transferBeforeUnsafe(LinkedListElement& aBegin, + LinkedListElement& aEnd) { + MOZ_RELEASE_ASSERT(!aBegin.mIsSentinel); + if (!aBegin.isInList() || !aEnd.isInList()) { + return; + } + + auto otherPrev = aBegin.mPrev; + + aBegin.mPrev = this->mPrev; + this->mPrev->mNext = &aBegin; + this->mPrev = aEnd.mPrev; + aEnd.mPrev->mNext = this; + + // Patch the gap in the source list + otherPrev->mNext = &aEnd; + aEnd.mPrev = otherPrev; + } + + /* + * Adjust mNext and mPrev for implementing move constructor and move + * assignment. + */ + void adjustLinkForMove(LinkedListElement&& aOther) { + if (!aOther.isInList()) { + mNext = this; + mPrev = this; + return; + } + + if (!mIsSentinel) { + Traits::enterList(this); + } + + MOZ_ASSERT(aOther.mNext->mPrev == &aOther); + MOZ_ASSERT(aOther.mPrev->mNext == &aOther); + + /* + * Initialize |this| with |aOther|'s mPrev/mNext pointers, and adjust those + * element to point to this one. + */ + mNext = aOther.mNext; + mPrev = aOther.mPrev; + + mNext->mPrev = this; + mPrev->mNext = this; + + /* + * Adjust |aOther| so it doesn't think it's in a list. This makes it + * safely destructable. 
+ */ + aOther.mNext = &aOther; + aOther.mPrev = &aOther; + + if (!mIsSentinel) { + Traits::exitList(&aOther); + } + } + + LinkedListElement& operator=(const LinkedListElement& aOther) = delete; + LinkedListElement(const LinkedListElement& aOther) = delete; +}; + +template +class LinkedList { + private: + typedef typename detail::LinkedListElementTraits Traits; + typedef typename Traits::RawType RawType; + typedef typename Traits::ConstRawType ConstRawType; + typedef typename Traits::ClientType ClientType; + typedef typename Traits::ConstClientType ConstClientType; + typedef LinkedListElement* ElementType; + typedef const LinkedListElement* ConstElementType; + + LinkedListElement sentinel; + + public: + template + class Iterator { + Type mCurrent; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + explicit Iterator(Type aCurrent) : mCurrent(aCurrent) {} + + Type operator*() const { return mCurrent; } + + const Iterator& operator++() { + mCurrent = static_cast(mCurrent)->getNext(); + return *this; + } + + bool operator!=(const Iterator& aOther) const { + return mCurrent != aOther.mCurrent; + } + }; + + LinkedList() : sentinel(LinkedListElement::NodeKind::Sentinel) {} + + LinkedList(LinkedList&& aOther) : sentinel(std::move(aOther.sentinel)) {} + + LinkedList& operator=(LinkedList&& aOther) { + MOZ_ASSERT(isEmpty(), + "Assigning to a non-empty list leaks elements in that list!"); + sentinel = std::move(aOther.sentinel); + return *this; + } + + ~LinkedList() { +# ifdef DEBUG + if (!isEmpty()) { + MOZ_CRASH_UNSAFE_PRINTF( + "%s has a buggy user: " + "it should have removed all this list's elements before " + "the list's destruction", + __PRETTY_FUNCTION__); + } +# endif + } + + /* + * Add aElem to the front of the list. + */ + void insertFront(RawType aElem) { + /* Bypass setNext()'s this->isInList() assertion. */ + sentinel.setNextUnsafe(aElem); + } + + /* + * Add aElem to the back of the list. + */ + void insertBack(RawType aElem) { sentinel.setPreviousUnsafe(aElem); } + + /* + * Move all elements from another list to the back + */ + void extendBack(LinkedList&& aOther) { + MOZ_RELEASE_ASSERT(this != &aOther); + if (aOther.isEmpty()) { + return; + } + sentinel.transferBeforeUnsafe(**aOther.begin(), aOther.sentinel); + } + + /* + * Move elements from another list to the specified position + */ + void splice(size_t aDestinationPos, LinkedList& aListFrom, + size_t aSourceStart, size_t aSourceLen) { + MOZ_RELEASE_ASSERT(this != &aListFrom); + if (aListFrom.isEmpty() || !aSourceLen) { + return; + } + + const auto safeForward = [](LinkedList& aList, + LinkedListElement& aBegin, + size_t aPos) -> LinkedListElement& { + auto* iter = &aBegin; + for (size_t i = 0; i < aPos; ++i, (iter = iter->mNext)) { + if (iter->mIsSentinel) { + break; + } + } + return *iter; + }; + + auto& sourceBegin = + safeForward(aListFrom, *aListFrom.sentinel.mNext, aSourceStart); + if (sourceBegin.mIsSentinel) { + return; + } + auto& sourceEnd = safeForward(aListFrom, sourceBegin, aSourceLen); + auto& destination = safeForward(*this, *sentinel.mNext, aDestinationPos); + + destination.transferBeforeUnsafe(sourceBegin, sourceEnd); + } + + /* + * Get the first element of the list, or nullptr if the list is empty. 
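+   * Equivalently, |getFirst()| returns nullptr exactly when |isEmpty()|.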
+ */ + RawType getFirst() { return sentinel.getNext(); } + ConstRawType getFirst() const { return sentinel.getNext(); } + + /* + * Get the last element of the list, or nullptr if the list is empty. + */ + RawType getLast() { return sentinel.getPrevious(); } + ConstRawType getLast() const { return sentinel.getPrevious(); } + + /* + * Get and remove the first element of the list. If the list is empty, + * return nullptr. + */ + ClientType popFirst() { + ClientType ret = sentinel.getNext(); + if (ret) { + static_cast*>(RawType(ret))->remove(); + } + return ret; + } + + /* + * Get and remove the last element of the list. If the list is empty, + * return nullptr. + */ + ClientType popLast() { + ClientType ret = sentinel.getPrevious(); + if (ret) { + static_cast*>(RawType(ret))->remove(); + } + return ret; + } + + /* + * Return true if the list is empty, or false otherwise. + */ + bool isEmpty() const { return !sentinel.isInList(); } + + /** + * Returns whether the given element is in the list. + */ + bool contains(ConstRawType aElm) const { + return std::find(begin(), end(), aElm) != end(); + } + + /* + * Remove all the elements from the list. + * + * This runs in time linear to the list's length, because we have to mark + * each element as not in the list. + */ + void clear() { + while (popFirst()) { + } + } + + /** + * Return the length of elements in the list. + */ + size_t length() const { return std::distance(begin(), end()); } + + /* + * Allow range-based iteration: + * + * for (MyElementType* elt : myList) { ... } + */ + Iterator begin() { + return Iterator(getFirst()); + } + Iterator begin() const { + return Iterator(getFirst()); + } + Iterator end() { + return Iterator(nullptr); + } + Iterator end() const { + return Iterator(nullptr); + } + + /* + * Measures the memory consumption of the list excluding |this|. Note that + * it only measures the list elements themselves. If the list elements + * contain pointers to other memory blocks, those blocks must be measured + * separately during a subsequent iteration over the list. + */ + size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + size_t n = 0; + ConstRawType t = getFirst(); + while (t) { + n += aMallocSizeOf(t); + t = static_cast*>(t)->getNext(); + } + return n; + } + + /* + * Like sizeOfExcludingThis(), but measures |this| as well. + */ + size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf); + } + + /* + * In a debug build, make sure that the list is sane (no cycles, consistent + * mNext/mPrev pointers, only one sentinel). Has no effect in release builds. + */ + void debugAssertIsSane() const { +# ifdef DEBUG + const LinkedListElement* slow; + const LinkedListElement* fast1; + const LinkedListElement* fast2; + + /* + * Check for cycles in the forward singly-linked list using the + * tortoise/hare algorithm. + */ + for (slow = sentinel.mNext, fast1 = sentinel.mNext->mNext, + fast2 = sentinel.mNext->mNext->mNext; + slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel; + slow = slow->mNext, fast1 = fast2->mNext, fast2 = fast1->mNext) { + MOZ_ASSERT(slow != fast1); + MOZ_ASSERT(slow != fast2); + } + + /* Check for cycles in the backward singly-linked list. 
*/ + for (slow = sentinel.mPrev, fast1 = sentinel.mPrev->mPrev, + fast2 = sentinel.mPrev->mPrev->mPrev; + slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel; + slow = slow->mPrev, fast1 = fast2->mPrev, fast2 = fast1->mPrev) { + MOZ_ASSERT(slow != fast1); + MOZ_ASSERT(slow != fast2); + } + + /* + * Check that |sentinel| is the only node in the list with + * mIsSentinel == true. + */ + for (const LinkedListElement* elem = sentinel.mNext; elem != &sentinel; + elem = elem->mNext) { + MOZ_ASSERT(!elem->mIsSentinel); + } + + /* Check that the mNext/mPrev pointers match up. */ + const LinkedListElement* prev = &sentinel; + const LinkedListElement* cur = sentinel.mNext; + do { + MOZ_ASSERT(cur->mPrev == prev); + MOZ_ASSERT(prev->mNext == cur); + + prev = cur; + cur = cur->mNext; + } while (cur != &sentinel); +# endif /* ifdef DEBUG */ + } + + private: + friend class LinkedListElement; + + void assertContains(const RawType aValue) const { +# ifdef DEBUG + for (ConstRawType elem = getFirst(); elem; elem = elem->getNext()) { + if (elem == aValue) { + return; + } + } + MOZ_CRASH("element wasn't found in this list!"); +# endif + } + + LinkedList& operator=(const LinkedList& aOther) = delete; + LinkedList(const LinkedList& aOther) = delete; +}; + +template +inline void ImplCycleCollectionUnlink(LinkedList>& aField) { + aField.clear(); +} + +template +inline void ImplCycleCollectionTraverse( + nsCycleCollectionTraversalCallback& aCallback, + LinkedList>& aField, const char* aName, uint32_t aFlags = 0) { + typedef typename detail::LinkedListElementTraits Traits; + typedef typename Traits::RawType RawType; + for (RawType element : aField) { + // RefPtr is stored as a raw pointer in LinkedList. + // So instead of creating a new RefPtr from the raw + // pointer (which is not allowed), we simply call + // CycleCollectionNoteChild against the raw pointer + CycleCollectionNoteChild(aCallback, element, aName, aFlags); + } +} + +template +class AutoCleanLinkedList : public LinkedList { + private: + using Traits = detail::LinkedListElementTraits; + using ClientType = typename detail::LinkedListElementTraits::ClientType; + + public: + AutoCleanLinkedList() = default; + AutoCleanLinkedList(AutoCleanLinkedList&&) = default; + ~AutoCleanLinkedList() { clear(); } + + AutoCleanLinkedList& operator=(AutoCleanLinkedList&& aOther) = default; + + void clear() { + while (ClientType element = this->popFirst()) { + Traits::cleanElement(element); + } + } +}; + +} /* namespace mozilla */ + +#endif /* __cplusplus */ + +#endif /* mozilla_LinkedList_h */ diff --git a/mfbt/MacroArgs.h b/mfbt/MacroArgs.h new file mode 100644 index 0000000000..9afaaef945 --- /dev/null +++ b/mfbt/MacroArgs.h @@ -0,0 +1,97 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Implements various macros meant to ease the use of variadic macros. + */ + +#ifndef mozilla_MacroArgs_h +#define mozilla_MacroArgs_h + +// Concatenates pre-processor tokens in a way that can be used with __LINE__. +#define MOZ_CONCAT2(x, y) x##y +#define MOZ_CONCAT(x, y) MOZ_CONCAT2(x, y) + +/* + * MOZ_ARG_COUNT(...) counts the number of variadic arguments. + * You must pass in between 0 and 50 (inclusive) variadic arguments. 
+ * For example: + * + * MOZ_ARG_COUNT() expands to 0 + * MOZ_ARG_COUNT(a) expands to 1 + * MOZ_ARG_COUNT(a, b) expands to 2 + * + * Implementation notes: + * The `##__VA_ARGS__` form is a GCC extension that removes the comma if + * __VA_ARGS__ is empty. It is supported by Clang too. MSVC ignores ##, + * and its default behavior is already to strip the comma when __VA_ARGS__ + * is empty. + * + * So MOZ_MACROARGS_ARG_COUNT_HELPER() expands to + * (_, 50, 49, ...) + * MOZ_MACROARGS_ARG_COUNT_HELPER(a) expands to + * (_, a, 50, 49, ...) + * etc. + */ +#define MOZ_ARG_COUNT(...) \ + MOZ_MACROARGS_ARG_COUNT_HELPER2(MOZ_MACROARGS_ARG_COUNT_HELPER(__VA_ARGS__)) + +#define MOZ_MACROARGS_ARG_COUNT_HELPER(...) \ + (_, ##__VA_ARGS__, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, \ + 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, \ + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#define MOZ_MACROARGS_ARG_COUNT_HELPER2(aArgs) \ + MOZ_MACROARGS_ARG_COUNT_HELPER3 aArgs + +#define MOZ_MACROARGS_ARG_COUNT_HELPER3( \ + a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, \ + a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, \ + a32, a33, a34, a35, a36, a37, a38, a39, a40, a41, a42, a43, a44, a45, a46, \ + a47, a48, a49, a50, a51, ...) \ + a51 + +/* + * MOZ_PASTE_PREFIX_AND_ARG_COUNT(aPrefix, ...) counts the number of variadic + * arguments and prefixes it with |aPrefix|. For example: + * + * MOZ_PASTE_PREFIX_AND_ARG_COUNT(, foo, 42) expands to 2 + * MOZ_PASTE_PREFIX_AND_ARG_COUNT(A, foo, 42, bar) expands to A3 + * MOZ_PASTE_PREFIX_AND_ARG_COUNT(A) expands to A0 + * MOZ_PASTE_PREFIX_AND_ARG_COUNT() expands to 0, but MSVC warns there + * aren't enough arguments given. + * + * You must pass in between 0 and 50 (inclusive) variadic arguments, past + * |aPrefix|. + */ +#define MOZ_PASTE_PREFIX_AND_ARG_COUNT_GLUE(a, b) a b +#define MOZ_PASTE_PREFIX_AND_ARG_COUNT(aPrefix, ...) \ + MOZ_PASTE_PREFIX_AND_ARG_COUNT_GLUE(MOZ_CONCAT, \ + (aPrefix, MOZ_ARG_COUNT(__VA_ARGS__))) + +/* + * MOZ_ARGS_AFTER_N expands to its arguments excluding the first |N| + * arguments. For example: + * + * MOZ_ARGS_AFTER_2(a, b, c, d) expands to: c, d + */ +#define MOZ_ARGS_AFTER_1(a1, ...) __VA_ARGS__ +#define MOZ_ARGS_AFTER_2(a1, a2, ...) __VA_ARGS__ + +/* + * MOZ_ARG_N expands to its |N|th argument. + */ +#define MOZ_ARG_1(a1, ...) a1 +#define MOZ_ARG_2(a1, a2, ...) a2 +#define MOZ_ARG_3(a1, a2, a3, ...) a3 +#define MOZ_ARG_4(a1, a2, a3, a4, ...) a4 +#define MOZ_ARG_5(a1, a2, a3, a4, a5, ...) a5 +#define MOZ_ARG_6(a1, a2, a3, a4, a5, a6, ...) a6 +#define MOZ_ARG_7(a1, a2, a3, a4, a5, a6, a7, ...) a7 +#define MOZ_ARG_8(a1, a2, a3, a4, a5, a6, a7, a8, ...) a8 +#define MOZ_ARG_9(a1, a2, a3, a4, a5, a6, a7, a8, a9, ...) a9 + +#endif /* mozilla_MacroArgs_h */ diff --git a/mfbt/MacroForEach.h b/mfbt/MacroForEach.h new file mode 100644 index 0000000000..c3067e3620 --- /dev/null +++ b/mfbt/MacroForEach.h @@ -0,0 +1,219 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Implements a higher-order macro for iteratively calling another macro with + * fixed leading arguments, plus a trailing element picked from a second list + * of arguments. 
+ */ + +#ifndef mozilla_MacroForEach_h +#define mozilla_MacroForEach_h + +#include "mozilla/MacroArgs.h" + +/* + * MOZ_FOR_EACH(aMacro, aFixedArgs, aArgs) expands to N calls to the macro + * |aMacro| where N is equal the number of items in the list |aArgs|. The + * arguments for each |aMacro| call are composed of *all* arguments in the list + * |aFixedArgs| as well as a single argument in the list |aArgs|. For example: + * + * #define MACRO_A(x) x + + * int a = MOZ_FOR_EACH(MACRO_A, (), (1, 2, 3)) 0; + * // Expands to: MACRO_A(1) MACRO_A(2) MACRO_A(3) 0; + * // And further to: 1 + 2 + 3 + 0; + * + * #define MACRO_B(k, x) (k + x) + + * int b = MOZ_FOR_EACH(MACRO_B, (5,), (1, 2)) 0; + * // Expands to: MACRO_B(5, 1) MACRO_B(5, 2) 0; + * + * #define MACRO_C(k1, k2, x) (k1 + k2 + x) + + * int c = MOZ_FOR_EACH(MACRO_C, (5, 8,), (1, 2)) 0; + * // Expands to: MACRO_B(5, 8, 1) MACRO_B(5, 8, 2) 0; + * + * MOZ_FOR_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) is identical + * to MOZ_FOR_EACH except that it inserts |aSeparator| between each call to + * the macro. |aSeparator| must be wrapped by parens. For example: + * + * #define MACRO_A(x) x + * int a = MOZ_FOR_EACH_SEPARATED(MACRO_A, (+), (), (1, 2, 3)); + * // Expands to: MACRO_A(1) + MACRO_A(2) + MACRO_A(3); + * // And further to: 1 + 2 + 3 + * + * #define MACRO_B(t, n) t n + * void test(MOZ_FOR_EACH_SEPARATED(MACRO_B, (,), (int,), (a, b))); + * // Expands to: void test(MACRO_B(int, a) , MACRO_B(int, b)); + * // And further to: void test(int a , int b); + * + * If the |aFixedArgs| list is not empty, a trailing comma must be included. + * + * The |aArgs| list may be up to 50 items long. + */ +#define MOZ_FOR_EACH_EXPAND_HELPER(...) __VA_ARGS__ +#define MOZ_FOR_EACH_GLUE(a, b) a b +#define MOZ_FOR_EACH_SEPARATED(aMacro, aSeparator, aFixedArgs, aArgs) \ + MOZ_FOR_EACH_GLUE(MOZ_PASTE_PREFIX_AND_ARG_COUNT( \ + MOZ_FOR_EACH_, MOZ_FOR_EACH_EXPAND_HELPER aArgs), \ + (aMacro, aSeparator, aFixedArgs, aArgs)) +#define MOZ_FOR_EACH(aMacro, aFixedArgs, aArgs) \ + MOZ_FOR_EACH_SEPARATED(aMacro, (), aFixedArgs, aArgs) + +#define MOZ_FOR_EACH_HELPER_GLUE(a, b) a b +#define MOZ_FOR_EACH_HELPER(aMacro, aFixedArgs, aArgs) \ + MOZ_FOR_EACH_HELPER_GLUE( \ + aMacro, (MOZ_FOR_EACH_EXPAND_HELPER aFixedArgs MOZ_ARG_1 aArgs)) + +#define MOZ_FOR_EACH_0(m, s, fa, a) +#define MOZ_FOR_EACH_1(m, s, fa, a) MOZ_FOR_EACH_HELPER(m, fa, a) +#define MOZ_FOR_EACH_2(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_1(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_3(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_2(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_4(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_3(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_5(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_4(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_6(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_5(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_7(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_6(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_8(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_7(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_9(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + 
MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_8(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_10(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_9(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_11(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_10(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_12(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_11(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_13(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_12(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_14(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_13(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_15(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_14(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_16(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_15(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_17(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_16(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_18(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_17(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_19(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_18(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_20(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_19(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_21(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_20(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_22(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_21(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_23(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_22(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_24(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_23(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_25(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_24(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_26(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_25(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_27(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_26(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_28(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_27(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_29(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_28(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_30(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_29(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_31(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_30(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_32(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s 
MOZ_FOR_EACH_31(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_33(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_32(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_34(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_33(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_35(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_34(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_36(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_35(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_37(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_36(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_38(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_37(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_39(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_38(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_40(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_39(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_41(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_40(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_42(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_41(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_43(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_42(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_44(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_43(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_45(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_44(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_46(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_45(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_47(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_46(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_48(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_47(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_49(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_48(m, s, fa, (MOZ_ARGS_AFTER_1 a)) +#define MOZ_FOR_EACH_50(m, s, fa, a) \ + MOZ_FOR_EACH_HELPER(m, fa, a) \ + MOZ_FOR_EACH_EXPAND_HELPER s MOZ_FOR_EACH_49(m, s, fa, (MOZ_ARGS_AFTER_1 a)) + +#endif /* mozilla_MacroForEach_h */ diff --git a/mfbt/MathAlgorithms.h b/mfbt/MathAlgorithms.h new file mode 100644 index 0000000000..66aa1b9f71 --- /dev/null +++ b/mfbt/MathAlgorithms.h @@ -0,0 +1,492 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* mfbt maths algorithms. 
*/ + +#ifndef mozilla_MathAlgorithms_h +#define mozilla_MathAlgorithms_h + +#include "mozilla/Assertions.h" + +#include +#include +#include +#include +#include + +namespace mozilla { + +namespace detail { + +template +struct AllowDeprecatedAbsFixed : std::false_type {}; + +template <> +struct AllowDeprecatedAbsFixed : std::true_type {}; +template <> +struct AllowDeprecatedAbsFixed : std::true_type {}; + +template +struct AllowDeprecatedAbs : AllowDeprecatedAbsFixed {}; + +template <> +struct AllowDeprecatedAbs : std::true_type {}; +template <> +struct AllowDeprecatedAbs : std::true_type {}; + +} // namespace detail + +// DO NOT USE DeprecatedAbs. It exists only until its callers can be converted +// to Abs below, and it will be removed when all callers have been changed. +template +inline std::enable_if_t::value, T> DeprecatedAbs( + const T aValue) { + // The absolute value of the smallest possible value of a signed-integer type + // won't fit in that type (on twos-complement systems -- and we're blithely + // assuming we're on such systems, for the non- types listed above), + // so assert that the input isn't that value. + // + // This is the case if: the value is non-negative; or if adding one (giving a + // value in the range [-maxvalue, 0]), then negating (giving a value in the + // range [0, maxvalue]), doesn't produce maxvalue (because in twos-complement, + // (minvalue + 1) == -maxvalue). + MOZ_ASSERT(aValue >= 0 || + -(aValue + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1), + "You can't negate the smallest possible negative integer!"); + return aValue >= 0 ? aValue : -aValue; +} + +namespace detail { + +template +struct AbsReturnType; + +template +struct AbsReturnType< + T, std::enable_if_t && std::is_signed_v>> { + using Type = std::make_unsigned_t; +}; + +template +struct AbsReturnType>> { + using Type = T; +}; + +} // namespace detail + +template +inline constexpr typename detail::AbsReturnType::Type Abs(const T aValue) { + using ReturnType = typename detail::AbsReturnType::Type; + return aValue >= 0 ? 
ReturnType(aValue) : ~ReturnType(aValue) + 1; +} + +template <> +inline float Abs(const float aFloat) { + return std::fabs(aFloat); +} + +template <> +inline double Abs(const double aDouble) { + return std::fabs(aDouble); +} + +template <> +inline long double Abs(const long double aLongDouble) { + return std::fabs(aLongDouble); +} + +} // namespace mozilla + +#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || \ + defined(_M_X64) || defined(_M_ARM64)) +# define MOZ_BITSCAN_WINDOWS + +# include +# pragma intrinsic(_BitScanForward, _BitScanReverse) + +# if defined(_M_AMD64) || defined(_M_X64) || defined(_M_ARM64) +# define MOZ_BITSCAN_WINDOWS64 +# pragma intrinsic(_BitScanForward64, _BitScanReverse64) +# endif + +#endif + +namespace mozilla { + +namespace detail { + +#if defined(MOZ_BITSCAN_WINDOWS) + +inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) { + unsigned long index; + if (!_BitScanReverse(&index, static_cast(aValue))) return 32; + return uint_fast8_t(31 - index); +} + +inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) { + unsigned long index; + if (!_BitScanForward(&index, static_cast(aValue))) return 32; + return uint_fast8_t(index); +} + +inline uint_fast8_t CountPopulation32(uint32_t aValue) { + uint32_t x = aValue - ((aValue >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24; +} +inline uint_fast8_t CountPopulation64(uint64_t aValue) { + return uint_fast8_t(CountPopulation32(aValue & 0xffffffff) + + CountPopulation32(aValue >> 32)); +} + +inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) { +# if defined(MOZ_BITSCAN_WINDOWS64) + unsigned long index; + if (!_BitScanReverse64(&index, static_cast(aValue))) + return 64; + return uint_fast8_t(63 - index); +# else + uint32_t hi = uint32_t(aValue >> 32); + if (hi != 0) { + return CountLeadingZeroes32(hi); + } + return 32u + CountLeadingZeroes32(uint32_t(aValue)); +# endif +} + +inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) { +# if defined(MOZ_BITSCAN_WINDOWS64) + unsigned long index; + if (!_BitScanForward64(&index, static_cast(aValue))) + return 64; + return uint_fast8_t(index); +# else + uint32_t lo = uint32_t(aValue); + if (lo != 0) { + return CountTrailingZeroes32(lo); + } + return 32u + CountTrailingZeroes32(uint32_t(aValue >> 32)); +# endif +} + +#elif defined(__clang__) || defined(__GNUC__) + +# if defined(__clang__) +# if !__has_builtin(__builtin_ctz) || !__has_builtin(__builtin_clz) +# error "A clang providing __builtin_c[lt]z is required to build" +# endif +# else +// gcc has had __builtin_clz and friends since 3.4: no need to check. +# endif + +inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) { + return static_cast(__builtin_clz(aValue)); +} + +inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) { + return static_cast(__builtin_ctz(aValue)); +} + +inline uint_fast8_t CountPopulation32(uint32_t aValue) { + return static_cast(__builtin_popcount(aValue)); +} + +inline uint_fast8_t CountPopulation64(uint64_t aValue) { + return static_cast(__builtin_popcountll(aValue)); +} + +inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) { + return static_cast(__builtin_clzll(aValue)); +} + +inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) { + return static_cast(__builtin_ctzll(aValue)); +} + +#else +# error "Implement these!" 
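+// The deleted declarations below spell out the interface that a port to a
+// new compiler must provide: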
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) = delete; +inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) = delete; +inline uint_fast8_t CountPopulation32(uint32_t aValue) = delete; +inline uint_fast8_t CountPopulation64(uint64_t aValue) = delete; +inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) = delete; +inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) = delete; +#endif + +} // namespace detail + +/** + * Compute the number of high-order zero bits in the NON-ZERO number |aValue|. + * That is, looking at the bitwise representation of the number, with the + * highest- valued bits at the start, return the number of zeroes before the + * first one is observed. + * + * CountLeadingZeroes32(0xF0FF1000) is 0; + * CountLeadingZeroes32(0x7F8F0001) is 1; + * CountLeadingZeroes32(0x3FFF0100) is 2; + * CountLeadingZeroes32(0x1FF50010) is 3; and so on. + */ +inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) { + MOZ_ASSERT(aValue != 0); + return detail::CountLeadingZeroes32(aValue); +} + +/** + * Compute the number of low-order zero bits in the NON-ZERO number |aValue|. + * That is, looking at the bitwise representation of the number, with the + * lowest- valued bits at the start, return the number of zeroes before the + * first one is observed. + * + * CountTrailingZeroes32(0x0100FFFF) is 0; + * CountTrailingZeroes32(0x7000FFFE) is 1; + * CountTrailingZeroes32(0x0080FFFC) is 2; + * CountTrailingZeroes32(0x0080FFF8) is 3; and so on. + */ +inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) { + MOZ_ASSERT(aValue != 0); + return detail::CountTrailingZeroes32(aValue); +} + +/** + * Compute the number of one bits in the number |aValue|, + */ +inline uint_fast8_t CountPopulation32(uint32_t aValue) { + return detail::CountPopulation32(aValue); +} + +/** Analogous to CountPopulation32, but for 64-bit numbers */ +inline uint_fast8_t CountPopulation64(uint64_t aValue) { + return detail::CountPopulation64(aValue); +} + +/** Analogous to CountLeadingZeroes32, but for 64-bit numbers. */ +inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) { + MOZ_ASSERT(aValue != 0); + return detail::CountLeadingZeroes64(aValue); +} + +/** Analogous to CountTrailingZeroes32, but for 64-bit numbers. */ +inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) { + MOZ_ASSERT(aValue != 0); + return detail::CountTrailingZeroes64(aValue); +} + +namespace detail { + +template +class CeilingLog2; + +template +class CeilingLog2 { + public: + static uint_fast8_t compute(const T aValue) { + // Check for <= 1 to avoid the == 0 undefined case. + return aValue <= 1 ? 0u : 32u - CountLeadingZeroes32(aValue - 1); + } +}; + +template +class CeilingLog2 { + public: + static uint_fast8_t compute(const T aValue) { + // Check for <= 1 to avoid the == 0 undefined case. + return aValue <= 1 ? 0u : 64u - CountLeadingZeroes64(aValue - 1); + } +}; + +} // namespace detail + +/** + * Compute the log of the least power of 2 greater than or equal to |aValue|. + * + * CeilingLog2(0..1) is 0; + * CeilingLog2(2) is 1; + * CeilingLog2(3..4) is 2; + * CeilingLog2(5..8) is 3; + * CeilingLog2(9..16) is 4; and so on. + */ +template +inline uint_fast8_t CeilingLog2(const T aValue) { + return detail::CeilingLog2::compute(aValue); +} + +/** A CeilingLog2 variant that accepts only size_t. 
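+ * For example, CeilingLog2Size(size_t(48)) is 6, since 64 = 2^6 is the
+ * least power of 2 greater than or equal to 48.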
*/ +inline uint_fast8_t CeilingLog2Size(size_t aValue) { + return CeilingLog2(aValue); +} + +namespace detail { + +template +class FloorLog2; + +template +class FloorLog2 { + public: + static uint_fast8_t compute(const T aValue) { + return 31u - CountLeadingZeroes32(aValue | 1); + } +}; + +template +class FloorLog2 { + public: + static uint_fast8_t compute(const T aValue) { + return 63u - CountLeadingZeroes64(aValue | 1); + } +}; + +} // namespace detail + +/** + * Compute the log of the greatest power of 2 less than or equal to |aValue|. + * + * FloorLog2(0..1) is 0; + * FloorLog2(2..3) is 1; + * FloorLog2(4..7) is 2; + * FloorLog2(8..15) is 3; and so on. + */ +template +inline constexpr uint_fast8_t FloorLog2(const T aValue) { + return detail::FloorLog2::compute(aValue); +} + +/** A FloorLog2 variant that accepts only size_t. */ +inline uint_fast8_t FloorLog2Size(size_t aValue) { return FloorLog2(aValue); } + +/* + * Compute the smallest power of 2 greater than or equal to |x|. |x| must not + * be so great that the computed value would overflow |size_t|. + */ +inline size_t RoundUpPow2(size_t aValue) { + MOZ_ASSERT(aValue <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)), + "can't round up -- will overflow!"); + return size_t(1) << CeilingLog2(aValue); +} + +/** + * Rotates the bits of the given value left by the amount of the shift width. + */ +template +MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline T RotateLeft(const T aValue, + uint_fast8_t aShift) { + static_assert(std::is_unsigned_v, "Rotates require unsigned values"); + + MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!"); + MOZ_ASSERT(aShift > 0, + "Rotation by value length is undefined behavior, but compilers " + "do not currently fold a test into the rotate instruction. " + "Please remove this restriction when compilers optimize the " + "zero case (http://blog.regehr.org/archives/1063)."); + + return (aValue << aShift) | (aValue >> (sizeof(T) * CHAR_BIT - aShift)); +} + +/** + * Rotates the bits of the given value right by the amount of the shift width. + */ +template +MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW inline T RotateRight(const T aValue, + uint_fast8_t aShift) { + static_assert(std::is_unsigned_v, "Rotates require unsigned values"); + + MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!"); + MOZ_ASSERT(aShift > 0, + "Rotation by value length is undefined behavior, but compilers " + "do not currently fold a test into the rotate instruction. " + "Please remove this restriction when compilers optimize the " + "zero case (http://blog.regehr.org/archives/1063)."); + + return (aValue >> aShift) | (aValue << (sizeof(T) * CHAR_BIT - aShift)); +} + +/** + * Returns true if |x| is a power of two. + * Zero is not an integer power of two. 
(-Inf is not an integer) + */ +template +constexpr bool IsPowerOfTwo(T x) { + static_assert(std::is_unsigned_v, "IsPowerOfTwo requires unsigned values"); + return x && (x & (x - 1)) == 0; +} + +template +inline T Clamp(const T aValue, const T aMin, const T aMax) { + static_assert(std::is_integral_v, + "Clamp accepts only integral types, so that it doesn't have" + " to distinguish differently-signed zeroes (which users may" + " or may not care to distinguish, likely at a perf cost) or" + " to decide how to clamp NaN or a range with a NaN" + " endpoint."); + MOZ_ASSERT(aMin <= aMax); + + if (aValue <= aMin) return aMin; + if (aValue >= aMax) return aMax; + return aValue; +} + +template +inline uint_fast8_t CountTrailingZeroes(T aValue) { + static_assert(sizeof(T) <= 8); + static_assert(std::is_integral_v); + // This casts to 32-bits + if constexpr (sizeof(T) <= 4) { + return CountTrailingZeroes32(aValue); + } + // This doesn't + if constexpr (sizeof(T) == 8) { + return CountTrailingZeroes64(aValue); + } +} + +// Greatest Common Divisor, from +// https://en.wikipedia.org/wiki/Binary_GCD_algorithm#Implementation +template +MOZ_ALWAYS_INLINE T GCD(T aA, T aB) { + static_assert(std::is_integral_v); + + MOZ_ASSERT(aA >= 0); + MOZ_ASSERT(aB >= 0); + + if (aA == 0) { + return aB; + } + if (aB == 0) { + return aA; + } + + T az = CountTrailingZeroes(aA); + T bz = CountTrailingZeroes(aB); + T shift = std::min(az, bz); + aA >>= az; + aB >>= bz; + + while (aA != 0) { + if constexpr (!std::is_signed_v) { + if (aA < aB) { + std::swap(aA, aB); + } + } + T diff = aA - aB; + if constexpr (std::is_signed_v) { + aB = std::min(aA, aB); + } + if constexpr (std::is_signed_v) { + aA = std::abs(diff); + } else { + aA = diff; + } + if (aA) { + aA >>= CountTrailingZeroes(aA); + } + } + + return aB << shift; +} + +} /* namespace mozilla */ + +#endif /* mozilla_MathAlgorithms_h */ diff --git a/mfbt/Maybe.h b/mfbt/Maybe.h new file mode 100644 index 0000000000..3298370f49 --- /dev/null +++ b/mfbt/Maybe.h @@ -0,0 +1,977 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* A class for optional values and in-place lazy construction. */ + +#ifndef mozilla_Maybe_h +#define mozilla_Maybe_h + +#include // for placement new +#include +#include +#include + +#include "mozilla/Alignment.h" +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/MaybeStorageBase.h" +#include "mozilla/MemoryChecking.h" +#include "mozilla/OperatorNewExtensions.h" +#include "mozilla/Poison.h" +#include "mozilla/ThreadSafety.h" + +class nsCycleCollectionTraversalCallback; + +template +inline void CycleCollectionNoteChild( + nsCycleCollectionTraversalCallback& aCallback, T* aChild, const char* aName, + uint32_t aFlags); + +namespace mozilla { + +struct Nothing {}; + +inline constexpr bool operator==(const Nothing&, const Nothing&) { + return true; +} + +template +class Maybe; + +namespace detail { + +// You would think that poisoning Maybe instances could just be a call +// to mozWritePoison. Unfortunately, using a simple call to +// mozWritePoison generates poor code on MSVC for small structures. 
The +// generated code contains (always not-taken) branches and does a bunch +// of setup for `rep stos{l,q}`, even though we know at compile time +// exactly how many words we're poisoning. Instead, we're going to +// force MSVC to generate the code we want via recursive templates. + +// Write the given poisonValue into p at offset*sizeof(uintptr_t). +template +inline void WritePoisonAtOffset(void* p, const uintptr_t poisonValue) { + memcpy(static_cast(p) + offset * sizeof(poisonValue), &poisonValue, + sizeof(poisonValue)); +} + +template +struct InlinePoisoner { + static void poison(void* p, const uintptr_t poisonValue) { + WritePoisonAtOffset(p, poisonValue); + InlinePoisoner::poison(p, poisonValue); + } +}; + +template +struct InlinePoisoner { + static void poison(void*, const uintptr_t) { + // All done! + } +}; + +// We can't generate inline code for large structures, though, because we'll +// blow out recursive template instantiation limits, and the code would be +// bloated to boot. So provide a fallback to the out-of-line poisoner. +template +struct OutOfLinePoisoner { + static MOZ_NEVER_INLINE void poison(void* p, const uintptr_t) { + mozWritePoison(p, ObjectSize); + } +}; + +template +inline void PoisonObject(T* p) { + const uintptr_t POISON = mozPoisonValue(); + std::conditional_t<(sizeof(T) <= 8 * sizeof(POISON)), + InlinePoisoner<0, sizeof(T) / sizeof(POISON)>, + OutOfLinePoisoner>::poison(p, POISON); +} + +template +struct MaybePoisoner { + static const size_t N = sizeof(T); + + static void poison(void* aPtr) { +#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED + if (N >= sizeof(uintptr_t)) { + PoisonObject(static_cast*>(aPtr)); + } +#endif + MOZ_MAKE_MEM_UNDEFINED(aPtr, N); + } +}; + +template , + bool Copyable = std::is_copy_constructible_v, + bool Movable = std::is_move_constructible_v> +class Maybe_CopyMove_Enabler; + +#define MOZ_MAYBE_COPY_OPS() \ + Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler& aOther) { \ + if (downcast(aOther).isSome()) { \ + downcast(*this).emplace(*downcast(aOther)); \ + } \ + } \ + \ + Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler& aOther) { \ + return downcast(*this).template operator=(downcast(aOther)); \ + } + +#define MOZ_MAYBE_MOVE_OPS() \ + constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) { \ + if (downcast(aOther).isSome()) { \ + downcast(*this).emplace(std::move(*downcast(aOther))); \ + downcast(aOther).reset(); \ + } \ + } \ + \ + constexpr Maybe_CopyMove_Enabler& operator=( \ + Maybe_CopyMove_Enabler&& aOther) { \ + downcast(*this).template operator=(std::move(downcast(aOther))); \ + \ + return *this; \ + } + +#define MOZ_MAYBE_DOWNCAST() \ + static constexpr Maybe& downcast(Maybe_CopyMove_Enabler& aObj) { \ + return static_cast&>(aObj); \ + } \ + static constexpr const Maybe& downcast( \ + const Maybe_CopyMove_Enabler& aObj) { \ + return static_cast&>(aObj); \ + } + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler&) = default; + Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = default; + constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) { + downcast(aOther).reset(); + } + constexpr Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&& aOther) { + downcast(aOther).reset(); + return *this; + } + + private: + MOZ_MAYBE_DOWNCAST() +}; + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + Maybe_CopyMove_Enabler(const 
Maybe_CopyMove_Enabler&) = delete; + Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = delete; + constexpr Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&& aOther) { + downcast(aOther).reset(); + } + constexpr Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&& aOther) { + downcast(aOther).reset(); + return *this; + } + + private: + MOZ_MAYBE_DOWNCAST() +}; + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + MOZ_MAYBE_COPY_OPS() + MOZ_MAYBE_MOVE_OPS() + + private: + MOZ_MAYBE_DOWNCAST() +}; + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + MOZ_MAYBE_MOVE_OPS() + + private: + MOZ_MAYBE_DOWNCAST() +}; + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + MOZ_MAYBE_COPY_OPS() + + private: + MOZ_MAYBE_DOWNCAST() +}; + +template +class Maybe_CopyMove_Enabler { + public: + Maybe_CopyMove_Enabler() = default; + + Maybe_CopyMove_Enabler(const Maybe_CopyMove_Enabler&) = delete; + Maybe_CopyMove_Enabler& operator=(const Maybe_CopyMove_Enabler&) = delete; + Maybe_CopyMove_Enabler(Maybe_CopyMove_Enabler&&) = delete; + Maybe_CopyMove_Enabler& operator=(Maybe_CopyMove_Enabler&&) = delete; +}; + +#undef MOZ_MAYBE_COPY_OPS +#undef MOZ_MAYBE_MOVE_OPS +#undef MOZ_MAYBE_DOWNCAST + +template > +struct MaybeStorage; + +template +struct MaybeStorage : MaybeStorageBase { + protected: + char mIsSome = false; // not bool -- guarantees minimal space consumption + + MaybeStorage() = default; + explicit MaybeStorage(const T& aVal) + : MaybeStorageBase{aVal}, mIsSome{true} {} + explicit MaybeStorage(T&& aVal) + : MaybeStorageBase{std::move(aVal)}, mIsSome{true} {} + + template + explicit MaybeStorage(std::in_place_t, Args&&... aArgs) + : MaybeStorageBase{std::in_place, std::forward(aArgs)...}, + mIsSome{true} {} + + public: + // Copy and move operations are no-ops, since copying is moving is implemented + // by Maybe_CopyMove_Enabler. + + MaybeStorage(const MaybeStorage&) : MaybeStorageBase{} {} + MaybeStorage& operator=(const MaybeStorage&) { return *this; } + MaybeStorage(MaybeStorage&&) : MaybeStorageBase{} {} + MaybeStorage& operator=(MaybeStorage&&) { return *this; } + + ~MaybeStorage() { + if (mIsSome) { + this->addr()->T::~T(); + } + } +}; + +template +struct MaybeStorage : MaybeStorageBase { + protected: + char mIsSome = false; // not bool -- guarantees minimal space consumption + + constexpr MaybeStorage() = default; + constexpr explicit MaybeStorage(const T& aVal) + : MaybeStorageBase{aVal}, mIsSome{true} {} + constexpr explicit MaybeStorage(T&& aVal) + : MaybeStorageBase{std::move(aVal)}, mIsSome{true} {} + + template + constexpr explicit MaybeStorage(std::in_place_t, Args&&... aArgs) + : MaybeStorageBase{std::in_place, std::forward(aArgs)...}, + mIsSome{true} {} +}; + +} // namespace detail + +template ::type>::type> +constexpr Maybe Some(T&& aValue); + +/* + * Maybe is a container class which contains either zero or one elements. It + * serves two roles. It can represent values which are *semantically* optional, + * augmenting a type with an explicit 'Nothing' value. In this role, it provides + * methods that make it easy to work with values that may be missing, along with + * equality and comparison operators so that Maybe values can be stored in + * containers. Maybe values can be constructed conveniently in expressions using + * type inference, as follows: + * + * void doSomething(Maybe aFoo) { + * if (aFoo) // Make sure that aFoo contains a value... 
+ * aFoo->takeAction(); // and then use |aFoo->| to access it. + * } // |*aFoo| also works! + * + * doSomething(Nothing()); // Passes a Maybe containing no value. + * doSomething(Some(Foo(100))); // Passes a Maybe containing |Foo(100)|. + * + * You'll note that it's important to check whether a Maybe contains a value + * before using it, using conversion to bool, |isSome()|, or |isNothing()|. You + * can avoid these checks, and sometimes write more readable code, using + * |valueOr()|, |ptrOr()|, and |refOr()|, which allow you to retrieve the value + * in the Maybe and provide a default for the 'Nothing' case. You can also use + * |apply()| to call a function only if the Maybe holds a value, and |map()| to + * transform the value in the Maybe, returning another Maybe with a possibly + * different type. + * + * Maybe's other role is to support lazily constructing objects without using + * dynamic storage. A Maybe directly contains storage for a value, but it's + * empty by default. |emplace()|, as mentioned above, can be used to construct a + * value in Maybe's storage. The value a Maybe contains can be destroyed by + * calling |reset()|; this will happen automatically if a Maybe is destroyed + * while holding a value. + * + * It's a common idiom in C++ to use a pointer as a 'Maybe' type, with a null + * value meaning 'Nothing' and any other value meaning 'Some'. You can convert + * from such a pointer to a Maybe value using 'ToMaybe()'. + * + * Maybe is inspired by similar types in the standard library of many other + * languages (e.g. Haskell's Maybe and Rust's Option). In the C++ world it's + * very similar to std::optional, which was proposed for C++14 and originated in + * Boost. The most important differences between Maybe and std::optional are: + * + * - std::optional may be compared with T. We deliberately forbid that. + * - std::optional has |valueOr()|, equivalent to Maybe's |valueOr()|, but + * lacks corresponding methods for |refOr()| and |ptrOr()|. + * - std::optional lacks |map()| and |apply()|, making it less suitable for + * functional-style code. + * - std::optional lacks many convenience functions that Maybe has. Most + * unfortunately, it lacks equivalents of the type-inferred constructor + * functions |Some()| and |Nothing()|. + */ +template +class MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS Maybe + : private detail::MaybeStorage, + public detail::Maybe_CopyMove_Enabler { + template + friend class detail::Maybe_CopyMove_Enabler; + + template + friend constexpr Maybe Some(U&& aValue); + + struct SomeGuard {}; + + template + constexpr Maybe(U&& aValue, SomeGuard) + : detail::MaybeStorage{std::forward(aValue)} {} + + using detail::MaybeStorage::mIsSome; + using detail::MaybeStorage::mStorage; + + void poisonData() { detail::MaybePoisoner::poison(&mStorage.val); } + + public: + using ValueType = T; + + MOZ_ALLOW_TEMPORARY constexpr Maybe() = default; + + MOZ_ALLOW_TEMPORARY MOZ_IMPLICIT constexpr Maybe(Nothing) : Maybe{} {} + + template + constexpr explicit Maybe(std::in_place_t, Args&&... aArgs) + : detail::MaybeStorage{std::in_place, std::forward(aArgs)...} {} + + /** + * Maybe can be copy-constructed from a Maybe if T is constructible from + * a const U&. + */ + template >> + MOZ_IMPLICIT Maybe(const Maybe& aOther) { + if (aOther.isSome()) { + emplace(*aOther); + } + } + + /** + * Maybe can be move-constructed from a Maybe if T is constructible from + * a U&&. 
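+   * The moved-from Maybe is reset to Nothing afterwards.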
+ */ + template >> + MOZ_IMPLICIT Maybe(Maybe&& aOther) { + if (aOther.isSome()) { + emplace(std::move(*aOther)); + aOther.reset(); + } + } + + template >> + Maybe& operator=(const Maybe& aOther) { + if (aOther.isSome()) { + if (mIsSome) { + ref() = aOther.ref(); + } else { + emplace(*aOther); + } + } else { + reset(); + } + return *this; + } + + template >> + Maybe& operator=(Maybe&& aOther) { + if (aOther.isSome()) { + if (mIsSome) { + ref() = std::move(aOther.ref()); + } else { + emplace(std::move(*aOther)); + } + aOther.reset(); + } else { + reset(); + } + + return *this; + } + + constexpr Maybe& operator=(Nothing) { + reset(); + return *this; + } + + /* Methods that check whether this Maybe contains a value */ + constexpr explicit operator bool() const { return isSome(); } + constexpr bool isSome() const { return mIsSome; } + constexpr bool isNothing() const { return !mIsSome; } + + /* Returns the contents of this Maybe by value. Unsafe unless |isSome()|. + */ + constexpr T value() const&; + constexpr T value() &&; + constexpr T value() const&&; + + /** + * Move the contents of this Maybe out of internal storage and return it + * without calling the destructor. The internal storage is also reset to + * avoid multiple calls. Unsafe unless |isSome()|. + */ + T extract() { + MOZ_RELEASE_ASSERT(isSome()); + T v = std::move(mStorage.val); + reset(); + return v; + } + + /** + * Returns the value (possibly |Nothing()|) by moving it out of this Maybe + * and leaving |Nothing()| in its place. + */ + Maybe take() { return std::exchange(*this, Nothing()); } + + /* + * Returns the contents of this Maybe by value. If |isNothing()|, returns + * the default value provided. + * + * Note: If the value passed to aDefault is not the result of a trivial + * expression, but expensive to evaluate, e.g. |valueOr(ExpensiveFunction())|, + * use |valueOrFrom| instead, e.g. + * |valueOrFrom([arg] { return ExpensiveFunction(arg); })|. This ensures + * that the expensive expression is only evaluated when its result will + * actually be used. + */ + template + constexpr T valueOr(V&& aDefault) const { + if (isSome()) { + return ref(); + } + return std::forward(aDefault); + } + + /* + * Returns the contents of this Maybe by value. If |isNothing()|, returns + * the value returned from the function or functor provided. + */ + template + constexpr T valueOrFrom(F&& aFunc) const { + if (isSome()) { + return ref(); + } + return aFunc(); + } + + /* Returns the contents of this Maybe by pointer. Unsafe unless |isSome()|. + */ + T* ptr(); + constexpr const T* ptr() const; + + /* + * Returns the contents of this Maybe by pointer. If |isNothing()|, + * returns the default value provided. + */ + T* ptrOr(T* aDefault) { + if (isSome()) { + return ptr(); + } + return aDefault; + } + + constexpr const T* ptrOr(const T* aDefault) const { + if (isSome()) { + return ptr(); + } + return aDefault; + } + + /* + * Returns the contents of this Maybe by pointer. If |isNothing()|, + * returns the value returned from the function or functor provided. + */ + template + T* ptrOrFrom(F&& aFunc) { + if (isSome()) { + return ptr(); + } + return aFunc(); + } + + template + const T* ptrOrFrom(F&& aFunc) const { + if (isSome()) { + return ptr(); + } + return aFunc(); + } + + constexpr T* operator->(); + constexpr const T* operator->() const; + + /* Returns the contents of this Maybe by ref. Unsafe unless |isSome()|. 
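+   * Like |value()| and |ptr()|, these assert via MOZ_RELEASE_ASSERT when
+   * the Maybe is empty; see the out-of-line definitions below.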
*/ + constexpr T& ref() &; + constexpr const T& ref() const&; + constexpr T&& ref() &&; + constexpr const T&& ref() const&&; + + /* + * Returns the contents of this Maybe by ref. If |isNothing()|, returns + * the default value provided. + */ + constexpr T& refOr(T& aDefault) { + if (isSome()) { + return ref(); + } + return aDefault; + } + + constexpr const T& refOr(const T& aDefault) const { + if (isSome()) { + return ref(); + } + return aDefault; + } + + /* + * Returns the contents of this Maybe by ref. If |isNothing()|, returns the + * value returned from the function or functor provided. + */ + template + constexpr T& refOrFrom(F&& aFunc) { + if (isSome()) { + return ref(); + } + return aFunc(); + } + + template + constexpr const T& refOrFrom(F&& aFunc) const { + if (isSome()) { + return ref(); + } + return aFunc(); + } + + constexpr T& operator*() &; + constexpr const T& operator*() const&; + constexpr T&& operator*() &&; + constexpr const T&& operator*() const&&; + + /* If |isSome()|, runs the provided function or functor on the contents of + * this Maybe. */ + template + constexpr Maybe& apply(Func&& aFunc) { + if (isSome()) { + std::forward(aFunc)(ref()); + } + return *this; + } + + template + constexpr const Maybe& apply(Func&& aFunc) const { + if (isSome()) { + std::forward(aFunc)(ref()); + } + return *this; + } + + /* + * If |isSome()|, runs the provided function and returns the result wrapped + * in a Maybe. If |isNothing()|, returns an empty Maybe value with the same + * value type as what the provided function would have returned. + */ + template + constexpr auto map(Func&& aFunc) { + if (isSome()) { + return Some(std::forward(aFunc)(ref())); + } + return Maybe(aFunc)(ref()))>{}; + } + + template + constexpr auto map(Func&& aFunc) const { + if (isSome()) { + return Some(std::forward(aFunc)(ref())); + } + return Maybe(aFunc)(ref()))>{}; + } + + /* If |isSome()|, empties this Maybe and destroys its contents. */ + constexpr void reset() { + if (isSome()) { + if constexpr (!std::is_trivially_destructible_v) { + /* + * Static analyzer gets confused if we have Maybe, + * so we suppress thread-safety warnings here + */ + MOZ_PUSH_IGNORE_THREAD_SAFETY + ref().T::~T(); + MOZ_POP_THREAD_SAFETY + poisonData(); + } + mIsSome = false; + } + } + + /* + * Constructs a T value in-place in this empty Maybe's storage. The + * arguments to |emplace()| are the parameters to T's constructor. + */ + template + constexpr void emplace(Args&&... aArgs); + + template + constexpr std::enable_if_t && + std::is_copy_constructible_v && + !std::is_move_constructible_v> + emplace(U&& aArgs) { + emplace(aArgs); + } + + friend std::ostream& operator<<(std::ostream& aStream, + const Maybe& aMaybe) { + if (aMaybe) { + aStream << aMaybe.ref(); + } else { + aStream << ""; + } + return aStream; + } +}; + +template +class Maybe { + public: + constexpr Maybe() = default; + constexpr MOZ_IMPLICIT Maybe(Nothing) {} + + void emplace(T& aRef) { mValue = &aRef; } + + /* Methods that check whether this Maybe contains a value */ + constexpr explicit operator bool() const { return isSome(); } + constexpr bool isSome() const { return mValue; } + constexpr bool isNothing() const { return !mValue; } + + T& ref() const { + MOZ_RELEASE_ASSERT(isSome()); + return *mValue; + } + + T* operator->() const { return &ref(); } + T& operator*() const { return ref(); } + + // Deliberately not defining value and ptr accessors, as these may be + // confusing on a reference-typed Maybe. + + // XXX Should we define refOr? 
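+
+  // Illustrative usage (a sketch; SomeRef() is defined later in this file):
+  //
+  //   int i = 5;
+  //   Maybe<int&> mi = SomeRef(i);
+  //   if (mi) {
+  //     *mi = 6;  // writes through to |i|
+  //   }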
+ + void reset() { mValue = nullptr; } + + template + Maybe& apply(Func&& aFunc) { + if (isSome()) { + std::forward(aFunc)(ref()); + } + return *this; + } + + template + const Maybe& apply(Func&& aFunc) const { + if (isSome()) { + std::forward(aFunc)(ref()); + } + return *this; + } + + template + auto map(Func&& aFunc) { + Maybe(aFunc)(ref()))> val; + if (isSome()) { + val.emplace(std::forward(aFunc)(ref())); + } + return val; + } + + template + auto map(Func&& aFunc) const { + Maybe(aFunc)(ref()))> val; + if (isSome()) { + val.emplace(std::forward(aFunc)(ref())); + } + return val; + } + + bool refEquals(const Maybe& aOther) const { + return mValue == aOther.mValue; + } + + bool refEquals(const T& aOther) const { return mValue == &aOther; } + + private: + T* mValue = nullptr; +}; + +template +constexpr T Maybe::value() const& { + MOZ_RELEASE_ASSERT(isSome()); + return ref(); +} + +template +constexpr T Maybe::value() && { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(ref()); +} + +template +constexpr T Maybe::value() const&& { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(ref()); +} + +template +T* Maybe::ptr() { + MOZ_RELEASE_ASSERT(isSome()); + return &ref(); +} + +template +constexpr const T* Maybe::ptr() const { + MOZ_RELEASE_ASSERT(isSome()); + return &ref(); +} + +template +constexpr T* Maybe::operator->() { + MOZ_RELEASE_ASSERT(isSome()); + return ptr(); +} + +template +constexpr const T* Maybe::operator->() const { + MOZ_RELEASE_ASSERT(isSome()); + return ptr(); +} + +template +constexpr T& Maybe::ref() & { + MOZ_RELEASE_ASSERT(isSome()); + return mStorage.val; +} + +template +constexpr const T& Maybe::ref() const& { + MOZ_RELEASE_ASSERT(isSome()); + return mStorage.val; +} + +template +constexpr T&& Maybe::ref() && { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(mStorage.val); +} + +template +constexpr const T&& Maybe::ref() const&& { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(mStorage.val); +} + +template +constexpr T& Maybe::operator*() & { + MOZ_RELEASE_ASSERT(isSome()); + return ref(); +} + +template +constexpr const T& Maybe::operator*() const& { + MOZ_RELEASE_ASSERT(isSome()); + return ref(); +} + +template +constexpr T&& Maybe::operator*() && { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(ref()); +} + +template +constexpr const T&& Maybe::operator*() const&& { + MOZ_RELEASE_ASSERT(isSome()); + return std::move(ref()); +} + +template +template +constexpr void Maybe::emplace(Args&&... aArgs) { + MOZ_RELEASE_ASSERT(!isSome()); + ::new (KnownNotNull, &mStorage.val) T(std::forward(aArgs)...); + mIsSome = true; +} + +/* + * Some() creates a Maybe value containing the provided T value. If T has a + * move constructor, it's used to make this as efficient as possible. + * + * Some() selects the type of Maybe it returns by removing any const, volatile, + * or reference qualifiers from the type of the value you pass to it. This gives + * it more intuitive behavior when used in expressions, but it also means that + * if you need to construct a Maybe value that holds a const, volatile, or + * reference value, you need to use emplace() instead. + */ +template +constexpr Maybe Some(T&& aValue) { + return {std::forward(aValue), typename Maybe::SomeGuard{}}; +} + +template +constexpr Maybe SomeRef(T& aValue) { + Maybe value; + value.emplace(aValue); + return value; +} + +template +constexpr Maybe ToMaybeRef(T* const aPtr) { + return aPtr ? 
SomeRef(*aPtr) : Nothing{}; +} + +template +Maybe>> ToMaybe(T* aPtr) { + if (aPtr) { + return Some(*aPtr); + } + return Nothing(); +} + +/* + * Two Maybe values are equal if + * - both are Nothing, or + * - both are Some, and the values they contain are equal. + */ +template +constexpr bool operator==(const Maybe& aLHS, const Maybe& aRHS) { + static_assert(!std::is_reference_v, + "operator== is not defined for Maybe, compare values or " + "addresses explicitly instead"); + if (aLHS.isNothing() != aRHS.isNothing()) { + return false; + } + return aLHS.isNothing() || *aLHS == *aRHS; +} + +template +constexpr bool operator!=(const Maybe& aLHS, const Maybe& aRHS) { + return !(aLHS == aRHS); +} + +/* + * We support comparison to Nothing to allow reasonable expressions like: + * if (maybeValue == Nothing()) { ... } + */ +template +constexpr bool operator==(const Maybe& aLHS, const Nothing& aRHS) { + return aLHS.isNothing(); +} + +template +constexpr bool operator!=(const Maybe& aLHS, const Nothing& aRHS) { + return !(aLHS == aRHS); +} + +template +constexpr bool operator==(const Nothing& aLHS, const Maybe& aRHS) { + return aRHS.isNothing(); +} + +template +constexpr bool operator!=(const Nothing& aLHS, const Maybe& aRHS) { + return !(aLHS == aRHS); +} + +/* + * Maybe values are ordered in the same way T values are ordered, except that + * Nothing comes before anything else. + */ +template +constexpr bool operator<(const Maybe& aLHS, const Maybe& aRHS) { + if (aLHS.isNothing()) { + return aRHS.isSome(); + } + if (aRHS.isNothing()) { + return false; + } + return *aLHS < *aRHS; +} + +template +constexpr bool operator>(const Maybe& aLHS, const Maybe& aRHS) { + return !(aLHS < aRHS || aLHS == aRHS); +} + +template +constexpr bool operator<=(const Maybe& aLHS, const Maybe& aRHS) { + return aLHS < aRHS || aLHS == aRHS; +} + +template +constexpr bool operator>=(const Maybe& aLHS, const Maybe& aRHS) { + return !(aLHS < aRHS); +} + +template +inline void ImplCycleCollectionTraverse( + nsCycleCollectionTraversalCallback& aCallback, mozilla::Maybe& aField, + const char* aName, uint32_t aFlags = 0) { + if (aField) { + ImplCycleCollectionTraverse(aCallback, aField.ref(), aName, aFlags); + } +} + +template +inline void ImplCycleCollectionUnlink(mozilla::Maybe& aField) { + if (aField) { + ImplCycleCollectionUnlink(aField.ref()); + } +} + +} // namespace mozilla + +#endif /* mozilla_Maybe_h */ diff --git a/mfbt/MaybeOneOf.h b/mfbt/MaybeOneOf.h new file mode 100644 index 0000000000..769f18d5dd --- /dev/null +++ b/mfbt/MaybeOneOf.h @@ -0,0 +1,172 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * A class storing one of two optional value types that supports in-place lazy + * construction. + */ + +#ifndef mozilla_MaybeOneOf_h +#define mozilla_MaybeOneOf_h + +#include // for size_t + +#include // for placement new +#include + +#include "mozilla/Assertions.h" +#include "mozilla/OperatorNewExtensions.h" +#include "mozilla/TemplateLib.h" + +namespace mozilla { + +/* + * MaybeOneOf is like Maybe, but it supports constructing either T1 + * or T2. When a MaybeOneOf is constructed, it is |empty()|, i.e., + * no value has been constructed and no destructor will be called when the + * MaybeOneOf is destroyed. 
Upon calling |construct<T1>()| or
+ * |construct<T2>()|, a T1 or T2 object will be constructed with the given
+ * arguments and that object will be destroyed when the owning MaybeOneOf is
+ * destroyed.
+ *
+ * Because MaybeOneOf must be aligned suitably to hold any value stored within
+ * it, and because |alignas| requirements don't affect platform ABI with
+ * respect to how parameters are laid out in memory, MaybeOneOf can't be used
+ * as the type of a function parameter. Pass MaybeOneOf to functions by
+ * pointer or reference instead.
+ */
+template <class T1, class T2>
+class MOZ_NON_PARAM MaybeOneOf {
+  static constexpr size_t StorageAlignment =
+      tl::Max<alignof(T1), alignof(T2)>::value;
+  static constexpr size_t StorageSize = tl::Max<sizeof(T1), sizeof(T2)>::value;
+
+  alignas(StorageAlignment) unsigned char storage[StorageSize];
+
+  // GCC fails due to -Werror=strict-aliasing if |storage| is directly cast to
+  // T*. Indirecting through these functions addresses the problem.
+  void* data() { return storage; }
+  const void* data() const { return storage; }
+
+  enum State { None, SomeT1, SomeT2 } state;
+  template <class T, class Ignored = void>
+  struct Type2State {};
+
+  template <class T>
+  T& as() {
+    MOZ_ASSERT(state == Type2State<T>::result);
+    return *static_cast<T*>(data());
+  }
+
+  template <class T>
+  const T& as() const {
+    MOZ_ASSERT(state == Type2State<T>::result);
+    return *static_cast<const T*>(data());
+  }
+
+ public:
+  MaybeOneOf() : state(None) {}
+  ~MaybeOneOf() { destroyIfConstructed(); }
+
+  MaybeOneOf(MaybeOneOf&& rhs) : state(None) {
+    if (!rhs.empty()) {
+      if (rhs.constructed<T1>()) {
+        construct<T1>(std::move(rhs.as<T1>()));
+        rhs.as<T1>().~T1();
+      } else {
+        construct<T2>(std::move(rhs.as<T2>()));
+        rhs.as<T2>().~T2();
+      }
+      rhs.state = None;
+    }
+  }
+
+  MaybeOneOf& operator=(MaybeOneOf&& rhs) {
+    MOZ_ASSERT(this != &rhs, "Self-move is prohibited");
+    this->~MaybeOneOf();
+    new (this) MaybeOneOf(std::move(rhs));
+    return *this;
+  }
+
+  bool empty() const { return state == None; }
+
+  template <class T>
+  bool constructed() const {
+    return state == Type2State<T>::result;
+  }
+
+  template <class T, class... Args>
+  void construct(Args&&...
aArgs) { + MOZ_ASSERT(state == None); + state = Type2State::result; + ::new (KnownNotNull, data()) T(std::forward(aArgs)...); + } + + template + T& ref() { + return as(); + } + + template + const T& ref() const { + return as(); + } + + void destroy() { + MOZ_ASSERT(state == SomeT1 || state == SomeT2); + if (state == SomeT1) { + as().~T1(); + } else if (state == SomeT2) { + as().~T2(); + } + state = None; + } + + void destroyIfConstructed() { + if (!empty()) { + destroy(); + } + } + + template + constexpr auto mapNonEmpty(Func&& aFunc) const { + MOZ_ASSERT(!empty()); + if (state == SomeT1) { + return std::forward(aFunc)(as()); + } + return std::forward(aFunc)(as()); + } + template + constexpr auto mapNonEmpty(Func&& aFunc) { + MOZ_ASSERT(!empty()); + if (state == SomeT1) { + return std::forward(aFunc)(as()); + } + return std::forward(aFunc)(as()); + } + + private: + MaybeOneOf(const MaybeOneOf& aOther) = delete; + const MaybeOneOf& operator=(const MaybeOneOf& aOther) = delete; +}; + +template +template +struct MaybeOneOf::Type2State { + typedef MaybeOneOf Enclosing; + static const typename Enclosing::State result = Enclosing::SomeT1; +}; + +template +template +struct MaybeOneOf::Type2State { + typedef MaybeOneOf Enclosing; + static const typename Enclosing::State result = Enclosing::SomeT2; +}; + +} // namespace mozilla + +#endif /* mozilla_MaybeOneOf_h */ diff --git a/mfbt/MaybeStorageBase.h b/mfbt/MaybeStorageBase.h new file mode 100644 index 0000000000..2732d78d05 --- /dev/null +++ b/mfbt/MaybeStorageBase.h @@ -0,0 +1,92 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Internal storage class used e.g. by Maybe and Result. This file doesn't + * contain any public declarations. */ + +#ifndef mfbt_MaybeStorageBase_h +#define mfbt_MaybeStorageBase_h + +#include +#include + +namespace mozilla::detail { + +template +constexpr bool IsTriviallyDestructibleAndCopyable = + std::is_trivially_destructible_v && + (std::is_trivially_copy_constructible_v || + !std::is_copy_constructible_v); + +template > +struct MaybeStorageBase; + +template +struct MaybeStorageBase { + protected: + using NonConstT = std::remove_const_t; + + union Union { + Union() {} + explicit Union(const T& aVal) : val{aVal} {} + template >> + explicit Union(U&& aVal) : val{std::forward(aVal)} {} + template + explicit Union(std::in_place_t, Args&&... aArgs) + : val{std::forward(aArgs)...} {} + + ~Union() {} + + NonConstT val; + } mStorage; + + public: + MaybeStorageBase() = default; + explicit MaybeStorageBase(const T& aVal) : mStorage{aVal} {} + explicit MaybeStorageBase(T&& aVal) : mStorage{std::move(aVal)} {} + template + explicit MaybeStorageBase(std::in_place_t, Args&&... aArgs) + : mStorage{std::in_place, std::forward(aArgs)...} {} + + const T* addr() const { return &mStorage.val; } + T* addr() { return &mStorage.val; } +}; + +template +struct MaybeStorageBase { + protected: + using NonConstT = std::remove_const_t; + + union Union { + constexpr Union() : dummy() {} + constexpr explicit Union(const T& aVal) : val{aVal} {} + constexpr explicit Union(T&& aVal) : val{std::move(aVal)} {} + template + constexpr explicit Union(std::in_place_t, Args&&... 
aArgs) + : val{std::forward(aArgs)...} {} + + NonConstT val; + char dummy; + } mStorage; + + public: + constexpr MaybeStorageBase() = default; + constexpr explicit MaybeStorageBase(const T& aVal) : mStorage{aVal} {} + constexpr explicit MaybeStorageBase(T&& aVal) : mStorage{std::move(aVal)} {} + + template + constexpr explicit MaybeStorageBase(std::in_place_t, Args&&... aArgs) + : mStorage{std::in_place, std::forward(aArgs)...} {} + + constexpr const T* addr() const { return &mStorage.val; } + constexpr T* addr() { return &mStorage.val; } +}; + +} // namespace mozilla::detail + +#endif diff --git a/mfbt/MemoryChecking.h b/mfbt/MemoryChecking.h new file mode 100644 index 0000000000..eed75cd058 --- /dev/null +++ b/mfbt/MemoryChecking.h @@ -0,0 +1,127 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Provides a common interface to the ASan (AddressSanitizer) and Valgrind + * functions used to mark memory in certain ways. In detail, the following + * three macros are provided: + * + * MOZ_MAKE_MEM_NOACCESS - Mark memory as unsafe to access (e.g. freed) + * MOZ_MAKE_MEM_UNDEFINED - Mark memory as accessible, with content undefined + * MOZ_MAKE_MEM_DEFINED - Mark memory as accessible, with content defined + * + * With Valgrind in use, these directly map to the three respective Valgrind + * macros. With ASan in use, the NOACCESS macro maps to poisoning the memory, + * while the UNDEFINED/DEFINED macros unpoison memory. + * + * With no memory checker available, all macros expand to the empty statement. + */ + +#ifndef mozilla_MemoryChecking_h +#define mozilla_MemoryChecking_h + +#if defined(MOZ_VALGRIND) +# include "valgrind/memcheck.h" +#endif + +#if defined(MOZ_ASAN) || defined(MOZ_VALGRIND) +# define MOZ_HAVE_MEM_CHECKS 1 +#endif + +#if defined(MOZ_ASAN) +# include + +# include "mozilla/Attributes.h" +# include "mozilla/Types.h" + +# ifdef _MSC_VER +// In clang-cl based ASAN, we link against the memory poisoning functions +// statically. +# define MOZ_ASAN_VISIBILITY +# else +# define MOZ_ASAN_VISIBILITY MOZ_EXPORT +# endif + +extern "C" { +/* These definitions are usually provided through the + * sanitizer/asan_interface.h header installed by ASan. + */ +void MOZ_ASAN_VISIBILITY __asan_poison_memory_region(void const volatile* addr, + size_t size); +void MOZ_ASAN_VISIBILITY +__asan_unpoison_memory_region(void const volatile* addr, size_t size); + +# define MOZ_MAKE_MEM_NOACCESS(addr, size) \ + __asan_poison_memory_region((addr), (size)) + +# define MOZ_MAKE_MEM_UNDEFINED(addr, size) \ + __asan_unpoison_memory_region((addr), (size)) + +# define MOZ_MAKE_MEM_DEFINED(addr, size) \ + __asan_unpoison_memory_region((addr), (size)) + +/* + * These definitions are usually provided through the + * sanitizer/lsan_interface.h header installed by LSan. + */ +void MOZ_EXPORT __lsan_ignore_object(const void* p); +} +#elif defined(MOZ_MSAN) +# include + +# include "mozilla/Types.h" + +extern "C" { +/* These definitions are usually provided through the + * sanitizer/msan_interface.h header installed by MSan. 
+ */
+void MOZ_EXPORT __msan_poison(void const volatile* addr, size_t size);
+void MOZ_EXPORT __msan_unpoison(void const volatile* addr, size_t size);
+
+#  define MOZ_MAKE_MEM_NOACCESS(addr, size) __msan_poison((addr), (size))
+
+#  define MOZ_MAKE_MEM_UNDEFINED(addr, size) __msan_poison((addr), (size))
+
+#  define MOZ_MAKE_MEM_DEFINED(addr, size) __msan_unpoison((addr), (size))
+}
+#elif defined(MOZ_VALGRIND)
+#  define MOZ_MAKE_MEM_NOACCESS(addr, size) \
+    VALGRIND_MAKE_MEM_NOACCESS((addr), (size))
+
+#  define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
+    VALGRIND_MAKE_MEM_UNDEFINED((addr), (size))
+
+#  define MOZ_MAKE_MEM_DEFINED(addr, size) \
+    VALGRIND_MAKE_MEM_DEFINED((addr), (size))
+#else
+
+#  define MOZ_MAKE_MEM_NOACCESS(addr, size) \
+    do {                                    \
+    } while (0)
+#  define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
+    do {                                     \
+    } while (0)
+#  define MOZ_MAKE_MEM_DEFINED(addr, size) \
+    do {                                   \
+    } while (0)
+
+#endif
+
+/*
+ * MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) is a macro to tell LeakSanitizer that
+ * X points to a value that will intentionally never be deallocated during
+ * the execution of the process.
+ *
+ * Additional uses of this macro should be reviewed by people
+ * conversant in leak-checking and/or MFBT peers.
+ */
+#if defined(MOZ_ASAN)
+#  define MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) __lsan_ignore_object(X)
+#else
+#  define MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X) /* nothing */
+#endif  // defined(MOZ_ASAN)
+
+#endif /* mozilla_MemoryChecking_h */
diff --git a/mfbt/MemoryReporting.h b/mfbt/MemoryReporting.h
new file mode 100644
index 0000000000..d2340ecf09
--- /dev/null
+++ b/mfbt/MemoryReporting.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Memory reporting infrastructure. */
+
+#ifndef mozilla_MemoryReporting_h
+#define mozilla_MemoryReporting_h
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+
+namespace mozilla {
+
+/*
+ * This is for functions that are like malloc_usable_size. Such functions are
+ * used for measuring the size of data structures.
+ */
+typedef size_t (*MallocSizeOf)(const void* p);
+
+} /* namespace mozilla */
+
+#endif /* __cplusplus */
+
+typedef size_t (*MozMallocSizeOf)(const void* p);
+
+#endif /* mozilla_MemoryReporting_h */
diff --git a/mfbt/MoveOnlyFunction.h b/mfbt/MoveOnlyFunction.h
new file mode 100644
index 0000000000..d6ade3fd49
--- /dev/null
+++ b/mfbt/MoveOnlyFunction.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MoveOnlyFunction_h
+#define mozilla_MoveOnlyFunction_h
+
+// Use stl-like empty propagation to avoid issues with wrapping closures which
+// implicitly coerce to bool.
+#define FU2_WITH_LIMITED_EMPTY_PROPAGATION
+
+#include "function2/function2.hpp"
+
+namespace mozilla {
+
+/// A type like `std::function`, but with support for move-only callable
+/// objects.
+///
+/// A similar type is proposed to be added to the standard library as
+/// `std::move_only_function` in C++23.
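+///
+/// A minimal usage sketch (illustrative only; the lambda and its capture are
+/// hypothetical):
+///
+///   MoveOnlyFunction<int()> f = [p = std::make_unique<int>(42)] {
+///     return *p;
+///   };
+///   int v = f();  // move-only closures are fine; copying f would not compile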
+///
+/// Unlike `std::function`, the function signature may be given const or
+/// reference qualifiers which will be applied to `operator()`. This can be
+/// used to declare const qualified or move-only functions.
+///
+/// The implementation this definition depends on (function2) also has support
+/// for callables with overload sets, however support for this was not exposed
+/// to align better with the proposed `std::move_only_function`, which does not
+/// support overload sets.
+///
+/// A custom typedef over `fu2::function_base` is used to control the size and
+/// alignment of the inline storage to store 2 aligned pointers, and ensure the
+/// type is compatible with `nsTArray`.
+template <typename Signature>
+using MoveOnlyFunction = fu2::function_base<
+    /* IsOwning */ true,
+    /* IsCopyable */ false,
+    /* Capacity */ fu2::capacity_fixed<2 * sizeof(void*), alignof(void*)>,
+    /* IsThrowing */ false,
+    /* HasStrongExceptionGuarantee */ false,
+    /* Signature */ Signature>;
+
+}  // namespace mozilla
+
+#endif  // mozilla_MoveOnlyFunction_h
diff --git a/mfbt/MruCache.h b/mfbt/MruCache.h
new file mode 100644
index 0000000000..716224a3e0
--- /dev/null
+++ b/mfbt/MruCache.h
@@ -0,0 +1,165 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_MruCache_h
+#define mozilla_MruCache_h
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include "mozilla/Attributes.h"
+#include "mozilla/HashFunctions.h"
+
+namespace mozilla {
+
+namespace detail {
+
+// Helper struct for checking if a value is empty.
+//
+// `IsNotEmpty` will return true if `Value` is not a pointer type or if the
+// pointer value is not null.
+template <typename Value, bool IsPtr = std::is_pointer<Value>::value>
+struct EmptyChecker {
+  static bool IsNotEmpty(const Value&) { return true; }
+};
+// Template specialization for the `IsPtr == true` case.
+template <typename Value>
+struct EmptyChecker<Value, true> {
+  static bool IsNotEmpty(const Value& aVal) { return aVal != nullptr; }
+};
+
+}  // namespace detail
+
+// Provides a most recently used cache that can be used as a layer on top of
+// a larger container where lookups can be expensive. The default size is 31,
+// which as a prime number provides a better distribution of cached entries.
+//
+// Users are expected to provide a `Cache` class that defines two required
+// methods:
+//   - A method for providing the hash of a key:
+//
+//     static HashNumber Hash(const KeyType& aKey)
+//
+//   - A method for matching a key to a value, for pointer types the value
+//     is guaranteed not to be null.
+//
+//     static bool Match(const KeyType& aKey, const ValueType& aVal)
+//
+// For example:
+//   class MruExample : public MruCache<void*, PtrInfo*, MruExample>
+//   {
+//     static HashNumber Hash(const KeyType& aKey)
+//     {
+//       return HashGeneric(aKey);
+//     }
+//     static bool Match(const KeyType& aKey, const ValueType& aVal)
+//     {
+//       return aVal->mPtr == aKey;
+//     }
+//   };
+template <class Key, class Value, class Cache, size_t Size = 31>
+class MruCache {
+  // Best distribution is achieved with a prime number. Ideally the closest
+  // to a power of two will be the most efficient use of memory. This
+  // assertion is pretty weak, but should catch the common inclination to
+  // use a power-of-two.
+  static_assert(Size % 2 != 0, "Use a prime number");
+
+  // This is a stronger assertion but significantly limits the values to just
+  // those close to a power-of-two value.
+ // static_assert(Size == 7 || Size == 13 || Size == 31 || Size == 61 || + // Size == 127 || Size == 251 || Size == 509 || Size == 1021, + // "Use a prime number less than 1024"); + + public: + using KeyType = Key; + using ValueType = Value; + + MruCache() = default; + MruCache(const MruCache&) = delete; + MruCache(const MruCache&&) = delete; + + // Inserts the given value into the cache. Potentially overwrites an + // existing entry. + template + void Put(const KeyType& aKey, U&& aVal) { + *RawEntry(aKey) = std::forward(aVal); + } + + // Removes the given entry if it is in the cache. + void Remove(const KeyType& aKey) { Lookup(aKey).Remove(); } + + // Clears all cached entries and resets them to a default value. + void Clear() { + for (ValueType& val : mCache) { + val = ValueType{}; + } + } + + // Helper that holds an entry that matched a lookup key. Usage: + // + // auto p = mCache.Lookup(aKey); + // if (p) { + // return p.Data(); + // } + // + // auto foo = new Foo(); + // p.Set(foo); + // return foo; + class Entry { + public: + Entry(ValueType* aEntry, bool aMatch) : mEntry(aEntry), mMatch(aMatch) { + MOZ_ASSERT(mEntry); + } + + explicit operator bool() const { return mMatch; } + + ValueType& Data() const { + MOZ_ASSERT(mMatch); + return *mEntry; + } + + template + void Set(U&& aValue) { + mMatch = true; + Data() = std::forward(aValue); + } + + void Remove() { + if (mMatch) { + Data() = ValueType{}; + mMatch = false; + } + } + + private: + ValueType* mEntry; // Location of the entry in the cache. + bool mMatch; // Whether the value matched. + }; + + // Retrieves an entry from the cache. Can be used to test if an entry is + // present, update the entry to a new value, or remove the entry if one was + // matched. + Entry Lookup(const KeyType& aKey) { + using EmptyChecker = detail::EmptyChecker; + + auto entry = RawEntry(aKey); + bool match = EmptyChecker::IsNotEmpty(*entry) && Cache::Match(aKey, *entry); + return Entry(entry, match); + } + + private: + MOZ_ALWAYS_INLINE ValueType* RawEntry(const KeyType& aKey) { + return &mCache[Cache::Hash(aKey) % Size]; + } + + ValueType mCache[Size] = {}; +}; + +} // namespace mozilla + +#endif // mozilla_mrucache_h diff --git a/mfbt/NonDereferenceable.h b/mfbt/NonDereferenceable.h new file mode 100644 index 0000000000..30c4cac853 --- /dev/null +++ b/mfbt/NonDereferenceable.h @@ -0,0 +1,125 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_NonDereferenceable_h +#define mozilla_NonDereferenceable_h + +/* A pointer wrapper indicating that the pointer should not be dereferenced. */ + +#include "mozilla/Attributes.h" + +#include + +// Macro indicating that a function manipulates a pointer that will not be +// dereferenced, and therefore there is no need to check the object. +#if defined(__clang__) +# define NO_POINTEE_CHECKS __attribute__((no_sanitize("vptr"))) +#else +# define NO_POINTEE_CHECKS /* nothing */ +#endif + +namespace mozilla { + +// NonDereferenceable wraps a raw pointer value of type T*, but prevents +// dereferencing. 
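+//
+// For instance (an illustrative sketch, not from the original header;
+// Thing and GetThing() are hypothetical):
+//
+//   Thing* t = GetThing();
+//   delete t;
+//   NonDereferenceable<Thing> dead(t);
+//   uintptr_t addr = dead.value();  // numeric value is still usable for logs
+//   // *dead or dead->member would not compile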
+//
+// The main use case is for pointers that reference memory that may not
+// contain a valid object, either because the object has already been freed, or
+// is under active construction or destruction (and hence parts of it may be
+// uninitialized or destructed.)
+// Such a pointer may still be useful, e.g., for its numeric value for
+// logging/debugging purposes, which may be accessed with `value()`.
+// Using NonDereferenceable with such pointers will make this intent clearer,
+// and prevent misuses.
+//
+// Note that NonDereferenceable is only a wrapper and is NOT an owning pointer,
+// i.e., it will not release/free the object.
+//
+// NonDereferenceable allows conversions between compatible pointer types, e.g.,
+// to navigate a class hierarchy and identify parent/sub-objects. Note that the
+// converted pointers stay safely NonDereferenceable.
+//
+// Use of NonDereferenceable is required to avoid errors from sanitization tools
+// like `clang++ -fsanitize=vptr`, and should prevent false positives while
+// pointers are manipulated within NonDereferenceable objects.
+//
+template <typename T>
+class NonDereferenceable {
+ public:
+  // Default construction with a null value.
+  NonDereferenceable() : mPtr(nullptr) {}
+
+  // Default copy construction and assignment.
+  NO_POINTEE_CHECKS
+  NonDereferenceable(const NonDereferenceable&) = default;
+  NO_POINTEE_CHECKS
+  NonDereferenceable& operator=(const NonDereferenceable&) = default;
+  // No move operations, as we're only carrying a non-owning pointer, so
+  // copying is most efficient.
+
+  // Construct/assign from a T* raw pointer.
+  // A raw pointer should usually point at a valid object, however we want to
+  // leave the ability to the user to create a NonDereferenceable from any
+  // pointer. Also, strictly speaking, in a constructor or destructor, `this`
+  // points at an object still being constructed or already partially
+  // destructed, which some very sensitive sanitizers could complain about.
+  NO_POINTEE_CHECKS
+  explicit NonDereferenceable(T* aPtr) : mPtr(aPtr) {}
+  NO_POINTEE_CHECKS
+  NonDereferenceable& operator=(T* aPtr) {
+    mPtr = aPtr;
+    return *this;
+  }
+
+  // Construct/assign from a compatible pointer type.
+  template <typename U>
+  NO_POINTEE_CHECKS explicit NonDereferenceable(U* aOther)
+      : mPtr(static_cast<T*>(aOther)) {}
+  template <typename U>
+  NO_POINTEE_CHECKS NonDereferenceable& operator=(U* aOther) {
+    mPtr = static_cast<T*>(aOther);
+    return *this;
+  }
+
+  // Construct/assign from a NonDereferenceable with a compatible pointer type.
+  template <typename U>
+  NO_POINTEE_CHECKS MOZ_IMPLICIT
+  NonDereferenceable(const NonDereferenceable<U>& aOther)
+      : mPtr(static_cast<T*>(aOther.mPtr)) {}
+  template <typename U>
+  NO_POINTEE_CHECKS NonDereferenceable& operator=(
+      const NonDereferenceable<U>& aOther) {
+    mPtr = static_cast<T*>(aOther.mPtr);
+    return *this;
+  }
+
+  // Explicitly disallow dereference operators, so that compiler errors point
+  // at these lines:
+  T& operator*() = delete;   // Cannot dereference NonDereferenceable!
+  T* operator->() = delete;  // Cannot dereference NonDereferenceable!
+
+  // Null check.
+  NO_POINTEE_CHECKS
+  explicit operator bool() const { return !!mPtr; }
+
+  // Extract the pointer value, untyped.
+  NO_POINTEE_CHECKS
+  uintptr_t value() const { return reinterpret_cast<uintptr_t>(mPtr); }
+
+ private:
+  // Let other NonDereferenceable templates access mPtr, to permit construction/
+  // assignment from compatible pointer types.
+ template + friend class NonDereferenceable; + + T* MOZ_NON_OWNING_REF mPtr; +}; + +} // namespace mozilla + +#undef NO_POINTEE_CHECKS + +#endif /* mozilla_NonDereferenceable_h */ diff --git a/mfbt/NotNull.h b/mfbt/NotNull.h new file mode 100644 index 0000000000..1a12400e14 --- /dev/null +++ b/mfbt/NotNull.h @@ -0,0 +1,449 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_NotNull_h +#define mozilla_NotNull_h + +// It's often unclear if a particular pointer, be it raw (T*) or smart +// (RefPtr, nsCOMPtr, etc.) can be null. This leads to missing null +// checks (which can cause crashes) and unnecessary null checks (which clutter +// the code). +// +// C++ has a built-in alternative that avoids these problems: references. This +// module defines another alternative, NotNull, which can be used in cases +// where references are not suitable. +// +// In the comments below we use the word "handle" to cover all varieties of +// pointers and references. +// +// References +// ---------- +// References are always non-null. (You can do |T& r = *p;| where |p| is null, +// but that's undefined behaviour. C++ doesn't provide any built-in, ironclad +// guarantee of non-nullness.) +// +// A reference works well when you need a temporary handle to an existing +// single object, e.g. for passing a handle to a function, or as a local handle +// within another object. (In Rust parlance, this is a "borrow".) +// +// A reference is less appropriate in the following cases. +// +// - As a primary handle to an object. E.g. code such as this is possible but +// strange: |T& t = *new T(); ...; delete &t;| +// +// - As a handle to an array. It's common for |T*| to refer to either a single +// |T| or an array of |T|, but |T&| cannot refer to an array of |T| because +// you can't index off a reference (at least, not without first converting it +// to a pointer). +// +// - When the handle identity is meaningful, e.g. if you have a hashtable of +// handles, because you have to use |&| on the reference to convert it to a +// pointer. +// +// - Some people don't like using non-const references as function parameters, +// because it is not clear at the call site that the argument might be +// modified. +// +// - When you need "smart" behaviour. E.g. we lack reference equivalents to +// RefPtr and nsCOMPtr. +// +// - When interfacing with code that uses pointers a lot, sometimes using a +// reference just feels like an odd fit. +// +// Furthermore, a reference is impossible in the following cases. +// +// - When the handle is rebound to another object. References don't allow this. +// +// - When the handle has type |void|. |void&| is not allowed. +// +// NotNull is an alternative that can be used in any of the above cases except +// for the last one, where the handle type is |void|. See below. 
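+//
+// A quick sketch of the intended style (illustrative; Foo and Method are
+// hypothetical):
+//
+//   NotNull<Foo*> foo = WrapNotNull(new Foo());
+//   foo->Method();   // no null check needed, and none possible
+//   Foo* raw = foo;  // implicit conversion back to the base pointer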
+ +#include + +#include +#include + +#include "mozilla/Assertions.h" + +namespace mozilla { + +namespace detail { +template +struct CopyablePtr { + T mPtr; + + template + explicit CopyablePtr(U&& aPtr) : mPtr{std::forward(aPtr)} {} + + template + explicit CopyablePtr(CopyablePtr aPtr) : mPtr{std::move(aPtr.mPtr)} {} +}; +} // namespace detail + +template +class MovingNotNull; + +// NotNull can be used to wrap a "base" pointer (raw or smart) to indicate it +// is not null. Some examples: +// +// - NotNull +// - NotNull> +// - NotNull> +// - NotNull> +// +// NotNull has the following notable properties. +// +// - It has zero space overhead. +// +// - It must be initialized explicitly. There is no default initialization. +// +// - It auto-converts to the base pointer type. +// +// - It does not auto-convert from a base pointer. Implicit conversion from a +// less-constrained type (e.g. T*) to a more-constrained type (e.g. +// NotNull) is dangerous. Creation and assignment from a base pointer can +// only be done with WrapNotNull() or MakeNotNull<>(), which makes them +// impossible to overlook, both when writing and reading code. +// +// - When initialized (or assigned) it is checked, and if it is null we abort. +// This guarantees that it cannot be null. +// +// - |operator bool()| is deleted. This means you cannot check a NotNull in a +// boolean context, which eliminates the possibility of unnecessary null +// checks. +// +// - It is not movable, but copyable if the base pointer type is copyable. It +// may be used together with MovingNotNull to avoid unnecessary copies or when +// the base pointer type is not copyable (such as UniquePtr). +// +template +class NotNull { + template + friend constexpr NotNull WrapNotNull(U aBasePtr); + template + friend constexpr NotNull WrapNotNullUnchecked(U aBasePtr); + template + friend constexpr NotNull MakeNotNull(Args&&... aArgs); + template + friend class NotNull; + + detail::CopyablePtr mBasePtr; + + // This constructor is only used by WrapNotNull() and MakeNotNull(). + template + constexpr explicit NotNull(U aBasePtr) : mBasePtr(T{std::move(aBasePtr)}) { + static_assert(sizeof(T) == sizeof(NotNull), + "NotNull must have zero space overhead."); + static_assert(offsetof(NotNull, mBasePtr) == 0, + "mBasePtr must have zero offset."); + } + + public: + // Disallow default construction. + NotNull() = delete; + + // Construct/assign from another NotNull with a compatible base pointer type. + template >> + constexpr MOZ_IMPLICIT NotNull(const NotNull& aOther) + : mBasePtr(aOther.mBasePtr) {} + + template >> + constexpr MOZ_IMPLICIT NotNull(MovingNotNull&& aOther) + : mBasePtr(std::move(aOther).unwrapBasePtr()) {} + + // Disallow null checks, which are unnecessary for this type. + explicit operator bool() const = delete; + + // Explicit conversion to a base pointer. Use only to resolve ambiguity or to + // get a castable pointer. + constexpr const T& get() const { return mBasePtr.mPtr; } + + // Implicit conversion to a base pointer. Preferable to get(). + constexpr operator const T&() const { return get(); } + + // Implicit conversion to a raw pointer from const lvalue-reference if + // supported by the base pointer (for RefPtr -> T* compatibility). + template && + std::is_convertible_v, + int> = 0> + constexpr operator U*() const& { + return get(); + } + + // Don't allow implicit conversions to raw pointers from rvalue-references. 
+ template && + std::is_convertible_v && + !std::is_convertible_v, + int> = 0> + constexpr operator U*() const&& = delete; + + // Dereference operators. + constexpr auto* operator->() const MOZ_NONNULL_RETURN { + return mBasePtr.mPtr.operator->(); + } + constexpr decltype(*mBasePtr.mPtr) operator*() const { + return *mBasePtr.mPtr; + } + + // NotNull can be copied, but not moved. Moving a NotNull with a smart base + // pointer would leave a nullptr NotNull behind. The move operations must not + // be explicitly deleted though, since that would cause overload resolution to + // fail in situations where a copy is possible. + NotNull(const NotNull&) = default; + NotNull& operator=(const NotNull&) = default; +}; + +// Specialization for T* to allow adding MOZ_NONNULL_RETURN attributes. +template +class NotNull { + template + friend constexpr NotNull WrapNotNull(U aBasePtr); + template + friend constexpr NotNull WrapNotNullUnchecked(U* aBasePtr); + template + friend constexpr NotNull MakeNotNull(Args&&... aArgs); + template + friend class NotNull; + + T* mBasePtr; + + // This constructor is only used by WrapNotNull() and MakeNotNull(). + template + constexpr explicit NotNull(U* aBasePtr) : mBasePtr(aBasePtr) {} + + public: + // Disallow default construction. + NotNull() = delete; + + // Construct/assign from another NotNull with a compatible base pointer type. + template >> + constexpr MOZ_IMPLICIT NotNull(const NotNull& aOther) + : mBasePtr(aOther.get()) { + static_assert(sizeof(T*) == sizeof(NotNull), + "NotNull must have zero space overhead."); + static_assert(offsetof(NotNull, mBasePtr) == 0, + "mBasePtr must have zero offset."); + } + + template >> + constexpr MOZ_IMPLICIT NotNull(MovingNotNull&& aOther) + : mBasePtr(NotNull{std::move(aOther)}) {} + + // Disallow null checks, which are unnecessary for this type. + explicit operator bool() const = delete; + + // Explicit conversion to a base pointer. Use only to resolve ambiguity or to + // get a castable pointer. + constexpr T* get() const MOZ_NONNULL_RETURN { return mBasePtr; } + + // Implicit conversion to a base pointer. Preferable to get(). + constexpr operator T*() const MOZ_NONNULL_RETURN { return get(); } + + // Dereference operators. + constexpr T* operator->() const MOZ_NONNULL_RETURN { return get(); } + constexpr T& operator*() const { return *mBasePtr; } +}; + +template +constexpr NotNull WrapNotNull(T aBasePtr) { + MOZ_RELEASE_ASSERT(aBasePtr); + return NotNull{std::move(aBasePtr)}; +} + +// WrapNotNullUnchecked should only be used in situations, where it is +// statically known that aBasePtr is non-null, and redundant release assertions +// should be avoided. It is only defined for raw base pointers, since it is only +// needed for those right now. There is no fundamental reason not to allow +// arbitrary base pointers here. 
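+//
+// For example (an illustrative sketch; Fill and buf are hypothetical):
+//
+//   void Fill(NotNull<char*> aBuffer);
+//   char buf[16];
+//   Fill(WrapNotNullUnchecked(buf));  // statically known to be non-null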
+template +constexpr NotNull WrapNotNullUnchecked(T aBasePtr) { + return NotNull{std::move(aBasePtr)}; +} + +template +MOZ_NONNULL(1) +constexpr NotNull WrapNotNullUnchecked(T* const aBasePtr) { +#if defined(__clang__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wpointer-bool-conversion" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wnonnull-compare" +#endif + MOZ_ASSERT(aBasePtr); +#if defined(__clang__) +# pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop +#endif + return NotNull{aBasePtr}; +} + +// A variant of NotNull that can be used as a return value or parameter type and +// moved into both NotNull and non-NotNull targets. This is not possible with +// NotNull, as it is not movable. MovingNotNull can therefore not guarantee it +// is always non-nullptr, but it can't be dereferenced, and there are debug +// assertions that ensure it is only moved once. +template +class MOZ_NON_AUTOABLE MovingNotNull { + template + friend constexpr MovingNotNull WrapMovingNotNullUnchecked(U aBasePtr); + + T mBasePtr; +#ifdef DEBUG + bool mConsumed = false; +#endif + + // This constructor is only used by WrapNotNull() and MakeNotNull(). + template + constexpr explicit MovingNotNull(U aBasePtr) : mBasePtr{std::move(aBasePtr)} { +#ifndef DEBUG + static_assert(sizeof(T) == sizeof(MovingNotNull), + "NotNull must have zero space overhead."); +#endif + static_assert(offsetof(MovingNotNull, mBasePtr) == 0, + "mBasePtr must have zero offset."); + } + + public: + MovingNotNull() = delete; + + MOZ_IMPLICIT MovingNotNull(const NotNull& aSrc) : mBasePtr(aSrc.get()) {} + + template >> + MOZ_IMPLICIT MovingNotNull(const NotNull& aSrc) : mBasePtr(aSrc.get()) {} + + template >> + MOZ_IMPLICIT MovingNotNull(MovingNotNull&& aSrc) + : mBasePtr(std::move(aSrc).unwrapBasePtr()) {} + + MOZ_IMPLICIT operator T() && { return std::move(*this).unwrapBasePtr(); } + + MOZ_IMPLICIT operator NotNull() && { return std::move(*this).unwrap(); } + + NotNull unwrap() && { + return WrapNotNullUnchecked(std::move(*this).unwrapBasePtr()); + } + + T unwrapBasePtr() && { +#ifdef DEBUG + MOZ_ASSERT(!mConsumed); + mConsumed = true; +#endif + return std::move(mBasePtr); + } + + MovingNotNull(MovingNotNull&&) = default; + MovingNotNull& operator=(MovingNotNull&&) = default; +}; + +template +constexpr MovingNotNull WrapMovingNotNullUnchecked(T aBasePtr) { + return MovingNotNull{std::move(aBasePtr)}; +} + +template +constexpr MovingNotNull WrapMovingNotNull(T aBasePtr) { + MOZ_RELEASE_ASSERT(aBasePtr); + return WrapMovingNotNullUnchecked(std::move(aBasePtr)); +} + +namespace detail { + +// Extract the pointed-to type from a pointer type (be it raw or smart). +// The default implementation uses the dereferencing operator of the pointer +// type to find what it's pointing to. +template +struct PointedTo { + // Remove the reference that dereferencing operators may return. + using Type = std::remove_reference_t())>; + using NonConstType = std::remove_const_t; +}; + +// Specializations for raw pointers. +// This is especially required because VS 2017 15.6 (March 2018) started +// rejecting the above `decltype(*std::declval())` trick for raw +// pointers. +// See bug 1443367. +template +struct PointedTo { + using Type = T; + using NonConstType = T; +}; + +template +struct PointedTo { + using Type = const T; + using NonConstType = T; +}; + +} // namespace detail + +// Allocate an object with infallible new, and wrap its pointer in NotNull. 
+// |MakeNotNull>(args...)| will run |new Ob(args...)| +// and return NotNull>. +template +constexpr NotNull MakeNotNull(Args&&... aArgs) { + using Pointee = typename detail::PointedTo::NonConstType; + static_assert(!std::is_array_v, + "MakeNotNull cannot construct an array"); + return NotNull(new Pointee(std::forward(aArgs)...)); +} + +// Compare two NotNulls. +template +constexpr bool operator==(const NotNull& aLhs, const NotNull& aRhs) { + return aLhs.get() == aRhs.get(); +} +template +constexpr bool operator!=(const NotNull& aLhs, const NotNull& aRhs) { + return aLhs.get() != aRhs.get(); +} + +// Compare a NotNull to a base pointer. +template +constexpr bool operator==(const NotNull& aLhs, const U& aRhs) { + return aLhs.get() == aRhs; +} +template +constexpr bool operator!=(const NotNull& aLhs, const U& aRhs) { + return aLhs.get() != aRhs; +} + +// Compare a base pointer to a NotNull. +template +constexpr bool operator==(const T& aLhs, const NotNull& aRhs) { + return aLhs == aRhs.get(); +} +template +constexpr bool operator!=(const T& aLhs, const NotNull& aRhs) { + return aLhs != aRhs.get(); +} + +// Disallow comparing a NotNull to a nullptr. +template +bool operator==(const NotNull&, decltype(nullptr)) = delete; +template +bool operator!=(const NotNull&, decltype(nullptr)) = delete; + +// Disallow comparing a nullptr to a NotNull. +template +bool operator==(decltype(nullptr), const NotNull&) = delete; +template +bool operator!=(decltype(nullptr), const NotNull&) = delete; + +} // namespace mozilla + +#endif /* mozilla_NotNull_h */ diff --git a/mfbt/Opaque.h b/mfbt/Opaque.h new file mode 100644 index 0000000000..e5dc84f159 --- /dev/null +++ b/mfbt/Opaque.h @@ -0,0 +1,41 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* An opaque integral type supporting only comparison operators. */ + +#ifndef mozilla_Opaque_h +#define mozilla_Opaque_h + +#include + +namespace mozilla { + +/** + * Opaque is a replacement for integral T in cases where only comparisons + * must be supported, and it's desirable to prevent accidental dependency on + * exact values. + */ +template +class Opaque final { + static_assert(std::is_integral_v, + "mozilla::Opaque only supports integral types"); + + T mValue; + + public: + Opaque() = default; + explicit Opaque(T aValue) : mValue(aValue) {} + + bool operator==(const Opaque& aOther) const { + return mValue == aOther.mValue; + } + + bool operator!=(const Opaque& aOther) const { return !(*this == aOther); } +}; + +} // namespace mozilla + +#endif /* mozilla_Opaque_h */ diff --git a/mfbt/OperatorNewExtensions.h b/mfbt/OperatorNewExtensions.h new file mode 100644 index 0000000000..a44a6bdeae --- /dev/null +++ b/mfbt/OperatorNewExtensions.h @@ -0,0 +1,50 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* A version of |operator new| that eschews mandatory null-checks. 
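+ *
+ * A sketch of a call site (illustrative; T and args are hypothetical):
+ *
+ *   alignas(T) unsigned char slot[sizeof(T)];
+ *   T* obj = new (mozilla::KnownNotNull, slot) T(args);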
*/ + +#ifndef mozilla_OperatorNewExtensions_h +#define mozilla_OperatorNewExtensions_h + +#include "mozilla/Assertions.h" + +// Credit goes to WebKit for this implementation, cf. +// https://bugs.webkit.org/show_bug.cgi?id=74676 +namespace mozilla { +enum NotNullTag { + KnownNotNull, +}; +} // namespace mozilla + +/* + * The logic here is a little subtle. [expr.new] states that if the allocation + * function being called returns null, then object initialization must not be + * done, and the entirety of the new expression must return null. Non-throwing + * (noexcept) functions are defined to return null to indicate failure. The + * standard placement operator new is defined in such a way, and so it requires + * a null check, even when that null check would be extraneous. Functions + * declared without such a specification are defined to throw std::bad_alloc if + * they fail, and return a non-null pointer otherwise. We compile without + * exceptions, so any placement new overload we define that doesn't declare + * itself as noexcept must therefore avoid generating a null check. Below is + * just such an overload. + * + * You might think that MOZ_NONNULL might perform the same function, but + * MOZ_NONNULL isn't supported on all of our compilers, and even when it is + * supported, doesn't work on all the versions we support. And even keeping + * those limitations in mind, we can't put MOZ_NONNULL on the global, + * standardized placement new function in any event. + * + * We deliberately don't add MOZ_NONNULL(3) to tag |p| as non-null, to benefit + * hypothetical static analyzers. Doing so makes |MOZ_ASSERT(p)|'s internal + * test vacuous, and some compilers warn about such vacuous tests. + */ +inline void* operator new(size_t, mozilla::NotNullTag, void* p) { + MOZ_ASSERT(p); + return p; +} + +#endif // mozilla_OperatorNewExtensions_h diff --git a/mfbt/PairHash.h b/mfbt/PairHash.h new file mode 100644 index 0000000000..100832dc12 --- /dev/null +++ b/mfbt/PairHash.h @@ -0,0 +1,75 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Utilities for hashing pairs. */ + +#ifndef mozilla_PairHash_h +#define mozilla_PairHash_h + +#include "mozilla/CompactPair.h" +#include "mozilla/HashFunctions.h" + +#include // std::pair + +namespace mozilla { + +/** + * The HashPair overloads below do just what you'd expect. + * + * These functions support hash of std::pair and mozilla::CompactPair + * where type T and U both support AddToHash. 
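+ *
+ * For example (an illustrative sketch, not from the original header):
+ *
+ *   std::pair<int, const char*> p{42, "label"};
+ *   HashNumber h = HashPair(p);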
+ */
+template <typename U, typename V>
+[[nodiscard]] inline HashNumber HashPair(const std::pair<U, V>& pair) {
+  // Pair hash combines the hash of each member
+  return HashGeneric(pair.first, pair.second);
+}
+
+template <typename U, typename V>
+[[nodiscard]] inline HashNumber HashCompactPair(const CompactPair<U, V>& pair) {
+  // Pair hash combines the hash of each member
+  return HashGeneric(pair.first(), pair.second());
+}
+
+/**
+ * Hash policy for std::pair compatible with HashTable
+ */
+template <typename T, typename U>
+struct PairHasher {
+  using Key = std::pair<T, U>;
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) { return HashPair(aLookup); }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return aKey == aLookup;
+  }
+
+  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+/**
+ * Hash policy for mozilla::CompactPair compatible with HashTable
+ */
+template <typename T, typename U>
+struct CompactPairHasher {
+  using Key = CompactPair<T, U>;
+  using Lookup = Key;
+
+  static HashNumber hash(const Lookup& aLookup) {
+    return HashCompactPair(aLookup);
+  }
+
+  static bool match(const Key& aKey, const Lookup& aLookup) {
+    return aKey == aLookup;
+  }
+
+  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
+};
+
+}  // namespace mozilla
+
+#endif /* mozilla_PairHash_h */
diff --git a/mfbt/Path.h b/mfbt/Path.h
new file mode 100644
index 0000000000..eed687dd06
--- /dev/null
+++ b/mfbt/Path.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Represents the native path format on the platform. */
+
+#ifndef mozilla_Path_h
+#define mozilla_Path_h
+
+namespace mozilla {
+namespace filesystem {
+
+/*
+ * Mozilla variant of std::filesystem::path.
+ * Only |value_type| is implemented at the moment.
+ */
+class Path {
+ public:
+#ifdef XP_WIN
+  using value_type = char16_t;
+#else
+  using value_type = char;
+#endif
+};
+
+} /* namespace filesystem */
+} /* namespace mozilla */
+
+#endif /* mozilla_Path_h */
diff --git a/mfbt/PodOperations.h b/mfbt/PodOperations.h
new file mode 100644
index 0000000000..f4e5da4c79
--- /dev/null
+++ b/mfbt/PodOperations.h
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Operations for zeroing POD types, arrays, and so on.
+ *
+ * These operations are preferable to memset, memcmp, and the like because they
+ * don't require remembering to multiply by sizeof(T), array lengths, and so on
+ * everywhere.
+ */
+
+#ifndef mozilla_PodOperations_h
+#define mozilla_PodOperations_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+#include <string.h>
+
+namespace mozilla {
+
+template <typename T, size_t Length>
+class Array;
+
+template <typename T>
+class NotNull;
+
+/** Set the contents of |aT| to 0. */
+template <typename T>
+static MOZ_ALWAYS_INLINE void PodZero(T* aT) {
+  memset(aT, 0, sizeof(T));
+}
+
+/** Set the contents of |aNElem| elements starting at |aT| to 0.
*/ +template +static MOZ_ALWAYS_INLINE void PodZero(T* aT, size_t aNElem) { + /* + * This function is often called with 'aNElem' small; we use an inline loop + * instead of calling 'memset' with a non-constant length. The compiler + * should inline the memset call with constant size, though. + */ + for (T* end = aT + aNElem; aT < end; aT++) { + memset(aT, 0, sizeof(T)); + } +} + +/** Set the contents of |aNElem| elements starting at |aT| to 0. */ +template +static MOZ_ALWAYS_INLINE void PodZero(NotNull aT, size_t aNElem) { + PodZero(aT.get(), aNElem); +} + +/* + * Arrays implicitly convert to pointers to their first element, which is + * dangerous when combined with the above PodZero definitions. Adding an + * overload for arrays is ambiguous, so we need another identifier. The + * ambiguous overload is left to catch mistaken uses of PodZero; if you get a + * compile error involving PodZero and array types, use PodArrayZero instead. + */ +template +static void PodZero(T (&aT)[N]) = delete; +template +static void PodZero(T (&aT)[N], size_t aNElem) = delete; + +/** Set the contents of the array |aT| to zero. */ +template +static MOZ_ALWAYS_INLINE void PodArrayZero(T (&aT)[N]) { + memset(aT, 0, N * sizeof(T)); +} + +template +static MOZ_ALWAYS_INLINE void PodArrayZero(Array& aArr) { + memset(&aArr[0], 0, N * sizeof(T)); +} + +/** + * Assign |*aSrc| to |*aDst|. The locations must not be the same and must not + * overlap. + */ +template +static MOZ_ALWAYS_INLINE void PodAssign(T* aDst, const T* aSrc) { + MOZ_ASSERT(aDst + 1 <= aSrc || aSrc + 1 <= aDst, + "destination and source must not overlap"); + memcpy(reinterpret_cast(aDst), reinterpret_cast(aSrc), + sizeof(T)); +} + +/** + * Copy |aNElem| T elements from |aSrc| to |aDst|. The two memory ranges must + * not overlap! + */ +template +static MOZ_ALWAYS_INLINE void PodCopy(T* aDst, const T* aSrc, size_t aNElem) { + MOZ_ASSERT(aDst + aNElem <= aSrc || aSrc + aNElem <= aDst, + "destination and source must not overlap"); + if (aNElem < 128) { + /* + * Avoid using operator= in this loop, as it may have been + * intentionally deleted by the POD type. + */ + for (const T* srcend = aSrc + aNElem; aSrc < srcend; aSrc++, aDst++) { + PodAssign(aDst, aSrc); + } + } else { + memcpy(aDst, aSrc, aNElem * sizeof(T)); + } +} + +template +static MOZ_ALWAYS_INLINE void PodCopy(volatile T* aDst, const volatile T* aSrc, + size_t aNElem) { + MOZ_ASSERT(aDst + aNElem <= aSrc || aSrc + aNElem <= aDst, + "destination and source must not overlap"); + + /* + * Volatile |aDst| requires extra work, because it's undefined behavior to + * modify volatile objects using the mem* functions. Just write out the + * loops manually, using operator= rather than memcpy for the same reason, + * and let the compiler optimize to the extent it can. + */ + for (const volatile T* srcend = aSrc + aNElem; aSrc < srcend; + aSrc++, aDst++) { + *aDst = *aSrc; + } +} + +/* + * Copy the contents of the array |aSrc| into the array |aDst|, both of size N. + * The arrays must not overlap! + */ +template +static MOZ_ALWAYS_INLINE void PodArrayCopy(T (&aDst)[N], const T (&aSrc)[N]) { + PodCopy(aDst, aSrc, N); +} + +/** + * Copy the memory for |aNElem| T elements from |aSrc| to |aDst|. If the two + * memory ranges overlap, then the effect is as if the |aNElem| elements are + * first copied from |aSrc| to a temporary array, and then from the temporary + * array to |aDst|. 
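+ *
+ * For example (an illustrative sketch):
+ *
+ *   int buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ *   PodMove(buf, buf + 2, 6);  // buf becomes {2, 3, 4, 5, 6, 7, 6, 7}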
+ */ +template +static MOZ_ALWAYS_INLINE void PodMove(T* aDst, const T* aSrc, size_t aNElem) { + MOZ_ASSERT(aNElem <= SIZE_MAX / sizeof(T), + "trying to move an impossible number of elements"); + memmove(aDst, aSrc, aNElem * sizeof(T)); +} + +/** + * Looking for a PodEqual? Use ArrayEqual from ArrayUtils.h. + * Note that we *cannot* use memcmp for this, due to padding bytes, etc.. + */ + +} // namespace mozilla + +#endif /* mozilla_PodOperations_h */ diff --git a/mfbt/Poison.cpp b/mfbt/Poison.cpp new file mode 100644 index 0000000000..db523b928a --- /dev/null +++ b/mfbt/Poison.cpp @@ -0,0 +1,206 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * A poison value that can be used to fill a memory space with + * an address that leads to a safe crash when dereferenced. + */ + +#include "mozilla/Poison.h" + +#include "mozilla/Assertions.h" +#ifdef _WIN32 +# include +#elif !defined(__OS2__) +# include +# ifndef __wasi__ +# include +# ifndef MAP_ANON +# ifdef MAP_ANONYMOUS +# define MAP_ANON MAP_ANONYMOUS +# else +# error "Don't know how to get anonymous memory" +# endif +# endif +# endif +#endif + +// Freed memory is filled with a poison value, which we arrange to +// form a pointer either to an always-unmapped region of the address +// space, or to a page that has been reserved and rendered +// inaccessible via OS primitives. See tests/TestPoisonArea.cpp for +// extensive discussion of the requirements for this page. The code +// from here to 'class FreeList' needs to be kept in sync with that +// file. + +#ifdef _WIN32 +static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) { + return VirtualAlloc((void*)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS); +} + +static void ReleaseRegion(void* aRegion, uintptr_t aSize) { + VirtualFree(aRegion, aSize, MEM_RELEASE); +} + +static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) { + SYSTEM_INFO sinfo; + GetSystemInfo(&sinfo); + if (aRegion >= (uintptr_t)sinfo.lpMaximumApplicationAddress && + aRegion + aSize >= (uintptr_t)sinfo.lpMaximumApplicationAddress) { + return true; + } else { + return false; + } +} + +static uintptr_t GetDesiredRegionSize() { + SYSTEM_INFO sinfo; + GetSystemInfo(&sinfo); + return sinfo.dwAllocationGranularity; +} + +# define RESERVE_FAILED 0 + +#elif defined(__OS2__) +static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) { + // OS/2 doesn't support allocation at an arbitrary address, + // so return an address that is known to be invalid. + return (void*)0xFFFD0000; +} + +static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; } + +static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) { + // There's no reliable way to probe an address in the system + // arena other than by touching it and seeing if a trap occurs. + return false; +} + +static uintptr_t GetDesiredRegionSize() { + // Page size is fixed at 4k. 
+  return 0x1000;
+}
+
+#  define RESERVE_FAILED 0
+
+#elif defined(__wasi__)
+
+#  define RESERVE_FAILED 0
+
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+  return RESERVE_FAILED;
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; }
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+  const auto pageSize = 1 << 16;
+  MOZ_ASSERT(pageSize == sysconf(_SC_PAGESIZE));
+  auto heapSize = __builtin_wasm_memory_size(0) * pageSize;
+  return aRegion + aSize < heapSize;
+}
+
+static uintptr_t GetDesiredRegionSize() { return 0; }
+
+#else  // __wasi__
+
+#  include "mozilla/TaggedAnonymousMemory.h"
+
+static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
+  return MozTaggedAnonymousMmap(reinterpret_cast<void*>(aRegion), aSize,
+                                PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0,
+                                "poison");
+}
+
+static void ReleaseRegion(void* aRegion, uintptr_t aSize) {
+  munmap(aRegion, aSize);
+}
+
+static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
+#  ifdef XP_SOLARIS
+  if (posix_madvise(reinterpret_cast<void*>(aRegion), aSize,
+                    POSIX_MADV_NORMAL)) {
+#  else
+  if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
+#  endif
+    return true;
+  } else {
+    return false;
+  }
+}
+
+static uintptr_t GetDesiredRegionSize() { return sysconf(_SC_PAGESIZE); }
+
+#  define RESERVE_FAILED MAP_FAILED
+
+#endif  // system dependencies
+
+static_assert((sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8) &&
+              (sizeof(uintptr_t) == sizeof(void*)));
+
+static uintptr_t ReservePoisonArea(uintptr_t rgnsize) {
+  if (sizeof(uintptr_t) == 8) {
+    // Use the hardware-inaccessible region.
+    // We have to avoid 64-bit constants and shifts by 32 bits, since this
+    // code is compiled in 32-bit mode, although it is never executed there.
+    return (((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu)) &
+            ~(rgnsize - 1));
+  }
+
+  // First see if we can allocate the preferred poison address from the OS.
+  uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize - 1));
+  void* result = ReserveRegion(candidate, rgnsize);
+  if (result == (void*)candidate) {
+    // success - inaccessible page allocated
+    return candidate;
+  }
+
+  // That didn't work, so see if the preferred address is within a range
+  // of permanently inaccessible memory.
+  if (ProbeRegion(candidate, rgnsize)) {
+    // success - selected page cannot be usable memory
+    if (result != RESERVE_FAILED) {
+      ReleaseRegion(result, rgnsize);
+    }
+    return candidate;
+  }
+
+  // The preferred address is already in use. Did the OS give us a
+  // consolation prize?
+  if (result != RESERVE_FAILED) {
+    return uintptr_t(result);
+  }
+
+  // It didn't, so try to allocate again, without any constraint on
+  // the address.
+  result = ReserveRegion(0, rgnsize);
+  if (result != RESERVE_FAILED) {
+    return uintptr_t(result);
+  }
+
+  MOZ_CRASH("no usable poison region identified");
+}
+
+static uintptr_t GetPoisonValue(uintptr_t aBase, uintptr_t aSize) {
+  if (aSize == 0) {  // can't happen
+    return 0;
+  }
+  return aBase + aSize / 2 - 1;
+}
+
+// Poison is used so pervasively throughout the codebase that we decided it was
+// best to actually use ordered dynamic initialization of globals (AKA static
+// constructors) for this. This way everything will have properly initialized
+// poison -- except other dynamic initialization code in libmozglue, which
+// there shouldn't be much of. (libmozglue is one of the first things loaded,
+// and specifically comes before libxul, so nearly all gecko code runs strictly
+// after this.)
+extern "C" { +uintptr_t gMozillaPoisonSize = GetDesiredRegionSize(); +uintptr_t gMozillaPoisonBase = ReservePoisonArea(gMozillaPoisonSize); +uintptr_t gMozillaPoisonValue = + GetPoisonValue(gMozillaPoisonBase, gMozillaPoisonSize); +} diff --git a/mfbt/Poison.h b/mfbt/Poison.h new file mode 100644 index 0000000000..5b1fae1fd1 --- /dev/null +++ b/mfbt/Poison.h @@ -0,0 +1,109 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * A poison value that can be used to fill a memory space with + * an address that leads to a safe crash when dereferenced. + */ + +#ifndef mozilla_Poison_h +#define mozilla_Poison_h + +#include "mozilla/Assertions.h" +#include "mozilla/Types.h" + +#include +#include + +MOZ_BEGIN_EXTERN_C + +extern MFBT_DATA uintptr_t gMozillaPoisonValue; + +/** + * @return the poison value. + */ +inline uintptr_t mozPoisonValue() { return gMozillaPoisonValue; } + +/** + * Overwrite the memory block of aSize bytes at aPtr with the poison value. + * Only a multiple of sizeof(uintptr_t) bytes are overwritten, the last + * few bytes (if any) are not overwritten. + */ +inline void mozWritePoison(void* aPtr, size_t aSize) { + const uintptr_t POISON = mozPoisonValue(); + char* p = (char*)aPtr; + char* limit = p + (aSize & ~(sizeof(uintptr_t) - 1)); + MOZ_ASSERT(aSize >= sizeof(uintptr_t), "poisoning this object has no effect"); + for (; p < limit; p += sizeof(uintptr_t)) { + memcpy(p, &POISON, sizeof(POISON)); + } +} + +/* Values annotated by CrashReporter */ +extern MFBT_DATA uintptr_t gMozillaPoisonBase; +extern MFBT_DATA uintptr_t gMozillaPoisonSize; + +MOZ_END_EXTERN_C + +#if defined(__cplusplus) + +namespace mozilla { + +/** + * A version of CorruptionCanary that is suitable as a member of objects that + * are statically allocated. + */ +class CorruptionCanaryForStatics { + public: + constexpr CorruptionCanaryForStatics() : mValue(kCanarySet) {} + + // This is required to avoid static constructor bloat. + ~CorruptionCanaryForStatics() = default; + + void Check() const { + if (mValue != kCanarySet) { + MOZ_CRASH("Canary check failed, check lifetime"); + } + } + + protected: + uintptr_t mValue; + + private: + static const uintptr_t kCanarySet = 0x0f0b0f0b; +}; + +/** + * This class is designed to cause crashes when various kinds of memory + * corruption are observed. For instance, let's say we have a class C where we + * suspect out-of-bounds writes to some members. We can insert a member of type + * Poison near the members we suspect are being corrupted by out-of-bounds + * writes. Or perhaps we have a class K we suspect is subject to use-after-free + * violations, in which case it doesn't particularly matter where in the class + * we add the member of type Poison. + * + * In either case, we then insert calls to Check() throughout the code. Doing + * so enables us to narrow down the location where the corruption is occurring. + * A pleasant side-effect of these additional Check() calls is that crash + * signatures may become more regular, as crashes will ideally occur + * consolidated at the point of a Check(), rather than scattered about at + * various uses of the corrupted memory. 
diff --git a/mfbt/Poison.h b/mfbt/Poison.h
new file mode 100644
index 0000000000..5b1fae1fd1
--- /dev/null
+++ b/mfbt/Poison.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A poison value that can be used to fill a memory space with
+ * an address that leads to a safe crash when dereferenced.
+ */
+
+#ifndef mozilla_Poison_h
+#define mozilla_Poison_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdint.h>
+#include <string.h>
+
+MOZ_BEGIN_EXTERN_C
+
+extern MFBT_DATA uintptr_t gMozillaPoisonValue;
+
+/**
+ * @return the poison value.
+ */
+inline uintptr_t mozPoisonValue() { return gMozillaPoisonValue; }
+
+/**
+ * Overwrite the memory block of aSize bytes at aPtr with the poison value.
+ * Only a multiple of sizeof(uintptr_t) bytes are overwritten; the last
+ * few bytes (if any) are not overwritten.
+ */
+inline void mozWritePoison(void* aPtr, size_t aSize) {
+  const uintptr_t POISON = mozPoisonValue();
+  char* p = (char*)aPtr;
+  char* limit = p + (aSize & ~(sizeof(uintptr_t) - 1));
+  MOZ_ASSERT(aSize >= sizeof(uintptr_t), "poisoning this object has no effect");
+  for (; p < limit; p += sizeof(uintptr_t)) {
+    memcpy(p, &POISON, sizeof(POISON));
+  }
+}
+
+/* Values annotated by CrashReporter */
+extern MFBT_DATA uintptr_t gMozillaPoisonBase;
+extern MFBT_DATA uintptr_t gMozillaPoisonSize;
+
+MOZ_END_EXTERN_C
+
+#if defined(__cplusplus)
+
+namespace mozilla {
+
+/**
+ * A version of CorruptionCanary that is suitable as a member of objects that
+ * are statically allocated.
+ */
+class CorruptionCanaryForStatics {
+ public:
+  constexpr CorruptionCanaryForStatics() : mValue(kCanarySet) {}
+
+  // This is required to avoid static constructor bloat.
+  ~CorruptionCanaryForStatics() = default;
+
+  void Check() const {
+    if (mValue != kCanarySet) {
+      MOZ_CRASH("Canary check failed, check lifetime");
+    }
+  }
+
+ protected:
+  uintptr_t mValue;
+
+ private:
+  static const uintptr_t kCanarySet = 0x0f0b0f0b;
+};
+
+/**
+ * This class is designed to cause crashes when various kinds of memory
+ * corruption are observed. For instance, let's say we have a class C where we
+ * suspect out-of-bounds writes to some members. We can insert a member of type
+ * Poison near the members we suspect are being corrupted by out-of-bounds
+ * writes. Or perhaps we have a class K we suspect is subject to use-after-free
+ * violations, in which case it doesn't particularly matter where in the class
+ * we add the member of type Poison.
+ *
+ * In either case, we then insert calls to Check() throughout the code. Doing
+ * so enables us to narrow down the location where the corruption is occurring.
+ * A pleasant side-effect of these additional Check() calls is that crash
+ * signatures may become more regular, as crashes will ideally occur
+ * consolidated at the point of a Check(), rather than scattered about at
+ * various uses of the corrupted memory.
+ */
+class CorruptionCanary : public CorruptionCanaryForStatics {
+ public:
+  constexpr CorruptionCanary() = default;
+
+  ~CorruptionCanary() {
+    Check();
+    mValue = mozPoisonValue();
+  }
+};
+
+}  // namespace mozilla
+
+#endif
+
+#endif /* mozilla_Poison_h */
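// An illustrative (hypothetical) use of CorruptionCanary from the header
// above: place the canary next to suspect fields and call Check() at
// interesting points to catch out-of-bounds writes early.
#include "mozilla/Poison.h"

class SuspectBuffer {
  char mData[64];
  mozilla::CorruptionCanary mCanary;  // sits right after the suspect field

 public:
  void Use() {
    // ... work with mData ...
    mCanary.Check();  // MOZ_CRASHes if mData overflowed into the canary
  }
};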
diff --git a/mfbt/RandomNum.cpp b/mfbt/RandomNum.cpp
new file mode 100644
index 0000000000..96de5d4055
--- /dev/null
+++ b/mfbt/RandomNum.cpp
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RandomNum.h"
+
+#include <fcntl.h>
+#ifdef XP_UNIX
+#  include <unistd.h>
+#endif
+
+#if defined(XP_WIN)
+
+// Microsoft doesn't "officially" support using RtlGenRandom() directly
+// anymore, and the Windows headers assume that __stdcall is
+// the default calling convention (which is true when Microsoft uses this
+// function to build their own CRT libraries).
+
+// We will explicitly declare it with the proper calling convention.
+
+#  include "minwindef.h"
+#  define RtlGenRandom SystemFunction036
+extern "C" BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer,
+                                      ULONG RandomBufferLength);
+
+#endif
+
+#if defined(ANDROID) || defined(XP_DARWIN) || defined(__DragonFly__) || \
+    defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+    defined(__wasi__)
+#  include <stdlib.h>
+#  define USE_ARC4RANDOM
+#endif
+
+#if defined(__linux__)
+#  include <linux/random.h>  // For GRND_NONBLOCK.
+#  include <sys/syscall.h>   // For SYS_getrandom.
+
+// Older glibc versions don't define SYS_getrandom, so we define it here if
+// it's not available. See bug 995069.
+#  if defined(__x86_64__)
+#    define GETRANDOM_NR 318
+#  elif defined(__i386__)
+#    define GETRANDOM_NR 355
+#  elif defined(__aarch64__)
+#    define GETRANDOM_NR 278
+#  elif defined(__arm__)
+#    define GETRANDOM_NR 384
+#  elif defined(__powerpc__)
+#    define GETRANDOM_NR 359
+#  elif defined(__s390__)
+#    define GETRANDOM_NR 349
+#  elif defined(__mips__)
+#    include <sgidefs.h>
+#    if _MIPS_SIM == _MIPS_SIM_ABI32
+#      define GETRANDOM_NR 4353
+#    elif _MIPS_SIM == _MIPS_SIM_ABI64
+#      define GETRANDOM_NR 5313
+#    elif _MIPS_SIM == _MIPS_SIM_NABI32
+#      define GETRANDOM_NR 6317
+#    endif
+#  endif
+
+#  if defined(SYS_getrandom)
+// We have SYS_getrandom. Use it to check GETRANDOM_NR. Only do this if we set
+// GETRANDOM_NR so tier 3 platforms with recent glibc are not forced to define
+// it for no good reason.
+#    if defined(GETRANDOM_NR)
+static_assert(GETRANDOM_NR == SYS_getrandom,
+              "GETRANDOM_NR should match the actual SYS_getrandom value");
+#    endif
+#  else
+#    define SYS_getrandom GETRANDOM_NR
+#  endif
+
+#  if defined(GRND_NONBLOCK)
+static_assert(GRND_NONBLOCK == 1,
+              "If GRND_NONBLOCK is not 1 the #define below is wrong");
+#  else
+#    define GRND_NONBLOCK 1
+#  endif
+
+#endif  // defined(__linux__)
+
+namespace mozilla {
+
+MFBT_API bool GenerateRandomBytesFromOS(void* aBuffer, size_t aLength) {
+  MOZ_ASSERT(aBuffer);
+  MOZ_ASSERT(aLength > 0);
+
+#if defined(XP_WIN)
+  return !!RtlGenRandom(aBuffer, aLength);
+
+#elif defined(USE_ARC4RANDOM)  // defined(XP_WIN)
+
+  arc4random_buf(aBuffer, aLength);
+  return true;
+
+#elif defined(XP_UNIX)  // defined(USE_ARC4RANDOM)
+
+#  if defined(__linux__)
+
+  long bytesGenerated = syscall(SYS_getrandom, aBuffer, aLength, GRND_NONBLOCK);
+
+  if (static_cast<size_t>(bytesGenerated) == aLength) {
+    return true;
+  }
+
+  // Fall-through to UNIX behavior if failed
+
+#  endif  // defined(__linux__)
+
+  int fd = open("/dev/urandom", O_RDONLY);
+  if (fd < 0) {
+    return false;
+  }
+
+  ssize_t bytesRead = read(fd, aBuffer, aLength);
+
+  close(fd);
+
+  return (static_cast<size_t>(bytesRead) == aLength);
+
+#else  // defined(XP_UNIX)
+#  error "Platform needs to implement GenerateRandomBytesFromOS()"
+#endif
+}
+
+MFBT_API Maybe<uint64_t> RandomUint64() {
+  uint64_t randomNum;
+  if (!GenerateRandomBytesFromOS(&randomNum, sizeof(randomNum))) {
+    return Nothing();
+  }
+
+  return Some(randomNum);
+}
+
+MFBT_API uint64_t RandomUint64OrDie() {
+  uint64_t randomNum;
+  MOZ_RELEASE_ASSERT(GenerateRandomBytesFromOS(&randomNum, sizeof(randomNum)));
+  return randomNum;
+}
+
+}  // namespace mozilla
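// A brief caller's-eye sketch (not part of the upstream patch) of the API
// implemented above and declared in mfbt/RandomNum.h, added below. The
// function names and the fallback constant are hypothetical.
#include "mozilla/RandomNum.h"

bool FillNonce(uint8_t (&aNonce)[16]) {
  // GenerateRandomBytesFromOS is [[nodiscard]]; check the result.
  return mozilla::GenerateRandomBytesFromOS(aNonce, sizeof(aNonce));
}

uint64_t PickSeed() {
  // Maybe<uint64_t> is Nothing() only if the OS entropy source failed.
  return mozilla::RandomUint64().valueOr(0x9E3779B97F4A7C15ULL);
}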
diff --git a/mfbt/RandomNum.h b/mfbt/RandomNum.h
new file mode 100644
index 0000000000..23a24837e9
--- /dev/null
+++ b/mfbt/RandomNum.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+/* Routines for generating random numbers */
+
+#ifndef mozilla_RandomNum_h_
+#define mozilla_RandomNum_h_
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Types.h"
+
+namespace mozilla {
+
+/**
+ * Generate cryptographically secure random bytes using the best facilities
+ * available on the current OS.
+ *
+ * Return value: true if random bytes were copied into `aBuffer` or false on
+ * error.
+ *
+ * Useful whenever a secure random number is needed and NSS isn't available.
+ * (Perhaps because it hasn't been initialized yet)
+ *
+ * Current mechanisms:
+ *    Windows: RtlGenRandom()
+ *    Android, Darwin, DragonFly, FreeBSD, OpenBSD, NetBSD: arc4random()
+ *    Linux: getrandom() if available, "/dev/urandom" otherwise
+ *    Other Unix: "/dev/urandom"
+ *
+ */
+[[nodiscard]] MFBT_API bool GenerateRandomBytesFromOS(void* aBuffer,
+                                                      size_t aLength);
+
+/**
+ * Generate a cryptographically secure random 64-bit unsigned number using the
+ * best facilities available on the current OS.
+ */
+MFBT_API Maybe<uint64_t> RandomUint64();
+
+/**
+ * Like RandomUint64, but always returns a uint64_t or crashes with an assert
+ * if the underlying RandomUint64 call failed.
+ */
+MFBT_API uint64_t RandomUint64OrDie();
+
+}  // namespace mozilla
+
+#endif  // mozilla_RandomNum_h_
diff --git a/mfbt/Range.h b/mfbt/Range.h
new file mode 100644
index 0000000000..a633bcf36f
--- /dev/null
+++ b/mfbt/Range.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Range_h
+#define mozilla_Range_h
+
+#include "mozilla/RangedPtr.h"
+#include "mozilla/Span.h"
+
+#include <stddef.h>
+#include <type_traits>
+
+namespace mozilla {
+
+// Range<T> is a tuple containing a pointer and a length.
+template <typename T>
+class Range {
+  template <typename U>
+  friend class Range;
+
+  // Reassignment of RangedPtrs is so (subtly) restrictive that we just make
+  // Range immutable.
+  const RangedPtr<T> mStart;
+  const RangedPtr<T> mEnd;
+
+ public:
+  Range() : mStart(nullptr, 0), mEnd(nullptr, 0) {}
+  Range(T* aPtr, size_t aLength)
+      : mStart(aPtr, aPtr, aPtr + aLength),
+        mEnd(aPtr + aLength, aPtr, aPtr + aLength) {
+    if (!aPtr) {
+      MOZ_ASSERT(!aLength,
+                 "Range does not support nullptr with non-zero length.");
+      // ...because merely having a pointer to `nullptr + 1` is undefined
+      // behavior. UBSAN catches this as of clang-10.
+    }
+  }
+  Range(const RangedPtr<T>& aStart, const RangedPtr<T>& aEnd)
+      : mStart(aStart.get(), aStart.get(), aEnd.get()),
+        mEnd(aEnd.get(), aStart.get(), aEnd.get()) {
+    // Only accept two RangedPtrs within the same range.
+    aStart.checkIdenticalRange(aEnd);
+    MOZ_ASSERT(aStart <= aEnd);
+  }
+
+  template <typename U,
+            class = std::enable_if_t<std::is_convertible_v<U (*)[], T (*)[]>,
+                                     int>>
+  MOZ_IMPLICIT Range(const Range<U>& aOther)
+      : mStart(aOther.mStart), mEnd(aOther.mEnd) {}
+
+  MOZ_IMPLICIT Range(Span<T> aSpan) : Range(aSpan.Elements(), aSpan.Length()) {}
+
+  template <typename U,
+            class = std::enable_if_t<std::is_convertible_v<U (*)[], T (*)[]>,
+                                     int>>
+  MOZ_IMPLICIT Range(const Span<U>& aSpan)
+      : Range(aSpan.Elements(), aSpan.Length()) {}
+
+  RangedPtr<T> begin() const { return mStart; }
+  RangedPtr<T> end() const { return mEnd; }
+  size_t length() const { return mEnd - mStart; }
+
+  T& operator[](size_t aOffset) const { return mStart[aOffset]; }
+
+  explicit operator bool() const { return mStart != nullptr; }
+
+  operator Span<T>() { return Span<T>(mStart.get(), length()); }
+
+  operator Span<const T>() const {
+    return Span<const T>(mStart.get(), length());
+  }
+};
+
+template <typename T>
+Span(Range<T>&) -> Span<T>;
+
+template <typename T>
+Span(const Range<T>&) -> Span<const T>;
+
+}  // namespace mozilla
+
+#endif /* mozilla_Range_h */
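// A minimal sketch (not part of the upstream patch) of Range<T> from the
// header above; the function and variable names are hypothetical.
#include "mozilla/Range.h"
#include "mozilla/Span.h"

int SumFirstTwo() {
  int storage[4] = {1, 2, 3, 4};
  mozilla::Range<int> r(storage, 4);  // pointer + length
  MOZ_ASSERT(r.length() == 4);
  mozilla::Span<int> s = r;  // implicit conversion declared above
  return r[0] + s[1];        // indexing asserts bounds in debug builds
}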
diff --git a/mfbt/RangedArray.h b/mfbt/RangedArray.h
new file mode 100644
index 0000000000..4417e09e9d
--- /dev/null
+++ b/mfbt/RangedArray.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * A compile-time constant-length array, with bounds-checking assertions -- but
+ * unlike mozilla::Array, with indexes biased by a constant.
+ *
+ * Thus where mozilla::Array<int, 3> is a three-element array indexed by
+ * [0, 3), mozilla::RangedArray<int, 8, 3> is a three-element array indexed by
+ * [8, 11).
+ */
+
+#ifndef mozilla_RangedArray_h
+#define mozilla_RangedArray_h
+
+#include "mozilla/Array.h"
+
+namespace mozilla {
+
+template <typename T, size_t MinIndex, size_t Length>
+class RangedArray {
+ private:
+  typedef Array<T, Length> ArrayType;
+  ArrayType mArr;
+
+ public:
+  static size_t length() { return Length; }
+  static size_t minIndex() { return MinIndex; }
+
+  T& operator[](size_t aIndex) {
+    MOZ_ASSERT(aIndex == MinIndex || aIndex > MinIndex);
+    return mArr[aIndex - MinIndex];
+  }
+
+  const T& operator[](size_t aIndex) const {
+    MOZ_ASSERT(aIndex == MinIndex || aIndex > MinIndex);
+    return mArr[aIndex - MinIndex];
+  }
+
+  typedef typename ArrayType::iterator iterator;
+  typedef typename ArrayType::const_iterator const_iterator;
+  typedef typename ArrayType::reverse_iterator reverse_iterator;
+  typedef typename ArrayType::const_reverse_iterator const_reverse_iterator;
+
+  // Methods for range-based for loops.
+  iterator begin() { return mArr.begin(); }
+  const_iterator begin() const { return mArr.begin(); }
+  const_iterator cbegin() const { return mArr.cbegin(); }
+  iterator end() { return mArr.end(); }
+  const_iterator end() const { return mArr.end(); }
+  const_iterator cend() const { return mArr.cend(); }
+
+  // Methods for reverse iterating.
+  reverse_iterator rbegin() { return mArr.rbegin(); }
+  const_reverse_iterator rbegin() const { return mArr.rbegin(); }
+  const_reverse_iterator crbegin() const { return mArr.crbegin(); }
+  reverse_iterator rend() { return mArr.rend(); }
+  const_reverse_iterator rend() const { return mArr.rend(); }
+  const_reverse_iterator crend() const { return mArr.crend(); }
+};
+
+}  // namespace mozilla
+
+#endif  // mozilla_RangedArray_h
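// A small sketch (not part of the upstream patch) of the biased indexing
// described above; it mirrors the [8, 11) example from the header comment.
#include "mozilla/RangedArray.h"

int BiasedAccess() {
  // Three elements; the valid indexes are 8, 9 and 10.
  mozilla::RangedArray<int, 8, 3> arr;
  arr[8] = 1;
  arr[10] = 3;  // arr[7] would assert in debug builds
  return arr[8] + arr[10];
}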
diff --git a/mfbt/RangedPtr.h b/mfbt/RangedPtr.h
new file mode 100644
index 0000000000..9aec59f936
--- /dev/null
+++ b/mfbt/RangedPtr.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implements a smart pointer asserted to remain within a range specified at
+ * construction.
+ */
+
+#ifndef mozilla_RangedPtr_h
+#define mozilla_RangedPtr_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+#include <cstddef>
+
+namespace mozilla {
+
+/*
+ * RangedPtr is a smart pointer restricted to an address range specified at
+ * creation. The pointer (and any smart pointers derived from it) must remain
+ * within the range [start, end] (inclusive of end to facilitate use as
+ * sentinels). Dereferencing or indexing into the pointer (or pointers derived
+ * from it) must remain within the range [start, end). All the standard pointer
+ * operators are defined on it; in debug builds these operations assert that
+ * the range specified at construction is respected.
+ *
+ * In theory passing a smart pointer instance as an argument can be slightly
+ * slower than passing a T* (due to ABI requirements for passing structs versus
+ * passing pointers), if the method being called isn't inlined. If you are in
+ * extremely performance-critical code, you may want to be careful using this
+ * smart pointer as an argument type.
+ *
+ * RangedPtr intentionally does not implicitly convert to T*. Use get() to
+ * explicitly convert to T*. Keep in mind that the raw pointer of course won't
+ * implement bounds checking in debug builds.
+ */
+template <typename T>
+class RangedPtr {
+  template <typename U>
+  friend class RangedPtr;
+
+  T* mPtr;
+
+#ifdef DEBUG
+  T* const mRangeStart;
+  T* const mRangeEnd;
+#endif
+
+  void checkSanity() {
+    MOZ_ASSERT(mRangeStart <= mPtr);
+    MOZ_ASSERT(mPtr <= mRangeEnd);
+  }
+
+  /* Creates a new pointer for |aPtr|, restricted to this pointer's range. */
+  RangedPtr<T> create(T* aPtr) const {
+#ifdef DEBUG
+    return RangedPtr<T>(aPtr, mRangeStart, mRangeEnd);
+#else
+    return RangedPtr<T>(aPtr, nullptr, size_t(0));
+#endif
+  }
+
+  uintptr_t asUintptr() const { return reinterpret_cast<uintptr_t>(mPtr); }
+
+ public:
+  RangedPtr(T* aPtr, T* aStart, T* aEnd)
+      : mPtr(aPtr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aStart),
+        mRangeEnd(aEnd)
+#endif
+  {
+    MOZ_ASSERT(mRangeStart <= mRangeEnd);
+    checkSanity();
+  }
+  RangedPtr(T* aPtr, T* aStart, size_t aLength)
+      : mPtr(aPtr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aStart),
+        mRangeEnd(aStart + aLength)
+#endif
+  {
+    MOZ_ASSERT(aLength <= size_t(-1) / sizeof(T));
+    MOZ_ASSERT(reinterpret_cast<uintptr_t>(mRangeStart) + aLength * sizeof(T) >=
+               reinterpret_cast<uintptr_t>(mRangeStart));
+    checkSanity();
+  }
+
+  /* Equivalent to RangedPtr(aPtr, aPtr, aLength). */
+  RangedPtr(T* aPtr, size_t aLength)
+      : mPtr(aPtr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aPtr),
+        mRangeEnd(aPtr + aLength)
+#endif
+  {
+    MOZ_ASSERT(aLength <= size_t(-1) / sizeof(T));
+    MOZ_ASSERT(reinterpret_cast<uintptr_t>(mRangeStart) + aLength * sizeof(T) >=
+               reinterpret_cast<uintptr_t>(mRangeStart));
+    checkSanity();
+  }
+
+  /* Equivalent to RangedPtr(aArr, aArr, N). */
+  template <size_t N>
+  explicit RangedPtr(T (&aArr)[N])
+      : mPtr(aArr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aArr),
+        mRangeEnd(aArr + N)
+#endif
+  {
+    checkSanity();
+  }
+
+  RangedPtr(const RangedPtr& aOther)
+      : mPtr(aOther.mPtr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aOther.mRangeStart),
+        mRangeEnd(aOther.mRangeEnd)
+#endif
+  {
+    checkSanity();
+  }
+
+  template <typename U>
+  MOZ_IMPLICIT RangedPtr(const RangedPtr<U>& aOther)
+      : mPtr(aOther.mPtr)
+#ifdef DEBUG
+        ,
+        mRangeStart(aOther.mRangeStart),
+        mRangeEnd(aOther.mRangeEnd)
+#endif
+  {
+    checkSanity();
+  }
+
+  T* get() const { return mPtr; }
+
+  explicit operator bool() const { return mPtr != nullptr; }
+
+  void checkIdenticalRange(const RangedPtr<T>& aOther) const {
+    MOZ_ASSERT(mRangeStart == aOther.mRangeStart);
+    MOZ_ASSERT(mRangeEnd == aOther.mRangeEnd);
+  }
+
+  template <typename U>
+  RangedPtr<U> ReinterpretCast() const {
+#ifdef DEBUG
+    return {reinterpret_cast<U*>(mPtr), reinterpret_cast<U*>(mRangeStart),
+            reinterpret_cast<U*>(mRangeEnd)};
+#else
+    return {reinterpret_cast<U*>(mPtr), nullptr, nullptr};
+#endif
+  }
+
+  /*
+   * You can only assign one RangedPtr into another if the two pointers have
+   * the same valid range:
+   *
+   *   char arr1[] = "hi";
+   *   char arr2[] = "bye";
+   *   RangedPtr<char> p1(arr1, 2);
+   *   p1 = RangedPtr<char>(arr1 + 1, arr1, arr1 + 2);  // works
+   *   p1 = RangedPtr<char>(arr2, 3);                   // asserts
+   */
+  RangedPtr<T>& operator=(const RangedPtr<T>& aOther) {
+    checkIdenticalRange(aOther);
+    mPtr = aOther.mPtr;
+    checkSanity();
+    return *this;
+  }
+
+  RangedPtr<T> operator+(size_t aInc) const {
+    MOZ_ASSERT(aInc <= size_t(-1) / sizeof(T));
+    MOZ_ASSERT(asUintptr() + aInc * sizeof(T) >= asUintptr());
+    return create(mPtr + aInc);
+  }
+
+  RangedPtr<T> operator-(size_t aDec) const {
+    MOZ_ASSERT(aDec <= size_t(-1) / sizeof(T));
+    MOZ_ASSERT(asUintptr() - aDec * sizeof(T) <= asUintptr());
+    return create(mPtr - aDec);
+  }
+
+  /*
+   * You can assign a raw pointer into a RangedPtr if the raw pointer is
+   * within the range specified at creation.
+   */
+  template <typename U>
+  RangedPtr<T>& operator=(U* aPtr) {
+    *this = create(aPtr);
+    return *this;
+  }
+
+  template <typename U>
+  RangedPtr<T>& operator=(const RangedPtr<U>& aPtr) {
+    MOZ_ASSERT(mRangeStart <= aPtr.mPtr);
+    MOZ_ASSERT(aPtr.mPtr <= mRangeEnd);
+    mPtr = aPtr.mPtr;
+    checkSanity();
+    return *this;
+  }
+
+  RangedPtr<T>& operator++() { return (*this += 1); }
+
+  RangedPtr<T> operator++(int) {
+    RangedPtr<T> rcp = *this;
+    ++*this;
+    return rcp;
+  }
+
+  RangedPtr<T>& operator--() { return (*this -= 1); }
+
+  RangedPtr<T> operator--(int) {
+    RangedPtr<T> rcp = *this;
+    --*this;
+    return rcp;
+  }
+
+  RangedPtr<T>& operator+=(size_t aInc) {
+    *this = *this + aInc;
+    return *this;
+  }
+
+  RangedPtr<T>& operator-=(size_t aDec) {
+    *this = *this - aDec;
+    return *this;
+  }
+
+  T& operator[](ptrdiff_t aIndex) const {
+    MOZ_ASSERT(size_t(aIndex > 0 ? aIndex : -aIndex) <= size_t(-1) / sizeof(T));
+    return *create(mPtr + aIndex);
+  }
+
+  T& operator*() const {
+    MOZ_ASSERT(mPtr >= mRangeStart);
+    MOZ_ASSERT(mPtr < mRangeEnd);
+    return *mPtr;
+  }
+
+  T* operator->() const {
+    MOZ_ASSERT(mPtr >= mRangeStart);
+    MOZ_ASSERT(mPtr < mRangeEnd);
+    return mPtr;
+  }
+
+  template <typename U>
+  bool operator==(const RangedPtr<U>& aOther) const {
+    return mPtr == aOther.mPtr;
+  }
+  template <typename U>
+  bool operator!=(const RangedPtr<U>& aOther) const {
+    return !(*this == aOther);
+  }
+
+  template <typename U>
+  bool operator==(const U* u) const {
+    return mPtr == u;
+  }
+  template <typename U>
+  bool operator!=(const U* u) const {
+    return !(*this == u);
+  }
+
+  bool operator==(std::nullptr_t) const { return mPtr == nullptr; }
+  bool operator!=(std::nullptr_t) const { return mPtr != nullptr; }
+
+  template <typename U>
+  bool operator<(const RangedPtr<U>& aOther) const {
+    return mPtr < aOther.mPtr;
+  }
+  template <typename U>
+  bool operator<=(const RangedPtr<U>& aOther) const {
+    return mPtr <= aOther.mPtr;
+  }
+
+  template <typename U>
+  bool operator>(const RangedPtr<U>& aOther) const {
+    return mPtr > aOther.mPtr;
+  }
+  template <typename U>
+  bool operator>=(const RangedPtr<U>& aOther) const {
+    return mPtr >= aOther.mPtr;
+  }
+
+  size_t operator-(const RangedPtr<T>& aOther) const {
+    MOZ_ASSERT(mPtr >= aOther.mPtr);
+    return PointerRangeSize(aOther.mPtr, mPtr);
+  }
+
+ private:
+  RangedPtr() = delete;
+};
+
+} /* namespace mozilla */
+
+#endif /* mozilla_RangedPtr_h */
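// A compact sketch (not part of the upstream patch) of RangedPtr<T> from the
// header above, walking a buffer while debug asserts keep the pointer inside
// its range. The function name is hypothetical.
#include "mozilla/RangedPtr.h"

int SumBytes(const unsigned char* aBuf, size_t aLen) {
  mozilla::RangedPtr<const unsigned char> p(aBuf, aLen);
  int sum = 0;
  for (size_t i = 0; i < aLen; i++, p++) {
    sum += *p;  // dereference asserts p is within [start, end)
  }
  return sum;  // p may equal end here, which is allowed (sentinel position)
}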
diff --git a/mfbt/ReentrancyGuard.h b/mfbt/ReentrancyGuard.h
new file mode 100644
index 0000000000..56c963b418
--- /dev/null
+++ b/mfbt/ReentrancyGuard.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Small helper class for asserting uses of a class are non-reentrant. */
+
+#ifndef mozilla_ReentrancyGuard_h
+#define mozilla_ReentrancyGuard_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+/* Useful for implementing containers that assert non-reentrancy */
+class MOZ_RAII ReentrancyGuard {
+#ifdef DEBUG
+  bool& mEntered;
+#endif
+
+ public:
+  template <class T>
+#ifdef DEBUG
+  explicit ReentrancyGuard(T& aObj)
+      : mEntered(aObj.mEntered)
+#else
+  explicit ReentrancyGuard(T&)
+#endif
+  {
+#ifdef DEBUG
+    MOZ_ASSERT(!mEntered);
+    mEntered = true;
+#endif
+  }
+  ~ReentrancyGuard() {
+#ifdef DEBUG
+    mEntered = false;
+#endif
+  }
+
+ private:
+  ReentrancyGuard(const ReentrancyGuard&) = delete;
+  void operator=(const ReentrancyGuard&) = delete;
+};
+
+}  // namespace mozilla
+
+#endif /* mozilla_ReentrancyGuard_h */
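// A sketch (not part of the upstream patch) of the DEBUG-only protocol the
// guard above expects from its argument: an accessible `mEntered` flag. The
// container name is hypothetical.
#include "mozilla/ReentrancyGuard.h"

class Container {
  friend class mozilla::ReentrancyGuard;
#ifdef DEBUG
  bool mEntered = false;
#endif

 public:
  void Mutate() {
    mozilla::ReentrancyGuard guard(*this);
    // Re-entering Mutate() from here (e.g. via a callback) would assert in
    // debug builds, because mEntered is already true.
  }
};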
diff --git a/mfbt/RefCountType.h b/mfbt/RefCountType.h
new file mode 100644
index 0000000000..e95a22a0ca
--- /dev/null
+++ b/mfbt/RefCountType.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_RefCountType_h
+#define mozilla_RefCountType_h
+
+#include <stdint.h>
+
+/**
+ * MozRefCountType is Mozilla's reference count type.
+ *
+ * We use the same type to represent the refcount of RefCounted objects
+ * as well, in order to be able to use the leak detection facilities
+ * that are implemented by XPCOM.
+ *
+ * Note that this type is not in the mozilla namespace so that it is
+ * usable for both C and C++ code.
+ */
+typedef uintptr_t MozRefCountType;
+
+/*
+ * This is the return type for AddRef() and Release() in nsISupports.
+ * IUnknown of COM returns an unsigned long from equivalent functions.
+ *
+ * The following ifdef exists to maintain binary compatibility with
+ * IUnknown, the base interface in Microsoft COM.
+ */
+#ifdef XP_WIN
+typedef unsigned long MozExternalRefCountType;
+#else
+typedef uint32_t MozExternalRefCountType;
+#endif
+
+#endif
diff --git a/mfbt/RefCounted.h b/mfbt/RefCounted.h
new file mode 100644
index 0000000000..e0458ac6bc
--- /dev/null
+++ b/mfbt/RefCounted.h
@@ -0,0 +1,323 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* CRTP refcounting templates. Do not use unless you are an Expert. */
+
+#ifndef mozilla_RefCounted_h
+#define mozilla_RefCounted_h
+
+#include <utility>
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/RefCountType.h"
+
+#ifdef __wasi__
+#  include "mozilla/WasiAtomic.h"
+#else
+#  include <atomic>
+#endif  // __wasi__
+
+#if defined(MOZILLA_INTERNAL_API)
+#  include "nsXPCOM.h"
+#endif
+
+#if defined(MOZILLA_INTERNAL_API) && defined(NS_BUILD_REFCNT_LOGGING)
+#  define MOZ_REFCOUNTED_LEAK_CHECKING
+#endif
+
+namespace mozilla {
+
+/**
+ * RefCounted<T> is a sort of a "mixin" for a class T. RefCounted
+ * manages, well, refcounting for T, and because RefCounted is
+ * parameterized on T, RefCounted<T> can call T's destructor directly.
+ * This means T doesn't need to have a virtual dtor and so doesn't
+ * need a vtable.
+ *
+ * RefCounted<T> is created with refcount == 0. Newly-allocated
+ * RefCounted<T> must immediately be assigned to a RefPtr to make the
+ * refcount > 0. It's an error to allocate and free a bare
+ * RefCounted<T>, i.e. outside of the RefPtr machinery. Attempts to
+ * do so will abort DEBUG builds.
+ *
+ * Live RefCounted<T> have refcount > 0. The lifetime (refcounts) of
+ * live RefCounted<T> are controlled by RefPtr<T> and
+ * RefPtr<super/subclass of T>. Upon a transition from refcount == 1
+ * to 0, the RefCounted<T> "dies" and is destroyed. The "destroyed"
+ * state is represented in DEBUG builds by refcount == 0xffffdead. This
+ * state distinguishes use-before-ref (refcount == 0) from
+ * use-after-destroy (refcount == 0xffffdead).
+ *
+ * Note that when deriving from RefCounted or AtomicRefCounted, you
+ * should add MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public
+ * section of your class, where ClassName is the name of your class.
+ *
+ * Note: SpiderMonkey should use js::RefCounted instead since that type
+ * will use appropriate js_delete and also not break ref-count logging.
+ */
+namespace detail {
+const MozRefCountType DEAD = 0xffffdead;
+
+// When building code that gets compiled into Gecko, try to use the
+// trace-refcount leak logging facilities.
+class RefCountLogger {
+ public:
+  // Called by `RefCounted`-like classes to log a successful AddRef call in
+  // the Gecko leak-logging system. This call is a no-op outside of Gecko.
+  // Should be called after incrementing the reference count.
+  template <class T>
+  static void logAddRef(const T* aPointer, MozRefCountType aRefCount) {
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+    const void* pointer = aPointer;
+    const char* typeName = aPointer->typeName();
+    uint32_t typeSize = aPointer->typeSize();
+    NS_LogAddRef(const_cast<void*>(pointer), aRefCount, typeName, typeSize);
+#endif
+  }
+
+  // Created by `RefCounted`-like classes to log a successful Release call in
+  // the Gecko leak-logging system. The constructor should be invoked before
+  // the refcount is decremented to avoid invoking `typeName()` with a zero
+  // reference count. This call is a no-op outside of Gecko.
+  class MOZ_STACK_CLASS ReleaseLogger final {
+   public:
+    template <class T>
+    explicit ReleaseLogger(const T* aPointer)
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+        : mPointer(aPointer),
+          mTypeName(aPointer->typeName())
+#endif
+    {
+    }
+
+    void logRelease(MozRefCountType aRefCount) {
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+      MOZ_ASSERT(aRefCount != DEAD);
+      NS_LogRelease(const_cast<void*>(mPointer), aRefCount, mTypeName);
+#endif
+    }
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+    const void* mPointer;
+    const char* mTypeName;
+#endif
+  };
+};
+
+// This is used by WeakPtr.h as well as this file.
+enum RefCountAtomicity { AtomicRefCount, NonAtomicRefCount };
+
+template <typename T, RefCountAtomicity Atomicity>
+class RC {
+ public:
+  explicit RC(T aCount) : mValue(aCount) {}
+
+  RC(const RC&) = delete;
+  RC& operator=(const RC&) = delete;
+  RC(RC&&) = delete;
+  RC& operator=(RC&&) = delete;
+
+  T operator++() { return ++mValue; }
+  T operator--() { return --mValue; }
+
+#ifdef DEBUG
+  void operator=(const T& aValue) { mValue = aValue; }
+#endif
+
+  operator T() const { return mValue; }
+
+ private:
+  T mValue;
+};
+
+template <typename T>
+class RC<T, AtomicRefCount> {
+ public:
+  explicit RC(T aCount) : mValue(aCount) {}
+
+  RC(const RC&) = delete;
+  RC& operator=(const RC&) = delete;
+  RC(RC&&) = delete;
+  RC& operator=(RC&&) = delete;
+
+  T operator++() {
+    // Memory synchronization is not required when incrementing a
+    // reference count. The first increment of a reference count on a
+    // thread is not important, since the first use of the object on a
+    // thread can happen before it. What is important is the transfer
+    // of the pointer to that thread, which may happen prior to the
+    // first increment on that thread. The necessary memory
+    // synchronization is done by the mechanism that transfers the
+    // pointer between threads.
+    return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
+  }
+
+  T operator--() {
+    // Since this may be the last release on this thread, we need
+    // release semantics so that prior writes on this thread are visible
+    // to the thread that destroys the object when it reads mValue with
+    // acquire semantics.
+    T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
+    if (result == 0) {
+      // We're going to destroy the object on this thread, so we need
+      // acquire semantics to synchronize with the memory released by
+      // the last release on other threads, that is, to ensure that
+      // writes prior to that release are now visible on this thread.
+#if defined(MOZ_TSAN) || defined(__wasi__)
+      // TSan doesn't understand std::atomic_thread_fence, so in order
+      // to avoid a false positive for every time a refcounted object
+      // is deleted, we replace the fence with an atomic operation.
+      mValue.load(std::memory_order_acquire);
+#else
+      std::atomic_thread_fence(std::memory_order_acquire);
+#endif
+    }
+    return result;
+  }
+
+#ifdef DEBUG
+  // This method is only called in debug builds, so we're not too concerned
+  // about its performance.
+  void operator=(const T& aValue) {
+    mValue.store(aValue, std::memory_order_seq_cst);
+  }
+#endif
+
+  operator T() const {
+    // Use acquire semantics since we're not sure what the caller is
+    // doing.
+    return mValue.load(std::memory_order_acquire);
+  }
+
+  T IncrementIfNonzero() {
+    // This can be a relaxed load as any write of 0 that we observe will leave
+    // the field in a permanently zero (or `DEAD`) state (so a "stale" read of
+    // 0 is fine), and any other value is confirmed by the CAS below.
+    //
+    // This roughly matches rust's Arc::upgrade implementation as of rust
+    // 1.49.0.
+    T prev = mValue.load(std::memory_order_relaxed);
+    while (prev != 0) {
+      MOZ_ASSERT(prev != detail::DEAD,
+                 "Cannot IncrementIfNonzero if marked as dead!");
+      // TODO: It may be possible to use relaxed success ordering here?
+      if (mValue.compare_exchange_weak(prev, prev + 1,
+                                       std::memory_order_acquire,
+                                       std::memory_order_relaxed)) {
+        return prev + 1;
+      }
+    }
+    return 0;
+  }
+
+ private:
+  std::atomic<T> mValue;
+};
+
+template <typename T, RefCountAtomicity Atomicity>
+class RefCounted {
+ protected:
+  RefCounted() : mRefCnt(0) {}
+#ifdef DEBUG
+  ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD); }
+#endif
+
+ public:
+  // Compatibility with RefPtr.
+  void AddRef() const {
+    // Note: this method must be thread safe for AtomicRefCounted.
+    MOZ_ASSERT(int32_t(mRefCnt) >= 0);
+    MozRefCountType cnt = ++mRefCnt;
+    detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
+  }
+
+  void Release() const {
+    // Note: this method must be thread safe for AtomicRefCounted.
+    MOZ_ASSERT(int32_t(mRefCnt) > 0);
+    detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
+    MozRefCountType cnt = --mRefCnt;
+    // Note: it's not safe to touch |this| after decrementing the refcount,
+    // except for below.
+    logger.logRelease(cnt);
+    if (0 == cnt) {
+      // Because we have atomically decremented the refcount above, only
+      // one thread can get a 0 count here, so as long as we can assume that
+      // everything else in the system is accessing this object through
+      // RefPtrs, it's safe to access |this| here.
+#ifdef DEBUG
+      mRefCnt = detail::DEAD;
+#endif
+      delete static_cast<const T*>(this);
+    }
+  }
+
+  // Compatibility with wtf::RefPtr.
+  void ref() { AddRef(); }
+  void deref() { Release(); }
+  MozRefCountType refCount() const { return mRefCnt; }
+  bool hasOneRef() const {
+    MOZ_ASSERT(mRefCnt > 0);
+    return mRefCnt == 1;
+  }
+
+ private:
+  mutable RC<MozRefCountType, Atomicity> mRefCnt;
+};
+
+#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
+// Passing override for the optional argument marks the typeName and
+// typeSize functions defined by this macro as overrides.
+#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)           \
+    virtual const char* typeName() const __VA_ARGS__ { return #T; } \
+    virtual size_t typeSize() const __VA_ARGS__ { return sizeof(*this); }
+#else
+#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)
+#endif
+
+// Note that this macro is expanded unconditionally because it declares only
+// two small inline functions which will hopefully get eliminated by the linker
+// in non-leak-checking builds.
+#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T)    \
+  const char* typeName() const { return #T; } \
+  size_t typeSize() const { return sizeof(*this); }
+
+}  // namespace detail
+
+template <typename T>
+class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount> {
+ public:
+  ~RefCounted() {
+    static_assert(std::is_base_of<RefCounted, T>::value,
+                  "T must derive from RefCounted<T>");
+  }
+};
+
+namespace external {
+
+/**
+ * AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
+ * reference counter.
+ *
+ * NOTE: Please do not use this class, use
+ * NS_INLINE_DECL_THREADSAFE_REFCOUNTING instead.
+ */
+template <typename T>
+class AtomicRefCounted
+    : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount> {
+ public:
+  ~AtomicRefCounted() {
+    static_assert(std::is_base_of<AtomicRefCounted, T>::value,
+                  "T must derive from AtomicRefCounted<T>");
+  }
+};
+
+}  // namespace external
+
+}  // namespace mozilla
+
+#endif  // mozilla_RefCounted_h
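// A minimal sketch (not part of the upstream patch) of the CRTP pattern
// described above; the class name is hypothetical. The object must go
// straight into a RefPtr, per the header comment.
#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"

class MyData : public mozilla::RefCounted<MyData> {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(MyData)
  explicit MyData(int aValue) : mValue(aValue) {}
  int mValue;
};

void UseMyData() {
  RefPtr<MyData> data = new MyData(42);  // refcount 0 -> 1
  RefPtr<MyData> alias = data;           // refcount 2
}  // refcount drops to 0 here and MyData deletes itself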
diff --git a/mfbt/RefPtr.h b/mfbt/RefPtr.h
new file mode 100644
index 0000000000..b36491f49b
--- /dev/null
+++ b/mfbt/RefPtr.h
@@ -0,0 +1,657 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_RefPtr_h
+#define mozilla_RefPtr_h
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DbgMacro.h"
+
+#include <type_traits>
+
+/*****************************************************************************/
+
+// template <class T> class RefPtrGetterAddRefs;
+
+class nsQueryReferent;
+class nsCOMPtr_helper;
+class nsISupports;
+
+namespace mozilla {
+template <class T>
+class MovingNotNull;
+template <class T>
+class NotNull;
+template <class T>
+class OwningNonNull;
+template <typename T>
+class StaticLocalRefPtr;
+template <typename T>
+class StaticRefPtr;
+#if defined(XP_WIN)
+namespace mscom {
+class AgileReference;
+}  // namespace mscom
+#endif  // defined(XP_WIN)
+
+// Traditionally, RefPtr supports automatic refcounting of any pointer type
+// with AddRef() and Release() methods that follow the traditional semantics.
+//
+// This traits class can be specialized to operate on other pointer types. For
+// example, we specialize this trait for opaque FFI types that represent
+// refcounted objects in Rust.
+//
+// Given the use of ConstRemovingRefPtrTraits below, U should not be a const-
+// qualified type.
+template <class U>
+struct RefPtrTraits {
+  static void AddRef(U* aPtr) { aPtr->AddRef(); }
+  static void Release(U* aPtr) { aPtr->Release(); }
+};
+
+}  // namespace mozilla
+
+template <class T>
+class MOZ_IS_REFPTR RefPtr {
+ private:
+  void assign_with_AddRef(T* aRawPtr) {
+    if (aRawPtr) {
+      ConstRemovingRefPtrTraits<T>::AddRef(aRawPtr);
+    }
+    assign_assuming_AddRef(aRawPtr);
+  }
+
+  void assign_assuming_AddRef(T* aNewPtr) {
+    T* oldPtr = mRawPtr;
+    mRawPtr = aNewPtr;
+    if (oldPtr) {
+      ConstRemovingRefPtrTraits<T>::Release(oldPtr);
+    }
+  }
+
+ private:
+  T* MOZ_OWNING_REF mRawPtr;
+
+ public:
+  typedef T element_type;
+
+  ~RefPtr() {
+    if (mRawPtr) {
+      ConstRemovingRefPtrTraits<T>::Release(mRawPtr);
+    }
+  }
+
+  // Constructors
+
+  RefPtr()
+      : mRawPtr(nullptr)
+  // default constructor
+  {}
+
+  RefPtr(const RefPtr<T>& aSmartPtr)
+      : mRawPtr(aSmartPtr.mRawPtr)
+  // copy-constructor
+  {
+    if (mRawPtr) {
+      ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+    }
+  }
+
+  RefPtr(RefPtr<T>&& aRefPtr) : mRawPtr(aRefPtr.mRawPtr) {
+    aRefPtr.mRawPtr = nullptr;
+  }
+
+  // construct from a raw pointer (of the right type)
+
+  MOZ_IMPLICIT RefPtr(T* aRawPtr) : mRawPtr(aRawPtr) {
+    if (mRawPtr) {
+      ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+    }
+  }
+
+  MOZ_IMPLICIT RefPtr(decltype(nullptr)) : mRawPtr(nullptr) {}
+
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+  MOZ_IMPLICIT RefPtr(already_AddRefed<I>& aSmartPtr)
+      : mRawPtr(aSmartPtr.take())
+  // construct from |already_AddRefed|
+  {}
+
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+  MOZ_IMPLICIT RefPtr(already_AddRefed<I>&& aSmartPtr)
+      : mRawPtr(aSmartPtr.take())
+  // construct from |otherRefPtr.forget()|
+  {}
+
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+  MOZ_IMPLICIT RefPtr(const RefPtr<I>& aSmartPtr)
+      : mRawPtr(aSmartPtr.get())
+  // copy-construct from a smart pointer with a related pointer type
+  {
+    if (mRawPtr) {
+      ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
+    }
+  }
+
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+  MOZ_IMPLICIT RefPtr(RefPtr<I>&& aSmartPtr)
+      : mRawPtr(aSmartPtr.forget().take())
+  // construct from |Move(RefPtr)|.
+  {}
+
+  template <typename U,
+            typename = std::enable_if_t<!std::is_same_v<U, RefPtr<T>> &&
+                                        std::is_convertible_v<U, RefPtr<T>>>>
+  MOZ_IMPLICIT RefPtr(const mozilla::NotNull<U>& aSmartPtr)
+      : mRawPtr(RefPtr<T>(aSmartPtr.get()).forget().take())
+  // construct from |mozilla::NotNull|.
+  {}
+
+  template <typename U,
+            typename = std::enable_if_t<!std::is_same_v<U, RefPtr<T>> &&
+                                        std::is_convertible_v<U, RefPtr<T>>>>
+  MOZ_IMPLICIT RefPtr(mozilla::MovingNotNull<U>&& aSmartPtr)
+      : mRawPtr(
+            RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take())
+  // construct from |mozilla::MovingNotNull|.
+  {}
+
+  MOZ_IMPLICIT RefPtr(const nsQueryReferent& aHelper);
+  MOZ_IMPLICIT RefPtr(const nsCOMPtr_helper& aHelper);
+#if defined(XP_WIN)
+  MOZ_IMPLICIT RefPtr(const mozilla::mscom::AgileReference& aAgileRef);
+#endif  // defined(XP_WIN)
+
+  // Defined in OwningNonNull.h
+  template <class U>
+  MOZ_IMPLICIT RefPtr(const mozilla::OwningNonNull<U>& aOther);
+
+  // Defined in StaticLocalPtr.h
+  template <class U>
+  MOZ_IMPLICIT RefPtr(const mozilla::StaticLocalRefPtr<U>& aOther);
+
+  // Defined in StaticPtr.h
+  template <class U>
+  MOZ_IMPLICIT RefPtr(const mozilla::StaticRefPtr<U>& aOther);
+
+  // Assignment operators
+
+  RefPtr<T>& operator=(decltype(nullptr)) {
+    assign_assuming_AddRef(nullptr);
+    return *this;
+  }
+
+  RefPtr<T>& operator=(const RefPtr<T>& aRhs)
+  // copy assignment operator
+  {
+    assign_with_AddRef(aRhs.mRawPtr);
+    return *this;
+  }
+
+  template <typename I>
+  RefPtr<T>& operator=(const RefPtr<I>& aRhs)
+  // assign from an RefPtr of a related pointer type
+  {
+    assign_with_AddRef(aRhs.get());
+    return *this;
+  }
+
+  RefPtr<T>& operator=(T* aRhs)
+  // assign from a raw pointer (of the right type)
+  {
+    assign_with_AddRef(aRhs);
+    return *this;
+  }
+
+  template <typename I>
+  RefPtr<T>& operator=(already_AddRefed<I>& aRhs)
+  // assign from |already_AddRefed|
+  {
+    assign_assuming_AddRef(aRhs.take());
+    return *this;
+  }
+
+  template <typename I>
+  RefPtr<T>& operator=(already_AddRefed<I>&& aRhs)
+  // assign from |otherRefPtr.forget()|
+  {
+    assign_assuming_AddRef(aRhs.take());
+    return *this;
+  }
+
+  RefPtr<T>& operator=(const nsQueryReferent& aQueryReferent);
+  RefPtr<T>& operator=(const nsCOMPtr_helper& aHelper);
+#if defined(XP_WIN)
+  RefPtr<T>& operator=(const mozilla::mscom::AgileReference& aAgileRef);
+#endif  // defined(XP_WIN)
+
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
+  RefPtr<T>& operator=(RefPtr<I>&& aRefPtr) {
+    assign_assuming_AddRef(aRefPtr.forget().take());
+    return *this;
+  }
+
+  template <typename U,
+            typename = std::enable_if_t<std::is_convertible_v<U, RefPtr<T>>>>
+  RefPtr<T>& operator=(const mozilla::NotNull<U>& aSmartPtr)
+  // assign from |mozilla::NotNull|.
+  {
+    assign_assuming_AddRef(RefPtr<T>(aSmartPtr.get()).forget().take());
+    return *this;
+  }
+
+  template <typename U,
+            typename = std::enable_if_t<std::is_convertible_v<U, RefPtr<T>>>>
+  RefPtr<T>& operator=(mozilla::MovingNotNull<U>&& aSmartPtr)
+  // assign from |mozilla::MovingNotNull|.
+  {
+    assign_assuming_AddRef(
+        RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take());
+    return *this;
+  }
+
+  // Defined in OwningNonNull.h
+  template <class U>
+  RefPtr<T>& operator=(const mozilla::OwningNonNull<U>& aOther);
+
+  // Defined in StaticLocalPtr.h
+  template <class U>
+  RefPtr<T>& operator=(const mozilla::StaticLocalRefPtr<U>& aOther);
+
+  // Defined in StaticPtr.h
+  template <class U>
+  RefPtr<T>& operator=(const mozilla::StaticRefPtr<U>& aOther);
+
+  // Other pointer operators
+
+  void swap(RefPtr<T>& aRhs)
+  // ...exchange ownership with |aRhs|; can save a pair of refcount operations
+  {
+    T* temp = aRhs.mRawPtr;
+    aRhs.mRawPtr = mRawPtr;
+    mRawPtr = temp;
+  }
+
+  void swap(T*& aRhs)
+  // ...exchange ownership with |aRhs|; can save a pair of refcount operations
+  {
+    T* temp = aRhs;
+    aRhs = mRawPtr;
+    mRawPtr = temp;
+  }
+
+  already_AddRefed<T> MOZ_MAY_CALL_AFTER_MUST_RETURN forget()
+  // return the value of mRawPtr and null out mRawPtr. Useful for
+  // already_AddRefed return values.
+  {
+    T* temp = nullptr;
+    swap(temp);
+    return already_AddRefed<T>(temp);
+  }
+
+  template <typename I>
+  void forget(I** aRhs)
+  // Set the target of aRhs to the value of mRawPtr and null out mRawPtr.
+  // Useful to avoid unnecessary AddRef/Release pairs with "out"
+  // parameters where aRhs may be a T** or an I** where I is a base class
+  // of T.
+  {
+    MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
+    *aRhs = mRawPtr;
+    mRawPtr = nullptr;
+  }
+
+  void forget(nsISupports** aRhs) {
+    MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
+    *aRhs = ToSupports(mRawPtr);
+    mRawPtr = nullptr;
+  }
+
+  T* get() const
+  /*
+    Prefer the implicit conversion provided automatically by |operator T*()
+    const|. Use |get()| to resolve ambiguity or to get a castable pointer.
+  */
+  {
+    return const_cast<T*>(mRawPtr);
+  }
+
+  operator T*() const&
+  /*
+    ...makes an |RefPtr| act like its underlying raw pointer type whenever it
+    is used in a context where a raw pointer is expected. It is this operator
+    that makes an |RefPtr| substitutable for a raw pointer.
+
+    Prefer the implicit use of this operator to calling |get()|, except where
+    necessary to resolve ambiguity.
+  */
+  {
+    return get();
+  }
+
+  // Don't allow implicit conversion of temporary RefPtr to raw pointer,
+  // because the refcount might be one and the pointer will immediately become
+  // invalid.
+  operator T*() const&& = delete;
+
+  // These are needed to avoid the deleted operator above. XXX Why is operator!
+  // needed separately? Shouldn't the compiler prefer using the non-deleted
+  // operator bool instead of the deleted operator T*?
+  explicit operator bool() const { return !!mRawPtr; }
+  bool operator!() const { return !mRawPtr; }
+
+  T* operator->() const MOZ_NO_ADDREF_RELEASE_ON_RETURN {
+    MOZ_ASSERT(mRawPtr != nullptr,
+               "You can't dereference a NULL RefPtr with operator->().");
+    return get();
+  }
+
+  template <typename R, typename... Args>
+  class Proxy {
+    typedef R (T::*member_function)(Args...);
+    T* mRawPtr;
+    member_function mFunction;
+
+   public:
+    Proxy(T* aRawPtr, member_function aFunction)
+        : mRawPtr(aRawPtr), mFunction(aFunction) {}
+    template <typename... ActualArgs>
+    R operator()(ActualArgs&&... aArgs) {
+      return ((*mRawPtr).*mFunction)(std::forward<ActualArgs>(aArgs)...);
+    }
+  };
+
+  template <typename R, typename... Args>
+  Proxy<R, Args...> operator->*(R (T::*aFptr)(Args...)) const {
+    MOZ_ASSERT(mRawPtr != nullptr,
+               "You can't dereference a NULL RefPtr with operator->*().");
+    return Proxy<R, Args...>(get(), aFptr);
+  }
+
+  RefPtr<T>* get_address()
+  // This is not intended to be used by clients. See |address_of|
+  // below.
+  {
+    return this;
+  }
+
+  const RefPtr<T>* get_address() const
+  // This is not intended to be used by clients. See |address_of|
+  // below.
+  {
+    return this;
+  }
+
+ public:
+  T& operator*() const {
+    MOZ_ASSERT(mRawPtr != nullptr,
+               "You can't dereference a NULL RefPtr with operator*().");
+    return *get();
+  }
+
+  T** StartAssignment() {
+    assign_assuming_AddRef(nullptr);
+    return reinterpret_cast<T**>(&mRawPtr);
+  }
+
+ private:
+  // This helper class makes |RefPtr<const T>| possible by casting away
+  // the constness from the pointer when calling AddRef() and Release().
+  //
+  // This is necessary because AddRef() and Release() implementations can't
+  // generally be expected to be const themselves (without heavy use of
+  // |mutable| and |const_cast| in their own implementations).
+  //
+  // This should be sound because while |RefPtr<const T>| provides a
+  // const view of an object, the object itself should not be const (it
+  // would have to be allocated as |new const T| or similar to be const).
+  template <class U>
+  struct ConstRemovingRefPtrTraits {
+    static void AddRef(U* aPtr) { mozilla::RefPtrTraits<U>::AddRef(aPtr); }
+    static void Release(U* aPtr) { mozilla::RefPtrTraits<U>::Release(aPtr); }
+  };
+  template <class U>
+  struct ConstRemovingRefPtrTraits<const U> {
+    static void AddRef(const U* aPtr) {
+      mozilla::RefPtrTraits<U>::AddRef(const_cast<U*>(aPtr));
+    }
+    static void Release(const U* aPtr) {
+      mozilla::RefPtrTraits<U>::Release(const_cast<U*>(aPtr));
+    }
+  };
+};
+
+class nsCycleCollectionTraversalCallback;
+template <typename T>
+void CycleCollectionNoteChild(nsCycleCollectionTraversalCallback& aCallback,
+                              T* aChild, const char* aName, uint32_t aFlags);
+
+template <typename T>
+inline void ImplCycleCollectionUnlink(RefPtr<T>& aField) {
+  aField = nullptr;
+}
+
+template <typename T>
+inline void ImplCycleCollectionTraverse(
+    nsCycleCollectionTraversalCallback& aCallback, RefPtr<T>& aField,
+    const char* aName, uint32_t aFlags = 0) {
+  CycleCollectionNoteChild(aCallback, aField.get(), aName, aFlags);
+}
+
+template <class T>
+inline RefPtr<T>* address_of(RefPtr<T>& aPtr) {
+  return aPtr.get_address();
+}
+
+template <class T>
+inline const RefPtr<T>* address_of(const RefPtr<T>& aPtr) {
+  return aPtr.get_address();
+}
+
+template <class T>
+class RefPtrGetterAddRefs
+/*
+  ...
+
+  This class is designed to be used for anonymous temporary objects in the
+  argument list of calls that return COM interface pointers, e.g.,
+
+    RefPtr<IFoo> fooP;
+    ...->GetAddRefedPointer(getter_AddRefs(fooP))
+
+  DO NOT USE THIS TYPE DIRECTLY IN YOUR CODE. Use |getter_AddRefs()| instead.
+
+  When initialized with a |RefPtr|, as in the example above, it returns
+  a |void**|, a |T**|, or an |nsISupports**| as needed, that the
+  outer call (|GetAddRefedPointer| in this case) can fill in.
+
+  This type should be a nested class inside |RefPtr<T>|.
+*/
+{
+ public:
+  explicit RefPtrGetterAddRefs(RefPtr<T>& aSmartPtr)
+      : mTargetSmartPtr(aSmartPtr) {
+    // nothing else to do
+  }
+
+  operator void**() {
+    return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
+  }
+
+  operator T**() { return mTargetSmartPtr.StartAssignment(); }
+
+  T*& operator*() { return *(mTargetSmartPtr.StartAssignment()); }
+
+ private:
+  RefPtr<T>& mTargetSmartPtr;
+};
+
+template <class T>
+inline RefPtrGetterAddRefs<T> getter_AddRefs(RefPtr<T>& aSmartPtr)
+/*
+  Used around a |RefPtr| when
+  ...makes the class |RefPtrGetterAddRefs<T>| invisible.
+*/
+{
+  return RefPtrGetterAddRefs<T>(aSmartPtr);
+}
+
+// Comparing two |RefPtr|s
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
+}
+
+// Comparing an |RefPtr| to a raw pointer
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, const U* aRhs) {
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator==(const U* aLhs, const RefPtr<T>& aRhs) {
+  return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, const U* aRhs) {
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator!=(const U* aLhs, const RefPtr<T>& aRhs) {
+  return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator==(const RefPtr<T>& aLhs, U* aRhs) {
+  return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator==(U* aLhs, const RefPtr<T>& aRhs) {
+  return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
+}
+
+template <class T, class U>
+inline bool operator!=(const RefPtr<T>& aLhs, U* aRhs) {
+  return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
+}
+
+template <class T, class U>
+inline bool operator!=(U* aLhs, const RefPtr<T>& aRhs) {
+  return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
+}
+
+// Comparing an |RefPtr| to |nullptr|
+
+template <class T>
+inline bool operator==(const RefPtr<T>& aLhs, decltype(nullptr)) {
+  return aLhs.get() == nullptr;
+}
+
+template <class T>
+inline bool operator==(decltype(nullptr), const RefPtr<T>& aRhs) {
+  return nullptr == aRhs.get();
+}
+
+template <class T>
+inline bool operator!=(const RefPtr<T>& aLhs, decltype(nullptr)) {
+  return aLhs.get() != nullptr;
+}
+
+template <class T>
+inline bool operator!=(decltype(nullptr), const RefPtr<T>& aRhs) {
+  return nullptr != aRhs.get();
+}
+
+// MOZ_DBG support
+
+template <class T>
+std::ostream& operator<<(std::ostream& aOut, const RefPtr<T>& aObj) {
+  return mozilla::DebugValue(aOut, aObj.get());
+}
+
+/*****************************************************************************/
+
+template <class T>
+inline already_AddRefed<T> do_AddRef(T* aObj) {
+  RefPtr<T> ref(aObj);
+  return ref.forget();
+}
+
+template <class T>
+inline already_AddRefed<T> do_AddRef(const RefPtr<T>& aObj) {
+  RefPtr<T> ref(aObj);
+  return ref.forget();
+}
+
+namespace mozilla {
+
+template <typename T>
+class AlignmentFinder;
+
+// Provide a specialization of AlignmentFinder to allow MOZ_ALIGNOF(RefPtr<T>)
+// with an incomplete T.
+template <typename T>
+class AlignmentFinder<RefPtr<T>> {
+ public:
+  static const size_t alignment = alignof(T*);
+};
+
+/**
+ * Helper function to be able to conveniently write things like:
+ *
+ *   already_AddRefed<T>
+ *   f(...)
+ *   {
+ *     return MakeAndAddRef<T>(...);
+ *   }
+ */
+template <typename T, typename... Args>
+already_AddRefed<T> MakeAndAddRef(Args&&... aArgs) {
+  RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
+  return p.forget();
+}
+/**
+ * Helper function to be able to conveniently write things like:
+ *
+ *   auto runnable =
+ *       MakeRefPtr<ErrorCallbackRunnable<nsIDOMGetUserMediaSuccessCallback>>(
+ *           mOnSuccess, mOnFailure, *error, mWindowID);
+ */
+template <typename T, typename... Args>
+RefPtr<T> MakeRefPtr(Args&&... aArgs) {
+  RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
+  return p;
+}
+
+}  // namespace mozilla
+
+/**
+ * Deduction guide to allow simple `RefPtr` definitions from an
+ * already_AddRefed<T> without repeating the type, e.g.:
+ *
+ *   RefPtr ptr = MakeAndAddRef<SomeType>(...);
+ */
+template <typename T>
+RefPtr(already_AddRefed<T>) -> RefPtr<T>;
+
+#endif /* mozilla_RefPtr_h */
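// A short sketch (not part of the upstream patch) tying together the helpers
// above; `Widget` is a hypothetical RefCounted class.
#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"

class Widget : public mozilla::RefCounted<Widget> {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(Widget)
};

already_AddRefed<Widget> CreateWidget() {
  return mozilla::MakeAndAddRef<Widget>();  // one allocation, refcount 1
}

void UseWidget() {
  RefPtr widget = CreateWidget();  // deduction guide infers RefPtr<Widget>
  RefPtr<Widget> other = mozilla::MakeRefPtr<Widget>();
  MOZ_ASSERT(widget && other);
}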
+template <typename V, typename E>
+class ResultImplementationNullIsOkBase {
+ protected:
+  using ErrorStorageType = typename UnusedZero<E>::StorageType;
+
+  static constexpr auto kNullValue = UnusedZero<E>::nullValue;
+
+  static_assert(std::is_trivially_copyable_v<ErrorStorageType>);
+
+  // XXX This can't be statically asserted in general, if ErrorStorageType is
+  // not a basic type. With C++20 bit_cast, we could probably re-add such an
+  // assertion here.
+  // static_assert(kNullValue == decltype(kNullValue)(0));
+
+  CompactPair<AlignedStorageOrEmpty<V>, ErrorStorageType> mValue;
+
+ public:
+  explicit constexpr ResultImplementationNullIsOkBase(const V& aSuccessValue)
+      : mValue(aSuccessValue, kNullValue) {}
+  explicit constexpr ResultImplementationNullIsOkBase(V&& aSuccessValue)
+      : mValue(std::move(aSuccessValue), kNullValue) {}
+  template <typename... Args>
+  explicit constexpr ResultImplementationNullIsOkBase(std::in_place_t,
+                                                      Args&&... aArgs)
+      : mValue(std::piecewise_construct,
+               std::tuple(std::in_place, std::forward<Args>(aArgs)...),
+               std::tuple(kNullValue)) {}
+  explicit constexpr ResultImplementationNullIsOkBase(E aErrorValue)
+      : mValue(std::piecewise_construct, std::tuple<>(),
+               std::tuple(UnusedZero<E>::Store(std::move(aErrorValue)))) {
+    MOZ_ASSERT(mValue.second() != kNullValue);
+  }
+
+  constexpr ResultImplementationNullIsOkBase(
+      ResultImplementationNullIsOkBase&& aOther)
+      : mValue(std::piecewise_construct, std::tuple<>(),
+               std::tuple(aOther.mValue.second())) {
+    if constexpr (!std::is_empty_v<V>) {
+      if (isOk()) {
+        new (mValue.first().addr())
+            V(std::move(*aOther.mValue.first().addr()));
+      }
+    }
+  }
+  ResultImplementationNullIsOkBase& operator=(
+      ResultImplementationNullIsOkBase&& aOther) {
+    if constexpr (!std::is_empty_v<V>) {
+      if (isOk()) {
+        mValue.first().addr()->~V();
+      }
+    }
+    mValue.second() = std::move(aOther.mValue.second());
+    if constexpr (!std::is_empty_v<V>) {
+      if (isOk()) {
+        new (mValue.first().addr())
+            V(std::move(*aOther.mValue.first().addr()));
+      }
+    }
+    return *this;
+  }
+
+  constexpr bool isOk() const { return mValue.second() == kNullValue; }
+
+  constexpr const V& inspect() const { return *mValue.first().addr(); }
+  constexpr V unwrap() { return std::move(*mValue.first().addr()); }
+  constexpr void updateAfterTracing(V&& aValue) {
+    MOZ_ASSERT(isOk());
+    if (!std::is_empty_v<V>) {
+      mValue.first().addr()->~V();
+      new (mValue.first().addr()) V(std::move(aValue));
+    }
+  }
+
+  constexpr decltype(auto) inspectErr() const {
+    return UnusedZero<E>::Inspect(mValue.second());
+  }
+  constexpr E unwrapErr() { return UnusedZero<E>::Unwrap(mValue.second()); }
+  constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+    mValue.second() = UnusedZero<E>::Store(std::move(aErrorValue));
+  }
+};
+
+template <typename V, typename E,
+          bool IsVTriviallyDestructible = std::is_trivially_destructible_v<V>>
+class ResultImplementationNullIsOk;
+
+template <typename V, typename E>
+class ResultImplementationNullIsOk<V, E, true>
+    : public ResultImplementationNullIsOkBase<V, E> {
+ public:
+  using ResultImplementationNullIsOkBase<V,
+                                         E>::ResultImplementationNullIsOkBase;
+};
+
+template <typename V, typename E>
+class ResultImplementationNullIsOk<V, E, false>
+    : public ResultImplementationNullIsOkBase<V, E> {
+ public:
+  using ResultImplementationNullIsOkBase<V,
+                                         E>::ResultImplementationNullIsOkBase;
+
+  ResultImplementationNullIsOk(ResultImplementationNullIsOk&&) = default;
+  ResultImplementationNullIsOk& operator=(ResultImplementationNullIsOk&&) =
+      default;
+
+  ~ResultImplementationNullIsOk() {
+    if (this->isOk()) {
+      this->mValue.first().addr()->~V();
+    }
+  }
+};
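Before the specializations that follow, this toy model (not part of the header) shows the idea behind the NullIsOk storage: when zero is never a valid error representation, the error word itself can discriminate success from failure, so no separate tag member is needed:

#include <cassert>
#include <cstdint>

// Toy model of NullIsOk, for illustration only.
class ToyNullIsOk {
  uint32_t mErr;  // 0 <=> success; anything else is the stored error

 public:
  ToyNullIsOk() : mErr(0) {}  // success
  explicit ToyNullIsOk(uint32_t aErr) : mErr(aErr) { assert(aErr != 0); }

  bool isOk() const { return mErr == 0; }
  uint32_t unwrapErr() const {
    assert(!isOk());
    return mErr;
  }
};

// The discriminant costs no extra space beyond the error word itself.
static_assert(sizeof(ToyNullIsOk) == sizeof(uint32_t));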
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::NullIsOk>
+    : public ResultImplementationNullIsOk<V, E> {
+ public:
+  static constexpr PackingStrategy Strategy = PackingStrategy::NullIsOk;
+  using ResultImplementationNullIsOk<V, E>::ResultImplementationNullIsOk;
+};
+
+template <size_t S>
+using UnsignedIntType = std::conditional_t<
+    S == 1, std::uint8_t,
+    std::conditional_t<
+        S == 2, std::uint16_t,
+        std::conditional_t<S == 3 || S == 4, std::uint32_t,
+                           std::conditional_t<S <= 8, std::uint64_t, void>>>>;
+
+/**
+ * Specialization for when alignment permits using the least significant bit
+ * as a tag bit.
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::LowBitTagIsError> {
+  static_assert(std::is_trivially_copyable_v<V> &&
+                std::is_trivially_destructible_v<V>);
+  static_assert(std::is_trivially_copyable_v<E> &&
+                std::is_trivially_destructible_v<E>);
+
+  static constexpr size_t kRequiredSize = std::max(sizeof(V), sizeof(E));
+
+  using StorageType = UnsignedIntType<kRequiredSize>;
+
+#if defined(__clang__)
+  alignas(std::max(alignof(V), alignof(E))) StorageType mBits;
+#else
+  // Some gcc versions choke on using std::max with alignas, see
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94929 (and this seems to have
+  // regressed in some gcc 9.x version before being fixed again). Keeping the
+  // code above since we would eventually drop this when we no longer support
+  // gcc versions with the bug.
+  alignas(alignof(V) > alignof(E) ? alignof(V) : alignof(E)) StorageType mBits;
+#endif
+
+ public:
+  static constexpr PackingStrategy Strategy = PackingStrategy::LowBitTagIsError;
+
+  explicit constexpr ResultImplementation(V aValue) : mBits(0) {
+    if constexpr (!std::is_empty_v<V>) {
+      std::memcpy(&mBits, &aValue, sizeof(V));
+      MOZ_ASSERT((mBits & 1) == 0);
+    } else {
+      (void)aValue;
+    }
+  }
+  explicit constexpr ResultImplementation(E aErrorValue) : mBits(1) {
+    if constexpr (!std::is_empty_v<E>) {
+      std::memcpy(&mBits, &aErrorValue, sizeof(E));
+      MOZ_ASSERT((mBits & 1) == 0);
+      mBits |= 1;
+    } else {
+      (void)aErrorValue;
+    }
+  }
+
+  constexpr bool isOk() const { return (mBits & 1) == 0; }
+
+  constexpr V inspect() const {
+    V res;
+    std::memcpy(&res, &mBits, sizeof(V));
+    return res;
+  }
+  constexpr V unwrap() { return inspect(); }
+
+  constexpr E inspectErr() const {
+    const auto bits = mBits ^ 1;
+    E res;
+    std::memcpy(&res, &bits, sizeof(E));
+    return res;
+  }
+  constexpr E unwrapErr() { return inspectErr(); }
+
+  constexpr void updateAfterTracing(V&& aValue) {
+    this->~ResultImplementation();
+    new (this) ResultImplementation(std::move(aValue));
+  }
+  constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+    this->~ResultImplementation();
+    new (this) ResultImplementation(std::move(aErrorValue));
+  }
+};
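A toy model of the tagging trick above (again, not part of the header): word-aligned success and error representations both leave the least significant bit clear, so that bit can be claimed as the success/error tag without growing the storage. Mirroring the real implementation, the error is stored as-is rather than shifted:

#include <cassert>
#include <cstdint>

// Toy model of LowBitTagIsError, for illustration only.
class ToyLowBit {
  uintptr_t mBits;
  explicit ToyLowBit(uintptr_t aBits) : mBits(aBits) {}

 public:
  static ToyLowBit Success(int* aPtr) {
    auto bits = reinterpret_cast<uintptr_t>(aPtr);
    assert((bits & 1) == 0);  // alignof(int) > 1 guarantees a free LSB
    return ToyLowBit(bits);
  }
  static ToyLowBit Error(uintptr_t aErr) {
    assert((aErr & 1) == 0);     // error representation must leave LSB clear
    return ToyLowBit(aErr | 1);  // the tag bit marks the error arm
  }

  bool isOk() const { return (mBits & 1) == 0; }
  int* unwrap() const { return reinterpret_cast<int*>(mBits); }
  uintptr_t unwrapErr() const { return mBits ^ 1; }  // mask the tag back out
};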
+// Return true if either of the struct layouts below fits in a word.
+template <typename V, typename E>
+struct IsPackableVariant {
+  struct VEbool {
+    explicit constexpr VEbool(V&& aValue) : v(std::move(aValue)), ok(true) {}
+    explicit constexpr VEbool(E&& aErrorValue)
+        : e(std::move(aErrorValue)), ok(false) {}
+    V v;
+    E e;
+    bool ok;
+  };
+  struct EVbool {
+    explicit constexpr EVbool(V&& aValue) : v(std::move(aValue)), ok(true) {}
+    explicit constexpr EVbool(E&& aErrorValue)
+        : e(std::move(aErrorValue)), ok(false) {}
+    E e;
+    V v;
+    bool ok;
+  };
+
+  using Impl =
+      std::conditional_t<sizeof(VEbool) <= sizeof(EVbool), VEbool, EVbool>;
+
+  static const bool value = sizeof(Impl) <= sizeof(uintptr_t);
+};
+
+/**
+ * Specialization for when neither type uses all of its bytes, so that one
+ * byte can be used as a tag.
+ */
+template <typename V, typename E>
+class ResultImplementation<V, E, PackingStrategy::PackedVariant> {
+  using Impl = typename IsPackableVariant<V, E>::Impl;
+  Impl data;
+
+ public:
+  static constexpr PackingStrategy Strategy = PackingStrategy::PackedVariant;
+
+  explicit constexpr ResultImplementation(V aValue) : data(std::move(aValue)) {}
+  explicit constexpr ResultImplementation(E aErrorValue)
+      : data(std::move(aErrorValue)) {}
+
+  constexpr bool isOk() const { return data.ok; }
+
+  constexpr const V& inspect() const { return data.v; }
+  constexpr V unwrap() { return std::move(data.v); }
+
+  constexpr const E& inspectErr() const { return data.e; }
+  constexpr E unwrapErr() { return std::move(data.e); }
+
+  constexpr void updateAfterTracing(V&& aValue) {
+    MOZ_ASSERT(data.ok);
+    this->~ResultImplementation();
+    new (this) ResultImplementation(std::move(aValue));
+  }
+  constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+    MOZ_ASSERT(!data.ok);
+    this->~ResultImplementation();
+    new (this) ResultImplementation(std::move(aErrorValue));
+  }
+};
+
+// To use nullptr as a special value, we need the counterpart to exclude zero
+// from its range of valid representations.
+//
+// By default assume that zero can be represented.
+template <typename T>
+struct UnusedZero {
+  static const bool value = false;
+};
+
+// This template can be used as a helper for specializing UnusedZero for scoped
+// enum types which never use 0 as an error value, e.g.
+//
+//   namespace mozilla::detail {
+//
+//   template <>
+//   struct UnusedZero<MyEnumType> : UnusedZeroEnum<MyEnumType> {};
+//
+//   }  // namespace mozilla::detail
+//
+template <typename T>
+struct UnusedZeroEnum {
+  using StorageType = std::underlying_type_t<T>;
+
+  static constexpr bool value = true;
+  static constexpr StorageType nullValue = 0;
+
+  static constexpr T Inspect(const StorageType& aValue) {
+    return static_cast<T>(aValue);
+  }
+  static constexpr T Unwrap(StorageType aValue) {
+    return static_cast<T>(aValue);
+  }
+  static constexpr StorageType Store(T aValue) {
+    return static_cast<StorageType>(aValue);
+  }
+};
+
+// A bit of help figuring out which of the above specializations to use.
+//
+// We begin by safely assuming types don't have a spare bit, unless they are
+// empty.
+template <typename T>
+struct HasFreeLSB {
+  static const bool value = std::is_empty_v<T>;
+};
+
+// As an incomplete type, void* does not have a spare bit.
+template <>
+struct HasFreeLSB<void*> {
+  static const bool value = false;
+};
+
+// The lowest bit of a properly-aligned pointer is always zero if the pointee
+// type is greater than byte-aligned. That bit is free to use if it's masked
+// out of such pointers before they're dereferenced.
+template <typename T>
+struct HasFreeLSB<T*> {
+  static const bool value = (alignof(T) & 1) == 0;
+};
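Concretely, the opt-in described in the UnusedZeroEnum comment above looks like the following; IoError is an illustrative enum, not something MFBT defines:

#include "mozilla/Result.h"

// A scoped enum whose underlying value 0 is never a valid error.
enum class IoError : uint8_t { NotFound = 1, AccessDenied = 2 };

namespace mozilla::detail {
template <>
struct UnusedZero<IoError> : UnusedZeroEnum<IoError> {};
}  // namespace mozilla::detail

// Result types using IoError as the error arm can now use the NullIsOk
// packing, with the all-zero error word reserved to mean "success".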
+// Select one of the previous result implementations based on the properties
+// of the V and E types.
+template <typename V, typename E>
+struct SelectResultImpl {
+  static const PackingStrategy value =
+      (HasFreeLSB<V>::value && HasFreeLSB<E>::value)
+          ? PackingStrategy::LowBitTagIsError
+      : (UnusedZero<E>::value && sizeof(E) <= sizeof(uintptr_t))
+          ? PackingStrategy::NullIsOk
+      : (std::is_default_constructible_v<V> &&
+         std::is_default_constructible_v<E> &&
+         IsPackableVariant<V, E>::value)
+          ? PackingStrategy::PackedVariant
+          : PackingStrategy::Variant;
+
+  using Type = ResultImplementation<V, E, value>;
+};
+
+template <typename T>
+struct IsResult : std::false_type {};
+
+template <typename V, typename E>
+struct IsResult<Result<V, E>> : std::true_type {};
+
+}  // namespace detail
+
+template <typename V, typename E>
+constexpr auto ToResult(Result<V, E>&& aValue)
+    -> decltype(std::forward<Result<V, E>>(aValue)) {
+  return std::forward<Result<V, E>>(aValue);
+}
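As an illustrative check of this selection (using the Result type defined just below, and assuming a typical 32- or 64-bit target): Ok is empty and int* is more than byte-aligned, so both sides have a free LSB and the whole Result packs into a single pointer-sized word. The types here are examples only:

#include "mozilla/Result.h"

static_assert(mozilla::Result<mozilla::Ok, int*>::Strategy ==
              mozilla::detail::PackingStrategy::LowBitTagIsError);
// The packed Result is no larger than the larger of its two arms.
static_assert(sizeof(mozilla::Result<mozilla::Ok, int*>) == sizeof(int*));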
+/**
+ * Result<V, E> represents the outcome of an operation that can either succeed
+ * or fail. It contains either a success value of type V or an error value of
+ * type E.
+ *
+ * All Result methods are const, so results are basically immutable.
+ * This is just like Variant<V, E> but with a slightly different API, and the
+ * following cases are optimized so Result can be stored more efficiently:
+ *
+ * - If both the success and error types do not use their least significant
+ *   bit and are trivially copyable and destructible, Result<V, E> is
+ *   guaranteed to be as large as the larger type. This is determined via the
+ *   HasFreeLSB trait. By default, empty classes (in particular Ok) and
+ *   aligned pointer types are assumed to have a free LSB, but you can
+ *   specialize this trait for other types. If the success type is empty, the
+ *   representation is guaranteed to be all zero bits on success. Do not
+ *   change this representation! There is JIT code that depends on it.
+ *   (Implementation note: The lowest bit is used as a tag bit: 0 to indicate
+ *   the Result's bits are a success value, 1 to indicate the Result's bits
+ *   (with the 1 masked out) encode an error value.)
+ *
+ * - Else, if the error type can't have an all-zero bits representation and is
+ *   not larger than a pointer, a CompactPair is used to represent this rather
+ *   than a Variant. This has been shown to optimize better, and the template
+ *   code is much simpler than that of Variant, so it should also compile
+ *   faster. Whether an error type can't be all-zero bits is determined via
+ *   the UnusedZero<E> trait. MFBT doesn't declare any public type UnusedZero,
+ *   but nsresult is declared UnusedZero in XPCOM.
+ *
+ * The purpose of Result is to reduce the screwups caused by using `false` or
+ * `nullptr` to indicate errors.
+ * What screwups? See <https://bugzilla.mozilla.org/show_bug.cgi?id=912928>
+ * for a partial list.
+ *
+ * Result<const V, E> or Result<V, const E> are not meaningful. The success or
+ * error values in a Result instance are non-modifiable in-place anyway. This
+ * guarantee must also be maintained when evolving Result. They can be
+ * unwrap()ped, but this loses const qualification. However, Result<const V,
+ * E> or Result<V, const E> may be misleading and prevent movability. Just use
+ * Result<V, E>. (Result<const V*, E> may make sense though, just
+ * Result<const V* const, E> is not possible.)
+ */
+template <typename V, typename E>
+class [[nodiscard]] Result final {
+  // See class comment on Result<const V, E> and Result<V, const E>.
+  static_assert(!std::is_const_v<V>);
+  static_assert(!std::is_const_v<E>);
+  static_assert(!std::is_reference_v<V>);
+  static_assert(!std::is_reference_v<E>);
+
+  using Impl = typename detail::SelectResultImpl<V, E>::Type;
+
+  Impl mImpl;
+  // Are you getting this error?
+  // > error: implicit instantiation of undefined template
+  // > 'mozilla::detail::ResultImplementation<$V,$E,
+  // > mozilla::detail::PackingStrategy::Variant>'
+  // You need to include "ResultVariant.h"!
+
+ public:
+  static constexpr detail::PackingStrategy Strategy = Impl::Strategy;
+  using ok_type = V;
+  using err_type = E;
+
+  /** Create a success result. */
+  MOZ_IMPLICIT constexpr Result(V&& aValue) : mImpl(std::move(aValue)) {
+    MOZ_ASSERT(isOk());
+  }
+
+  /** Create a success result. */
+  MOZ_IMPLICIT constexpr Result(const V& aValue) : mImpl(aValue) {
+    MOZ_ASSERT(isOk());
+  }
+
+  /** Create a success result in-place. */
+  template <typename... Args>
+  explicit constexpr Result(std::in_place_t, Args&&... aArgs)
+      : mImpl(std::in_place, std::forward<Args>(aArgs)...) {
+    MOZ_ASSERT(isOk());
+  }
+
+  /** Create an error result. */
+  explicit constexpr Result(const E& aErrorValue) : mImpl(aErrorValue) {
+    MOZ_ASSERT(isErr());
+  }
+  explicit constexpr Result(E&& aErrorValue) : mImpl(std::move(aErrorValue)) {
+    MOZ_ASSERT(isErr());
+  }
+
+  /**
+   * Create a (success/error) result from another (success/error) result with
+   * a different but convertible error type. */
+  template <typename E2,
+            typename = std::enable_if_t<std::is_convertible_v<E2, E>>>
+  MOZ_IMPLICIT constexpr Result(Result<V, E2>&& aOther)
+      : mImpl(aOther.isOk() ? Impl{aOther.unwrap()}
+                            : Impl{aOther.unwrapErr()}) {}
+
+  /**
+   * Implementation detail of MOZ_TRY().
+   * Create an error result from another error result.
+   */
+  template <typename E2>
+  MOZ_IMPLICIT constexpr Result(GenericErrorResult<E2>&& aErrorResult)
+      : mImpl(std::move(aErrorResult.mErrorValue)) {
+    static_assert(std::is_convertible_v<E2, E>, "E2 must be convertible to E");
+    MOZ_ASSERT(isErr());
+  }
+
+  /**
+   * Implementation detail of MOZ_TRY().
+   * Create an error result from another error result.
+   */
+  template <typename E2>
+  MOZ_IMPLICIT constexpr Result(const GenericErrorResult<E2>& aErrorResult)
+      : mImpl(aErrorResult.mErrorValue) {
+    static_assert(std::is_convertible_v<E2, E>, "E2 must be convertible to E");
+    MOZ_ASSERT(isErr());
+  }
+
+  Result(const Result&) = delete;
+  Result(Result&&) = default;
+  Result& operator=(const Result&) = delete;
+  Result& operator=(Result&&) = default;
+
+  /** True if this Result is a success result. */
+  constexpr bool isOk() const { return mImpl.isOk(); }
+
+  /** True if this Result is an error result. */
+  constexpr bool isErr() const { return !mImpl.isOk(); }
+
+  /** Take the success value from this Result, which must be a success result.
+   */
+  constexpr V unwrap() {
+    MOZ_ASSERT(isOk());
+    return mImpl.unwrap();
+  }
+
+  /**
+   * Take the success value from this Result, which must be a success result.
+   * If it is an error result, then return aValue instead.
+   */
+  constexpr V unwrapOr(V aValue) {
+    return MOZ_LIKELY(isOk()) ? mImpl.unwrap() : std::move(aValue);
+  }
+
+  /** Take the error value from this Result, which must be an error result. */
+  constexpr E unwrapErr() {
+    MOZ_ASSERT(isErr());
+    return mImpl.unwrapErr();
+  }
+
+  /** Used only for GC tracing. If used in Rooted<Result<V, E>>, V must have a
+   * GCPolicy for tracing it. */
+  constexpr void updateAfterTracing(V&& aValue) {
+    mImpl.updateAfterTracing(std::move(aValue));
+  }
+
+  /** Used only for GC tracing. If used in Rooted<Result<V, E>>, E must have a
+   * GCPolicy for tracing it. */
+  constexpr void updateErrorAfterTracing(E&& aErrorValue) {
+    mImpl.updateErrorAfterTracing(std::move(aErrorValue));
+  }
+
+  /** See the success value from this Result, which must be a success result.
+   */
+  constexpr decltype(auto) inspect() const {
+    static_assert(!std::is_reference_v<
+                      std::invoke_result_t<decltype(&Impl::inspect), Impl>> ||
+                  std::is_const_v<std::remove_reference_t<
+                      std::invoke_result_t<decltype(&Impl::inspect), Impl>>>);
+    MOZ_ASSERT(isOk());
+    return mImpl.inspect();
+  }
+
+  /** See the error value from this Result, which must be an error result. */
+  constexpr decltype(auto) inspectErr() const {
+    static_assert(
+        !std::is_reference_v<
+            std::invoke_result_t<decltype(&Impl::inspectErr), Impl>> ||
+        std::is_const_v<std::remove_reference_t<
+            std::invoke_result_t<decltype(&Impl::inspectErr), Impl>>>);
+    MOZ_ASSERT(isErr());
+    return mImpl.inspectErr();
+  }
+
+  /**
+   * Propagate the error value from this Result, which must be an error
+   * result.
+   *
+   * This can be used to propagate an error from a function call to the caller
+   * with a different value type, but the same error type:
+   *
+   *   Result<T1, E> Func1() {
+   *     Result<T2, E> res = Func2();
+   *     if (res.isErr()) { return res.propagateErr(); }
+   *   }
+   */
+  constexpr GenericErrorResult<E> propagateErr() {
+    MOZ_ASSERT(isErr());
+    return GenericErrorResult<E>{mImpl.unwrapErr(), ErrorPropagationTag{}};
+  }
+
+  /**
+   * Map a function V -> V2 over this result's success variant. If this result
+   * is an error, do not invoke the function and propagate the error.
+   *
+   * Mapping over success values invokes the function to produce a new success
+   * value:
+   *
+   *     // Map Result<int, float> to another Result<int, float>
+   *     Result<int, float> res(5);
+   *     Result<int, float> res2 = res.map([](int x) { return x * x; });
+   *     MOZ_ASSERT(res.isOk());
+   *     MOZ_ASSERT(res2.unwrap() == 25);
+   *
+   *     // Map Result<const char*, float> to Result<size_t, float>
+   *     Result<const char*, float> res("hello, map!");
+   *     Result<size_t, float> res2 = res.map(strlen);
+   *     MOZ_ASSERT(res.isOk());
+   *     MOZ_ASSERT(res2.unwrap() == 11);
+   *
+   * Mapping over an error does not invoke the function and propagates the
+   * error:
+   *
+   *     Result<V, int> res(5);
+   *     MOZ_ASSERT(res.isErr());
+   *     Result<V2, int> res2 = res.map([](V v) { ... });
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res2.unwrapErr() == 5);
+   */
+  template <typename F>
+  constexpr auto map(F f) -> Result<std::invoke_result_t<F, V>, E> {
+    using RetResult = Result<std::invoke_result_t<F, V>, E>;
+    return MOZ_LIKELY(isOk()) ? RetResult(f(unwrap())) : RetResult(unwrapErr());
+  }
+
+  /**
+   * Map a function E -> E2 over this result's error variant. If this result
+   * is a success, do not invoke the function and move the success over.
+   *
+   * Mapping over error values invokes the function to produce a new error
+   * value:
+   *
+   *     // Map Result<V, int> to another Result<V, int>
+   *     Result<V, int> res(5);
+   *     Result<V, int> res2 = res.mapErr([](int x) { return x * x; });
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res2.unwrapErr() == 25);
+   *
+   *     // Map Result<V, const char*> to Result<V, size_t>
+   *     Result<V, const char*> res("hello, mapErr!");
+   *     Result<V, size_t> res2 = res.mapErr(strlen);
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res2.unwrapErr() == 14);
+   *
+   * Mapping over a success does not invoke the function and moves the
+   * success:
+   *
+   *     Result<int, E> res(5);
+   *     MOZ_ASSERT(res.isOk());
+   *     Result<int, E2> res2 = res.mapErr([](E e) { ... });
+   *     MOZ_ASSERT(res2.isOk());
+   *     MOZ_ASSERT(res2.unwrap() == 5);
+   */
+  template <typename F>
+  constexpr auto mapErr(F f) {
+    using RetResult = Result<V, std::invoke_result_t<F, E>>;
+    return MOZ_UNLIKELY(isErr()) ? RetResult(f(unwrapErr()))
+                                 : RetResult(unwrap());
+  }
+
+  /**
+   * Map a function E -> Result<V, E2> over this result's error variant. If
+   * this result is a success, do not invoke the function and move the success
+   * over.
+   *
+   * `orElse`ing over error values invokes the function to produce a new
+   * result:
+   *
+   *     // `orElse` Result<V, int> error variant to another Result<V, int>
+   *     // error variant or Result<V, int> success variant
+   *     auto orElse = [](int x) -> Result<V, int> {
+   *       if (x != 6) {
+   *         return Err(x * x);
+   *       }
+   *       return V(...);
+   *     };
+   *
+   *     Result<V, int> res(5);
+   *     auto res2 = res.orElse(orElse);
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res2.unwrapErr() == 25);
+   *
+   *     Result<V, int> res3(6);
+   *     auto res4 = res3.orElse(orElse);
+   *     MOZ_ASSERT(res4.isOk());
+   *     MOZ_ASSERT(res4.unwrap() == ...);
+   *
+   *     // `orElse` Result<V, const char*> error variant to Result<V, size_t>
+   *     // error variant or Result<V, size_t> success variant
+   *     auto orElse = [](const char* s) -> Result<V, size_t> {
+   *       if (strcmp(s, "foo")) {
+   *         return Err(strlen(s));
+   *       }
+   *       return V(...);
+   *     };
+   *
+   *     Result<V, const char*> res("hello, orElse!");
+   *     auto res2 = res.orElse(orElse);
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res2.unwrapErr() == 14);
+   *
+   *     Result<V, const char*> res3("foo");
+   *     auto res4 = res3.orElse(orElse);
+   *     MOZ_ASSERT(res4.isOk());
+   *     MOZ_ASSERT(res4.unwrap() == ...);
+   *
+   * `orElse`ing over a success does not invoke the function and moves the
+   * success:
+   *
+   *     Result<int, E> res(5);
+   *     MOZ_ASSERT(res.isOk());
+   *     Result<int, E2> res2 = res.orElse([](E e) { ... });
+   *     MOZ_ASSERT(res2.isOk());
+   *     MOZ_ASSERT(res2.unwrap() == 5);
+   */
+  template <typename F>
+  auto orElse(F f)
+      -> Result<V, typename std::invoke_result_t<F, E>::err_type> {
+    return MOZ_UNLIKELY(isErr()) ? f(unwrapErr()) : unwrap();
+  }
+
+  /**
+   * Given a function V -> Result<V2, E>, apply it to this result's success
+   * value and return its result. If this result is an error value, it is
+   * propagated.
+   *
+   * This is sometimes called "flatMap" or ">>=" in other contexts.
+   *
+   * `andThen`ing over success values invokes the function to produce a new
+   * result:
+   *
+   *     Result<const char*, Error> res("hello, andThen!");
+   *     Result<HtmlFreeString, Error> res2 = res.andThen([](const char* s) {
+   *       return containsHtmlTag(s)
+   *                  ? Result<HtmlFreeString, Error>(
+   *                        Error("Invalid: contains HTML"))
+   *                  : Result<HtmlFreeString, Error>(HtmlFreeString(s));
+   *     });
+   *     MOZ_ASSERT(res2.isOk());
+   *     MOZ_ASSERT(res2.unwrap() == HtmlFreeString("hello, andThen!"));
+   *
+   * `andThen`ing over error results does not invoke the function, and just
+   * propagates the error result:
+   *
+   *     Result<int, const char*> res("some error");
+   *     auto res2 = res.andThen([](int x) { ... });
+   *     MOZ_ASSERT(res2.isErr());
+   *     MOZ_ASSERT(res.unwrapErr() == res2.unwrapErr());
+   */
+  template <typename F, typename = std::enable_if_t<detail::IsResult<
+                            std::invoke_result_t<F, V&&>>::value>>
+  constexpr auto andThen(F f) -> std::invoke_result_t<F, V&&> {
+    return MOZ_LIKELY(isOk()) ? f(unwrap()) : propagateErr();
+  }
+};
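A small end-to-end sketch of the API above; DemoError, ToByte, and Demo are illustrative names, and the one-byte types are deliberately chosen so the PackedVariant strategy applies and no ResultVariant.h include is needed:

#include "mozilla/Result.h"

enum class DemoError : uint8_t { Negative = 1 };  // illustrative error type

mozilla::Result<uint8_t, DemoError> ToByte(int x) {
  if (x < 0) {
    return mozilla::Err(DemoError::Negative);
  }
  return static_cast<uint8_t>(x);
}

void Demo() {
  MOZ_ASSERT(ToByte(42).unwrap() == 42);
  MOZ_ASSERT(ToByte(-1).unwrapOr(0) == 0);
  // map transforms the success arm and passes errors through untouched.
  MOZ_ASSERT(ToByte(3).map([](uint8_t v) -> uint8_t { return v * v; })
                 .unwrap() == 9);
}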
+/**
+ * A type that auto-converts to an error Result. This is like a Result without
+ * a success type. It's the best return type for functions that always return
+ * an error--functions designed to build and populate error objects. It's also
+ * useful in error-handling macros; see MOZ_TRY for an example.
+ */
+template <typename E>
+class [[nodiscard]] GenericErrorResult {
+  E mErrorValue;
+
+  template <typename V, typename E2>
+  friend class Result;
+
+ public:
+  explicit constexpr GenericErrorResult(const E& aErrorValue)
+      : mErrorValue(aErrorValue) {}
+
+  explicit constexpr GenericErrorResult(E&& aErrorValue)
+      : mErrorValue(std::move(aErrorValue)) {}
+
+  constexpr GenericErrorResult(const E& aErrorValue,
+                               const ErrorPropagationTag&)
+      : GenericErrorResult(aErrorValue) {}
+
+  constexpr GenericErrorResult(E&& aErrorValue, const ErrorPropagationTag&)
+      : GenericErrorResult(std::move(aErrorValue)) {}
+};
+
+template <typename E>
+inline constexpr auto Err(E&& aErrorValue) {
+  return GenericErrorResult<std::decay_t<E>>(std::forward<E>(aErrorValue));
+}
+
+}  // namespace mozilla
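A short sketch of the intended use; DemoError and the function names are illustrative:

#include "mozilla/Result.h"

enum class DemoError : uint8_t { Failed = 1 };  // illustrative

// A function that can only fail returns GenericErrorResult directly; it then
// converts implicitly into any Result with a compatible error type.
mozilla::GenericErrorResult<DemoError> AlwaysFails() {
  return mozilla::Err(DemoError::Failed);
}

mozilla::Result<uint8_t, DemoError> TryIt() {
  return AlwaysFails();  // implicit conversion to the error arm
}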
+/**
+ * MOZ_TRY(expr) is the C++ equivalent of Rust's `try!(expr);`. First, it
+ * evaluates expr, which must produce a Result value. On success, it
+ * discards the result altogether. On error, it immediately returns an error
+ * Result from the enclosing function.
+ */
+#define MOZ_TRY(expr)                                     \
+  do {                                                    \
+    auto mozTryTempResult_ = ::mozilla::ToResult(expr);   \
+    if (MOZ_UNLIKELY(mozTryTempResult_.isErr())) {        \
+      return mozTryTempResult_.propagateErr();            \
+    }                                                     \
+  } while (0)
+
+/**
+ * MOZ_TRY_VAR(target, expr) is the C++ equivalent of Rust's `target =
+ * try!(expr);`. First, it evaluates expr, which must produce a Result value.
+ * On success, the result's success value is assigned to target. On error, it
+ * immediately returns the error result. |target| must be an lvalue.
+ */
+#define MOZ_TRY_VAR(target, expr)                         \
+  do {                                                    \
+    auto mozTryVarTempResult_ = (expr);                   \
+    if (MOZ_UNLIKELY(mozTryVarTempResult_.isErr())) {     \
+      return mozTryVarTempResult_.propagateErr();         \
+    }                                                     \
+    (target) = mozTryVarTempResult_.unwrap();             \
+  } while (0)
+
+#endif  // mozilla_Result_h
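A hypothetical sketch of both macros in use; Document, Body, Input, ParseError, ParseHeader, and ParseBody are assumed types and functions, not part of MFBT:

#include "mozilla/Result.h"

mozilla::Result<Document, ParseError> ParseDocument(Input& aInput) {
  MOZ_TRY(ParseHeader(aInput));  // returns early on error, discards the value

  Body body;
  MOZ_TRY_VAR(body, ParseBody(aInput));  // returns early or assigns to body

  return Document{std::move(body)};
}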
diff --git a/mfbt/ResultExtensions.h b/mfbt/ResultExtensions.h
new file mode 100644
index 0000000000..97f197d800
--- /dev/null
+++ b/mfbt/ResultExtensions.h
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Extensions to the Result type to enable simpler handling of XPCOM/NSPR
+ * results. */
+
+#ifndef mozilla_ResultExtensions_h
+#define mozilla_ResultExtensions_h
+
+#include "mozilla/Assertions.h"
+#include "nscore.h"
+#include "prtypes.h"
+#include "mozilla/dom/quota/RemoveParen.h"
+
+namespace mozilla {
+
+struct ErrorPropagationTag;
+
+// Allow nsresult errors to automatically convert to nsresult values, so
+// MOZ_TRY can be used in XPCOM methods with Result<T, nsresult> results.
+template <>
+class [[nodiscard]] GenericErrorResult<nsresult> {
+  nsresult mErrorValue;
+
+  template <typename V, typename E2>
+  friend class Result;
+
+ public:
+  explicit GenericErrorResult(nsresult aErrorValue)
+      : mErrorValue(aErrorValue) {
+    MOZ_ASSERT(NS_FAILED(aErrorValue));
+  }
+
+  GenericErrorResult(nsresult aErrorValue, const ErrorPropagationTag&)
+      : GenericErrorResult(aErrorValue) {}
+
+  operator nsresult() const { return mErrorValue; }
+};
+
+// Allow MOZ_TRY to handle `PRStatus` values.
+template <typename E = nsresult>
+inline Result<Ok, E> ToResult(PRStatus aValue);
+
+}  // namespace mozilla
+
+#include "mozilla/Result.h"
+
+namespace mozilla {
+
+template <typename ResultType>
+struct ResultTypeTraits;
+
+template <>
+struct ResultTypeTraits<nsresult> {
+  static nsresult From(nsresult aValue) { return aValue; }
+};
+
+template <typename E = nsresult>
+inline Result<Ok, E> ToResult(nsresult aValue) {
+  if (NS_FAILED(aValue)) {
+    return Err(ResultTypeTraits<E>::From(aValue));
+  }
+  return Ok();
+}
+
+template <typename E>
+inline Result<Ok, E> ToResult(PRStatus aValue) {
+  if (aValue == PR_SUCCESS) {
+    return Ok();
+  }
+  return Err(ResultTypeTraits<E>::From(NS_ERROR_FAILURE));
+}
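Because GenericErrorResult<nsresult> converts back to nsresult, MOZ_TRY works directly inside XPCOM-style methods that return a bare nsresult; DoInit is a hypothetical nsresult-returning function:

#include "mozilla/ResultExtensions.h"

nsresult SomeXpcomMethod() {
  MOZ_TRY(DoInit());  // on failure, returns DoInit()'s error code directly
  return NS_OK;
}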
+namespace detail {
+template <typename R>
+auto ResultRefAsParam(R& aResult) {
+  return &aResult;
+}
+
+template <typename R, typename E, typename RArgMapper, typename Func,
+          typename... Args>
+Result<R, E> ToResultInvokeInternal(const Func& aFunc,
+                                    const RArgMapper& aRArgMapper,
+                                    Args&&... aArgs) {
+  // XXX Theoretically, if R is a pointer to a non-refcounted type, this might
+  // be a non-owning pointer, but unless we find a case where this actually is
+  // relevant, it's safe to forbid any raw pointer result.
+  static_assert(
+      !std::is_pointer_v<R>,
+      "Raw pointer results are not supported, please specify a smart pointer "
+      "result type explicitly, so that getter_AddRefs is used");
+
+  R res;
+  nsresult rv = aFunc(std::forward<Args>(aArgs)..., aRArgMapper(res));
+  if (NS_FAILED(rv)) {
+    return Err(ResultTypeTraits<E>::From(rv));
+  }
+  return res;
+}
+
+template <typename T>
+struct outparam_as_pointer;
+
+template <typename T>
+struct outparam_as_pointer<T*> {
+  using type = T*;
+};
+
+template <typename T>
+struct outparam_as_reference;
+
+template <typename T>
+struct outparam_as_reference<T*> {
+  using type = T&;
+};
+
+template <typename R, typename E, template <typename> typename RArg,
+          typename Func, typename... Args>
+using to_result_retval_t =
+    decltype(std::declval<Func&>()(
+                 std::declval<Args&&>()...,
+                 std::declval<typename RArg<decltype(ResultRefAsParam(
+                     std::declval<R&>()))>::type>()),
+             Result<R, E>(Err(ResultTypeTraits<E>::From(NS_ERROR_FAILURE))));
+
+// There are two ToResultInvokeSelector overloads, which cover the cases of a)
+// a pointer-typed output parameter, and b) a reference-typed output
+// parameter, using to_result_retval_t in connection with outparam_as_pointer
+// and outparam_as_reference type traits. These type traits may be specialized
+// for types other than raw pointers to allow calling functions with argument
+// types that implicitly convert/bind to a raw pointer/reference. The overload
+// that is used is selected by expression SFINAE: the decltype expression in
+// to_result_retval_t is only valid in either case.
+template <typename R, typename E, typename Func, typename... Args>
+auto ToResultInvokeSelector(const Func& aFunc, Args&&... aArgs)
+    -> to_result_retval_t<R, E, outparam_as_pointer, Func, Args...> {
+  return ToResultInvokeInternal<R, E>(
+      aFunc, [](R& res) -> decltype(auto) { return ResultRefAsParam(res); },
+      std::forward<Args>(aArgs)...);
+}
+
+template <typename R, typename E, typename Func, typename... Args>
+auto ToResultInvokeSelector(const Func& aFunc, Args&&... aArgs)
+    -> to_result_retval_t<R, E, outparam_as_reference, Func, Args...> {
+  return ToResultInvokeInternal<R, E>(
+      aFunc, [](R& res) -> decltype(auto) { return *ResultRefAsParam(res); },
+      std::forward<Args>(aArgs)...);
+}
+
+}  // namespace detail
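A sketch of the adapter in use with a hypothetical free function taking a trailing out-parameter; GetCount is assumed, not part of MFBT:

#include "mozilla/ResultExtensions.h"

// Hypothetical XPCOM-style callable: nsresult GetCount(uint32_t* aResult);
mozilla::Result<uint32_t, nsresult> CountOrErr() {
  // On NS_FAILED, the error code becomes the error arm; otherwise the value
  // written through the out-parameter becomes the success arm.
  return mozilla::ToResultInvoke<uint32_t>(GetCount);
}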
+/**
+ * Adapts a function with an nsresult error type and an R* output parameter as
+ * the last parameter to a function returning a mozilla::Result<R, nsresult>
+ * object.
+ *
+ * This can also be used with member functions together with std::mem_fn, e.g.
+ *
+ *    nsCOMPtr<nsIFile> file = ...;
+ *    auto existsOrErr = ToResultInvoke<bool>(std::mem_fn(&nsIFile::Exists),
+ *                                            *file);
+ *
+ * but it is more convenient to use the member function version, which has the
+ * additional benefit of enabling the deduction of the success result type:
+ *
+ *    nsCOMPtr<nsIFile> file = ...;
+ *    auto existsOrErr = ToResultInvokeMember(*file, &nsIFile::Exists);
+ */
+template <typename R, typename E = nsresult, typename Func, typename... Args>
+Result<R, E> ToResultInvoke(const Func& aFunc, Args&&... aArgs) {
+  return detail::ToResultInvokeSelector<R, E, Func, Args&&...>(
+      aFunc, std::forward<Args>(aArgs)...);
+}
+
+namespace detail {
+template <typename T>
+struct tag {
+  using type = T;
+};
+
+template <typename... Ts>
+struct select_last {
+  using type = typename decltype((tag<Ts>{}, ...))::type;
+};
+
+template <typename... Ts>
+using select_last_t = typename select_last<Ts...>::type;
+
+template <>
+struct select_last<> {
+  using type = void;
+};
+
+template <typename RArg, typename T, typename E, typename Func,
+          typename... Args>
+auto ToResultInvokeMemberInternal(T& aObj, const Func& aFunc,
+                                  Args&&... aArgs) {
+  if constexpr (std::is_pointer_v<RArg> ||
+                (std::is_lvalue_reference_v<RArg> &&
+                 !std::is_const_v<std::remove_reference_t<RArg>>)) {
+    auto lambda = [&](RArg res) {
+      return (aObj.*aFunc)(std::forward<Args>(aArgs)..., res);
+    };
+    return detail::ToResultInvokeSelector<
+        std::remove_reference_t<std::remove_pointer_t<RArg>>, E,
+        decltype(lambda)>(lambda);
+  } else {
+    // No output parameter present, return a Result<Ok, E>
+    return mozilla::ToResult<E>((aObj.*aFunc)(std::forward<Args>(aArgs)...));
+  }
+}
+
+// For use in MOZ_TO_RESULT_INVOKE_MEMBER/MOZ_TO_RESULT_INVOKE_MEMBER_TYPED.
+template <typename T>
+auto DerefHelper(const T&) -> T&;
+
+template <typename T>
+auto DerefHelper(T*) -> T&;
+
+template