summaryrefslogtreecommitdiffstats
path: root/image/decoders
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
commit6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
treea68f146d7fa01f0134297619fbe7e33db084e0aa /image/decoders
parentInitial commit. (diff)
downloadthunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.tar.xz
thunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.zip
Adding upstream version 1:115.7.0.upstream/1%115.7.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'image/decoders')
-rw-r--r--image/decoders/EXIF.cpp519
-rw-r--r--image/decoders/EXIF.h95
-rw-r--r--image/decoders/GIF2.h67
-rw-r--r--image/decoders/iccjpeg.c184
-rw-r--r--image/decoders/iccjpeg.h65
-rw-r--r--image/decoders/icon/android/moz.build13
-rw-r--r--image/decoders/icon/android/nsIconChannel.cpp125
-rw-r--r--image/decoders/icon/android/nsIconChannel.h45
-rw-r--r--image/decoders/icon/components.conf29
-rw-r--r--image/decoders/icon/gtk/moz.build20
-rw-r--r--image/decoders/icon/gtk/nsIconChannel.cpp483
-rw-r--r--image/decoders/icon/gtk/nsIconChannel.h53
-rw-r--r--image/decoders/icon/mac/moz.build13
-rw-r--r--image/decoders/icon/mac/nsIconChannel.h61
-rw-r--r--image/decoders/icon/mac/nsIconChannelCocoa.mm505
-rw-r--r--image/decoders/icon/moz.build39
-rw-r--r--image/decoders/icon/nsIconProtocolHandler.cpp68
-rw-r--r--image/decoders/icon/nsIconProtocolHandler.h25
-rw-r--r--image/decoders/icon/nsIconURI.cpp654
-rw-r--r--image/decoders/icon/nsIconURI.h118
-rw-r--r--image/decoders/icon/win/moz.build21
-rw-r--r--image/decoders/icon/win/nsIconChannel.cpp1006
-rw-r--r--image/decoders/icon/win/nsIconChannel.h65
-rw-r--r--image/decoders/moz.build62
-rw-r--r--image/decoders/nsAVIFDecoder.cpp1991
-rw-r--r--image/decoders/nsAVIFDecoder.h289
-rw-r--r--image/decoders/nsBMPDecoder.cpp1275
-rw-r--r--image/decoders/nsBMPDecoder.h285
-rw-r--r--image/decoders/nsGIFDecoder2.cpp1073
-rw-r--r--image/decoders/nsGIFDecoder2.h166
-rw-r--r--image/decoders/nsICODecoder.cpp709
-rw-r--r--image/decoders/nsICODecoder.h106
-rw-r--r--image/decoders/nsIconDecoder.cpp125
-rw-r--r--image/decoders/nsIconDecoder.h64
-rw-r--r--image/decoders/nsJPEGDecoder.cpp999
-rw-r--r--image/decoders/nsJPEGDecoder.h113
-rw-r--r--image/decoders/nsJXLDecoder.cpp163
-rw-r--r--image/decoders/nsJXLDecoder.h55
-rw-r--r--image/decoders/nsPNGDecoder.cpp1035
-rw-r--r--image/decoders/nsPNGDecoder.h148
-rw-r--r--image/decoders/nsWebPDecoder.cpp605
-rw-r--r--image/decoders/nsWebPDecoder.h105
42 files changed, 13641 insertions, 0 deletions
diff --git a/image/decoders/EXIF.cpp b/image/decoders/EXIF.cpp
new file mode 100644
index 0000000000..97563248c7
--- /dev/null
+++ b/image/decoders/EXIF.cpp
@@ -0,0 +1,519 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EXIF.h"
+
+#include "mozilla/EndianUtils.h"
+#include "mozilla/StaticPrefs_image.h"
+
+namespace mozilla::image {
+
+// Section references in this file refer to the EXIF v2.3 standard, also known
+// as CIPA DC-008-Translation-2010.
+
+// See Section 4.6.4, Table 4.
// This is a scoped enum (enum class) backed by uint16_t, so raw integers
// produced by parsing must be explicitly converted (via EXIFTag(tag)) before
// they can be compared against these values.
enum class EXIFTag : uint16_t {
  Orientation = 0x112,       // Rotation/mirroring to apply when displaying.
  XResolution = 0x11a,       // Horizontal resolution (RATIONAL).
  YResolution = 0x11b,       // Vertical resolution (RATIONAL).
  PixelXDimension = 0xa002,  // Valid image width, in pixels.
  PixelYDimension = 0xa003,  // Valid image height, in pixels.
  ResolutionUnit = 0x128,    // Unit for X/YResolution (2 = inch, 3 = cm).
  IFDPointer = 0x8769,       // Offset of the EXIF-private IFD.
};
+
+// See Section 4.6.2.
// IFD entry data types. Only the values this parser actually checks against
// are listed; other types defined by the standard are simply never matched.
enum EXIFType {
  ByteType = 1,
  ASCIIType = 2,
  ShortType = 3,      // 16-bit unsigned integer.
  LongType = 4,       // 32-bit unsigned integer.
  RationalType = 5,   // Two LONGs: numerator then denominator.
  UndefinedType = 7,
  SignedLongType = 9,
  SignedRational = 10,
};
+
// An EXIF APP1 payload begins with "Exif" followed by two NUL pad bytes; the
// TIFF header starts immediately after those six bytes.
static const char* EXIFHeader = "Exif\0\0";
static const uint32_t EXIFHeaderLength = 6;
static const uint32_t TIFFHeaderStart = EXIFHeaderLength;
+
// Accumulates the values of interest found while walking the IFDs. The Maybe
// fields stay Nothing() when the corresponding tag is absent or unparsable.
struct ParsedEXIFData {
  Orientation orientation;
  Maybe<float> resolutionX;              // XResolution tag, as a float.
  Maybe<float> resolutionY;              // YResolution tag, as a float.
  Maybe<uint32_t> pixelXDimension;       // PixelXDimension tag.
  Maybe<uint32_t> pixelYDimension;       // PixelYDimension tag.
  Maybe<ResolutionUnit> resolutionUnit;  // ResolutionUnit tag (inch or cm).
};
+
+static float ToDppx(float aResolution, ResolutionUnit aUnit) {
+ constexpr float kPointsPerInch = 72.0f;
+ constexpr float kPointsPerCm = 1.0f / 2.54f;
+ switch (aUnit) {
+ case ResolutionUnit::Dpi:
+ return aResolution / kPointsPerInch;
+ case ResolutionUnit::Dpcm:
+ return aResolution / kPointsPerCm;
+ }
+ MOZ_CRASH("Unknown resolution unit?");
+}
+
// Converts the parsed resolution fields into a Resolution, returning the
// default (empty) Resolution when data is missing or fails validation.
static Resolution ResolutionFromParsedData(const ParsedEXIFData& aData,
                                            const gfx::IntSize& aRealImageSize) {
  // All three fields are required to compute a meaningful resolution.
  if (!aData.resolutionUnit || !aData.resolutionX || !aData.resolutionY) {
    return {};
  }

  Resolution resolution{ToDppx(*aData.resolutionX, *aData.resolutionUnit),
                        ToDppx(*aData.resolutionY, *aData.resolutionUnit)};

  if (StaticPrefs::image_exif_density_correction_sanity_check_enabled()) {
    // Sanity check: only honor the EXIF resolution when scaling the real
    // image size by it reproduces the pixel dimensions recorded in EXIF.
    if (!aData.pixelXDimension || !aData.pixelYDimension) {
      return {};
    }

    const gfx::IntSize exifSize(*aData.pixelXDimension, *aData.pixelYDimension);

    gfx::IntSize scaledSize = aRealImageSize;
    resolution.ApplyTo(scaledSize.width, scaledSize.height);

    // Mismatch means the metadata is inconsistent with the actual image;
    // ignore it rather than mis-scaling the image.
    if (exifSize != scaledSize) {
      return {};
    }
  }

  return resolution;
}
+
+/////////////////////////////////////////////////////////////
+// Parse EXIF data, typically found in a JPEG's APP1 segment.
+/////////////////////////////////////////////////////////////
EXIFData EXIFParser::ParseEXIF(const uint8_t* aData, const uint32_t aLength,
                               const gfx::IntSize& aRealImageSize) {
  // Reject null buffers and segments larger than the JPEG APP1 limit.
  if (!Initialize(aData, aLength)) {
    return EXIFData();
  }

  if (!ParseEXIFHeader()) {
    return EXIFData();
  }

  // The TIFF header establishes the byte order and the 0th IFD's offset.
  uint32_t offsetIFD;
  if (!ParseTIFFHeader(offsetIFD)) {
    return EXIFData();
  }

  JumpTo(offsetIFD);

  // Walk the IFD entries, recording any orientation/resolution tags found.
  ParsedEXIFData data;
  ParseIFD(data);

  return EXIFData{data.orientation,
                  ResolutionFromParsedData(data, aRealImageSize)};
}
+
+/////////////////////////////////////////////////////////
+// Parse the EXIF header. (Section 4.7.2, Figure 30)
+/////////////////////////////////////////////////////////
bool EXIFParser::ParseEXIFHeader() {
  // The payload must begin with "Exif" followed by two NUL pad bytes.
  return MatchString(EXIFHeader, EXIFHeaderLength);
}
+
+/////////////////////////////////////////////////////////
+// Parse the TIFF header. (Section 4.5.2, Table 1)
+/////////////////////////////////////////////////////////
bool EXIFParser::ParseTIFFHeader(uint32_t& aIFD0OffsetOut) {
  // Determine byte order: "MM\0*" marks big-endian data and "II*\0"
  // little-endian (the magic number 42 is byte-swapped accordingly).
  if (MatchString("MM\0*", 4)) {
    mByteOrder = ByteOrder::BigEndian;
  } else if (MatchString("II*\0", 4)) {
    mByteOrder = ByteOrder::LittleEndian;
  } else {
    return false;
  }

  // Determine offset of the 0th IFD. (It shouldn't be greater than 64k, which
  // is the maximum size of the entire APP1 segment.)
  uint32_t ifd0Offset;
  if (!ReadUInt32(ifd0Offset) || ifd0Offset > 64 * 1024) {
    return false;
  }

  // The IFD offset is relative to the beginning of the TIFF header, which
  // begins after the EXIF header, so we need to increase the offset
  // appropriately.
  aIFD0OffsetOut = ifd0Offset + TIFFHeaderStart;
  return true;
}
+
// An arbitrary limit on the amount of pointers that we'll chase, to prevent
// bad inputs getting us stuck.
constexpr uint32_t kMaxEXIFDepth = 16;

/////////////////////////////////////////////////////////
// Parse the entries in IFD0. (Section 4.6.2)
/////////////////////////////////////////////////////////
void EXIFParser::ParseIFD(ParsedEXIFData& aData, uint32_t aDepth) {
  // Bail out if we've followed too many IFD pointers (cycle/abuse guard).
  if (NS_WARN_IF(aDepth > kMaxEXIFDepth)) {
    return;
  }

  // An IFD begins with a 2-byte count of the entries that follow.
  uint16_t entryCount;
  if (!ReadUInt16(entryCount)) {
    return;
  }

  for (uint16_t entry = 0; entry < entryCount; ++entry) {
    // Read the fields of the 12-byte entry: tag (2 bytes), type (2 bytes),
    // count (4 bytes). The remaining 4-byte value/offset field is consumed
    // by the per-tag handlers below, so every arm must advance exactly 4
    // bytes to keep the cursor aligned with the next entry.
    uint16_t tag;
    if (!ReadUInt16(tag)) {
      return;
    }

    uint16_t type;
    if (!ReadUInt16(type)) {
      return;
    }

    uint32_t count;
    if (!ReadUInt32(count)) {
      return;
    }

    switch (EXIFTag(tag)) {
      case EXIFTag::Orientation:
        // We should have an orientation value here; go ahead and parse it.
        if (!ParseOrientation(type, count, aData.orientation)) {
          return;
        }
        break;
      case EXIFTag::ResolutionUnit:
        if (!ParseResolutionUnit(type, count, aData.resolutionUnit)) {
          return;
        }
        break;
      case EXIFTag::XResolution:
        if (!ParseResolution(type, count, aData.resolutionX)) {
          return;
        }
        break;
      case EXIFTag::YResolution:
        if (!ParseResolution(type, count, aData.resolutionY)) {
          return;
        }
        break;
      case EXIFTag::PixelXDimension:
        if (!ParseDimension(type, count, aData.pixelXDimension)) {
          return;
        }
        break;
      case EXIFTag::PixelYDimension:
        if (!ParseDimension(type, count, aData.pixelYDimension)) {
          return;
        }
        break;
      case EXIFTag::IFDPointer: {
        uint32_t offset;
        if (!ReadUInt32(offset)) {
          return;
        }

        // Recurse into the pointed-to IFD; ScopedJump restores the cursor so
        // we resume at the next entry of this IFD.
        ScopedJump jump(*this, offset + TIFFHeaderStart);
        ParseIFD(aData, aDepth + 1);
        break;
      }

      default:
        // Unrecognized tag: skip the 4-byte value/offset field.
        Advance(4);
        break;
    }
  }
}
+
+bool EXIFParser::ReadRational(float& aOut) {
+ // Values larger than 4 bytes (like rationals) are specified as an offset into
+ // the TIFF header.
+ uint32_t valueOffset;
+ if (!ReadUInt32(valueOffset)) {
+ return false;
+ }
+ ScopedJump jumpToHeader(*this, valueOffset + TIFFHeaderStart);
+ uint32_t numerator;
+ if (!ReadUInt32(numerator)) {
+ return false;
+ }
+ uint32_t denominator;
+ if (!ReadUInt32(denominator)) {
+ return false;
+ }
+ if (denominator == 0) {
+ return false;
+ }
+ aOut = float(numerator) / float(denominator);
+ return true;
+}
+
bool EXIFParser::ParseResolution(uint16_t aType, uint32_t aCount,
                                 Maybe<float>& aOut) {
  // When density correction is disabled, just skip the 4-byte value field.
  if (!StaticPrefs::image_exif_density_correction_enabled()) {
    Advance(4);
    return true;
  }
  // A resolution must be a single RATIONAL value.
  if (aType != RationalType || aCount != 1) {
    return false;
  }
  float value;
  if (!ReadRational(value)) {
    return false;
  }
  // A zero resolution is meaningless; treat it as malformed.
  if (value == 0.0f) {
    return false;
  }
  aOut = Some(value);
  return true;
}
+
bool EXIFParser::ParseDimension(uint16_t aType, uint32_t aCount,
                                Maybe<uint32_t>& aOut) {
  // When density correction is disabled, just skip the 4-byte value field.
  if (!StaticPrefs::image_exif_density_correction_enabled()) {
    Advance(4);
    return true;
  }

  if (aCount != 1) {
    return false;
  }

  // Pixel dimensions may be stored as either SHORT or LONG.
  switch (aType) {
    case ShortType: {
      uint16_t value;
      if (!ReadUInt16(value)) {
        return false;
      }
      aOut = Some(value);
      // A SHORT only fills half of the 4-byte value field; skip the padding.
      Advance(2);
      break;
    }
    case LongType: {
      uint32_t value;
      if (!ReadUInt32(value)) {
        return false;
      }
      aOut = Some(value);
      break;
    }
    default:
      return false;
  }
  return true;
}
+
bool EXIFParser::ParseResolutionUnit(uint16_t aType, uint32_t aCount,
                                     Maybe<ResolutionUnit>& aOut) {
  // When density correction is disabled, just skip the 4-byte value field.
  if (!StaticPrefs::image_exif_density_correction_enabled()) {
    Advance(4);
    return true;
  }
  // The unit must be a single SHORT.
  if (aType != ShortType || aCount != 1) {
    return false;
  }
  uint16_t value;
  if (!ReadUInt16(value)) {
    return false;
  }
  switch (value) {
    case 2:  // Inches.
      aOut = Some(ResolutionUnit::Dpi);
      break;
    case 3:  // Centimeters.
      aOut = Some(ResolutionUnit::Dpcm);
      break;
    default:
      return false;
  }

  // This is a 32-bit field, but the unit value only occupies the first 16
  // bits. We need to advance another 16 bits to consume the entire field.
  Advance(2);
  return true;
}
+
bool EXIFParser::ParseOrientation(uint16_t aType, uint32_t aCount,
                                  Orientation& aOut) {
  // Sanity check the type and count: must be a single SHORT.
  if (aType != ShortType || aCount != 1) {
    return false;
  }

  uint16_t value;
  if (!ReadUInt16(value)) {
    return false;
  }

  // Map the standard's eight orientation codes onto a rotation plus an
  // optional horizontal flip.
  switch (value) {
    case 1:  // Normal.
      aOut = Orientation(Angle::D0, Flip::Unflipped);
      break;
    case 2:  // Mirrored horizontally.
      aOut = Orientation(Angle::D0, Flip::Horizontal);
      break;
    case 3:
      aOut = Orientation(Angle::D180, Flip::Unflipped);
      break;
    case 4:
      aOut = Orientation(Angle::D180, Flip::Horizontal);
      break;
    case 5:
      aOut = Orientation(Angle::D90, Flip::Horizontal);
      break;
    case 6:
      aOut = Orientation(Angle::D90, Flip::Unflipped);
      break;
    case 7:
      aOut = Orientation(Angle::D270, Flip::Horizontal);
      break;
    case 8:
      aOut = Orientation(Angle::D270, Flip::Unflipped);
      break;
    default:
      return false;
  }

  // This is a 32-bit field, but the orientation value only occupies the first
  // 16 bits. We need to advance another 16 bits to consume the entire field.
  Advance(2);
  return true;
}
+
+bool EXIFParser::Initialize(const uint8_t* aData, const uint32_t aLength) {
+ if (aData == nullptr) {
+ return false;
+ }
+
+ // An APP1 segment larger than 64k violates the JPEG standard.
+ if (aLength > 64 * 1024) {
+ return false;
+ }
+
+ mStart = mCurrent = aData;
+ mLength = mRemainingLength = aLength;
+ mByteOrder = ByteOrder::Unknown;
+ return true;
+}
+
+void EXIFParser::Advance(const uint32_t aDistance) {
+ if (mRemainingLength >= aDistance) {
+ mCurrent += aDistance;
+ mRemainingLength -= aDistance;
+ } else {
+ mCurrent = mStart;
+ mRemainingLength = 0;
+ }
+}
+
+void EXIFParser::JumpTo(const uint32_t aOffset) {
+ if (mLength >= aOffset) {
+ mCurrent = mStart + aOffset;
+ mRemainingLength = mLength - aOffset;
+ } else {
+ mCurrent = mStart;
+ mRemainingLength = 0;
+ }
+}
+
+bool EXIFParser::MatchString(const char* aString, const uint32_t aLength) {
+ if (mRemainingLength < aLength) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < aLength; ++i) {
+ if (mCurrent[i] != aString[i]) {
+ return false;
+ }
+ }
+
+ Advance(aLength);
+ return true;
+}
+
// Consumes a 16-bit value if it equals aValue; on mismatch the cursor is left
// unchanged. Requires the byte order to have been established by
// ParseTIFFHeader first.
bool EXIFParser::MatchUInt16(const uint16_t aValue) {
  if (mRemainingLength < 2) {
    return false;
  }

  bool matched;
  switch (mByteOrder) {
    case ByteOrder::LittleEndian:
      matched = LittleEndian::readUint16(mCurrent) == aValue;
      break;
    case ByteOrder::BigEndian:
      matched = BigEndian::readUint16(mCurrent) == aValue;
      break;
    default:
      MOZ_ASSERT_UNREACHABLE("Should know the byte order by now");
      matched = false;
  }

  if (matched) {
    Advance(2);
  }

  return matched;
}
+
// Reads a 16-bit value in the established byte order and advances the cursor
// past it. Fails (leaving the cursor unchanged) on truncation or if the byte
// order has not been determined yet.
bool EXIFParser::ReadUInt16(uint16_t& aValue) {
  if (mRemainingLength < 2) {
    return false;
  }

  bool matched = true;
  switch (mByteOrder) {
    case ByteOrder::LittleEndian:
      aValue = LittleEndian::readUint16(mCurrent);
      break;
    case ByteOrder::BigEndian:
      aValue = BigEndian::readUint16(mCurrent);
      break;
    default:
      MOZ_ASSERT_UNREACHABLE("Should know the byte order by now");
      matched = false;
  }

  if (matched) {
    Advance(2);
  }

  return matched;
}
+
// Reads a 32-bit value in the established byte order and advances the cursor
// past it. Fails (leaving the cursor unchanged) on truncation or if the byte
// order has not been determined yet.
bool EXIFParser::ReadUInt32(uint32_t& aValue) {
  if (mRemainingLength < 4) {
    return false;
  }

  bool matched = true;
  switch (mByteOrder) {
    case ByteOrder::LittleEndian:
      aValue = LittleEndian::readUint32(mCurrent);
      break;
    case ByteOrder::BigEndian:
      aValue = BigEndian::readUint32(mCurrent);
      break;
    default:
      MOZ_ASSERT_UNREACHABLE("Should know the byte order by now");
      matched = false;
  }

  if (matched) {
    Advance(4);
  }

  return matched;
}
+
+} // namespace mozilla::image
diff --git a/image/decoders/EXIF.h b/image/decoders/EXIF.h
new file mode 100644
index 0000000000..eb23f8d537
--- /dev/null
+++ b/image/decoders/EXIF.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_EXIF_h
+#define mozilla_image_decoders_EXIF_h
+
+#include <stdint.h>
+#include "nsDebug.h"
+
+#include "Orientation.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/image/Resolution.h"
+#include "mozilla/gfx/Point.h"
+
+namespace mozilla::image {
+
+enum class ByteOrder : uint8_t { Unknown, LittleEndian, BigEndian };
+
// The result of an EXIF parse: the orientation to apply when displaying the
// image and, when present and validated, its intrinsic resolution. Both
// default to "no-op" values on parse failure.
struct EXIFData {
  const Orientation orientation = Orientation();
  const Resolution resolution = Resolution();
};
+
+struct ParsedEXIFData;
+
// Unit of the EXIF ResolutionUnit tag: dots per inch (tag value 2) or dots
// per centimeter (tag value 3).
enum class ResolutionUnit : uint8_t {
  Dpi,
  Dpcm,
};
+
class EXIFParser {
 public:
  // Parses an EXIF (APP1) payload. aRealImageSize is the decoded image's
  // actual size, used to sanity-check the EXIF resolution. Returns
  // default-constructed EXIFData on any parse failure.
  static EXIFData Parse(const uint8_t* aData, const uint32_t aLength,
                        const gfx::IntSize& aRealImageSize) {
    EXIFParser parser;
    return parser.ParseEXIF(aData, aLength, aRealImageSize);
  }

 private:
  EXIFParser()
      : mStart(nullptr),
        mCurrent(nullptr),
        mLength(0),
        mRemainingLength(0),
        mByteOrder(ByteOrder::Unknown) {}

  // Top-level parse: EXIF header, then TIFF header, then the 0th IFD.
  EXIFData ParseEXIF(const uint8_t* aData, const uint32_t aLength,
                     const gfx::IntSize& aRealImageSize);
  bool ParseEXIFHeader();
  bool ParseTIFFHeader(uint32_t& aIFD0OffsetOut);

  // IFD walking and per-tag entry handlers; each handler consumes the
  // entry's 4-byte value/offset field.
  void ParseIFD(ParsedEXIFData&, uint32_t aDepth = 0);
  bool ParseOrientation(uint16_t aType, uint32_t aCount, Orientation&);
  bool ParseResolution(uint16_t aType, uint32_t aCount, Maybe<float>&);
  bool ParseResolutionUnit(uint16_t aType, uint32_t aCount,
                           Maybe<ResolutionUnit>&);
  bool ParseDimension(uint16_t aType, uint32_t aCount, Maybe<uint32_t>&);

  // Cursor management over the APP1 buffer; out-of-range moves reset the
  // cursor to an empty state so later reads fail cleanly.
  bool Initialize(const uint8_t* aData, const uint32_t aLength);
  void Advance(const uint32_t aDistance);
  void JumpTo(const uint32_t aOffset);

  uint32_t CurrentOffset() const { return mCurrent - mStart; }

  // RAII helper: jumps to an offset on construction and restores the previous
  // cursor position on destruction.
  class ScopedJump {
    EXIFParser& mParser;
    uint32_t mOldOffset;

   public:
    ScopedJump(EXIFParser& aParser, uint32_t aOffset)
        : mParser(aParser), mOldOffset(aParser.CurrentOffset()) {
      mParser.JumpTo(aOffset);
    }

    ~ScopedJump() { mParser.JumpTo(mOldOffset); }
  };

  // Byte-order-aware primitives; all fail rather than reading past the end.
  bool MatchString(const char* aString, const uint32_t aLength);
  bool MatchUInt16(const uint16_t aValue);
  bool ReadUInt16(uint16_t& aOut);
  bool ReadUInt32(uint32_t& aOut);
  bool ReadRational(float& aOut);

  const uint8_t* mStart;      // Start of the APP1 payload.
  const uint8_t* mCurrent;    // Current read position.
  uint32_t mLength;           // Total payload length.
  uint32_t mRemainingLength;  // Bytes left from mCurrent to the end.
  ByteOrder mByteOrder;       // From the TIFF header; Unknown until parsed.
};
+
+} // namespace mozilla::image
+
+#endif // mozilla_image_decoders_EXIF_h
diff --git a/image/decoders/GIF2.h b/image/decoders/GIF2.h
new file mode 100644
index 0000000000..c0c6bf0fde
--- /dev/null
+++ b/image/decoders/GIF2.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_GIF2_H
+#define mozilla_image_decoders_GIF2_H
+
+#define MAX_LZW_BITS 12
+#define MAX_BITS 4097 // 2^MAX_LZW_BITS+1
+#define MAX_COLORS 256
+#define MIN_HOLD_SIZE 256
+
+enum { GIF_TRAILER = 0x3B }; // ';'
+enum { GIF_IMAGE_SEPARATOR = 0x2C }; // ','
+enum { GIF_EXTENSION_INTRODUCER = 0x21 }; // '!'
+enum { GIF_GRAPHIC_CONTROL_LABEL = 0xF9 };
+enum { GIF_APPLICATION_EXTENSION_LABEL = 0xFF };
+
+// A GIF decoder's state
// A GIF decoder's state. Plain C struct: grouped into LZW decompressor state,
// per-frame output state, per-frame parameters, and whole-file (multi-image)
// state, followed by the large fixed-size LZW/colormap tables.
typedef struct gif_struct {
  // LZW decoder state machine
  uint8_t* stackp;  // Current stack pointer
  int datasize;     // LZW code size state -- NOTE(review): presumably the
                    // minimum code size from the stream; confirm in decoder
  int codesize;     // Current code width, in bits -- TODO confirm
  int codemask;     // Mask for extracting codesize bits -- TODO confirm
  int avail;        // Index of next available slot in dictionary
  int oldcode;
  uint8_t firstchar;
  int bits;       // Number of unread bits in "datum"
  int32_t datum;  // 32-bit input buffer

  // Output state machine
  int64_t pixels_remaining;  // Pixels remaining to be output.

  // Parameters for image frame currently being decoded
  int tpixel;                // Index of transparent pixel
  int32_t disposal_method;   // Restore to background, leave in place, etc.
  uint32_t* local_colormap;  // Per-image colormap
  uint32_t local_colormap_buffer_size;  // Size of the buffer containing the
                                        // local colormap.
  int local_colormap_size;              // Size of local colormap array.
  uint32_t delay_time;                  // Display time, in milliseconds,
                                        // for this image in a multi-image GIF

  // Global (multi-image) state
  int version;           // Either 89 for GIF89 or 87 for GIF87
  int32_t screen_width;  // Logical screen width & height
  int32_t screen_height;
  uint8_t global_colormap_depth;   // Depth of global colormap array
  uint16_t global_colormap_count;  // Number of colors in global colormap
  int images_decoded;              // Counts images for multi-part GIFs
  int loop_count;                  // Netscape specific extension block to
                                   // control the number of animation loops a
                                   // GIF renders.

  bool is_transparent;  // TRUE, if tpixel is valid

  uint16_t prefix[MAX_BITS];             // LZW decoding tables
  uint32_t global_colormap[MAX_COLORS];  // Default colormap if local not
                                         // supplied
  uint8_t suffix[MAX_BITS];              // LZW decoding tables
  uint8_t stack[MAX_BITS];               // Base of LZW decoder stack

} gif_struct;
+
+#endif // mozilla_image_decoders_GIF2_H
diff --git a/image/decoders/iccjpeg.c b/image/decoders/iccjpeg.c
new file mode 100644
index 0000000000..6157fe8298
--- /dev/null
+++ b/image/decoders/iccjpeg.c
@@ -0,0 +1,184 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * iccjpeg.c
+ *
+ * This file provides code to read and write International Color Consortium
+ * (ICC) device profiles embedded in JFIF JPEG image files. The ICC has
+ * defined a standard format for including such data in JPEG "APP2" markers.
+ * The code given here does not know anything about the internal structure
+ * of the ICC profile data; it just knows how to put the profile data into
+ * a JPEG file being written, or get it back out when reading.
+ *
+ * This code depends on new features added to the IJG JPEG library as of
+ * IJG release 6b; it will not compile or work with older IJG versions.
+ *
+ * NOTE: this code would need surgery to work on 16-bit-int machines
+ * with ICC profiles exceeding 64K bytes in size. If you need to do that,
+ * change all the "unsigned int" variables to "INT32". You'll also need
+ * to find a malloc() replacement that can allocate more than 64K.
+ */
+
+#include "iccjpeg.h"
+#include <stdlib.h> /* define malloc() */
+
+/*
+ * Since an ICC profile can be larger than the maximum size of a JPEG marker
+ * (64K), we need provisions to split it into multiple markers. The format
+ * defined by the ICC specifies one or more APP2 markers containing the
+ * following data:
+ * Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
+ * Marker sequence number 1 for first APP2, 2 for next, etc (1 byte)
+ * Number of markers Total number of APP2's used (1 byte)
+ * Profile data (remainder of APP2 data)
+ * Decoders should use the marker sequence numbers to reassemble the profile,
+ * rather than assuming that the APP2 markers appear in the correct sequence.
+ */
+
+#define ICC_MARKER (JPEG_APP0 + 2) /* JPEG marker code for ICC */
+#define ICC_OVERHEAD_LEN 14 /* size of non-profile data in APP2 */
+#define MAX_BYTES_IN_MARKER 65533 /* maximum data len of a JPEG marker */
+#define MAX_DATA_BYTES_IN_MARKER (MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN)
+
+/*
+ * Prepare for reading an ICC profile
+ */
+
/* Must be called after jpeg_create_decompress() and before
 * jpeg_read_header(); read_icc_profile() can then be used after the header
 * has been read.
 */
void setup_read_icc_profile(j_decompress_ptr cinfo) {
  /* Tell the library to keep any APP2 data it may find */
  jpeg_save_markers(cinfo, ICC_MARKER, 0xFFFF);
}
+
+/*
+ * Handy subroutine to test whether a saved marker is an ICC profile marker.
+ */
+
/* Returns TRUE when the marker is an APP2 marker, is long enough to hold the
 * ICC overhead bytes, and starts with the 12-byte identifying string
 * "ICC_PROFILE" plus a NUL terminator (spelled out byte-by-byte below).
 */
static boolean marker_is_icc(jpeg_saved_marker_ptr marker) {
  return marker->marker == ICC_MARKER &&
         marker->data_length >= ICC_OVERHEAD_LEN &&
         /* verify the identifying string */
         GETJOCTET(marker->data[0]) == 0x49 &&  /* 'I' */
         GETJOCTET(marker->data[1]) == 0x43 &&  /* 'C' */
         GETJOCTET(marker->data[2]) == 0x43 &&  /* 'C' */
         GETJOCTET(marker->data[3]) == 0x5F &&  /* '_' */
         GETJOCTET(marker->data[4]) == 0x50 &&  /* 'P' */
         GETJOCTET(marker->data[5]) == 0x52 &&  /* 'R' */
         GETJOCTET(marker->data[6]) == 0x4F &&  /* 'O' */
         GETJOCTET(marker->data[7]) == 0x46 &&  /* 'F' */
         GETJOCTET(marker->data[8]) == 0x49 &&  /* 'I' */
         GETJOCTET(marker->data[9]) == 0x4C &&  /* 'L' */
         GETJOCTET(marker->data[10]) == 0x45 && /* 'E' */
         GETJOCTET(marker->data[11]) == 0x0;    /* NUL */
}
+
+/*
+ * See if there was an ICC profile in the JPEG file being read;
+ * if so, reassemble and return the profile data.
+ *
+ * TRUE is returned if an ICC profile was found, FALSE if not.
+ * If TRUE is returned, *icc_data_ptr is set to point to the
+ * returned data, and *icc_data_len is set to its length.
+ *
+ * IMPORTANT: the data at **icc_data_ptr has been allocated with malloc()
+ * and must be freed by the caller with free() when the caller no longer
+ * needs it. (Alternatively, we could write this routine to use the
+ * IJG library's memory allocator, so that the data would be freed implicitly
+ * at jpeg_finish_decompress() time. But it seems likely that many apps
+ * will prefer to have the data stick around after decompression finishes.)
+ *
+ * NOTE: if the file contains invalid ICC APP2 markers, we just silently
+ * return FALSE. You might want to issue an error message instead.
+ */
+
boolean read_icc_profile(j_decompress_ptr cinfo, JOCTET** icc_data_ptr,
                         unsigned int* icc_data_len) {
  jpeg_saved_marker_ptr marker;
  int num_markers = 0;
  int seq_no;
  JOCTET* icc_data;
  unsigned int total_length;
#define MAX_SEQ_NO 255                        /* sufficient since marker numbers are bytes */
  char marker_present[MAX_SEQ_NO + 1];        /* 1 if marker found */
  unsigned int data_length[MAX_SEQ_NO + 1];   /* size of profile data in marker */
  unsigned int data_offset[MAX_SEQ_NO + 1];   /* offset for data in marker */

  *icc_data_ptr = NULL; /* avoid confusion if FALSE return */
  *icc_data_len = 0;

  /* This first pass over the saved markers discovers whether there are
   * any ICC markers and verifies the consistency of the marker numbering.
   * (Markers may appear in any order; bytes 12/13 of each marker hold its
   * 1-based sequence number and the total marker count.)
   */

  for (seq_no = 1; seq_no <= MAX_SEQ_NO; seq_no++) {
    marker_present[seq_no] = 0;
  }

  for (marker = cinfo->marker_list; marker != NULL; marker = marker->next) {
    if (marker_is_icc(marker)) {
      if (num_markers == 0) {
        num_markers = GETJOCTET(marker->data[13]);
      } else if (num_markers != GETJOCTET(marker->data[13])) {
        return FALSE; /* inconsistent num_markers fields */
      }
      seq_no = GETJOCTET(marker->data[12]);
      if (seq_no <= 0 || seq_no > num_markers) {
        return FALSE; /* bogus sequence number */
      }
      if (marker_present[seq_no]) {
        return FALSE; /* duplicate sequence numbers */
      }
      marker_present[seq_no] = 1;
      data_length[seq_no] = marker->data_length - ICC_OVERHEAD_LEN;
    }
  }

  if (num_markers == 0) {
    return FALSE;
  }

  /* Check for missing markers, count total space needed,
   * compute offset of each marker's part of the data.
   */

  total_length = 0;
  for (seq_no = 1; seq_no <= num_markers; seq_no++) {
    if (marker_present[seq_no] == 0) {
      return FALSE; /* missing sequence number */
    }
    data_offset[seq_no] = total_length;
    total_length += data_length[seq_no];
  }

  if (total_length <= 0) {
    return FALSE; /* found only empty markers? */
  }

  /* Allocate space for assembled data; caller must free() it. */
  icc_data = (JOCTET*)malloc(total_length * sizeof(JOCTET));
  if (icc_data == NULL) {
    return FALSE; /* oops, out of memory */
  }

  /* and fill it in, placing each marker's chunk at its computed offset so
   * out-of-order markers still reassemble correctly */
  for (marker = cinfo->marker_list; marker != NULL; marker = marker->next) {
    if (marker_is_icc(marker)) {
      JOCTET FAR* src_ptr;
      JOCTET* dst_ptr;
      unsigned int length;
      seq_no = GETJOCTET(marker->data[12]);
      dst_ptr = icc_data + data_offset[seq_no];
      src_ptr = marker->data + ICC_OVERHEAD_LEN;
      length = data_length[seq_no];
      while (length--) {
        *dst_ptr++ = *src_ptr++;
      }
    }
  }

  *icc_data_ptr = icc_data;
  *icc_data_len = total_length;

  return TRUE;
}
diff --git a/image/decoders/iccjpeg.h b/image/decoders/iccjpeg.h
new file mode 100644
index 0000000000..4d48144a23
--- /dev/null
+++ b/image/decoders/iccjpeg.h
@@ -0,0 +1,65 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * iccjpeg.h
+ *
+ * This file provides code to read and write International Color Consortium
+ * (ICC) device profiles embedded in JFIF JPEG image files. The ICC has
+ * defined a standard format for including such data in JPEG "APP2" markers.
+ * The code given here does not know anything about the internal structure
+ * of the ICC profile data; it just knows how to put the profile data into
+ * a JPEG file being written, or get it back out when reading.
+ *
+ * This code depends on new features added to the IJG JPEG library as of
+ * IJG release 6b; it will not compile or work with older IJG versions.
+ *
+ * NOTE: this code would need surgery to work on 16-bit-int machines
+ * with ICC profiles exceeding 64K bytes in size. See iccprofile.c
+ * for details.
+ */
+
+#ifndef mozilla_image_decoders_iccjpeg_h
+#define mozilla_image_decoders_iccjpeg_h
+
+#include <stdio.h> /* needed to define "FILE", "NULL" */
+#include "jpeglib.h"
+
+/*
+ * Reading a JPEG file that may contain an ICC profile requires two steps:
+ *
+ * 1. After jpeg_create_decompress() but before jpeg_read_header(),
+ * call setup_read_icc_profile(). This routine tells the IJG library
+ * to save in memory any APP2 markers it may find in the file.
+ *
+ * 2. After jpeg_read_header(), call read_icc_profile() to find out
+ * whether there was a profile and obtain it if so.
+ */
+
+/*
+ * Prepare for reading an ICC profile
+ */
+
+extern void setup_read_icc_profile JPP((j_decompress_ptr cinfo));
+
+/*
+ * See if there was an ICC profile in the JPEG file being read;
+ * if so, reassemble and return the profile data.
+ *
+ * TRUE is returned if an ICC profile was found, FALSE if not.
+ * If TRUE is returned, *icc_data_ptr is set to point to the
+ * returned data, and *icc_data_len is set to its length.
+ *
+ * IMPORTANT: the data at **icc_data_ptr has been allocated with malloc()
+ * and must be freed by the caller with free() when the caller no longer
+ * needs it. (Alternatively, we could write this routine to use the
+ * IJG library's memory allocator, so that the data would be freed implicitly
+ * at jpeg_finish_decompress() time. But it seems likely that many apps
+ * will prefer to have the data stick around after decompression finishes.)
+ */
+
+extern boolean read_icc_profile JPP((j_decompress_ptr cinfo,
+ JOCTET** icc_data_ptr,
+ unsigned int* icc_data_len));
+#endif // mozilla_image_decoders_iccjpeg_h
diff --git a/image/decoders/icon/android/moz.build b/image/decoders/icon/android/moz.build
new file mode 100644
index 0000000000..a99ae228d4
--- /dev/null
+++ b/image/decoders/icon/android/moz.build
@@ -0,0 +1,13 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ "nsIconChannel.cpp",
+]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul"
diff --git a/image/decoders/icon/android/nsIconChannel.cpp b/image/decoders/icon/android/nsIconChannel.cpp
new file mode 100644
index 0000000000..7599cf2bb7
--- /dev/null
+++ b/image/decoders/icon/android/nsIconChannel.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdlib.h>
+#include "mozilla/gfx/Swizzle.h"
+#include "mozilla/dom/ContentChild.h"
+#include "mozilla/NullPrincipal.h"
+#include "nsMimeTypes.h"
+#include "nsXULAppAPI.h"
+#include "AndroidBridge.h"
+#include "nsIconChannel.h"
+#include "nsIIconURI.h"
+#include "nsIStringStream.h"
+#include "nsNetUtil.h"
+#include "nsComponentManagerUtils.h"
+
+NS_IMPL_ISUPPORTS(nsIconChannel, nsIRequest, nsIChannel)
+
+using namespace mozilla;
+using mozilla::dom::ContentChild;
+
+static nsresult GetIconForExtension(const nsACString& aFileExt,
+ uint32_t aIconSize, uint8_t* const aBuf) {
+ if (!AndroidBridge::Bridge()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ AndroidBridge::Bridge()->GetIconForExtension(aFileExt, aIconSize, aBuf);
+
+ return NS_OK;
+}
+
+static nsresult CallRemoteGetIconForExtension(const nsACString& aFileExt,
+ uint32_t aIconSize,
+ uint8_t* const aBuf) {
+ NS_ENSURE_TRUE(aBuf != nullptr, NS_ERROR_NULL_POINTER);
+
+ // An array has to be used to get data from remote process
+ nsTArray<uint8_t> bits;
+ uint32_t bufSize = aIconSize * aIconSize * 4;
+
+ if (!ContentChild::GetSingleton()->SendGetIconForExtension(
+ PromiseFlatCString(aFileExt), aIconSize, &bits)) {
+ return NS_ERROR_FAILURE;
+ }
+
+ NS_ASSERTION(bits.Length() == bufSize, "Pixels array is incomplete");
+ if (bits.Length() != bufSize) {
+ return NS_ERROR_FAILURE;
+ }
+
+ memcpy(aBuf, bits.Elements(), bufSize);
+
+ return NS_OK;
+}
+
+static nsresult moz_icon_to_channel(nsIURI* aURI, const nsACString& aFileExt,
+ uint32_t aIconSize, nsIChannel** aChannel) {
+ NS_ENSURE_TRUE(aIconSize < 256 && aIconSize > 0, NS_ERROR_UNEXPECTED);
+
+ int width = aIconSize;
+ int height = aIconSize;
+
+ // moz-icon data should have two bytes for the size,
+ // then the ARGB pixel values with pre-multiplied Alpha
+ const int channels = 4;
+ CheckedInt32 buf_size =
+ 4 + channels * CheckedInt32(height) * CheckedInt32(width);
+ if (!buf_size.isValid()) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ uint8_t* const buf = (uint8_t*)moz_xmalloc(buf_size.value());
+ uint8_t* out = buf;
+
+ *(out++) = width;
+ *(out++) = height;
+ *(out++) = uint8_t(mozilla::gfx::SurfaceFormat::R8G8B8A8);
+
+ // Set all bits to ensure in nsIconDecoder we color manage and premultiply.
+ *(out++) = 0xFF;
+
+ nsresult rv;
+ if (XRE_IsParentProcess()) {
+ rv = GetIconForExtension(aFileExt, aIconSize, out);
+ } else {
+ rv = CallRemoteGetIconForExtension(aFileExt, aIconSize, out);
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIStringInputStream> stream =
+ do_CreateInstance("@mozilla.org/io/string-input-stream;1", &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = stream->AdoptData((char*)buf, buf_size.value());
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // nsIconProtocolHandler::NewChannel will provide the correct loadInfo for
+ // this iconChannel. Use the most restrictive security settings for the
+ // temporary loadInfo to make sure the channel can not be opened.
+ nsCOMPtr<nsIPrincipal> nullPrincipal =
+ NullPrincipal::CreateWithoutOriginAttributes();
+ return NS_NewInputStreamChannel(
+ aChannel, aURI, stream.forget(), nullPrincipal,
+ nsILoadInfo::SEC_REQUIRE_SAME_ORIGIN_DATA_IS_BLOCKED,
+ nsIContentPolicy::TYPE_INTERNAL_IMAGE, nsLiteralCString(IMAGE_ICON_MS));
+}
+
+nsresult nsIconChannel::Init(nsIURI* aURI) {
+ nsCOMPtr<nsIMozIconURI> iconURI = do_QueryInterface(aURI);
+ NS_ASSERTION(iconURI, "URI is not an nsIMozIconURI");
+
+ nsAutoCString stockIcon;
+ iconURI->GetStockIcon(stockIcon);
+
+ uint32_t desiredImageSize;
+ iconURI->GetImageSize(&desiredImageSize);
+
+ nsAutoCString iconFileExt;
+ iconURI->GetFileExtension(iconFileExt);
+
+ return moz_icon_to_channel(iconURI, iconFileExt, desiredImageSize,
+ getter_AddRefs(mRealChannel));
+}
diff --git a/image/decoders/icon/android/nsIconChannel.h b/image/decoders/icon/android/nsIconChannel.h
new file mode 100644
index 0000000000..e25196c6ee
--- /dev/null
+++ b/image/decoders/icon/android/nsIconChannel.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_icon_android_nsIconChannel_h
+#define mozilla_image_decoders_icon_android_nsIconChannel_h
+
+#include "mozilla/Attributes.h"
+
+#include "nsIChannel.h"
+#include "nsIURI.h"
+#include "nsCOMPtr.h"
+
+/**
+ * This class is the Android implementation of nsIconChannel.
+ * It asks Android for an icon, and creates a new channel for
+ * that file to which all calls will be proxied.
+ */
+class nsIconChannel final : public nsIChannel {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_FORWARD_NSIREQUEST(mRealChannel->)
+ NS_FORWARD_NSICHANNEL(mRealChannel->)
+
+ nsIconChannel() {}
+
+ /**
+ * Called by nsIconProtocolHandler after it creates this channel.
+ * Must be called before calling any other function on this object.
+ * If this method fails, no other function must be called on this object.
+ */
+ nsresult Init(nsIURI* aURI);
+
+ private:
+ ~nsIconChannel() {}
+
+ /**
+ * The channel to the temp icon file (e.g. to /tmp/2qy9wjqw.html).
+ * Will always be non-null after a successful Init.
+ */
+ nsCOMPtr<nsIChannel> mRealChannel;
+};
+
+#endif // mozilla_image_decoders_icon_android_nsIconChannel_h
diff --git a/image/decoders/icon/components.conf b/image/decoders/icon/components.conf
new file mode 100644
index 0000000000..68bff8e231
--- /dev/null
+++ b/image/decoders/icon/components.conf
@@ -0,0 +1,29 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ 'cid': '{d0f9db12-249c-11d5-9905-001083010e9b}',
+ 'contract_ids': ['@mozilla.org/network/protocol;1?name=moz-icon'],
+ 'type': 'nsIconProtocolHandler',
+ 'headers': ['/image/decoders/icon/nsIconProtocolHandler.h'],
+ 'protocol_config': {
+ 'scheme': 'moz-icon',
+ 'flags': [
+ 'URI_NORELATIVE',
+ 'URI_NOAUTH',
+ 'URI_IS_UI_RESOURCE',
+ 'URI_IS_LOCAL_RESOURCE',
+ ],
+ 'default_port': 0,
+ },
+ },
+ {
+ 'cid': '{1460df3b-774c-4205-8349-838e507c3ef9}',
+ 'type': 'nsMozIconURI::Mutator',
+ 'headers': ['/image/decoders/icon/nsIconURI.h'],
+ },
+]
diff --git a/image/decoders/icon/gtk/moz.build b/image/decoders/icon/gtk/moz.build
new file mode 100644
index 0000000000..3eac86b6ba
--- /dev/null
+++ b/image/decoders/icon/gtk/moz.build
@@ -0,0 +1,20 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ "nsIconChannel.cpp",
+]
+
+EXPORTS += [
+ "nsIconChannel.h",
+]
+
+FINAL_LIBRARY = "xul"
+
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
+ CXXFLAGS += CONFIG["MOZ_GTK3_CFLAGS"]
+
+include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/image/decoders/icon/gtk/nsIconChannel.cpp b/image/decoders/icon/gtk/nsIconChannel.cpp
new file mode 100644
index 0000000000..2b8b958b9e
--- /dev/null
+++ b/image/decoders/icon/gtk/nsIconChannel.cpp
@@ -0,0 +1,483 @@
+/* vim:set ts=2 sw=2 sts=2 cin et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIconChannel.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/NullPrincipal.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/dom/ContentChild.h"
+#include "mozilla/gfx/Swizzle.h"
+#include "mozilla/ipc/ByteBuf.h"
+#include <algorithm>
+
+#include <gio/gio.h>
+
+#include <gtk/gtk.h>
+
+#include "nsMimeTypes.h"
+#include "nsIMIMEService.h"
+
+#include "nsServiceManagerUtils.h"
+
+#include "nsNetUtil.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIStringStream.h"
+#include "nsServiceManagerUtils.h"
+#include "nsIURL.h"
+#include "nsIPipe.h"
+#include "nsIAsyncInputStream.h"
+#include "nsIAsyncOutputStream.h"
+#include "prlink.h"
+#include "gfxPlatform.h"
+
+using mozilla::CheckedInt32;
+using mozilla::ipc::ByteBuf;
+
+NS_IMPL_ISUPPORTS(nsIconChannel, nsIRequest, nsIChannel)
+
+static nsresult MozGdkPixbufToByteBuf(GdkPixbuf* aPixbuf, ByteBuf* aByteBuf) {
+ int width = gdk_pixbuf_get_width(aPixbuf);
+ int height = gdk_pixbuf_get_height(aPixbuf);
+ NS_ENSURE_TRUE(height < 256 && width < 256 && height > 0 && width > 0 &&
+ gdk_pixbuf_get_colorspace(aPixbuf) == GDK_COLORSPACE_RGB &&
+ gdk_pixbuf_get_bits_per_sample(aPixbuf) == 8 &&
+ gdk_pixbuf_get_has_alpha(aPixbuf) &&
+ gdk_pixbuf_get_n_channels(aPixbuf) == 4,
+ NS_ERROR_UNEXPECTED);
+
+ const int n_channels = 4;
+ CheckedInt32 buf_size =
+ 4 + n_channels * CheckedInt32(height) * CheckedInt32(width);
+ if (!buf_size.isValid()) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ uint8_t* const buf = (uint8_t*)moz_xmalloc(buf_size.value());
+ uint8_t* out = buf;
+
+ *(out++) = width;
+ *(out++) = height;
+ *(out++) = uint8_t(mozilla::gfx::SurfaceFormat::OS_RGBA);
+
+ // Set all bits to ensure in nsIconDecoder we color manage and premultiply.
+ *(out++) = 0xFF;
+
+ const guchar* const pixels = gdk_pixbuf_get_pixels(aPixbuf);
+ int instride = gdk_pixbuf_get_rowstride(aPixbuf);
+ int outstride = width * n_channels;
+
+ // encode the RGB data and the A data and adjust the stride as necessary.
+ mozilla::gfx::SwizzleData(pixels, instride,
+ mozilla::gfx::SurfaceFormat::R8G8B8A8, out,
+ outstride, mozilla::gfx::SurfaceFormat::OS_RGBA,
+ mozilla::gfx::IntSize(width, height));
+
+ *aByteBuf = ByteBuf(buf, buf_size.value(), buf_size.value());
+ return NS_OK;
+}
+
+static nsresult ByteBufToStream(ByteBuf&& aBuf, nsIInputStream** aStream) {
+ nsresult rv;
+ nsCOMPtr<nsIStringInputStream> stream =
+ do_CreateInstance("@mozilla.org/io/string-input-stream;1", &rv);
+
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ // stream takes ownership of buf and will free it on destruction.
+ // This function cannot fail.
+ rv = stream->AdoptData(reinterpret_cast<char*>(aBuf.mData), aBuf.mLen);
+ MOZ_ASSERT(CheckedInt32(aBuf.mLen).isValid(),
+ "aBuf.mLen should fit in int32_t");
+ aBuf.mData = nullptr;
+
+ // If this no longer holds then re-examine buf's lifetime.
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ stream.forget(aStream);
+ return NS_OK;
+}
+
+static nsresult StreamToChannel(already_AddRefed<nsIInputStream> aStream,
+ nsIURI* aURI, nsIChannel** aChannel) {
+ // nsIconProtocolHandler::NewChannel will provide the correct loadInfo for
+ // this iconChannel. Use the most restrictive security settings for the
+ // temporary loadInfo to make sure the channel can not be opened.
+ nsCOMPtr<nsIPrincipal> nullPrincipal =
+ mozilla::NullPrincipal::CreateWithoutOriginAttributes();
+ return NS_NewInputStreamChannel(
+ aChannel, aURI, std::move(aStream), nullPrincipal,
+ nsILoadInfo::SEC_REQUIRE_SAME_ORIGIN_DATA_IS_BLOCKED,
+ nsIContentPolicy::TYPE_INTERNAL_IMAGE, nsLiteralCString(IMAGE_ICON_MS));
+}
+
+static GtkWidget* gProtoWindow = nullptr;
+static GtkWidget* gStockImageWidget = nullptr;
+
+static void ensure_stock_image_widget() {
+ // Only the style of the GtkImage needs to be used, but the widget is kept
+ // to track dynamic style changes.
+ if (!gProtoWindow) {
+ gProtoWindow = gtk_window_new(GTK_WINDOW_POPUP);
+ GtkWidget* protoLayout = gtk_fixed_new();
+ gtk_container_add(GTK_CONTAINER(gProtoWindow), protoLayout);
+
+ gStockImageWidget = gtk_image_new();
+ gtk_container_add(GTK_CONTAINER(protoLayout), gStockImageWidget);
+
+ gtk_widget_ensure_style(gStockImageWidget);
+ }
+}
+
+static GtkIconSize moz_gtk_icon_size(const char* name) {
+ if (strcmp(name, "button") == 0) {
+ return GTK_ICON_SIZE_BUTTON;
+ }
+
+ if (strcmp(name, "menu") == 0) {
+ return GTK_ICON_SIZE_MENU;
+ }
+
+ if (strcmp(name, "toolbar") == 0) {
+ return GTK_ICON_SIZE_LARGE_TOOLBAR;
+ }
+
+ if (strcmp(name, "toolbarsmall") == 0) {
+ return GTK_ICON_SIZE_SMALL_TOOLBAR;
+ }
+
+ if (strcmp(name, "dnd") == 0) {
+ return GTK_ICON_SIZE_DND;
+ }
+
+ if (strcmp(name, "dialog") == 0) {
+ return GTK_ICON_SIZE_DIALOG;
+ }
+
+ return GTK_ICON_SIZE_MENU;
+}
+
+static int32_t GetIconSize(nsIMozIconURI* aIconURI) {
+ nsAutoCString iconSizeString;
+
+ aIconURI->GetIconSize(iconSizeString);
+ if (iconSizeString.IsEmpty()) {
+ uint32_t size;
+ mozilla::DebugOnly<nsresult> rv = aIconURI->GetImageSize(&size);
+ NS_ASSERTION(NS_SUCCEEDED(rv), "GetImageSize failed");
+ return size;
+ }
+ int size;
+
+ GtkIconSize icon_size = moz_gtk_icon_size(iconSizeString.get());
+ gtk_icon_size_lookup(icon_size, &size, nullptr);
+ return size;
+}
+
+/* Scale icon buffer to preferred size */
+static nsresult ScaleIconBuf(GdkPixbuf** aBuf, int32_t iconSize) {
+ // Scale buffer only if width or height differ from preferred size
+ if (gdk_pixbuf_get_width(*aBuf) != iconSize &&
+ gdk_pixbuf_get_height(*aBuf) != iconSize) {
+ GdkPixbuf* scaled =
+ gdk_pixbuf_scale_simple(*aBuf, iconSize, iconSize, GDK_INTERP_BILINEAR);
+ // replace original buffer by scaled
+ g_object_unref(*aBuf);
+ *aBuf = scaled;
+ if (!scaled) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ }
+ return NS_OK;
+}
+
+/* static */
+nsresult nsIconChannel::GetIconWithGIO(nsIMozIconURI* aIconURI,
+ ByteBuf* aDataOut) {
+ GIcon* icon = nullptr;
+ nsCOMPtr<nsIURL> fileURI;
+
+ // Read icon content
+ aIconURI->GetIconURL(getter_AddRefs(fileURI));
+
+ // Get icon for file specified by URI
+ if (fileURI) {
+ nsAutoCString spec;
+ fileURI->GetAsciiSpec(spec);
+ if (fileURI->SchemeIs("file")) {
+ GFile* file = g_file_new_for_uri(spec.get());
+ GFileInfo* fileInfo =
+ g_file_query_info(file, G_FILE_ATTRIBUTE_STANDARD_ICON,
+ G_FILE_QUERY_INFO_NONE, nullptr, nullptr);
+ g_object_unref(file);
+ if (fileInfo) {
+ // icon from g_content_type_get_icon doesn't need unref
+ icon = g_file_info_get_icon(fileInfo);
+ if (icon) {
+ g_object_ref(icon);
+ }
+ g_object_unref(fileInfo);
+ }
+ }
+ }
+
+ // Try to get icon by using MIME type
+ if (!icon) {
+ nsAutoCString type;
+ aIconURI->GetContentType(type);
+ // Try to get MIME type from file extension by using nsIMIMEService
+ if (type.IsEmpty()) {
+ nsCOMPtr<nsIMIMEService> ms(do_GetService("@mozilla.org/mime;1"));
+ if (ms) {
+ nsAutoCString fileExt;
+ aIconURI->GetFileExtension(fileExt);
+ ms->GetTypeFromExtension(fileExt, type);
+ }
+ }
+ char* ctype = nullptr; // character representation of content type
+ if (!type.IsEmpty()) {
+ ctype = g_content_type_from_mime_type(type.get());
+ }
+ if (ctype) {
+ icon = g_content_type_get_icon(ctype);
+ g_free(ctype);
+ }
+ }
+
+ // Get default icon theme
+ GtkIconTheme* iconTheme = gtk_icon_theme_get_default();
+ GtkIconInfo* iconInfo = nullptr;
+ // Get icon size
+ int32_t iconSize = GetIconSize(aIconURI);
+
+ if (icon) {
+ // Use icon and theme to get GtkIconInfo
+ iconInfo = gtk_icon_theme_lookup_by_gicon(iconTheme, icon, iconSize,
+ (GtkIconLookupFlags)0);
+ g_object_unref(icon);
+ }
+
+ if (!iconInfo) {
+ // Mozilla's mimetype lookup failed. Try the "unknown" icon.
+ iconInfo = gtk_icon_theme_lookup_icon(iconTheme, "unknown", iconSize,
+ (GtkIconLookupFlags)0);
+ if (!iconInfo) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ }
+
+ // Create a GdkPixbuf buffer containing icon and scale it
+ GdkPixbuf* buf = gtk_icon_info_load_icon(iconInfo, nullptr);
+ gtk_icon_info_free(iconInfo);
+ if (!buf) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsresult rv = ScaleIconBuf(&buf, iconSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = MozGdkPixbufToByteBuf(buf, aDataOut);
+ g_object_unref(buf);
+ return rv;
+}
+
+/* static */
+nsresult nsIconChannel::GetIcon(nsIURI* aURI, ByteBuf* aDataOut) {
+ nsCOMPtr<nsIMozIconURI> iconURI = do_QueryInterface(aURI);
+ NS_ASSERTION(iconURI, "URI is not an nsIMozIconURI");
+
+ if (!iconURI) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (gfxPlatform::IsHeadless()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ nsAutoCString stockIcon;
+ iconURI->GetStockIcon(stockIcon);
+ if (stockIcon.IsEmpty()) {
+ return GetIconWithGIO(iconURI, aDataOut);
+ }
+
+ // Search for stockIcon
+ nsAutoCString iconSizeString;
+ iconURI->GetIconSize(iconSizeString);
+
+ nsAutoCString iconStateString;
+ iconURI->GetIconState(iconStateString);
+
+ GtkIconSize icon_size = moz_gtk_icon_size(iconSizeString.get());
+ GtkStateType state = iconStateString.EqualsLiteral("disabled")
+ ? GTK_STATE_INSENSITIVE
+ : GTK_STATE_NORMAL;
+
+ // First lookup the icon by stock id and text direction.
+ GtkTextDirection direction = GTK_TEXT_DIR_NONE;
+ if (StringEndsWith(stockIcon, "-ltr"_ns)) {
+ direction = GTK_TEXT_DIR_LTR;
+ } else if (StringEndsWith(stockIcon, "-rtl"_ns)) {
+ direction = GTK_TEXT_DIR_RTL;
+ }
+
+ bool forceDirection = direction != GTK_TEXT_DIR_NONE;
+ nsAutoCString stockID;
+ bool useIconName = false;
+ if (!forceDirection) {
+ direction = gtk_widget_get_default_direction();
+ stockID = stockIcon;
+ } else {
+ // GTK versions < 2.22 use icon names from concatenating stock id with
+ // -(rtl|ltr), which is how the moz-icon stock name is interpreted here.
+ stockID = Substring(stockIcon, 0, stockIcon.Length() - 4);
+ // However, if we lookup bidi icons by the stock name, then GTK versions
+ // >= 2.22 will use a bidi lookup convention that most icon themes do not
+ // yet follow. Therefore, we first check to see if the theme supports the
+ // old icon name as this will have bidi support (if found).
+ GtkIconTheme* icon_theme = gtk_icon_theme_get_default();
+ // Micking what gtk_icon_set_render_icon does with sizes, though it's not
+ // critical as icons will be scaled to suit size. It just means we follow
+ // the same paths and so share caches.
+ gint width, height;
+ if (gtk_icon_size_lookup(icon_size, &width, &height)) {
+ gint size = std::min(width, height);
+ // We use gtk_icon_theme_lookup_icon() without
+ // GTK_ICON_LOOKUP_USE_BUILTIN instead of gtk_icon_theme_has_icon() so
+ // we don't pick up fallback icons added by distributions for backward
+ // compatibility.
+ GtkIconInfo* icon = gtk_icon_theme_lookup_icon(
+ icon_theme, stockIcon.get(), size, (GtkIconLookupFlags)0);
+ if (icon) {
+ useIconName = true;
+ gtk_icon_info_free(icon);
+ }
+ }
+ }
+
+ ensure_stock_image_widget();
+ GtkStyle* style = gtk_widget_get_style(gStockImageWidget);
+ GtkIconSet* icon_set = nullptr;
+ if (!useIconName) {
+ icon_set = gtk_style_lookup_icon_set(style, stockID.get());
+ }
+
+ if (!icon_set) {
+ // Either we have chosen icon-name lookup for a bidi icon, or stockIcon is
+ // not a stock id so we assume it is an icon name.
+ useIconName = true;
+ // Creating a GtkIconSet is a convenient way to allow the style to
+ // render the icon, possibly with variations suitable for insensitive
+ // states.
+ icon_set = gtk_icon_set_new();
+ GtkIconSource* icon_source = gtk_icon_source_new();
+
+ gtk_icon_source_set_icon_name(icon_source, stockIcon.get());
+ gtk_icon_set_add_source(icon_set, icon_source);
+ gtk_icon_source_free(icon_source);
+ }
+
+ GdkPixbuf* icon = gtk_icon_set_render_icon(
+ icon_set, style, direction, state, icon_size, gStockImageWidget, nullptr);
+ if (useIconName) {
+ gtk_icon_set_unref(icon_set);
+ }
+
+ // According to documentation, gtk_icon_set_render_icon() never returns
+ // nullptr, but it does return nullptr when we have the problem reported
+ // here: https://bugzilla.gnome.org/show_bug.cgi?id=629878#c13
+ if (!icon) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ nsresult rv = MozGdkPixbufToByteBuf(icon, aDataOut);
+
+ g_object_unref(icon);
+
+ return rv;
+}
+
+nsresult nsIconChannel::Init(nsIURI* aURI) {
+ nsCOMPtr<nsIInputStream> stream;
+
+ using ContentChild = mozilla::dom::ContentChild;
+ if (auto* contentChild = ContentChild::GetSingleton()) {
+ // Get the icon via IPC and translate the promise of a ByteBuf
+ // into an actually-existing channel.
+ RefPtr<ContentChild::GetSystemIconPromise> icon =
+ contentChild->SendGetSystemIcon(aURI);
+ if (!icon) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsCOMPtr<nsIAsyncInputStream> inputStream;
+ nsCOMPtr<nsIAsyncOutputStream> outputStream;
+ NS_NewPipe2(getter_AddRefs(inputStream), getter_AddRefs(outputStream), true,
+ false, 0, UINT32_MAX);
+
+ // FIXME: Bug 1718324
+ // The GetSystemIcon() call will end up on the parent doing GetIcon()
+ // and by using ByteBuf we might not be immune to some deadlock, at least
+ // on paper. From analysis in
+ // https://phabricator.services.mozilla.com/D118596#3865440 we should be
+ // safe in practice, but it would be nicer to just write that differently.
+
+ icon->Then(
+ mozilla::GetCurrentSerialEventTarget(), __func__,
+ [outputStream](std::tuple<nsresult, mozilla::Maybe<ByteBuf>>&& aArg) {
+ nsresult rv = std::get<0>(aArg);
+ mozilla::Maybe<ByteBuf> bytes = std::move(std::get<1>(aArg));
+
+ if (NS_SUCCEEDED(rv)) {
+ MOZ_RELEASE_ASSERT(bytes);
+ uint32_t written;
+ rv = outputStream->Write(reinterpret_cast<char*>(bytes->mData),
+ static_cast<uint32_t>(bytes->mLen),
+ &written);
+ if (NS_SUCCEEDED(rv)) {
+ const bool wroteAll = static_cast<size_t>(written) == bytes->mLen;
+ MOZ_ASSERT(wroteAll);
+ if (!wroteAll) {
+ rv = NS_ERROR_UNEXPECTED;
+ }
+ }
+ } else {
+ MOZ_ASSERT(!bytes);
+ }
+
+ if (NS_FAILED(rv)) {
+ outputStream->CloseWithStatus(rv);
+ }
+ },
+ [outputStream](mozilla::ipc::ResponseRejectReason) {
+ outputStream->CloseWithStatus(NS_ERROR_FAILURE);
+ });
+
+ stream = inputStream.forget();
+ } else {
+ // Get the icon directly.
+ ByteBuf bytebuf;
+ nsresult rv = GetIcon(aURI, &bytebuf);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = ByteBufToStream(std::move(bytebuf), getter_AddRefs(stream));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return StreamToChannel(stream.forget(), aURI, getter_AddRefs(mRealChannel));
+}
+
+void nsIconChannel::Shutdown() {
+ if (gProtoWindow) {
+ gtk_widget_destroy(gProtoWindow);
+ gProtoWindow = nullptr;
+ gStockImageWidget = nullptr;
+ }
+}
diff --git a/image/decoders/icon/gtk/nsIconChannel.h b/image/decoders/icon/gtk/nsIconChannel.h
new file mode 100644
index 0000000000..6ad26602d0
--- /dev/null
+++ b/image/decoders/icon/gtk/nsIconChannel.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_icon_gtk_nsIconChannel_h
+#define mozilla_image_decoders_icon_gtk_nsIconChannel_h
+
+#include "mozilla/Attributes.h"
+
+#include "nsIChannel.h"
+#include "nsIURI.h"
+#include "nsIIconURI.h"
+#include "nsCOMPtr.h"
+
+namespace mozilla::ipc {
+class ByteBuf;
+}
+
+/// This class is the GTK implementation of nsIconChannel. It asks
+/// GTK for the icon, translates the pixel data in-memory into
+/// nsIconDecoder format, and proxies the nsChannel interface to a new
+/// channel returning that image.
+class nsIconChannel final : public nsIChannel {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_FORWARD_NSIREQUEST(mRealChannel->)
+ NS_FORWARD_NSICHANNEL(mRealChannel->)
+
+ nsIconChannel() {}
+
+ static void Shutdown();
+
+ /// Called by nsIconProtocolHandler after it creates this channel.
+ /// Must be called before calling any other function on this object.
+ /// If this method fails, no other function must be called on this object.
+ nsresult Init(nsIURI* aURI);
+
+ /// Obtains an icon, in nsIconDecoder format, as a ByteBuf instead
+ /// of a channel. For use with IPC.
+ static nsresult GetIcon(nsIURI* aURI, mozilla::ipc::ByteBuf* aDataOut);
+
+ private:
+ ~nsIconChannel() {}
+ /// The input stream channel which will yield the image.
+ /// Will always be non-null after a successful Init.
+ nsCOMPtr<nsIChannel> mRealChannel;
+
+ static nsresult GetIconWithGIO(nsIMozIconURI* aIconURI,
+ mozilla::ipc::ByteBuf* aDataOut);
+};
+
+#endif // mozilla_image_decoders_icon_gtk_nsIconChannel_h
diff --git a/image/decoders/icon/mac/moz.build b/image/decoders/icon/mac/moz.build
new file mode 100644
index 0000000000..3467659a8f
--- /dev/null
+++ b/image/decoders/icon/mac/moz.build
@@ -0,0 +1,13 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ "nsIconChannelCocoa.mm",
+]
+
+FINAL_LIBRARY = "xul"
+
+include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/image/decoders/icon/mac/nsIconChannel.h b/image/decoders/icon/mac/nsIconChannel.h
new file mode 100644
index 0000000000..dca2a3c51a
--- /dev/null
+++ b/image/decoders/icon/mac/nsIconChannel.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_encoders_icon_mac_nsIconChannel_h
+#define mozilla_image_encoders_icon_mac_nsIconChannel_h
+
+#include "mozilla/Attributes.h"
+
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include "nsIChannel.h"
+#include "nsILoadGroup.h"
+#include "nsILoadInfo.h"
+#include "nsIInterfaceRequestor.h"
+#include "nsIInterfaceRequestorUtils.h"
+#include "nsIInputStreamPump.h"
+#include "nsIStreamListener.h"
+#include "nsIURI.h"
+#include "nsNetUtil.h"
+
+class nsIFile;
+
+class nsIconChannel final : public nsIChannel, public nsIStreamListener {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIREQUEST
+ NS_DECL_NSICHANNEL
+ NS_DECL_NSIREQUESTOBSERVER
+ NS_DECL_NSISTREAMLISTENER
+
+ nsIconChannel();
+
+ nsresult Init(nsIURI* uri);
+
+ protected:
+ virtual ~nsIconChannel();
+
+ nsCOMPtr<nsIURI> mUrl;
+ nsCOMPtr<nsIURI> mOriginalURI;
+ nsCOMPtr<nsILoadGroup> mLoadGroup;
+ nsCOMPtr<nsIInterfaceRequestor> mCallbacks;
+ nsCOMPtr<nsISupports> mOwner;
+ nsCOMPtr<nsILoadInfo> mLoadInfo;
+
+ nsCOMPtr<nsIInputStreamPump> mPump;
+ nsCOMPtr<nsIStreamListener> mListener;
+ bool mCanceled = false;
+
+ [[nodiscard]] nsresult MakeInputStream(nsIInputStream** _retval,
+ bool nonBlocking);
+
+ nsresult ExtractIconInfoFromUrl(nsIFile** aLocalFile,
+ uint32_t* aDesiredImageSize,
+ nsACString& aContentType,
+ nsACString& aFileExtension);
+};
+
+#endif // mozilla_image_encoders_icon_mac_nsIconChannel_h
diff --git a/image/decoders/icon/mac/nsIconChannelCocoa.mm b/image/decoders/icon/mac/nsIconChannelCocoa.mm
new file mode 100644
index 0000000000..368ecdda20
--- /dev/null
+++ b/image/decoders/icon/mac/nsIconChannelCocoa.mm
@@ -0,0 +1,505 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsContentUtils.h"
+#include "nsIconChannel.h"
+#include "mozilla/BasePrincipal.h"
+#include "mozilla/EndianUtils.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIIconURI.h"
+#include "nsIInputStream.h"
+#include "nsIInterfaceRequestor.h"
+#include "nsIInterfaceRequestorUtils.h"
+#include "nsString.h"
+#include "nsMimeTypes.h"
+#include "nsIURL.h"
+#include "nsNetCID.h"
+#include "nsIPipe.h"
+#include "nsIOutputStream.h"
+#include "nsCExternalHandlerService.h"
+#include "nsILocalFileMac.h"
+#include "nsIFileURL.h"
+#include "nsTArray.h"
+#include "nsObjCExceptions.h"
+#include "nsProxyRelease.h"
+#include "nsContentSecurityManager.h"
+#include "nsNetUtil.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+#include <Cocoa/Cocoa.h>
+
+using namespace mozilla;
+
+// nsIconChannel methods
+nsIconChannel::nsIconChannel() {}
+
+nsIconChannel::~nsIconChannel() {
+  if (mLoadInfo) {
+    // The channel may be destroyed off-main-thread; LoadInfo must be
+    // released on the main thread, so hand the reference over explicitly.
+    NS_ReleaseOnMainThread("nsIconChannel::mLoadInfo", mLoadInfo.forget());
+  }
+}
+
+NS_IMPL_ISUPPORTS(nsIconChannel, nsIChannel, nsIRequest, nsIRequestObserver, nsIStreamListener)
+
+nsresult nsIconChannel::Init(nsIURI* uri) {
+  NS_ASSERTION(uri, "no uri");
+  mUrl = uri;
+  mOriginalURI = uri;
+  nsresult rv;
+  // Create the pump up front: the nsIRequest methods (IsPending, GetStatus,
+  // Suspend, Resume, Cancel) forward to mPump unconditionally, so it must
+  // exist before AsyncOpen is ever called.
+  mPump = do_CreateInstance(NS_INPUTSTREAMPUMP_CONTRACTID, &rv);
+  return rv;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// nsIRequest methods:
+
+NS_IMETHODIMP
+nsIconChannel::GetName(nsACString& result) { return mUrl->GetSpec(result); }
+
+NS_IMETHODIMP
+nsIconChannel::IsPending(bool* result) { return mPump->IsPending(result); }
+
+NS_IMETHODIMP
+nsIconChannel::GetStatus(nsresult* status) { return mPump->GetStatus(status); }
+
+NS_IMETHODIMP nsIconChannel::SetCanceledReason(const nsACString& aReason) {
+ return SetCanceledReasonImpl(aReason);
+}
+
+NS_IMETHODIMP nsIconChannel::GetCanceledReason(nsACString& aReason) {
+ return GetCanceledReasonImpl(aReason);
+}
+
+NS_IMETHODIMP nsIconChannel::CancelWithReason(nsresult aStatus, const nsACString& aReason) {
+ return CancelWithReasonImpl(aStatus, aReason);
+}
+
+NS_IMETHODIMP
+nsIconChannel::Cancel(nsresult status) {
+ mCanceled = true;
+ return mPump->Cancel(status);
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetCanceled(bool* result) {
+ *result = mCanceled;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::Suspend(void) { return mPump->Suspend(); }
+
+NS_IMETHODIMP
+nsIconChannel::Resume(void) { return mPump->Resume(); }
+
+// nsIRequestObserver methods
+NS_IMETHODIMP
+nsIconChannel::OnStartRequest(nsIRequest* aRequest) {
+ if (mListener) {
+ return mListener->OnStartRequest(this);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::OnStopRequest(nsIRequest* aRequest, nsresult aStatus) {
+ if (mListener) {
+ mListener->OnStopRequest(this, aStatus);
+ mListener = nullptr;
+ }
+
+ // Remove from load group
+ if (mLoadGroup) {
+ mLoadGroup->RemoveRequest(this, nullptr, aStatus);
+ }
+
+ return NS_OK;
+}
+
+// nsIStreamListener methods
+NS_IMETHODIMP
+nsIconChannel::OnDataAvailable(nsIRequest* aRequest, nsIInputStream* aStream, uint64_t aOffset,
+ uint32_t aCount) {
+ if (mListener) {
+ return mListener->OnDataAvailable(this, aStream, aOffset, aCount);
+ }
+ return NS_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// nsIChannel methods:
+
+NS_IMETHODIMP
+nsIconChannel::GetOriginalURI(nsIURI** aURI) {
+ *aURI = mOriginalURI;
+ NS_ADDREF(*aURI);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetOriginalURI(nsIURI* aURI) {
+ NS_ENSURE_ARG_POINTER(aURI);
+ mOriginalURI = aURI;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetURI(nsIURI** aURI) {
+ *aURI = mUrl;
+ NS_IF_ADDREF(*aURI);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::Open(nsIInputStream** _retval) {
+ nsCOMPtr<nsIStreamListener> listener;
+ nsresult rv = nsContentSecurityManager::doContentSecurityCheck(this, listener);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return MakeInputStream(_retval, false);
+}
+
+// Pulls the size / content-type / extension hints out of the moz-icon URI,
+// and — only when the URI wraps a resolvable local file URL — returns that
+// file in *aLocalFile. Every "not a local file" case deliberately returns
+// NS_OK with *aLocalFile left null (the caller treats a null file as
+// "fall back to extension-based icon lookup").
+nsresult nsIconChannel::ExtractIconInfoFromUrl(nsIFile** aLocalFile, uint32_t* aDesiredImageSize,
+                                               nsACString& aContentType,
+                                               nsACString& aFileExtension) {
+  nsresult rv = NS_OK;
+  nsCOMPtr<nsIMozIconURI> iconURI(do_QueryInterface(mUrl, &rv));
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  iconURI->GetImageSize(aDesiredImageSize);
+  iconURI->GetContentType(aContentType);
+  iconURI->GetFileExtension(aFileExtension);
+
+  nsCOMPtr<nsIURL> url;
+  rv = iconURI->GetIconURL(getter_AddRefs(url));
+  if (NS_FAILED(rv) || !url) return NS_OK;
+
+  nsCOMPtr<nsIFileURL> fileURL = do_QueryInterface(url, &rv);
+  if (NS_FAILED(rv) || !fileURL) return NS_OK;
+
+  nsCOMPtr<nsIFile> file;
+  rv = fileURL->GetFile(getter_AddRefs(file));
+  if (NS_FAILED(rv) || !file) return NS_OK;
+
+  // QI used purely as a sanity check that this really is a Mac local file;
+  // the resulting interface pointer is intentionally discarded.
+  nsCOMPtr<nsILocalFileMac> localFileMac(do_QueryInterface(file, &rv));
+  if (NS_FAILED(rv) || !localFileMac) return NS_OK;
+
+  *aLocalFile = file;
+  NS_IF_ADDREF(*aLocalFile);
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::AsyncOpen(nsIStreamListener* aListener) {
+  // Run the mandatory content-security check before doing any work; on
+  // failure we must drop mCallbacks to break potential reference cycles.
+  nsCOMPtr<nsIStreamListener> listener = aListener;
+  nsresult rv = nsContentSecurityManager::doContentSecurityCheck(this, listener);
+  if (NS_FAILED(rv)) {
+    mCallbacks = nullptr;
+    return rv;
+  }
+
+  MOZ_ASSERT(mLoadInfo->GetSecurityMode() == 0 || mLoadInfo->GetInitialSecurityCheckDone() ||
+             (mLoadInfo->GetSecurityMode() ==
+                  nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL &&
+              mLoadInfo->GetLoadingPrincipal() &&
+              mLoadInfo->GetLoadingPrincipal()->IsSystemPrincipal()),
+             "security flags in loadInfo but doContentSecurityCheck() not called");
+
+  // Render the icon synchronously into a (non-blocking) pipe.
+  nsCOMPtr<nsIInputStream> inStream;
+  rv = MakeInputStream(getter_AddRefs(inStream), true);
+  if (NS_FAILED(rv)) {
+    mCallbacks = nullptr;
+    return rv;
+  }
+
+  // Init our stream pump
+  nsCOMPtr<nsISerialEventTarget> target =
+      nsContentUtils::GetEventTargetByLoadInfo(mLoadInfo, mozilla::TaskCategory::Other);
+  rv = mPump->Init(inStream, 0, 0, false, target);
+  if (NS_FAILED(rv)) {
+    mCallbacks = nullptr;
+    return rv;
+  }
+
+  rv = mPump->AsyncRead(this);
+  if (NS_SUCCEEDED(rv)) {
+    // Store our real listener
+    mListener = aListener;
+    // Add ourself to the load group, if available
+    if (mLoadGroup) {
+      mLoadGroup->AddRequest(this, nullptr);
+    }
+  } else {
+    mCallbacks = nullptr;
+  }
+
+  return rv;
+}
+
+// Looks up the appropriate NSImage for the URL (by file, then by extension,
+// then the generic document icon), rasterizes it to premultiplied BGRA, and
+// returns a pipe input stream containing the nsIconDecoder wire format.
+nsresult nsIconChannel::MakeInputStream(nsIInputStream** _retval, bool aNonBlocking) {
+  NS_OBJC_BEGIN_TRY_BLOCK_RETURN;
+
+  nsCString contentType;
+  nsAutoCString fileExt;
+  nsCOMPtr<nsIFile> fileloc; // file we want an icon for
+  uint32_t desiredImageSize;
+  nsresult rv =
+      ExtractIconInfoFromUrl(getter_AddRefs(fileloc), &desiredImageSize, contentType, fileExt);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  bool fileExists = false;
+  if (fileloc) {
+    fileloc->Exists(&fileExists);
+  }
+
+  NSImage* iconImage = nil;
+
+  // first try to get the icon from the file if it exists
+  if (fileExists) {
+    nsCOMPtr<nsILocalFileMac> localFileMac(do_QueryInterface(fileloc, &rv));
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    CFURLRef macURL;
+    if (NS_SUCCEEDED(localFileMac->GetCFURL(&macURL))) {
+      iconImage = [[NSWorkspace sharedWorkspace] iconForFile:[(NSURL*)macURL path]];
+      ::CFRelease(macURL);
+    }
+  }
+
+  // if we don't have an icon yet try to get one by extension
+  // NOTE(review): -iconForFileType: is deprecated as of macOS 12 in favor of
+  // -iconForContentType:; worth migrating when the minimum SDK allows.
+  if (!iconImage && !fileExt.IsEmpty()) {
+    NSString* fileExtension = [NSString stringWithUTF8String:fileExt.get()];
+    iconImage = [[NSWorkspace sharedWorkspace] iconForFileType:fileExtension];
+  }
+
+  // If we still don't have an icon, get the generic document icon.
+  if (!iconImage) {
+    iconImage = [[NSWorkspace sharedWorkspace] iconForFileType:NSFileTypeUnknown];
+  }
+
+  if (!iconImage) {
+    return NS_ERROR_FAILURE;
+  }
+
+  if (desiredImageSize > 255) {
+    // The Icon image format represents width and height as u8, so it does not
+    // allow for images sized 256 or more.
+    return NS_ERROR_FAILURE;
+  }
+
+  // Set the actual size to *twice* the requested size.
+  // We do this because our UI doesn't take the window's device pixel ratio into
+  // account when it requests these icons; e.g. it will request an icon with
+  // size 16, place it in a 16x16 CSS pixel sized image, and then display it in
+  // a window on a HiDPI screen where the icon now covers 32x32 physical screen
+  // pixels. So we just always double the size here in order to prevent blurriness.
+  // (The "< 128" guard keeps the doubled value within the u8 limit above.)
+  uint32_t size = (desiredImageSize < 128) ? desiredImageSize * 2 : desiredImageSize;
+  uint32_t width = size;
+  uint32_t height = size;
+
+  // The "image format" we're outputting here (and which gets decoded by
+  // nsIconDecoder) has the following format:
+  // - 1 byte for the image width, as u8
+  // - 1 byte for the image height, as u8
+  // - the raw image data as BGRA, width * height * 4 bytes.
+  size_t bufferCapacity = 4 + width * height * 4;
+  UniquePtr<uint8_t[]> fileBuf = MakeUniqueFallible<uint8_t[]>(bufferCapacity);
+  if (NS_WARN_IF(!fileBuf)) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  fileBuf[0] = uint8_t(width);
+  fileBuf[1] = uint8_t(height);
+  // Byte 2 encodes the surface format for the decoder.
+  // NOTE(review): mozilla::gfx::SurfaceFormat is not included by this file
+  // directly — presumably pulled in transitively; confirm.
+  fileBuf[2] = uint8_t(mozilla::gfx::SurfaceFormat::B8G8R8A8);
+
+  // Clear all bits to ensure in nsIconDecoder we assume we are already color
+  // managed and premultiplied.
+  fileBuf[3] = 0;
+
+  uint8_t* imageBuf = &fileBuf[4];
+
+  // Create a CGBitmapContext around imageBuf and draw iconImage to it.
+  // This gives us the image data in the format we want: BGRA, four bytes per
+  // pixel, in host endianness, with premultiplied alpha.
+  CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+  CGContextRef ctx =
+      CGBitmapContextCreate(imageBuf, width, height, 8 /* bitsPerComponent */, width * 4, cs,
+                            kCGBitmapByteOrder32Host | kCGImageAlphaPremultipliedFirst);
+  CGColorSpaceRelease(cs);
+
+  // Temporarily swap in a graphics context targeting our bitmap, draw, then
+  // restore the previous context before releasing ours.
+  NSGraphicsContext* oldContext = [NSGraphicsContext currentContext];
+  [NSGraphicsContext setCurrentContext:[NSGraphicsContext graphicsContextWithCGContext:ctx
+                                                                               flipped:NO]];
+
+  [iconImage drawInRect:NSMakeRect(0, 0, width, height)];
+
+  [NSGraphicsContext setCurrentContext:oldContext];
+
+  CGContextRelease(ctx);
+
+  // Now, create a pipe and stuff our data into it
+  // The pipe segment size equals the full buffer, so the single Write below
+  // cannot short-write even in non-blocking mode.
+  nsCOMPtr<nsIInputStream> inStream;
+  nsCOMPtr<nsIOutputStream> outStream;
+  NS_NewPipe(getter_AddRefs(inStream), getter_AddRefs(outStream), bufferCapacity, bufferCapacity,
+             aNonBlocking);
+
+  uint32_t written;
+  rv = outStream->Write((char*)fileBuf.get(), bufferCapacity, &written);
+  if (NS_SUCCEEDED(rv)) {
+    NS_IF_ADDREF(*_retval = inStream);
+  }
+
+  // Drop notification callbacks to prevent cycles.
+  mCallbacks = nullptr;
+
+  return NS_OK;
+
+  NS_OBJC_END_TRY_BLOCK_RETURN(NS_ERROR_FAILURE);
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetLoadFlags(uint32_t* aLoadAttributes) {
+ return mPump->GetLoadFlags(aLoadAttributes);
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetLoadFlags(uint32_t aLoadAttributes) {
+ return mPump->SetLoadFlags(aLoadAttributes);
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetTRRMode(nsIRequest::TRRMode* aTRRMode) { return GetTRRModeImpl(aTRRMode); }
+
+NS_IMETHODIMP
+nsIconChannel::SetTRRMode(nsIRequest::TRRMode aTRRMode) { return SetTRRModeImpl(aTRRMode); }
+
+NS_IMETHODIMP
+nsIconChannel::GetIsDocument(bool* aIsDocument) {
+ return NS_GetIsDocumentChannel(this, aIsDocument);
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentType(nsACString& aContentType) {
+ aContentType.AssignLiteral(IMAGE_ICON_MS);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetContentType(const nsACString& aContentType) {
+ // It doesn't make sense to set the content-type on this type
+ // of channel...
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentCharset(nsACString& aContentCharset) {
+  // Icon data is a binary image; it has no character set. Return the empty
+  // string. (Previously this copied the content *type*, IMAGE_ICON_MS, into
+  // the charset out-parameter — a copy/paste slip from GetContentType.)
+  aContentCharset.Truncate();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetContentCharset(const nsACString& aContentCharset) {
+ // It doesn't make sense to set the content-type on this type
+ // of channel...
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentDisposition(uint32_t* aContentDisposition) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetContentDisposition(uint32_t aContentDisposition) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentDispositionFilename(nsAString& aContentDispositionFilename) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetContentDispositionFilename(const nsAString& aContentDispositionFilename) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentDispositionHeader(nsACString& aContentDispositionHeader) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetContentLength(int64_t* aContentLength) {
+ *aContentLength = 0;
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetContentLength(int64_t aContentLength) {
+ MOZ_ASSERT_UNREACHABLE("nsIconChannel::SetContentLength");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetLoadGroup(nsILoadGroup** aLoadGroup) {
+ *aLoadGroup = mLoadGroup;
+ NS_IF_ADDREF(*aLoadGroup);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetLoadGroup(nsILoadGroup* aLoadGroup) {
+ mLoadGroup = aLoadGroup;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetOwner(nsISupports** aOwner) {
+ *aOwner = mOwner.get();
+ NS_IF_ADDREF(*aOwner);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetOwner(nsISupports* aOwner) {
+ mOwner = aOwner;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetLoadInfo(nsILoadInfo** aLoadInfo) {
+ NS_IF_ADDREF(*aLoadInfo = mLoadInfo);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetLoadInfo(nsILoadInfo* aLoadInfo) {
+ MOZ_RELEASE_ASSERT(aLoadInfo, "loadinfo can't be null");
+ mLoadInfo = aLoadInfo;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetNotificationCallbacks(nsIInterfaceRequestor** aNotificationCallbacks) {
+ *aNotificationCallbacks = mCallbacks.get();
+ NS_IF_ADDREF(*aNotificationCallbacks);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::SetNotificationCallbacks(nsIInterfaceRequestor* aNotificationCallbacks) {
+ mCallbacks = aNotificationCallbacks;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconChannel::GetSecurityInfo(nsITransportSecurityInfo** aSecurityInfo) {
+ *aSecurityInfo = nullptr;
+ return NS_OK;
+}
diff --git a/image/decoders/icon/moz.build b/image/decoders/icon/moz.build
new file mode 100644
index 0000000000..96cf951b3b
--- /dev/null
+++ b/image/decoders/icon/moz.build
@@ -0,0 +1,39 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+    "nsIconProtocolHandler.cpp",
+    "nsIconURI.cpp",
+]
+
+XPCOM_MANIFESTS += [
+    "components.conf",
+]
+
+FINAL_LIBRARY = "xul"
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+EXPORTS += [
+    "nsIconURI.h",
+]
+
+# Pick the per-platform nsIconChannel implementation directory. These checks
+# are deliberately independent `if`s (not elif): a later matching condition
+# overrides an earlier one, e.g. Android (which may also report a gtk-like
+# toolkit config) wins over "gtk".
+platform = None
+
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
+    platform = "gtk"
+
+if CONFIG["OS_ARCH"] == "WINNT":
+    platform = "win"
+
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "cocoa":
+    platform = "mac"
+
+if CONFIG["OS_TARGET"] == "Android":
+    platform = "android"
+
+# Only the headers are needed here; the platform subdirectory has its own
+# moz.build that compiles the sources.
+if platform:
+    LOCAL_INCLUDES += [platform]
diff --git a/image/decoders/icon/nsIconProtocolHandler.cpp b/image/decoders/icon/nsIconProtocolHandler.cpp
new file mode 100644
index 0000000000..9334f908db
--- /dev/null
+++ b/image/decoders/icon/nsIconProtocolHandler.cpp
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIconProtocolHandler.h"
+
+#include "nsIconChannel.h"
+#include "nsIconURI.h"
+#include "nsCRT.h"
+#include "nsCOMPtr.h"
+#include "nsNetCID.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+nsIconProtocolHandler::nsIconProtocolHandler() {}
+
+nsIconProtocolHandler::~nsIconProtocolHandler() {}
+
+NS_IMPL_ISUPPORTS(nsIconProtocolHandler, nsIProtocolHandler,
+ nsISupportsWeakReference)
+
+///////////////////////////////////////////////////////////////////////////////
+// nsIProtocolHandler methods:
+
+NS_IMETHODIMP
+nsIconProtocolHandler::GetScheme(nsACString& result) {
+ result = "moz-icon";
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconProtocolHandler::AllowPort(int32_t port, const char* scheme,
+ bool* _retval) {
+ // don't override anything.
+ *_retval = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsIconProtocolHandler::NewChannel(nsIURI* url, nsILoadInfo* aLoadInfo,
+                                  nsIChannel** result) {
+  NS_ENSURE_ARG_POINTER(url);
+  nsIconChannel* channel = new nsIconChannel;
+  // NOTE(review): Gecko's operator new is infallible, so this null check is
+  // presumably dead code; a RefPtr<nsIconChannel> would also be less
+  // error-prone than the manual NS_ADDREF/NS_RELEASE pairing below.
+  if (!channel) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+  NS_ADDREF(channel);
+
+  nsresult rv = channel->Init(url);
+  if (NS_FAILED(rv)) {
+    NS_RELEASE(channel);
+    return rv;
+  }
+
+  // set the loadInfo on the new channel
+  rv = channel->SetLoadInfo(aLoadInfo);
+  if (NS_FAILED(rv)) {
+    NS_RELEASE(channel);
+    return rv;
+  }
+
+  // Transfer the reference added by NS_ADDREF above to the caller.
+  *result = channel;
+  return NS_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
diff --git a/image/decoders/icon/nsIconProtocolHandler.h b/image/decoders/icon/nsIconProtocolHandler.h
new file mode 100644
index 0000000000..63843eaa4b
--- /dev/null
+++ b/image/decoders/icon/nsIconProtocolHandler.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_icon_nsIconProtocolHandler_h
+#define mozilla_image_decoders_icon_nsIconProtocolHandler_h
+
+#include "nsWeakReference.h"
+#include "nsIProtocolHandler.h"
+
+class nsIconProtocolHandler : public nsIProtocolHandler,
+ public nsSupportsWeakReference {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIPROTOCOLHANDLER
+
+ // nsIconProtocolHandler methods:
+ nsIconProtocolHandler();
+
+ protected:
+ virtual ~nsIconProtocolHandler();
+};
+
+#endif // mozilla_image_decoders_icon_nsIconProtocolHandler_h
diff --git a/image/decoders/icon/nsIconURI.cpp b/image/decoders/icon/nsIconURI.cpp
new file mode 100644
index 0000000000..d917337bf9
--- /dev/null
+++ b/image/decoders/icon/nsIconURI.cpp
@@ -0,0 +1,654 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set sw=2 sts=2 ts=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIconURI.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/ipc/URIUtils.h"
+#include "mozilla/Sprintf.h"
+
+#include "nsIClassInfoImpl.h"
+#include "nsIIOService.h"
+#include "nsISerializable.h"
+#include "nsIObjectInputStream.h"
+#include "nsIObjectOutputStream.h"
+#include "nsIURL.h"
+#include "nsNetUtil.h"
+#include "plstr.h"
+#include "nsCRT.h"
+#include <stdlib.h>
+
+using namespace mozilla;
+using namespace mozilla::ipc;
+
+#define DEFAULT_IMAGE_SIZE 16
+
+#if defined(MAX_PATH)
+# define SANE_FILE_NAME_LEN MAX_PATH
+#elif defined(PATH_MAX)
+# define SANE_FILE_NAME_LEN PATH_MAX
+#else
+# define SANE_FILE_NAME_LEN 1024
+#endif
+
+static NS_DEFINE_CID(kThisIconURIImplementationCID,
+ NS_THIS_ICONURI_IMPLEMENTATION_CID);
+
+static const char* const kSizeStrings[] = {"button", "toolbar", "toolbarsmall",
+ "menu", "dnd", "dialog"};
+
+static const char* const kStateStrings[] = {"normal", "disabled"};
+
+////////////////////////////////////////////////////////////////////////////////
+
+NS_IMPL_CLASSINFO(nsMozIconURI, nullptr, nsIClassInfo::THREADSAFE,
+ NS_ICONURI_CID)
+// Empty CI getter. We only need nsIClassInfo for Serialization
+NS_IMPL_CI_INTERFACE_GETTER0(nsMozIconURI)
+
+nsMozIconURI::nsMozIconURI()
+ : mSize(DEFAULT_IMAGE_SIZE), mIconSize(-1), mIconState(-1) {}
+
+nsMozIconURI::~nsMozIconURI() {}
+
+NS_IMPL_ADDREF(nsMozIconURI)
+NS_IMPL_RELEASE(nsMozIconURI)
+
+NS_INTERFACE_MAP_BEGIN(nsMozIconURI)
+ if (aIID.Equals(kThisIconURIImplementationCID)) {
+ foundInterface = static_cast<nsIURI*>(this);
+ } else
+ NS_INTERFACE_MAP_ENTRY(nsIMozIconURI)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIURI)
+ NS_INTERFACE_MAP_ENTRY(nsIURI)
+ NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsINestedURI, mIconURL)
+ NS_INTERFACE_MAP_ENTRY(nsISerializable)
+ NS_IMPL_QUERY_CLASSINFO(nsMozIconURI)
+NS_INTERFACE_MAP_END
+
+#define MOZICON_SCHEME "moz-icon:"
+#define MOZICON_SCHEME_LEN (sizeof(MOZICON_SCHEME) - 1)
+
+////////////////////////////////////////////////////////////////////////////////
+// nsIURI methods:
+
+// Reassembles the canonical spec from the parsed fields, in the form
+//   moz-icon:<inner-url | //stock/<id> | //<filename>>?size=...[&state=...]
+//   [&contentType=...]
+NS_IMETHODIMP
+nsMozIconURI::GetSpec(nsACString& aSpec) {
+  aSpec.AssignLiteral(MOZICON_SCHEME);
+
+  if (mIconURL) {
+    // Wrapped URL form: append the inner URL's spec verbatim.
+    nsAutoCString innerSpec;
+    nsresult rv = mIconURL->GetSpec(innerSpec);
+    NS_ENSURE_SUCCESS(rv, rv);
+    aSpec.Append(innerSpec);
+  } else if (!mStockIcon.IsEmpty()) {
+    aSpec.AppendLiteral("//stock/");
+    aSpec.Append(mStockIcon);
+  } else {
+    aSpec.AppendLiteral("//");
+    aSpec.Append(mFileName);
+  }
+
+  // The size attribute is always emitted: either a symbolic name or the
+  // numeric pixel size.
+  aSpec.AppendLiteral("?size=");
+  if (mIconSize >= 0) {
+    aSpec.Append(kSizeStrings[mIconSize]);
+  } else {
+    char sizeBuf[20];
+    SprintfLiteral(sizeBuf, "%d", mSize);
+    aSpec.Append(sizeBuf);
+  }
+
+  if (mIconState >= 0) {
+    aSpec.AppendLiteral("&state=");
+    aSpec.Append(kStateStrings[mIconState]);
+  }
+
+  if (!mContentType.IsEmpty()) {
+    aSpec.AppendLiteral("&contentType=");
+    aSpec.Append(mContentType);
+  }
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetSpecIgnoringRef(nsACString& result) { return GetSpec(result); }
+
+NS_IMETHODIMP
+nsMozIconURI::GetDisplaySpec(nsACString& aUnicodeSpec) {
+ return GetSpec(aUnicodeSpec);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetDisplayHostPort(nsACString& aUnicodeHostPort) {
+ return GetHostPort(aUnicodeHostPort);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetDisplayHost(nsACString& aUnicodeHost) {
+ return GetHost(aUnicodeHost);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetDisplayPrePath(nsACString& aPrePath) {
+ return GetPrePath(aPrePath);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetHasRef(bool* result) {
+ *result = false;
+ return NS_OK;
+}
+
+NS_IMPL_NSIURIMUTATOR_ISUPPORTS(nsMozIconURI::Mutator, nsIURISetters,
+ nsIURIMutator, nsISerializable)
+
+NS_IMETHODIMP
+nsMozIconURI::Mutate(nsIURIMutator** aMutator) {
+ RefPtr<nsMozIconURI::Mutator> mutator = new nsMozIconURI::Mutator();
+ nsresult rv = mutator->InitFromURI(this);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ mutator.forget(aMutator);
+ return NS_OK;
+}
+
+// helper function for parsing out attributes like size, and contentType
+// from the icon url.
+// takes a string like ?size=32&contentType=text/html and returns a new string
+// containing just the attribute value. i.e you could pass in this string with
+// an attribute name of 'size=', this will return 32
+// Assumption: attribute pairs in the string are separated by '&'.
+static void extractAttributeValue(const char* aSearchString,
+                                  const char* aAttributeName,
+                                  nsCString& aResult) {
+  aResult.Truncate();
+
+  if (aSearchString && aAttributeName) {
+    // search the string for attributeName
+    uint32_t attributeNameSize = strlen(aAttributeName);
+    const char* startOfAttribute = PL_strcasestr(aSearchString, aAttributeName);
+    // Only accept a match that directly follows a '?' or '&', i.e. a real
+    // attribute-pair boundary. The match must also not sit at the very start
+    // of the string: the previous code dereferenced (startOfAttribute - 1)
+    // unconditionally, reading one byte *before* the buffer whenever the
+    // attribute name happened to begin the search string.
+    if (startOfAttribute && startOfAttribute > aSearchString &&
+        (*(startOfAttribute - 1) == '?' || *(startOfAttribute - 1) == '&')) {
+      startOfAttribute += attributeNameSize; // skip over the attributeName
+      // is there something after the attribute name
+      if (*startOfAttribute) {
+        const char* endofAttribute = strchr(startOfAttribute, '&');
+        if (endofAttribute) {
+          aResult.Assign(Substring(startOfAttribute, endofAttribute));
+        } else {
+          aResult.Assign(startOfAttribute);
+        }
+      } // if we have a attribute value
+    } // if we have a attribute name
+  } // if we got non-null search string and attribute name values
+}
+
+// Parses a full moz-icon: spec into this object's fields. Accepts three path
+// forms after the scheme: "//stock/<id>", "//<dummy-filename.ext>", or a
+// nested file: URL; plus optional ?size=, &state=, &contentType= attributes.
+nsresult nsMozIconURI::SetSpecInternal(const nsACString& aSpec) {
+  // Reset everything to default values.
+  mIconURL = nullptr;
+  mSize = DEFAULT_IMAGE_SIZE;
+  mContentType.Truncate();
+  mFileName.Truncate();
+  mStockIcon.Truncate();
+  mIconSize = -1;
+  mIconState = -1;
+
+  nsAutoCString iconSpec(aSpec);
+  if (!Substring(iconSpec, 0, MOZICON_SCHEME_LEN)
+           .EqualsLiteral(MOZICON_SCHEME) ||
+      (!Substring(iconSpec, MOZICON_SCHEME_LEN, 7).EqualsLiteral("file://") &&
+       // Checking for the leading '//' will match both the '//stock/' and
+       // '//.foo' cases:
+       !Substring(iconSpec, MOZICON_SCHEME_LEN, 2).EqualsLiteral("//"))) {
+    return NS_ERROR_MALFORMED_URI;
+  }
+
+  // Parse the query attributes first; the path is cut off at the '?' below.
+  int32_t questionMarkPos = iconSpec.Find("?");
+  if (questionMarkPos != -1 &&
+      static_cast<int32_t>(iconSpec.Length()) > (questionMarkPos + 1)) {
+    extractAttributeValue(iconSpec.get(), "contentType=", mContentType);
+
+    nsAutoCString sizeString;
+    extractAttributeValue(iconSpec.get(), "size=", sizeString);
+    if (!sizeString.IsEmpty()) {
+      // A symbolic size ("button", "menu", ...) sets mIconSize; a numeric
+      // size sets mSize. Both are attempted: atoi returns 0 for symbolic
+      // names, which fails the > 0 check and leaves mSize at the default.
+      const char* sizeStr = sizeString.get();
+      for (uint32_t i = 0; i < ArrayLength(kSizeStrings); i++) {
+        if (nsCRT::strcasecmp(sizeStr, kSizeStrings[i]) == 0) {
+          mIconSize = i;
+          break;
+        }
+      }
+
+      int32_t sizeValue = atoi(sizeString.get());
+      if (sizeValue > 0) {
+        mSize = sizeValue;
+      }
+    }
+
+    nsAutoCString stateString;
+    extractAttributeValue(iconSpec.get(), "state=", stateString);
+    if (!stateString.IsEmpty()) {
+      const char* stateStr = stateString.get();
+      for (uint32_t i = 0; i < ArrayLength(kStateStrings); i++) {
+        if (nsCRT::strcasecmp(stateStr, kStateStrings[i]) == 0) {
+          mIconState = i;
+          break;
+        }
+      }
+    }
+  }
+
+  // Everything between the scheme and the '?' (or end of string) is the path.
+  int32_t pathLength = iconSpec.Length() - MOZICON_SCHEME_LEN;
+  if (questionMarkPos != -1) {
+    pathLength = questionMarkPos - MOZICON_SCHEME_LEN;
+  }
+  if (pathLength < 3) {
+    return NS_ERROR_MALFORMED_URI;
+  }
+
+  nsAutoCString iconPath(Substring(iconSpec, MOZICON_SCHEME_LEN, pathLength));
+
+  // Icon URI path can have three forms:
+  // (1) //stock/<icon-identifier>
+  // (2) //<some dummy file with an extension>
+  // (3) a valid URL
+
+  if (!strncmp("//stock/", iconPath.get(), 8)) {
+    mStockIcon.Assign(Substring(iconPath, 8));
+    // An icon identifier must always be specified.
+    if (mStockIcon.IsEmpty()) {
+      return NS_ERROR_MALFORMED_URI;
+    }
+    return NS_OK;
+  }
+
+  if (StringBeginsWith(iconPath, "//"_ns)) {
+    // Sanity check this supposed dummy file name.
+    if (iconPath.Length() > SANE_FILE_NAME_LEN) {
+      return NS_ERROR_MALFORMED_URI;
+    }
+    iconPath.Cut(0, 2);
+    mFileName.Assign(iconPath);
+  }
+
+  // Try to interpret the (remaining) path as a nested URL. NewURI on a bare
+  // filename fails, which is fine: mFileName then carries the information.
+  nsresult rv;
+  nsCOMPtr<nsIIOService> ioService(do_GetService(NS_IOSERVICE_CONTRACTID, &rv));
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  nsCOMPtr<nsIURI> uri;
+  ioService->NewURI(iconPath, nullptr, nullptr, getter_AddRefs(uri));
+  mIconURL = do_QueryInterface(uri);
+  if (mIconURL) {
+    // The inner URI should be a 'file:' one. If not, bail.
+    if (!mIconURL->SchemeIs("file")) {
+      return NS_ERROR_MALFORMED_URI;
+    }
+    mFileName.Truncate();
+  } else if (mFileName.IsEmpty()) {
+    return NS_ERROR_MALFORMED_URI;
+  }
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetPrePath(nsACString& prePath) {
+ prePath = MOZICON_SCHEME;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetScheme(nsACString& aScheme) {
+ aScheme = "moz-icon";
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::SetScheme(const nsACString& aScheme) {
+ // doesn't make sense to set the scheme of a moz-icon URL
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetUsername(nsACString& aUsername) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetUsername(const nsACString& aUsername) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetPassword(nsACString& aPassword) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetPassword(const nsACString& aPassword) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetUserPass(nsACString& aUserPass) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetUserPass(const nsACString& aUserPass) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetHostPort(nsACString& aHostPort) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetHostPort(const nsACString& aHostPort) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetHost(nsACString& aHost) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetHost(const nsACString& aHost) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetPort(int32_t* aPort) { return NS_ERROR_FAILURE; }
+
+nsresult nsMozIconURI::SetPort(int32_t aPort) { return NS_ERROR_FAILURE; }
+
+NS_IMETHODIMP
+nsMozIconURI::GetPathQueryRef(nsACString& aPath) {
+ aPath.Truncate();
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::SetPathQueryRef(const nsACString& aPath) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetFilePath(nsACString& aFilePath) {
+ aFilePath.Truncate();
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::SetFilePath(const nsACString& aFilePath) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetQuery(nsACString& aQuery) {
+ aQuery.Truncate();
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::SetQuery(const nsACString& aQuery) {
+ return NS_ERROR_FAILURE;
+}
+
+nsresult nsMozIconURI::SetQueryWithEncoding(const nsACString& aQuery,
+ const Encoding* aEncoding) {
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetRef(nsACString& aRef) {
+ aRef.Truncate();
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::SetRef(const nsACString& aRef) {
+ return NS_ERROR_FAILURE;
+}
+
+// Two moz-icon URIs are considered equal when their serialized specs match
+// case-insensitively.
+NS_IMETHODIMP
+nsMozIconURI::Equals(nsIURI* other, bool* result) {
+  MOZ_ASSERT(result, "null pointer");
+  *result = false;
+  NS_ENSURE_ARG_POINTER(other);
+
+  nsAutoCString thisSpec;
+  nsresult rv = GetSpec(thisSpec);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  nsAutoCString otherSpec;
+  rv = other->GetSpec(otherSpec);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  *result = nsCRT::strcasecmp(thisSpec.get(), otherSpec.get()) == 0;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::EqualsExceptRef(nsIURI* other, bool* result) {
+ // GetRef/SetRef not supported by nsMozIconURI, so
+ // EqualsExceptRef() is the same as Equals().
+ return Equals(other, result);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::SchemeIs(const char* aScheme, bool* aEquals) {
+ MOZ_ASSERT(aEquals, "null pointer");
+ if (!aScheme) {
+ *aEquals = false;
+ return NS_OK;
+ }
+
+ *aEquals = nsCRT::strcasecmp("moz-icon", aScheme) == 0;
+ return NS_OK;
+}
+
+nsresult nsMozIconURI::Clone(nsIURI** result) {
+ nsCOMPtr<nsIURL> newIconURL;
+ if (mIconURL) {
+ newIconURL = mIconURL;
+ }
+
+ RefPtr<nsMozIconURI> uri = new nsMozIconURI();
+ newIconURL.swap(uri->mIconURL);
+ uri->mSize = mSize;
+ uri->mContentType = mContentType;
+ uri->mFileName = mFileName;
+ uri->mStockIcon = mStockIcon;
+ uri->mIconSize = mIconSize;
+ uri->mIconState = mIconState;
+ uri.forget(result);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::Resolve(const nsACString& relativePath, nsACString& result) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetAsciiSpec(nsACString& aSpecA) { return GetSpec(aSpecA); }
+
+NS_IMETHODIMP
+nsMozIconURI::GetAsciiHostPort(nsACString& aHostPortA) {
+ return GetHostPort(aHostPortA);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetAsciiHost(nsACString& aHostA) { return GetHost(aHostA); }
+
+////////////////////////////////////////////////////////////////////////////////
+// nsIIconUri methods:
+
+NS_IMETHODIMP
+nsMozIconURI::GetIconURL(nsIURL** aFileUrl) {
+ *aFileUrl = mIconURL;
+ NS_IF_ADDREF(*aFileUrl);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetImageSize(uint32_t* aImageSize)
+// measured by # of pixels in a row. defaults to 16.
+{
+ *aImageSize = mSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetContentType(nsACString& aContentType) {
+ aContentType = mContentType;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetFileExtension(nsACString& aFileExtension) {
+  // NOTE(review): aFileExtension is never Truncate()d on the no-extension
+  // paths, so a caller's previous contents would survive — presumably all
+  // callers pass a fresh/empty string; confirm.
+  // First, try to get the extension from mIconURL if we have one
+  if (mIconURL) {
+    nsAutoCString fileExt;
+    if (NS_SUCCEEDED(mIconURL->GetFileExtension(fileExt))) {
+      if (!fileExt.IsEmpty()) {
+        // unfortunately, this code doesn't give us the required '.' in
+        // front of the extension so we have to do it ourselves.
+        aFileExtension.Assign('.');
+        aFileExtension.Append(fileExt);
+      }
+    }
+    return NS_OK;
+  }
+
+  if (!mFileName.IsEmpty()) {
+    // truncate the extension out of the file path...
+    const char* chFileName = mFileName.get(); // get the underlying buffer
+    const char* fileExt = strrchr(chFileName, '.');
+    if (!fileExt) {
+      return NS_OK;
+    }
+    // Note: unlike the branch above, this includes the '.' already since
+    // strrchr points at it.
+    aFileExtension = fileExt;
+  }
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetStockIcon(nsACString& aStockIcon) {
+ aStockIcon = mStockIcon;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetIconSize(nsACString& aSize) {
+ if (mIconSize >= 0) {
+ aSize = kSizeStrings[mIconSize];
+ } else {
+ aSize.Truncate();
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetIconState(nsACString& aState) {
+ if (mIconState >= 0) {
+ aState = kStateStrings[mIconState];
+ } else {
+ aState.Truncate();
+ }
+ return NS_OK;
+}
+
+// Serializes this URI for IPC. Must stay field-for-field symmetric with
+// Deserialize() below, which reads every member of IconURIParams.
+void nsMozIconURI::Serialize(URIParams& aParams) {
+  IconURIParams params;
+
+  if (mIconURL) {
+    URIParams iconURLParams;
+    SerializeURI(mIconURL, iconURLParams);
+    if (iconURLParams.type() == URIParams::T__None) {
+      // Serialization failed, bail.
+      return;
+    }
+
+    params.uri() = Some(std::move(iconURLParams));
+  } else {
+    params.uri() = Nothing();
+  }
+
+  params.size() = mSize;
+  // Bug fix: mContentType was previously never copied into the params, so
+  // Deserialize() — which assigns mContentType from params.contentType() —
+  // silently dropped the content type across process boundaries.
+  params.contentType() = mContentType;
+  params.fileName() = mFileName;
+  params.stockIcon() = mStockIcon;
+  params.iconSize() = mIconSize;
+  params.iconState() = mIconState;
+
+  aParams = params;
+}
+
+bool nsMozIconURI::Deserialize(const URIParams& aParams) {
+ if (aParams.type() != URIParams::TIconURIParams) {
+ MOZ_ASSERT_UNREACHABLE("Received unknown URI from other process!");
+ return false;
+ }
+
+ const IconURIParams& params = aParams.get_IconURIParams();
+ if (params.uri().isSome()) {
+ nsCOMPtr<nsIURI> uri = DeserializeURI(params.uri().ref());
+ mIconURL = do_QueryInterface(uri);
+ if (!mIconURL) {
+ MOZ_ASSERT_UNREACHABLE("bad nsIURI passed");
+ return false;
+ }
+ }
+
+ mSize = params.size();
+ mContentType = params.contentType();
+ mFileName = params.fileName();
+ mStockIcon = params.stockIcon();
+
+ if (params.iconSize() < -1 ||
+ params.iconSize() >= (int32_t)ArrayLength(kSizeStrings)) {
+ return false;
+ }
+ mIconSize = params.iconSize();
+
+ if (params.iconState() < -1 ||
+ params.iconState() >= (int32_t)ArrayLength(kStateStrings)) {
+ return false;
+ }
+ mIconState = params.iconState();
+
+ return true;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetInnerURI(nsIURI** aURI) {
+ nsCOMPtr<nsIURI> iconURL = mIconURL;
+ if (!iconURL) {
+ *aURI = nullptr;
+ return NS_ERROR_FAILURE;
+ }
+
+ iconURL.forget(aURI);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsMozIconURI::GetInnermostURI(nsIURI** aURI) {
+ return NS_ImplGetInnermostURI(this, aURI);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::Read(nsIObjectInputStream* aStream) {
+ MOZ_ASSERT_UNREACHABLE("Use nsIURIMutator.read() instead");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+nsresult nsMozIconURI::ReadPrivate(nsIObjectInputStream* aStream) {
+ nsAutoCString spec;
+ nsresult rv = aStream->ReadCString(spec);
+ NS_ENSURE_SUCCESS(rv, rv);
+ return SetSpecInternal(spec);
+}
+
+NS_IMETHODIMP
+nsMozIconURI::Write(nsIObjectOutputStream* aStream) {
+ nsAutoCString spec;
+ nsresult rv = GetSpec(spec);
+ NS_ENSURE_SUCCESS(rv, rv);
+ return aStream->WriteStringZ(spec.get());
+}
diff --git a/image/decoders/icon/nsIconURI.h b/image/decoders/icon/nsIconURI.h
new file mode 100644
index 0000000000..1f55bc686c
--- /dev/null
+++ b/image/decoders/icon/nsIconURI.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_icon_nsIconURI_h
+#define mozilla_image_decoders_icon_nsIconURI_h
+
+#include "nsIIconURI.h"
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include "nsINestedURI.h"
+#include "nsIURIMutator.h"
+#include "nsISerializable.h"
+
+#define NS_THIS_ICONURI_IMPLEMENTATION_CID \
+ { /* 0b9bb0c2-fee6-470b-b9b9-9fd9462b5e19 */ \
+ 0x5c3e417f, 0xb686, 0x4105, { \
+ 0x86, 0xe7, 0xf9, 0x1b, 0xac, 0x97, 0x4d, 0x5c \
+ } \
+ }
+
+namespace mozilla {
+class Encoding;
+}
+
// nsMozIconURI represents a moz-icon:// URI, which names an icon for a
// file, a file extension, or a named stock icon rather than a network
// resource. Instances are effectively immutable: all nsIURI setters are
// private and only reachable through the nested Mutator class.
class nsMozIconURI final : public nsIMozIconURI,
                           public nsINestedURI,
                           public nsISerializable {
 public:
  NS_DECL_THREADSAFE_ISUPPORTS
  NS_DECL_NSIURI
  NS_DECL_NSIMOZICONURI
  NS_DECL_NSINESTEDURI
  NS_DECL_NSISERIALIZABLE

 protected:
  nsMozIconURI();
  virtual ~nsMozIconURI();
  nsCOMPtr<nsIURL> mIconURL;  // a URL that we want the icon for
  uint32_t mSize;  // the # of pixels in a row that we want for this image.
                   // Typically 16, 32, 128, etc.
  nsCString mContentType;  // optional field explicitly specifying the content
                           // type
  nsCString mFileName;  // for if we don't have an actual file path, we're just
                        // given a filename with an extension
  nsCString mStockIcon;  // optional name of a platform stock icon
  int32_t mIconSize;   // -1 if not specified, otherwise index into
                       // kSizeStrings
  int32_t mIconState;  // -1 if not specified, otherwise index into
                       // kStateStrings

 private:
  // URIs are immutable once created, so the nsIURI setters below are
  // private and are only invoked through the Mutator (BaseURIMutator).
  nsresult Clone(nsIURI** aURI);
  nsresult SetSpecInternal(const nsACString& input);
  nsresult SetScheme(const nsACString& input);
  nsresult SetUserPass(const nsACString& input);
  nsresult SetUsername(const nsACString& input);
  nsresult SetPassword(const nsACString& input);
  nsresult SetHostPort(const nsACString& aValue);
  nsresult SetHost(const nsACString& input);
  nsresult SetPort(int32_t port);
  nsresult SetPathQueryRef(const nsACString& input);
  nsresult SetRef(const nsACString& input);
  nsresult SetFilePath(const nsACString& input);
  nsresult SetQuery(const nsACString& input);
  nsresult SetQueryWithEncoding(const nsACString& input,
                                const mozilla::Encoding* encoding);
  nsresult ReadPrivate(nsIObjectInputStream* stream);
  bool Deserialize(const mozilla::ipc::URIParams&);

 public:
  // Standard nsIURIMutator implementation: constructs or reconstructs an
  // nsMozIconURI from a spec string, an object stream, or IPC params.
  class Mutator final : public nsIURIMutator,
                        public BaseURIMutator<nsMozIconURI>,
                        public nsISerializable {
    NS_DECL_ISUPPORTS
    NS_FORWARD_SAFE_NSIURISETTERS_RET(mURI)

    // Serialization goes through the URI itself, never the mutator.
    NS_IMETHOD
    Write(nsIObjectOutputStream* aOutputStream) override {
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    [[nodiscard]] NS_IMETHOD Read(nsIObjectInputStream* aStream) override {
      return InitFromInputStream(aStream);
    }

    NS_IMETHOD Deserialize(const mozilla::ipc::URIParams& aParams) override {
      return InitFromIPCParams(aParams);
    }

    NS_IMETHOD Finalize(nsIURI** aURI) override {
      mURI.forget(aURI);
      return NS_OK;
    }

    NS_IMETHOD SetSpec(const nsACString& aSpec,
                       nsIURIMutator** aMutator) override {
      if (aMutator) {
        nsCOMPtr<nsIURIMutator> mutator = this;
        mutator.forget(aMutator);
      }
      return InitFromSpec(aSpec);
    }

    explicit Mutator() {}

   private:
    virtual ~Mutator() {}

    friend class nsMozIconURI;
  };

  friend BaseURIMutator<nsMozIconURI>;
};
+
+#endif // mozilla_image_decoders_icon_nsIconURI_h
diff --git a/image/decoders/icon/win/moz.build b/image/decoders/icon/win/moz.build
new file mode 100644
index 0000000000..c4472f89ac
--- /dev/null
+++ b/image/decoders/icon/win/moz.build
@@ -0,0 +1,21 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Build definition for the Windows moz-icon:// channel implementation.
SOURCES += [
    "nsIconChannel.cpp",
]

EXPORTS += [
    "nsIconChannel.h",
]

LOCAL_INCLUDES += [
    "/image",
]

# Linked directly into libxul.
FINAL_LIBRARY = "xul"

# Required for the IPC types (ByteBuf, ContentChild) used by the channel.
include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/image/decoders/icon/win/nsIconChannel.cpp b/image/decoders/icon/win/nsIconChannel.cpp
new file mode 100644
index 0000000000..fe76afe9b1
--- /dev/null
+++ b/image/decoders/icon/win/nsIconChannel.cpp
@@ -0,0 +1,1006 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/BasePrincipal.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/WindowsProcessMitigations.h"
+#include "mozilla/dom/ContentChild.h"
+#include "mozilla/ipc/ByteBuf.h"
+
+#include "nsComponentManagerUtils.h"
+#include "nsIconChannel.h"
+#include "nsIIconURI.h"
+#include "nsIInterfaceRequestor.h"
+#include "nsIInterfaceRequestorUtils.h"
+#include "nsString.h"
+#include "nsReadableUtils.h"
+#include "nsMimeTypes.h"
+#include "nsIURL.h"
+#include "nsIPipe.h"
+#include "nsNetCID.h"
+#include "nsIFile.h"
+#include "nsIFileURL.h"
+#include "nsIIconURI.h"
+#include "nsIAsyncInputStream.h"
+#include "nsIAsyncOutputStream.h"
+#include "nsIMIMEService.h"
+#include "nsCExternalHandlerService.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsProxyRelease.h"
+#include "nsContentSecurityManager.h"
+#include "nsContentUtils.h"
+#include "nsNetUtil.h"
+#include "nsThreadUtils.h"
+
+#include "Decoder.h"
+#include "DecodePool.h"
+
+// we need windows.h to read out registry information...
+#include <windows.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <objbase.h>
+#include <wchar.h>
+
+using namespace mozilla;
+using namespace mozilla::image;
+
+using mozilla::ipc::ByteBuf;
+
+struct ICONFILEHEADER {
+ uint16_t ifhReserved;
+ uint16_t ifhType;
+ uint16_t ifhCount;
+};
+
+struct ICONENTRY {
+ int8_t ieWidth;
+ int8_t ieHeight;
+ uint8_t ieColors;
+ uint8_t ieReserved;
+ uint16_t iePlanes;
+ uint16_t ieBitCount;
+ uint32_t ieSizeImage;
+ uint32_t ieFileOffset;
+};
+
+struct IconPathInfo {
+ nsCOMPtr<nsIFile> localFile;
+ nsAutoString filePath;
+ UINT infoFlags = 0;
+};
+
+using HIconPromise = MozPromise<HICON, nsresult, true>;
+
+static UINT GetSizeInfoFlag(uint32_t aDesiredImageSize) {
+ return aDesiredImageSize > 16 ? SHGFI_SHELLICONSIZE : SHGFI_SMALLICON;
+}
+
// Pulls the target local file (if any), desired image size, content type,
// and file extension out of a moz-icon URI. Missing pieces are not
// errors: returning NS_OK with *aLocalFile untouched simply means the URI
// does not reference a local file.
static nsresult ExtractIconInfoFromUrl(nsIURI* aUrl, nsIFile** aLocalFile,
                                       uint32_t* aDesiredImageSize,
                                       nsCString& aContentType,
                                       nsCString& aFileExtension) {
  nsresult rv = NS_OK;
  nsCOMPtr<nsIMozIconURI> iconURI(do_QueryInterface(aUrl, &rv));
  NS_ENSURE_SUCCESS(rv, rv);

  iconURI->GetImageSize(aDesiredImageSize);
  iconURI->GetContentType(aContentType);
  iconURI->GetFileExtension(aFileExtension);

  // Each step below bails out with NS_OK if the URI does not wrap a
  // file:// URL — only the metadata above applies in that case.
  nsCOMPtr<nsIURL> url;
  rv = iconURI->GetIconURL(getter_AddRefs(url));
  if (NS_FAILED(rv) || !url) return NS_OK;

  nsCOMPtr<nsIFileURL> fileURL = do_QueryInterface(url, &rv);
  if (NS_FAILED(rv) || !fileURL) return NS_OK;

  nsCOMPtr<nsIFile> file;
  rv = fileURL->GetFile(getter_AddRefs(file));
  if (NS_FAILED(rv) || !file) return NS_OK;

  return file->Clone(aLocalFile);
}
+
// Converts a moz-icon URI into the {file, display path, SHGetFileInfo
// flags} triple consumed by GetIconHandleFromPathInfo(). Rejects UNC
// paths; when no real file exists it synthesizes a ".ext"-style path so
// the shell can still return a type-based icon.
static nsresult ExtractIconPathInfoFromUrl(nsIURI* aUrl,
                                           IconPathInfo* aIconPathInfo) {
  nsCString contentType;
  nsCString fileExt;
  nsCOMPtr<nsIFile> localFile;  // file we want an icon for
  uint32_t desiredImageSize;
  nsresult rv = ExtractIconInfoFromUrl(aUrl, getter_AddRefs(localFile),
                                       &desiredImageSize, contentType, fileExt);
  NS_ENSURE_SUCCESS(rv, rv);

  // if the file exists, we are going to use it's real attributes...
  // otherwise we only want to use it for it's extension...
  UINT infoFlags = SHGFI_ICON;

  bool fileExists = false;

  nsAutoString filePath;
  CopyASCIItoUTF16(fileExt, filePath);
  if (localFile) {
    rv = localFile->Normalize();
    NS_ENSURE_SUCCESS(rv, rv);

    localFile->GetPath(filePath);
    if (filePath.Length() < 2 || filePath[1] != ':') {
      return NS_ERROR_MALFORMED_URI;  // UNC
    }

    if (filePath.Last() == ':') {
      // Bare drive letter ("C:"): append a backslash to name the root.
      filePath.Append('\\');
    } else {
      localFile->Exists(&fileExists);
      if (!fileExists) {
        // Fall back to the leaf name so at least the extension is usable.
        localFile->GetLeafName(filePath);
      }
    }
  }

  if (!fileExists) {
    // Tell the shell to derive the icon from the name alone, without
    // touching the (nonexistent) file.
    infoFlags |= SHGFI_USEFILEATTRIBUTES;
  }

  infoFlags |= GetSizeInfoFlag(desiredImageSize);

  // if we have a content type... then use it! but for existing files,
  // we want to show their real icon.
  if (!fileExists && !contentType.IsEmpty()) {
    nsCOMPtr<nsIMIMEService> mimeService(
        do_GetService(NS_MIMESERVICE_CONTRACTID, &rv));
    NS_ENSURE_SUCCESS(rv, rv);

    nsAutoCString defFileExt;
    mimeService->GetPrimaryExtension(contentType, fileExt, defFileExt);
    // If the mime service does not know about this mime type, we show
    // the generic icon.
    // In any case, we need to insert a '.' before the extension.
    filePath = u"."_ns + NS_ConvertUTF8toUTF16(defFileExt);
  }

  if (!localFile && !fileExists &&
      ((filePath.Length() == 1 && filePath.Last() == '.') ||
       filePath.Length() == 0)) {
    // Nothing usable at all: use a bogus extension, which yields the
    // generic file icon.
    filePath = u".MozBogusExtensionMoz"_ns;
  }

  aIconPathInfo->localFile = std::move(localFile);
  aIconPathInfo->filePath = std::move(filePath);
  aIconPathInfo->infoFlags = infoFlags;

  return NS_OK;
}
+
+static bool GetSpecialFolderIcon(nsIFile* aFile, int aFolder, UINT aInfoFlags,
+ HICON* aIcon) {
+ if (!aFile) {
+ return false;
+ }
+
+ wchar_t fileNativePath[MAX_PATH];
+ nsAutoString fileNativePathStr;
+ aFile->GetPath(fileNativePathStr);
+ ::GetShortPathNameW(fileNativePathStr.get(), fileNativePath,
+ ArrayLength(fileNativePath));
+
+ struct IdListDeleter {
+ void operator()(ITEMIDLIST* ptr) { ::CoTaskMemFree(ptr); }
+ };
+
+ UniquePtr<ITEMIDLIST, IdListDeleter> idList;
+ HRESULT hr =
+ ::SHGetSpecialFolderLocation(nullptr, aFolder, getter_Transfers(idList));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ wchar_t specialNativePath[MAX_PATH];
+ ::SHGetPathFromIDListW(idList.get(), specialNativePath);
+ ::GetShortPathNameW(specialNativePath, specialNativePath,
+ ArrayLength(specialNativePath));
+
+ if (wcsicmp(fileNativePath, specialNativePath) != 0) {
+ return false;
+ }
+
+ SHFILEINFOW sfi = {};
+ aInfoFlags |= (SHGFI_PIDL | SHGFI_SYSICONINDEX);
+ if (::SHGetFileInfoW((LPCWSTR)(LPCITEMIDLIST)idList.get(), 0, &sfi,
+ sizeof(sfi), aInfoFlags) == 0) {
+ return false;
+ }
+
+ *aIcon = sfi.hIcon;
+ return true;
+}
+
// Resolves previously extracted path info to an HICON via the Windows
// shell. Must not run in a Win32k-locked-down process (SHGetFileInfoW is
// a win32k call); the caller owns *aIcon and must DestroyIcon() it.
static nsresult GetIconHandleFromPathInfo(const IconPathInfo& aPathInfo,
                                          HICON* aIcon) {
  MOZ_DIAGNOSTIC_ASSERT(!IsWin32kLockedDown());

  // Is this the "Desktop" folder?
  if (GetSpecialFolderIcon(aPathInfo.localFile, CSIDL_DESKTOP,
                           aPathInfo.infoFlags, aIcon)) {
    return NS_OK;
  }

  // Is this the "My Documents" folder?
  if (GetSpecialFolderIcon(aPathInfo.localFile, CSIDL_PERSONAL,
                           aPathInfo.infoFlags, aIcon)) {
    return NS_OK;
  }

  // There are other "Special Folders" and Namespace entities that we
  // are not fetching icons for, see:
  // http://msdn.microsoft.com/library/default.asp?url=/library/en-us/
  // shellcc/platform/shell/reference/enums/csidl.asp
  // If we ever need to get them, code to do so would be inserted here.

  // Not a special folder, or something else failed above.
  SHFILEINFOW sfi = {};
  if (::SHGetFileInfoW(aPathInfo.filePath.get(), FILE_ATTRIBUTE_ARCHIVE, &sfi,
                       sizeof(sfi), aPathInfo.infoFlags) != 0) {
    *aIcon = sfi.hIcon;
    return NS_OK;
  }

  return NS_ERROR_NOT_AVAILABLE;
}
+
+// Match stock icons with names
+static mozilla::Maybe<SHSTOCKICONID> GetStockIconIDForName(
+ const nsACString& aStockName) {
+ return aStockName.EqualsLiteral("uac-shield") ? Some(SIID_SHIELD) : Nothing();
+}
+
// Specific to Vista and above
// Fetches the named Windows stock icon for a moz-icon URI at a size
// derived from the URI's requested image size. On success the caller
// owns the returned icon handle.
static nsresult GetStockHIcon(nsIMozIconURI* aIconURI, HICON* aIcon) {
  uint32_t desiredImageSize;
  aIconURI->GetImageSize(&desiredImageSize);
  nsAutoCString stockIcon;
  aIconURI->GetStockIcon(stockIcon);

  Maybe<SHSTOCKICONID> stockIconID = GetStockIconIDForName(stockIcon);
  if (stockIconID.isNothing()) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  UINT infoFlags = SHGSI_ICON;
  infoFlags |= GetSizeInfoFlag(desiredImageSize);

  SHSTOCKICONINFO sii = {0};
  sii.cbSize = sizeof(sii);
  HRESULT hr = SHGetStockIconInfo(*stockIconID, infoFlags, &sii);
  if (FAILED(hr)) {
    return NS_ERROR_FAILURE;
  }

  *aIcon = sii.hIcon;

  return NS_OK;
}
+
// Given a BITMAPINFOHEADER, returns the size of the color table.
// Returns -1 (with a warning) for bit depths it does not understand.
// NOTE: for 16/32 bpp this may MUTATE aHeader, rewriting biCompression
// from BI_BITFIELDS to BI_RGB so later GetDIBits calls return plain RGB
// data with no bitfield masks in the color table.
static int GetColorTableSize(BITMAPINFOHEADER* aHeader) {
  int colorTableSize = -1;

  // http://msdn.microsoft.com/en-us/library/dd183376%28v=VS.85%29.aspx
  switch (aHeader->biBitCount) {
    case 0:
      colorTableSize = 0;
      break;
    case 1:
      colorTableSize = 2 * sizeof(RGBQUAD);
      break;
    case 4:
    case 8: {
      // The maximum possible size for the color table is 2**bpp, so check for
      // that and fail if we're not in those bounds
      unsigned int maxEntries = 1 << (aHeader->biBitCount);
      if (aHeader->biClrUsed > 0 && aHeader->biClrUsed <= maxEntries) {
        colorTableSize = aHeader->biClrUsed * sizeof(RGBQUAD);
      } else if (aHeader->biClrUsed == 0) {
        colorTableSize = maxEntries * sizeof(RGBQUAD);
      }
      break;
    }
    case 16:
    case 32:
      // If we have BI_BITFIELDS compression, we would normally need 3 DWORDS
      // for the bitfields mask which would be stored in the color table;
      // However, we instead force the bitmap to request data of type BI_RGB so
      // the color table should be of size 0. Setting aHeader->biCompression =
      // BI_RGB forces the later call to GetDIBits to return to us BI_RGB data.
      if (aHeader->biCompression == BI_BITFIELDS) {
        aHeader->biCompression = BI_RGB;
      }
      colorTableSize = 0;
      break;
    case 24:
      colorTableSize = 0;
      break;
  }

  if (colorTableSize < 0) {
    NS_WARNING("Unable to figure out the color table size for this bitmap");
  }

  return colorTableSize;
}
+
// Given a header and a size, creates a freshly allocated BITMAPINFO structure.
// It is the caller's responsibility to null-check and delete the structure.
// The trailing color table (bmiColors) is zero-filled; the subsequent
// GetDIBits call is expected to populate it.
static BITMAPINFO* CreateBitmapInfo(BITMAPINFOHEADER* aHeader,
                                    size_t aColorTableSize) {
  // Allocate header + table in one block; fallible because the table size
  // is derived from untrusted bitmap data.
  BITMAPINFO* bmi = (BITMAPINFO*)::operator new(
      sizeof(BITMAPINFOHEADER) + aColorTableSize, mozilla::fallible);
  if (bmi) {
    memcpy(bmi, aHeader, sizeof(BITMAPINFOHEADER));
    memset(bmi->bmiColors, 0, aColorTableSize);
  }
  return bmi;
}
+
// Serializes an HICON into a complete Windows .ico file image inside
// aOutBuffer: ICONFILEHEADER + one ICONENTRY + BITMAPINFOHEADER + the
// XOR (color) bits + the AND (mask) bits. Always destroys aIcon before
// returning, even on failure. The sanity checks on the two bitmap
// headers bound the icon to <= 255x255 with matching color/mask sizes.
static nsresult MakeIconBuffer(HICON aIcon, ByteBuf* aOutBuffer) {
  nsresult rv = NS_ERROR_FAILURE;

  if (aIcon) {
    // we got a handle to an icon. Now we want to get a bitmap for the icon
    // using GetIconInfo....
    ICONINFO iconInfo;
    if (GetIconInfo(aIcon, &iconInfo)) {
      // we got the bitmaps, first find out their size
      HDC hDC = CreateCompatibleDC(nullptr);  // get a device context for
                                              // the screen.
      BITMAPINFOHEADER maskHeader = {sizeof(BITMAPINFOHEADER)};
      BITMAPINFOHEADER colorHeader = {sizeof(BITMAPINFOHEADER)};
      int colorTableSize, maskTableSize;
      if (GetDIBits(hDC, iconInfo.hbmMask, 0, 0, nullptr,
                    (BITMAPINFO*)&maskHeader, DIB_RGB_COLORS) &&
          GetDIBits(hDC, iconInfo.hbmColor, 0, 0, nullptr,
                    (BITMAPINFO*)&colorHeader, DIB_RGB_COLORS) &&
          maskHeader.biHeight == colorHeader.biHeight &&
          maskHeader.biWidth == colorHeader.biWidth &&
          colorHeader.biBitCount > 8 && colorHeader.biSizeImage > 0 &&
          colorHeader.biWidth >= 0 && colorHeader.biWidth <= 255 &&
          colorHeader.biHeight >= 0 && colorHeader.biHeight <= 255 &&
          maskHeader.biSizeImage > 0 &&
          (colorTableSize = GetColorTableSize(&colorHeader)) >= 0 &&
          (maskTableSize = GetColorTableSize(&maskHeader)) >= 0) {
        uint32_t iconSize = sizeof(ICONFILEHEADER) + sizeof(ICONENTRY) +
                            sizeof(BITMAPINFOHEADER) + colorHeader.biSizeImage +
                            maskHeader.biSizeImage;

        if (!aOutBuffer->Allocate(iconSize)) {
          rv = NS_ERROR_OUT_OF_MEMORY;
        } else {
          uint8_t* whereTo = aOutBuffer->mData;
          int howMuch;

          // the data starts with an icon file header
          ICONFILEHEADER iconHeader;
          iconHeader.ifhReserved = 0;
          iconHeader.ifhType = 1;
          iconHeader.ifhCount = 1;
          howMuch = sizeof(ICONFILEHEADER);
          memcpy(whereTo, &iconHeader, howMuch);
          whereTo += howMuch;

          // followed by the single icon entry
          ICONENTRY iconEntry;
          iconEntry.ieWidth = static_cast<int8_t>(colorHeader.biWidth);
          iconEntry.ieHeight = static_cast<int8_t>(colorHeader.biHeight);
          iconEntry.ieColors = 0;
          iconEntry.ieReserved = 0;
          iconEntry.iePlanes = 1;
          iconEntry.ieBitCount = colorHeader.biBitCount;
          iconEntry.ieSizeImage = sizeof(BITMAPINFOHEADER) +
                                  colorHeader.biSizeImage +
                                  maskHeader.biSizeImage;
          iconEntry.ieFileOffset = sizeof(ICONFILEHEADER) + sizeof(ICONENTRY);
          howMuch = sizeof(ICONENTRY);
          memcpy(whereTo, &iconEntry, howMuch);
          whereTo += howMuch;

          // followed by the bitmap info header
          // (doubling the height because icons have two bitmaps)
          colorHeader.biHeight *= 2;
          colorHeader.biSizeImage += maskHeader.biSizeImage;
          howMuch = sizeof(BITMAPINFOHEADER);
          memcpy(whereTo, &colorHeader, howMuch);
          whereTo += howMuch;
          // restore the header for the GetDIBits calls below
          colorHeader.biHeight /= 2;
          colorHeader.biSizeImage -= maskHeader.biSizeImage;

          // followed by the XOR bitmap data (colorHeader)
          // (you'd expect the color table to come here, but it apparently
          // doesn't)
          BITMAPINFO* colorInfo =
              CreateBitmapInfo(&colorHeader, colorTableSize);
          if (colorInfo &&
              GetDIBits(hDC, iconInfo.hbmColor, 0, colorHeader.biHeight,
                        whereTo, colorInfo, DIB_RGB_COLORS)) {
            whereTo += colorHeader.biSizeImage;

            // and finally the AND bitmap data (maskHeader)
            BITMAPINFO* maskInfo = CreateBitmapInfo(&maskHeader, maskTableSize);
            if (maskInfo &&
                GetDIBits(hDC, iconInfo.hbmMask, 0, maskHeader.biHeight,
                          whereTo, maskInfo, DIB_RGB_COLORS)) {
              rv = NS_OK;
            }  // if we got bitmap bits
            delete maskInfo;
          }  // if we got mask bits
          delete colorInfo;
        }  // if we allocated the buffer
      }    // if we got mask size

      DeleteDC(hDC);
      DeleteObject(iconInfo.hbmColor);
      DeleteObject(iconInfo.hbmMask);
    }  // if we got icon info
    DestroyIcon(aIcon);
  }  // if we got an hIcon

  return rv;
}
+
// Synchronously resolves a moz-icon URI to an HICON. Stock icons are
// served directly on this thread; file icons are resolved on the decode
// I/O thread (see GetIconHandleFromPathInfo) while this thread blocks.
// The by-reference captures are safe because SyncRunnable does not
// return until the task has run.
static nsresult GetIconHandleFromURLBlocking(nsIMozIconURI* aUrl,
                                             HICON* aIcon) {
  nsAutoCString stockIcon;
  aUrl->GetStockIcon(stockIcon);
  if (!stockIcon.IsEmpty()) {
    return GetStockHIcon(aUrl, aIcon);
  }

  IconPathInfo iconPathInfo;
  nsresult rv = ExtractIconPathInfoFromUrl(aUrl, &iconPathInfo);
  NS_ENSURE_SUCCESS(rv, rv);

  nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
      "GetIconHandleFromURLBlocking",
      [&] { rv = GetIconHandleFromPathInfo(iconPathInfo, aIcon); });

  RefPtr<nsIEventTarget> target = DecodePool::Singleton()->GetIOEventTarget();

  nsresult dispatchResult = SyncRunnable::DispatchToThread(target, task);
  NS_ENSURE_SUCCESS(dispatchResult, dispatchResult);

  return rv;
}
+
// Asynchronous counterpart of GetIconHandleFromURLBlocking: resolves the
// icon handle (on the decode I/O thread for file icons, inline for stock
// icons or early failures) and settles the returned promise with either
// the HICON or the failure code.
static RefPtr<HIconPromise> GetIconHandleFromURLAsync(nsIMozIconURI* aUrl) {
  RefPtr<HIconPromise::Private> promise = new HIconPromise::Private(__func__);

  nsAutoCString stockIcon;
  aUrl->GetStockIcon(stockIcon);
  if (!stockIcon.IsEmpty()) {
    // Stock icons need no file I/O; settle immediately.
    HICON hIcon = nullptr;
    nsresult rv = GetStockHIcon(aUrl, &hIcon);
    if (NS_SUCCEEDED(rv)) {
      promise->Resolve(hIcon, __func__);
    } else {
      promise->Reject(rv, __func__);
    }
    return promise;
  }

  IconPathInfo iconPathInfo;
  nsresult rv = ExtractIconPathInfoFromUrl(aUrl, &iconPathInfo);
  if (NS_FAILED(rv)) {
    promise->Reject(rv, __func__);
    return promise;
  }

  // Capture by value: the task may outlive this frame.
  nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
      "GetIconHandleFromURLAsync", [iconPathInfo, promise] {
        HICON hIcon = nullptr;
        nsresult rv = GetIconHandleFromPathInfo(iconPathInfo, &hIcon);
        if (NS_SUCCEEDED(rv)) {
          promise->Resolve(hIcon, __func__);
        } else {
          promise->Reject(rv, __func__);
        }
      });

  RefPtr<nsIEventTarget> target = DecodePool::Singleton()->GetIOEventTarget();

  rv = target->Dispatch(task.forget(), NS_DISPATCH_NORMAL);
  if (NS_FAILED(rv)) {
    promise->Reject(rv, __func__);
  }

  return promise;
}
+
// Resolves a moz-icon URI all the way to ICO file bytes: obtains the
// icon handle asynchronously, then serializes it with MakeIconBuffer
// (which also destroys the handle, on success and failure alike).
static RefPtr<nsIconChannel::ByteBufPromise> GetIconBufferFromURLAsync(
    nsIMozIconURI* aUrl) {
  RefPtr<nsIconChannel::ByteBufPromise::Private> promise =
      new nsIconChannel::ByteBufPromise::Private(__func__);

  GetIconHandleFromURLAsync(aUrl)->Then(
      GetCurrentSerialEventTarget(), __func__,
      [promise](HICON aIcon) {
        ByteBuf iconBuffer;
        nsresult rv = MakeIconBuffer(aIcon, &iconBuffer);
        if (NS_SUCCEEDED(rv)) {
          promise->Resolve(std::move(iconBuffer), __func__);
        } else {
          promise->Reject(rv, __func__);
        }
      },
      [promise](nsresult rv) { promise->Reject(rv, __func__); });

  return promise;
}
+
+static nsresult WriteByteBufToOutputStream(const ByteBuf& aBuffer,
+ nsIAsyncOutputStream* aStream) {
+ uint32_t written = 0;
+ nsresult rv = aStream->Write(reinterpret_cast<const char*>(aBuffer.mData),
+ aBuffer.mLen, &written);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return (written == aBuffer.mLen) ? NS_OK : NS_ERROR_UNEXPECTED;
+}
+
NS_IMPL_ISUPPORTS(nsIconChannel, nsIChannel, nsIRequest, nsIRequestObserver,
                  nsIStreamListener)

// nsIconChannel methods
nsIconChannel::nsIconChannel() {}

nsIconChannel::~nsIconChannel() {
  // The channel can be destroyed off the main thread; proxy the final
  // release of these members back to the main thread.
  if (mLoadInfo) {
    NS_ReleaseOnMainThread("nsIconChannel::mLoadInfo", mLoadInfo.forget());
  }
  if (mLoadGroup) {
    NS_ReleaseOnMainThread("nsIconChannel::mLoadGroup", mLoadGroup.forget());
  }
}

// Binds the channel to a moz-icon URI and creates the input-stream pump
// that will later deliver the generated ICO bytes to the listener.
nsresult nsIconChannel::Init(nsIURI* uri) {
  NS_ASSERTION(uri, "no uri");
  mUrl = uri;
  mOriginalURI = uri;
  nsresult rv;
  mPump = do_CreateInstance(NS_INPUTSTREAMPUMP_CONTRACTID, &rv);
  return rv;
}
+
////////////////////////////////////////////////////////////////////////////////
// nsIRequest methods:
// Most request state (pending/status/suspend/resume, load flags) is
// forwarded to the underlying input-stream pump.

NS_IMETHODIMP
nsIconChannel::GetName(nsACString& result) { return mUrl->GetSpec(result); }

NS_IMETHODIMP
nsIconChannel::IsPending(bool* result) { return mPump->IsPending(result); }

NS_IMETHODIMP
nsIconChannel::GetStatus(nsresult* status) { return mPump->GetStatus(status); }

NS_IMETHODIMP nsIconChannel::SetCanceledReason(const nsACString& aReason) {
  return SetCanceledReasonImpl(aReason);
}

NS_IMETHODIMP nsIconChannel::GetCanceledReason(nsACString& aReason) {
  return GetCanceledReasonImpl(aReason);
}

NS_IMETHODIMP nsIconChannel::CancelWithReason(nsresult aStatus,
                                              const nsACString& aReason) {
  return CancelWithReasonImpl(aStatus, aReason);
}

NS_IMETHODIMP
nsIconChannel::Cancel(nsresult status) {
  // Track cancellation locally so GetCanceled() can answer without
  // consulting the pump.
  mCanceled = true;
  return mPump->Cancel(status);
}

NS_IMETHODIMP
nsIconChannel::GetCanceled(bool* result) {
  *result = mCanceled;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::Suspend(void) { return mPump->Suspend(); }

NS_IMETHODIMP
nsIconChannel::Resume(void) { return mPump->Resume(); }
NS_IMETHODIMP
nsIconChannel::GetLoadGroup(nsILoadGroup** aLoadGroup) {
  *aLoadGroup = mLoadGroup;
  NS_IF_ADDREF(*aLoadGroup);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetLoadGroup(nsILoadGroup* aLoadGroup) {
  mLoadGroup = aLoadGroup;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::GetLoadFlags(uint32_t* aLoadAttributes) {
  return mPump->GetLoadFlags(aLoadAttributes);
}

NS_IMETHODIMP
nsIconChannel::SetLoadFlags(uint32_t aLoadAttributes) {
  return mPump->SetLoadFlags(aLoadAttributes);
}

NS_IMETHODIMP
nsIconChannel::GetTRRMode(nsIRequest::TRRMode* aTRRMode) {
  return GetTRRModeImpl(aTRRMode);
}

NS_IMETHODIMP
nsIconChannel::SetTRRMode(nsIRequest::TRRMode aTRRMode) {
  return SetTRRModeImpl(aTRRMode);
}

NS_IMETHODIMP
nsIconChannel::GetIsDocument(bool* aIsDocument) {
  return NS_GetIsDocumentChannel(this, aIsDocument);
}
+
////////////////////////////////////////////////////////////////////////////////
// nsIChannel methods:

NS_IMETHODIMP
nsIconChannel::GetOriginalURI(nsIURI** aURI) {
  *aURI = mOriginalURI;
  NS_ADDREF(*aURI);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetOriginalURI(nsIURI* aURI) {
  NS_ENSURE_ARG_POINTER(aURI);
  mOriginalURI = aURI;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::GetURI(nsIURI** aURI) {
  *aURI = mUrl;
  NS_IF_ADDREF(*aURI);
  return NS_OK;
}

// static
// Parent-process-only (asserted) helper that resolves a moz-icon URI to
// the raw ICO bytes as a promise, bypassing the channel machinery. Used
// to service requests arriving over IPC from content processes.
RefPtr<nsIconChannel::ByteBufPromise> nsIconChannel::GetIconAsync(
    nsIURI* aURI) {
  MOZ_ASSERT(XRE_IsParentProcess());

  nsresult rv = NS_OK;
  nsCOMPtr<nsIMozIconURI> iconURI(do_QueryInterface(aURI, &rv));
  if (NS_FAILED(rv)) {
    return ByteBufPromise::CreateAndReject(rv, __func__);
  }

  return GetIconBufferFromURLAsync(iconURI);
}
+
// Synchronous open: runs the content security check, generates the whole
// ICO image up front (blocking on the decode I/O thread), and returns a
// pipe input stream already containing the data.
NS_IMETHODIMP
nsIconChannel::Open(nsIInputStream** aStream) {
  nsCOMPtr<nsIStreamListener> listener;
  nsresult rv =
      nsContentSecurityManager::doContentSecurityCheck(this, listener);
  NS_ENSURE_SUCCESS(rv, rv);

  MOZ_ASSERT(
      mLoadInfo->GetSecurityMode() == 0 ||
          mLoadInfo->GetInitialSecurityCheckDone() ||
          (mLoadInfo->GetSecurityMode() ==
               nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL &&
           mLoadInfo->GetLoadingPrincipal() &&
           mLoadInfo->GetLoadingPrincipal()->IsSystemPrincipal()),
      "security flags in loadInfo but doContentSecurityCheck() not called");

  // Double-check that we are actually an icon URL
  nsCOMPtr<nsIMozIconURI> iconURI(do_QueryInterface(mUrl, &rv));
  NS_ENSURE_SUCCESS(rv, rv);

  // Get the handle for the given icon URI. This may involve the decode I/O
  // thread, as we can only call SHGetFileInfo() from that thread
  //
  // Since this API is synchronous, this call will not return until the decode
  // I/O thread returns with the icon handle
  //
  // Once we have the handle, we create a Windows ICO buffer with it and
  // dump the buffer into the output end of the pipe. The input end will
  // be returned to the caller
  HICON hIcon = nullptr;
  rv = GetIconHandleFromURLBlocking(iconURI, &hIcon);
  NS_ENSURE_SUCCESS(rv, rv);

  ByteBuf iconBuffer;
  rv = MakeIconBuffer(hIcon, &iconBuffer);
  NS_ENSURE_SUCCESS(rv, rv);

  // Create the asynchronous pipe with a blocking read end
  nsCOMPtr<nsIAsyncInputStream> inputStream;
  nsCOMPtr<nsIAsyncOutputStream> outputStream;
  NS_NewPipe2(getter_AddRefs(inputStream), getter_AddRefs(outputStream),
              false /*nonBlockingInput*/, false /*nonBlockingOutput*/,
              iconBuffer.mLen /*segmentSize*/, 1 /*segmentCount*/);

  rv = WriteByteBufToOutputStream(iconBuffer, outputStream);

  if (NS_SUCCEEDED(rv)) {
    inputStream.forget(aStream);
  }

  return rv;
}
+
// Asynchronous open: runs the content security check, then kicks off icon
// generation (StartAsyncOpen) and begins pumping the result to aListener.
// On any failure the listener/callbacks are dropped to avoid leaks.
NS_IMETHODIMP
nsIconChannel::AsyncOpen(nsIStreamListener* aListener) {
  nsCOMPtr<nsIStreamListener> listener = aListener;
  nsresult rv =
      nsContentSecurityManager::doContentSecurityCheck(this, listener);
  if (NS_FAILED(rv)) {
    mCallbacks = nullptr;
    return rv;
  }

  MOZ_ASSERT(
      mLoadInfo->GetSecurityMode() == 0 ||
          mLoadInfo->GetInitialSecurityCheckDone() ||
          (mLoadInfo->GetSecurityMode() ==
               nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL &&
           mLoadInfo->GetLoadingPrincipal() &&
           mLoadInfo->GetLoadingPrincipal()->IsSystemPrincipal()),
      "security flags in loadInfo but doContentSecurityCheck() not called");

  // Use the (possibly wrapped) listener returned by the security check.
  mListener = listener;

  rv = StartAsyncOpen();
  if (NS_FAILED(rv)) {
    mListener = nullptr;
    mCallbacks = nullptr;
    return rv;
  }

  // Add ourself to the load group, if available
  if (mLoadGroup) {
    mLoadGroup->AddRequest(this, nullptr);
  }

  return NS_OK;
}
+
// Wires up the pipe and pump for an async load. The ICO bytes are
// produced either by the parent process (content path, via
// PContent::GetSystemIcon) or locally on the decode I/O thread, and are
// written to the pipe's output end as soon as they are ready; the pump
// delivers the input end to mListener.
nsresult nsIconChannel::StartAsyncOpen() {
  // Double-check that we are actually an icon URL
  nsresult rv = NS_OK;
  nsCOMPtr<nsIMozIconURI> iconURI(do_QueryInterface(mUrl, &rv));
  NS_ENSURE_SUCCESS(rv, rv);

  // Create the asynchronous pipe with a non-blocking read end
  nsCOMPtr<nsIAsyncInputStream> inputStream;
  nsCOMPtr<nsIAsyncOutputStream> outputStream;
  NS_NewPipe2(getter_AddRefs(inputStream), getter_AddRefs(outputStream),
              true /*nonBlockingInput*/, false /*nonBlockingOutput*/,
              0 /*segmentSize*/, UINT32_MAX /*segmentCount*/);

  // If we are in content, we asynchronously request the ICO buffer from
  // the parent process because the APIs to load icons don't work with
  // Win32k Lockdown
  using ContentChild = mozilla::dom::ContentChild;
  if (auto* contentChild = ContentChild::GetSingleton()) {
    RefPtr<ContentChild::GetSystemIconPromise> iconPromise =
        contentChild->SendGetSystemIcon(mUrl);
    if (!iconPromise) {
      return NS_ERROR_UNEXPECTED;
    }

    iconPromise->Then(
        mozilla::GetCurrentSerialEventTarget(), __func__,
        [outputStream](std::tuple<nsresult, mozilla::Maybe<ByteBuf>>&& aArg) {
          nsresult rv = std::get<0>(aArg);
          mozilla::Maybe<ByteBuf> iconBuffer = std::move(std::get<1>(aArg));

          if (NS_SUCCEEDED(rv)) {
            // A successful reply must carry a buffer.
            MOZ_RELEASE_ASSERT(iconBuffer);
            rv = WriteByteBufToOutputStream(*iconBuffer, outputStream);
          }

          // Closing with the final status signals EOF (or error) to the pump.
          outputStream->CloseWithStatus(rv);
        },
        [outputStream](mozilla::ipc::ResponseRejectReason) {
          outputStream->CloseWithStatus(NS_ERROR_FAILURE);
        });
  } else {
    // Get the handle for the given icon URI. This may involve the decode I/O
    // thread, as we can only call SHGetFileInfo() from that thread
    //
    // Once we have the handle, we create a Windows ICO buffer with it and
    // dump the buffer into the output end of the pipe. The input end will be
    // pumped to our attached nsIStreamListener
    GetIconBufferFromURLAsync(iconURI)->Then(
        GetCurrentSerialEventTarget(), __func__,
        [outputStream](ByteBuf aIconBuffer) {
          nsresult rv =
              WriteByteBufToOutputStream(std::move(aIconBuffer), outputStream);
          outputStream->CloseWithStatus(rv);
        },
        [outputStream](nsresult rv) { outputStream->CloseWithStatus(rv); });
  }

  // Use the main thread for the pumped events unless the load info
  // specifies otherwise
  nsCOMPtr<nsISerialEventTarget> listenerTarget =
      nsContentUtils::GetEventTargetByLoadInfo(mLoadInfo,
                                               mozilla::TaskCategory::Other);
  if (!listenerTarget) {
    listenerTarget = do_GetMainThread();
  }

  rv = mPump->Init(inputStream.get(), 0 /*segmentSize*/, 0 /*segmentCount*/,
                   false /*closeWhenDone*/, listenerTarget);
  NS_ENSURE_SUCCESS(rv, rv);

  return mPump->AsyncRead(this);
}
+
// The channel always produces ICO data; type/charset are fixed and the
// various disposition/length properties are not applicable.
NS_IMETHODIMP
nsIconChannel::GetContentType(nsACString& aContentType) {
  aContentType.AssignLiteral(IMAGE_ICO);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetContentType(const nsACString& aContentType) {
  // It doesn't make sense to set the content-type on this type
  // of channel...
  return NS_ERROR_FAILURE;
}

NS_IMETHODIMP nsIconChannel::GetContentCharset(nsACString& aContentCharset) {
  aContentCharset.Truncate();
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetContentCharset(const nsACString& aContentCharset) {
  // It doesn't make sense to set the content-charset on this type
  // of channel...
  return NS_ERROR_FAILURE;
}

NS_IMETHODIMP
nsIconChannel::GetContentDisposition(uint32_t* aContentDisposition) {
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsIconChannel::SetContentDisposition(uint32_t aContentDisposition) {
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsIconChannel::GetContentDispositionFilename(
    nsAString& aContentDispositionFilename) {
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsIconChannel::SetContentDispositionFilename(
    const nsAString& aContentDispositionFilename) {
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsIconChannel::GetContentDispositionHeader(
    nsACString& aContentDispositionHeader) {
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsIconChannel::GetContentLength(int64_t* aContentLength) {
  // The length is unknown until the ICO buffer has been generated.
  *aContentLength = 0;
  return NS_ERROR_FAILURE;
}

NS_IMETHODIMP
nsIconChannel::SetContentLength(int64_t aContentLength) {
  MOZ_ASSERT_UNREACHABLE("nsIconChannel::SetContentLength");
  return NS_ERROR_NOT_IMPLEMENTED;
}

NS_IMETHODIMP
nsIconChannel::GetOwner(nsISupports** aOwner) {
  *aOwner = mOwner.get();
  NS_IF_ADDREF(*aOwner);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetOwner(nsISupports* aOwner) {
  mOwner = aOwner;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::GetLoadInfo(nsILoadInfo** aLoadInfo) {
  NS_IF_ADDREF(*aLoadInfo = mLoadInfo);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetLoadInfo(nsILoadInfo* aLoadInfo) {
  MOZ_RELEASE_ASSERT(aLoadInfo, "loadinfo can't be null");
  mLoadInfo = aLoadInfo;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::GetNotificationCallbacks(
    nsIInterfaceRequestor** aNotificationCallbacks) {
  *aNotificationCallbacks = mCallbacks.get();
  NS_IF_ADDREF(*aNotificationCallbacks);
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::SetNotificationCallbacks(
    nsIInterfaceRequestor* aNotificationCallbacks) {
  mCallbacks = aNotificationCallbacks;
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::GetSecurityInfo(nsITransportSecurityInfo** aSecurityInfo) {
  // Local icon data involves no transport security.
  *aSecurityInfo = nullptr;
  return NS_OK;
}
+
// nsIRequestObserver methods
// The channel is registered as the pump's listener and relays the
// notifications to the external mListener, substituting itself as the
// request so consumers see the channel rather than the pump.
NS_IMETHODIMP nsIconChannel::OnStartRequest(nsIRequest* aRequest) {
  if (mListener) {
    return mListener->OnStartRequest(this);
  }
  return NS_OK;
}

NS_IMETHODIMP
nsIconChannel::OnStopRequest(nsIRequest* aRequest, nsresult aStatus) {
  if (mListener) {
    mListener->OnStopRequest(this, aStatus);
    // The load is over; release the listener so it cannot be notified again.
    mListener = nullptr;
  }

  // Remove from load group
  if (mLoadGroup) {
    mLoadGroup->RemoveRequest(this, nullptr, aStatus);
  }

  // Drop notification callbacks to prevent cycles.
  mCallbacks = nullptr;

  return NS_OK;
}

// nsIStreamListener methods
NS_IMETHODIMP
nsIconChannel::OnDataAvailable(nsIRequest* aRequest, nsIInputStream* aStream,
                               uint64_t aOffset, uint32_t aCount) {
  if (mListener) {
    return mListener->OnDataAvailable(this, aStream, aOffset, aCount);
  }
  return NS_OK;
}
diff --git a/image/decoders/icon/win/nsIconChannel.h b/image/decoders/icon/win/nsIconChannel.h
new file mode 100644
index 0000000000..4065be76e3
--- /dev/null
+++ b/image/decoders/icon/win/nsIconChannel.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_encoders_icon_win_nsIconChannel_h
+#define mozilla_image_encoders_icon_win_nsIconChannel_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MozPromise.h"
+
+#include "nsCOMPtr.h"
+#include "nsString.h"
+#include "nsIChannel.h"
+#include "nsILoadGroup.h"
+#include "nsILoadInfo.h"
+#include "nsIInterfaceRequestor.h"
+#include "nsIInterfaceRequestorUtils.h"
+#include "nsIURI.h"
+#include "nsIInputStreamPump.h"
+#include "nsIStreamListener.h"
+
+namespace mozilla::ipc {
+class ByteBuf;
+}
+
+class nsIconChannel final : public nsIChannel, public nsIStreamListener {
+ public:
+ using ByteBufPromise =
+ mozilla::MozPromise<mozilla::ipc::ByteBuf, nsresult, true>;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIREQUEST
+ NS_DECL_NSICHANNEL
+ NS_DECL_NSIREQUESTOBSERVER
+ NS_DECL_NSISTREAMLISTENER
+
+ nsIconChannel();
+
+ nsresult Init(nsIURI* uri);
+
+ /// Obtains an icon in Windows ICO format as a ByteBuf instead
+ /// of a channel. For use with IPC.
+ static RefPtr<ByteBufPromise> GetIconAsync(nsIURI* aURI);
+
+ private:
+ ~nsIconChannel();
+
+ nsresult StartAsyncOpen();
+
+ nsCOMPtr<nsIURI> mUrl;
+ nsCOMPtr<nsIURI> mOriginalURI;
+ nsCOMPtr<nsILoadGroup> mLoadGroup;
+ nsCOMPtr<nsIInterfaceRequestor> mCallbacks;
+ nsCOMPtr<nsISupports> mOwner;
+ nsCOMPtr<nsILoadInfo> mLoadInfo;
+
+ nsCOMPtr<nsIInputStreamPump> mPump;
+ nsCOMPtr<nsIStreamListener> mListener;
+
+ bool mCanceled = false;
+};
+
+#endif // mozilla_image_encoders_icon_win_nsIconChannel_h
diff --git a/image/decoders/moz.build b/image/decoders/moz.build
new file mode 100644
index 0000000000..d7e062f843
--- /dev/null
+++ b/image/decoders/moz.build
@@ -0,0 +1,62 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+toolkit = CONFIG["MOZ_WIDGET_TOOLKIT"]
+
+# The Icon Channel stuff really shouldn't live in decoders/icon, but we'll
+# fix that another time.
+if toolkit == "gtk":
+ DIRS += ["icon/gtk", "icon"]
+
+if CONFIG["OS_ARCH"] == "WINNT":
+ DIRS += ["icon/win", "icon"]
+
+if toolkit == "cocoa":
+ DIRS += ["icon/mac", "icon"]
+elif toolkit == "android":
+ DIRS += ["icon/android", "icon"]
+
+UNIFIED_SOURCES += [
+ "EXIF.cpp",
+ "iccjpeg.c",
+ "nsBMPDecoder.cpp",
+ "nsGIFDecoder2.cpp",
+ "nsICODecoder.cpp",
+ "nsIconDecoder.cpp",
+ "nsJPEGDecoder.cpp",
+ "nsPNGDecoder.cpp",
+ "nsWebPDecoder.cpp",
+]
+
+if CONFIG["MOZ_AV1"]:
+ UNIFIED_SOURCES += [
+ "nsAVIFDecoder.cpp",
+ ]
+
+if CONFIG["MOZ_JXL"]:
+ UNIFIED_SOURCES += [
+ "nsJXLDecoder.cpp",
+ ]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+LOCAL_INCLUDES += [
+ # Access to Skia headers for Downscaler.
+ "/gfx/2d",
+ # Decoders need ImageLib headers.
+ "/image",
+ # for libyuv::ARGBAttenuate and ::ARGBUnattenuate
+ "/media/libyuv/libyuv/include",
+]
+
+LOCAL_INCLUDES += CONFIG["SKIA_INCLUDES"]
+
+FINAL_LIBRARY = "xul"
+
+CXXFLAGS += ["-Werror=switch"]
+
+# Add libFuzzer configuration directives
+include("/tools/fuzzing/libfuzzer-config.mozbuild")
diff --git a/image/decoders/nsAVIFDecoder.cpp b/image/decoders/nsAVIFDecoder.cpp
new file mode 100644
index 0000000000..06b5a60086
--- /dev/null
+++ b/image/decoders/nsAVIFDecoder.cpp
@@ -0,0 +1,1991 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageLogging.h" // Must appear first
+
+#include "nsAVIFDecoder.h"
+
+#include "aom/aomdx.h"
+
+#include "DAV1DDecoder.h"
+#include "gfxPlatform.h"
+#include "YCbCrUtils.h"
+#include "libyuv.h"
+
+#include "SurfacePipeFactory.h"
+
+#include "mozilla/Telemetry.h"
+#include "mozilla/TelemetryComms.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+using namespace mozilla::gfx;
+
+namespace mozilla {
+
+namespace image {
+
+using Telemetry::LABELS_AVIF_A1LX;
+using Telemetry::LABELS_AVIF_A1OP;
+using Telemetry::LABELS_AVIF_ALPHA;
+using Telemetry::LABELS_AVIF_AOM_DECODE_ERROR;
+using Telemetry::LABELS_AVIF_BIT_DEPTH;
+using Telemetry::LABELS_AVIF_CICP_CP;
+using Telemetry::LABELS_AVIF_CICP_MC;
+using Telemetry::LABELS_AVIF_CICP_TC;
+using Telemetry::LABELS_AVIF_CLAP;
+using Telemetry::LABELS_AVIF_COLR;
+using Telemetry::LABELS_AVIF_DECODE_RESULT;
+using Telemetry::LABELS_AVIF_DECODER;
+using Telemetry::LABELS_AVIF_GRID;
+using Telemetry::LABELS_AVIF_IPRO;
+using Telemetry::LABELS_AVIF_ISPE;
+using Telemetry::LABELS_AVIF_LSEL;
+using Telemetry::LABELS_AVIF_MAJOR_BRAND;
+using Telemetry::LABELS_AVIF_PASP;
+using Telemetry::LABELS_AVIF_PIXI;
+using Telemetry::LABELS_AVIF_SEQUENCE;
+using Telemetry::LABELS_AVIF_YUV_COLOR_SPACE;
+
+static LazyLogModule sAVIFLog("AVIFDecoder");
+
+static const LABELS_AVIF_BIT_DEPTH gColorDepthLabel[] = {
+ LABELS_AVIF_BIT_DEPTH::color_8, LABELS_AVIF_BIT_DEPTH::color_10,
+ LABELS_AVIF_BIT_DEPTH::color_12, LABELS_AVIF_BIT_DEPTH::color_16};
+
+static const LABELS_AVIF_YUV_COLOR_SPACE gColorSpaceLabel[] = {
+ LABELS_AVIF_YUV_COLOR_SPACE::BT601, LABELS_AVIF_YUV_COLOR_SPACE::BT709,
+ LABELS_AVIF_YUV_COLOR_SPACE::BT2020, LABELS_AVIF_YUV_COLOR_SPACE::identity};
+
+static MaybeIntSize GetImageSize(const Mp4parseAvifInfo& aInfo) {
+ // Note this does not take cropping via CleanAperture (clap) into account
+ const struct Mp4parseImageSpatialExtents* ispe = aInfo.spatial_extents;
+
+ if (ispe) {
+ // Decoder::PostSize takes int32_t, but ispe contains uint32_t
+ CheckedInt<int32_t> width = ispe->image_width;
+ CheckedInt<int32_t> height = ispe->image_height;
+
+ if (width.isValid() && height.isValid()) {
+ return Some(IntSize{width.value(), height.value()});
+ }
+ }
+
+ return Nothing();
+}
+
+// Translate the MIAF/HEIF-based orientation transforms (imir, irot) into
+// ImageLib's representation. Note that the interpretation of imir was reversed
+// between HEIF (ISO 23008-12:2017) and ISO/IEC 23008-12:2017/DAmd 2. This is
+// handled by mp4parse. See mp4parse::read_imir for details.
+Orientation GetImageOrientation(const Mp4parseAvifInfo& aInfo) {
+ // Per MIAF (ISO/IEC 23000-22:2019) § 7.3.6.7
+ // These properties, if used, shall be indicated to be applied in the
+ // following order: clean aperture first, then rotation, then mirror.
+ // The Orientation type does the same order, but opposite rotation direction
+
+ const Mp4parseIrot heifRot = aInfo.image_rotation;
+ const Mp4parseImir* heifMir = aInfo.image_mirror;
+ Angle mozRot;
+ Flip mozFlip;
+
+ if (!heifMir) { // No mirroring
+ mozFlip = Flip::Unflipped;
+
+ switch (heifRot) {
+ case MP4PARSE_IROT_D0:
+ // ⥠ UPWARDS HARPOON WITH BARB LEFT FROM BAR
+ mozRot = Angle::D0;
+ break;
+ case MP4PARSE_IROT_D90:
+ // ⥞ LEFTWARDS HARPOON WITH BARB DOWN FROM BAR
+ mozRot = Angle::D270;
+ break;
+ case MP4PARSE_IROT_D180:
+ // ⥝ DOWNWARDS HARPOON WITH BARB RIGHT FROM BAR
+ mozRot = Angle::D180;
+ break;
+ case MP4PARSE_IROT_D270:
+ // ⥛ RIGHTWARDS HARPOON WITH BARB UP FROM BAR
+ mozRot = Angle::D90;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ } else {
+ MOZ_ASSERT(heifMir);
+ mozFlip = Flip::Horizontal;
+
+ enum class HeifFlippedOrientation : uint8_t {
+ IROT_D0_IMIR_V = (MP4PARSE_IROT_D0 << 1) | MP4PARSE_IMIR_LEFT_RIGHT,
+ IROT_D0_IMIR_H = (MP4PARSE_IROT_D0 << 1) | MP4PARSE_IMIR_TOP_BOTTOM,
+ IROT_D90_IMIR_V = (MP4PARSE_IROT_D90 << 1) | MP4PARSE_IMIR_LEFT_RIGHT,
+ IROT_D90_IMIR_H = (MP4PARSE_IROT_D90 << 1) | MP4PARSE_IMIR_TOP_BOTTOM,
+ IROT_D180_IMIR_V = (MP4PARSE_IROT_D180 << 1) | MP4PARSE_IMIR_LEFT_RIGHT,
+ IROT_D180_IMIR_H = (MP4PARSE_IROT_D180 << 1) | MP4PARSE_IMIR_TOP_BOTTOM,
+ IROT_D270_IMIR_V = (MP4PARSE_IROT_D270 << 1) | MP4PARSE_IMIR_LEFT_RIGHT,
+ IROT_D270_IMIR_H = (MP4PARSE_IROT_D270 << 1) | MP4PARSE_IMIR_TOP_BOTTOM,
+ };
+
+ HeifFlippedOrientation heifO =
+ HeifFlippedOrientation((heifRot << 1) | *heifMir);
+
+ switch (heifO) {
+ case HeifFlippedOrientation::IROT_D0_IMIR_V:
+ case HeifFlippedOrientation::IROT_D180_IMIR_H:
+ // ⥜ UPWARDS HARPOON WITH BARB RIGHT FROM BAR
+ mozRot = Angle::D0;
+ break;
+ case HeifFlippedOrientation::IROT_D270_IMIR_V:
+ case HeifFlippedOrientation::IROT_D90_IMIR_H:
+ // ⥚ LEFTWARDS HARPOON WITH BARB UP FROM BAR
+ mozRot = Angle::D90;
+ break;
+ case HeifFlippedOrientation::IROT_D180_IMIR_V:
+ case HeifFlippedOrientation::IROT_D0_IMIR_H:
+ // ⥡ DOWNWARDS HARPOON WITH BARB LEFT FROM BAR
+ mozRot = Angle::D180;
+ break;
+ case HeifFlippedOrientation::IROT_D90_IMIR_V:
+ case HeifFlippedOrientation::IROT_D270_IMIR_H:
+ // ⥟ RIGHTWARDS HARPOON WITH BARB DOWN FROM BAR
+ mozRot = Angle::D270;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
+
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("GetImageOrientation: (rot%d, imir(%s)) -> (Angle%d, "
+ "Flip%d)",
+ static_cast<int>(heifRot),
+ heifMir ? (*heifMir == MP4PARSE_IMIR_LEFT_RIGHT ? "left-right"
+ : "top-bottom")
+ : "none",
+ static_cast<int>(mozRot), static_cast<int>(mozFlip)));
+ return Orientation{mozRot, mozFlip};
+}
+bool AVIFDecoderStream::ReadAt(int64_t offset, void* data, size_t size,
+ size_t* bytes_read) {
+ size = std::min(size, size_t(mBuffer->length() - offset));
+
+ if (size <= 0) {
+ return false;
+ }
+
+ memcpy(data, mBuffer->begin() + offset, size);
+ *bytes_read = size;
+ return true;
+}
+
+bool AVIFDecoderStream::Length(int64_t* size) {
+ *size =
+ static_cast<int64_t>(std::min<uint64_t>(mBuffer->length(), INT64_MAX));
+ return true;
+}
+
+const uint8_t* AVIFDecoderStream::GetContiguousAccess(int64_t aOffset,
+ size_t aSize) {
+ if (aOffset + aSize >= mBuffer->length()) {
+ return nullptr;
+ }
+
+ return mBuffer->begin() + aOffset;
+}
+
+AVIFParser::~AVIFParser() {
+ MOZ_LOG(sAVIFLog, LogLevel::Debug, ("Destroy AVIFParser=%p", this));
+}
+
+Mp4parseStatus AVIFParser::Create(const Mp4parseIo* aIo, ByteStream* aBuffer,
+ UniquePtr<AVIFParser>& aParserOut,
+ bool aAllowSequences,
+ bool aAnimateAVIFMajor) {
+ MOZ_ASSERT(aIo);
+ MOZ_ASSERT(!aParserOut);
+
+ UniquePtr<AVIFParser> p(new AVIFParser(aIo));
+ Mp4parseStatus status = p->Init(aBuffer, aAllowSequences, aAnimateAVIFMajor);
+
+ if (status == MP4PARSE_STATUS_OK) {
+ MOZ_ASSERT(p->mParser);
+ aParserOut = std::move(p);
+ }
+
+ return status;
+}
+
+nsAVIFDecoder::DecodeResult AVIFParser::GetImage(AVIFImage& aImage) {
+ MOZ_ASSERT(mParser);
+
+ // If the AVIF is animated, get next frame and yield if sequence is not done.
+ if (IsAnimated()) {
+ aImage.mColorImage = mColorSampleIter->GetNext();
+
+ if (!aImage.mColorImage) {
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::NoSamples);
+ }
+
+ aImage.mFrameNum = mFrameNum++;
+ int64_t durationMs = aImage.mColorImage->mDuration.ToMilliseconds();
+ aImage.mDuration = FrameTimeout::FromRawMilliseconds(
+ static_cast<int32_t>(std::min<int64_t>(durationMs, INT32_MAX)));
+
+ if (mAlphaSampleIter) {
+ aImage.mAlphaImage = mAlphaSampleIter->GetNext();
+ if (!aImage.mAlphaImage) {
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::NoSamples);
+ }
+ }
+
+ bool hasNext = mColorSampleIter->HasNext();
+ if (mAlphaSampleIter && (hasNext != mAlphaSampleIter->HasNext())) {
+ MOZ_LOG(
+ sAVIFLog, LogLevel::Warning,
+ ("[this=%p] The %s sequence ends before frame %d, aborting decode.",
+ this, hasNext ? "alpha" : "color", mFrameNum));
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::NoSamples);
+ }
+ if (!hasNext) {
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::Complete);
+ }
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::OutputAvailable);
+ }
+
+ if (!mInfo.has_primary_item) {
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::NoSamples);
+ }
+
+ // If the AVIF is not animated, get the pitm image and return Complete.
+ Mp4parseAvifImage image = {};
+ Mp4parseStatus status = mp4parse_avif_get_image(mParser.get(), &image);
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] mp4parse_avif_get_image -> %d; primary_item length: "
+ "%zu, alpha_item length: %zu",
+ this, status, image.primary_image.length, image.alpha_image.length));
+ if (status != MP4PARSE_STATUS_OK) {
+ return AsVariant(status);
+ }
+
+ MOZ_ASSERT(image.primary_image.data);
+ RefPtr<MediaRawData> colorImage =
+ new MediaRawData(image.primary_image.data, image.primary_image.length);
+ RefPtr<MediaRawData> alphaImage = nullptr;
+
+ if (image.alpha_image.length) {
+ alphaImage =
+ new MediaRawData(image.alpha_image.data, image.alpha_image.length);
+ }
+
+ aImage.mFrameNum = 0;
+ aImage.mDuration = FrameTimeout::Forever();
+ aImage.mColorImage = colorImage;
+ aImage.mAlphaImage = alphaImage;
+ return AsVariant(nsAVIFDecoder::NonDecoderResult::Complete);
+}
+
+AVIFParser::AVIFParser(const Mp4parseIo* aIo) : mIo(aIo) {
+ MOZ_ASSERT(mIo);
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("Create AVIFParser=%p, image.avif.compliance_strictness: %d", this,
+ StaticPrefs::image_avif_compliance_strictness()));
+}
+
+static Mp4parseStatus CreateSampleIterator(
+ Mp4parseAvifParser* aParser, ByteStream* aBuffer, uint32_t trackID,
+ UniquePtr<SampleIterator>& aIteratorOut) {
+ Mp4parseByteData data;
+ uint64_t timescale;
+ Mp4parseStatus rv =
+ mp4parse_avif_get_indice_table(aParser, trackID, &data, &timescale);
+ if (rv != MP4PARSE_STATUS_OK) {
+ return rv;
+ }
+
+ UniquePtr<IndiceWrapper> wrapper = MakeUnique<IndiceWrapper>(data);
+ RefPtr<MP4SampleIndex> index = new MP4SampleIndex(
+ *wrapper, aBuffer, trackID, false, AssertedCast<int32_t>(timescale));
+ aIteratorOut = MakeUnique<SampleIterator>(index);
+ return MP4PARSE_STATUS_OK;
+}
+
+Mp4parseStatus AVIFParser::Init(ByteStream* aBuffer, bool aAllowSequences,
+ bool aAnimateAVIFMajor) {
+#define CHECK_MP4PARSE_STATUS(v) \
+ do { \
+ if ((v) != MP4PARSE_STATUS_OK) { \
+ return v; \
+ } \
+ } while (false)
+
+ MOZ_ASSERT(!mParser);
+
+ Mp4parseAvifParser* parser = nullptr;
+ Mp4parseStatus status =
+ mp4parse_avif_new(mIo,
+ static_cast<enum Mp4parseStrictness>(
+ StaticPrefs::image_avif_compliance_strictness()),
+ &parser);
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] mp4parse_avif_new status: %d", this, status));
+ CHECK_MP4PARSE_STATUS(status);
+ MOZ_ASSERT(parser);
+ mParser.reset(parser);
+
+ status = mp4parse_avif_get_info(mParser.get(), &mInfo);
+ CHECK_MP4PARSE_STATUS(status);
+
+ bool useSequence = mInfo.has_sequence;
+ if (useSequence) {
+ if (!aAllowSequences) {
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] AVIF sequences disabled", this));
+ useSequence = false;
+ } else if (!aAnimateAVIFMajor &&
+ !!memcmp(mInfo.major_brand, "avis", sizeof(mInfo.major_brand))) {
+ useSequence = false;
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] AVIF prefers still image", this));
+ }
+ }
+
+ if (useSequence) {
+ status = CreateSampleIterator(parser, aBuffer, mInfo.color_track_id,
+ mColorSampleIter);
+ CHECK_MP4PARSE_STATUS(status);
+ MOZ_ASSERT(mColorSampleIter);
+
+ if (mInfo.alpha_track_id) {
+ status = CreateSampleIterator(parser, aBuffer, mInfo.alpha_track_id,
+ mAlphaSampleIter);
+ CHECK_MP4PARSE_STATUS(status);
+ MOZ_ASSERT(mAlphaSampleIter);
+ }
+ }
+
+ return status;
+}
+
+bool AVIFParser::IsAnimated() const { return !!mColorSampleIter; }
+
+// The gfx::YUVColorSpace value is only used in the conversion from YUV -> RGB.
+// Typically this comes directly from the CICP matrix_coefficients value, but
+// certain values require additionally considering the colour_primaries value.
+// See `gfxUtils::CicpToColorSpace` for details. We return a gfx::YUVColorSpace
+// rather than CICP::MatrixCoefficients, since that's what
+// `gfx::ConvertYCbCrATo[A]RGB` uses. `aBitstreamColorSpaceFunc` abstracts the
+// fact that different decoder libraries require different methods for
+// extracting the CICP values from the AV1 bitstream and we don't want to do
+// that work unnecessarily because in addition to wasted effort, it would make
+// the logging more confusing.
+template <typename F>
+static gfx::YUVColorSpace GetAVIFColorSpace(
+ const Mp4parseNclxColourInformation* aNclx, F&& aBitstreamColorSpaceFunc) {
+ return ToMaybe(aNclx)
+ .map([=](const auto& nclx) {
+ return gfxUtils::CicpToColorSpace(
+ static_cast<CICP::MatrixCoefficients>(nclx.matrix_coefficients),
+ static_cast<CICP::ColourPrimaries>(nclx.colour_primaries),
+ sAVIFLog);
+ })
+ .valueOrFrom(aBitstreamColorSpaceFunc)
+ .valueOr(gfx::YUVColorSpace::BT601);
+}
+
+static gfx::ColorRange GetAVIFColorRange(
+ const Mp4parseNclxColourInformation* aNclx,
+ const gfx::ColorRange av1ColorRange) {
+ return ToMaybe(aNclx)
+ .map([=](const auto& nclx) {
+ return aNclx->full_range_flag ? gfx::ColorRange::FULL
+ : gfx::ColorRange::LIMITED;
+ })
+ .valueOr(av1ColorRange);
+}
+
+void AVIFDecodedData::SetCicpValues(
+ const Mp4parseNclxColourInformation* aNclx,
+ const gfx::CICP::ColourPrimaries aAv1ColourPrimaries,
+ const gfx::CICP::TransferCharacteristics aAv1TransferCharacteristics,
+ const gfx::CICP::MatrixCoefficients aAv1MatrixCoefficients) {
+ auto cp = CICP::ColourPrimaries::CP_UNSPECIFIED;
+ auto tc = CICP::TransferCharacteristics::TC_UNSPECIFIED;
+ auto mc = CICP::MatrixCoefficients::MC_UNSPECIFIED;
+
+ if (aNclx) {
+ cp = static_cast<CICP::ColourPrimaries>(aNclx->colour_primaries);
+ tc = static_cast<CICP::TransferCharacteristics>(
+ aNclx->transfer_characteristics);
+ mc = static_cast<CICP::MatrixCoefficients>(aNclx->matrix_coefficients);
+ }
+
+ if (cp == CICP::ColourPrimaries::CP_UNSPECIFIED) {
+ if (aAv1ColourPrimaries != CICP::ColourPrimaries::CP_UNSPECIFIED) {
+ cp = aAv1ColourPrimaries;
+ MOZ_LOG(sAVIFLog, LogLevel::Info,
+ ("Unspecified colour_primaries value specified in colr box, "
+ "using AV1 sequence header (%hhu)",
+ cp));
+ } else {
+ cp = CICP::ColourPrimaries::CP_BT709;
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("Unspecified colour_primaries value specified in colr box "
+ "or AV1 sequence header, using fallback value (%hhu)",
+ cp));
+ }
+ } else if (cp != aAv1ColourPrimaries) {
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("colour_primaries mismatch: colr box = %hhu, AV1 "
+ "sequence header = %hhu, using colr box",
+ cp, aAv1ColourPrimaries));
+ }
+
+ if (tc == CICP::TransferCharacteristics::TC_UNSPECIFIED) {
+ if (aAv1TransferCharacteristics !=
+ CICP::TransferCharacteristics::TC_UNSPECIFIED) {
+ tc = aAv1TransferCharacteristics;
+ MOZ_LOG(sAVIFLog, LogLevel::Info,
+ ("Unspecified transfer_characteristics value specified in "
+ "colr box, using AV1 sequence header (%hhu)",
+ tc));
+ } else {
+ tc = CICP::TransferCharacteristics::TC_SRGB;
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("Unspecified transfer_characteristics value specified in "
+ "colr box or AV1 sequence header, using fallback value (%hhu)",
+ tc));
+ }
+ } else if (tc != aAv1TransferCharacteristics) {
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("transfer_characteristics mismatch: colr box = %hhu, "
+ "AV1 sequence header = %hhu, using colr box",
+ tc, aAv1TransferCharacteristics));
+ }
+
+ if (mc == CICP::MatrixCoefficients::MC_UNSPECIFIED) {
+ if (aAv1MatrixCoefficients != CICP::MatrixCoefficients::MC_UNSPECIFIED) {
+ mc = aAv1MatrixCoefficients;
+ MOZ_LOG(sAVIFLog, LogLevel::Info,
+ ("Unspecified matrix_coefficients value specified in "
+ "colr box, using AV1 sequence header (%hhu)",
+ mc));
+ } else {
+ mc = CICP::MatrixCoefficients::MC_BT601;
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("Unspecified matrix_coefficients value specified in "
+ "colr box or AV1 sequence header, using fallback value (%hhu)",
+ mc));
+ }
+ } else if (mc != aAv1MatrixCoefficients) {
+ MOZ_LOG(sAVIFLog, LogLevel::Warning,
+ ("matrix_coefficients mismatch: colr box = %hhu, "
+ "AV1 sequence header = %hhu, using colr box",
+ mc, aAv1TransferCharacteristics));
+ }
+
+ mColourPrimaries = cp;
+ mTransferCharacteristics = tc;
+ mMatrixCoefficients = mc;
+}
+
+class Dav1dDecoder final : AVIFDecoderInterface {
+ public:
+ ~Dav1dDecoder() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Destroy Dav1dDecoder=%p", this));
+
+ if (mColorContext) {
+ dav1d_close(&mColorContext);
+ MOZ_ASSERT(!mColorContext);
+ }
+
+ if (mAlphaContext) {
+ dav1d_close(&mAlphaContext);
+ MOZ_ASSERT(!mAlphaContext);
+ }
+ }
+
+ static DecodeResult Create(UniquePtr<AVIFDecoderInterface>& aDecoder,
+ bool aHasAlpha) {
+ UniquePtr<Dav1dDecoder> d(new Dav1dDecoder());
+ Dav1dResult r = d->Init(aHasAlpha);
+ if (r == 0) {
+ aDecoder.reset(d.release());
+ }
+ return AsVariant(r);
+ }
+
+ DecodeResult Decode(bool aShouldSendTelemetry,
+ const Mp4parseAvifInfo& aAVIFInfo,
+ const AVIFImage& aSamples) override {
+ MOZ_ASSERT(mColorContext);
+ MOZ_ASSERT(!mDecodedData);
+ MOZ_ASSERT(aSamples.mColorImage);
+
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("[this=%p] Decoding color", this));
+
+ OwnedDav1dPicture colorPic = OwnedDav1dPicture(new Dav1dPicture());
+ OwnedDav1dPicture alphaPic = nullptr;
+ Dav1dResult r = GetPicture(*mColorContext, *aSamples.mColorImage,
+ colorPic.get(), aShouldSendTelemetry);
+ if (r != 0) {
+ return AsVariant(r);
+ }
+
+ if (aSamples.mAlphaImage) {
+ MOZ_ASSERT(mAlphaContext);
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("[this=%p] Decoding alpha", this));
+
+ alphaPic = OwnedDav1dPicture(new Dav1dPicture());
+ r = GetPicture(*mAlphaContext, *aSamples.mAlphaImage, alphaPic.get(),
+ aShouldSendTelemetry);
+ if (r != 0) {
+ return AsVariant(r);
+ }
+
+ // Per § 4 of the AVIF spec
+ // https://aomediacodec.github.io/av1-avif/#auxiliary-images: An AV1
+ // Alpha Image Item […] shall be encoded with the same bit depth as the
+ // associated master AV1 Image Item
+ if (colorPic->p.bpc != alphaPic->p.bpc) {
+ return AsVariant(NonDecoderResult::AlphaYColorDepthMismatch);
+ }
+
+ if (colorPic->stride[0] != alphaPic->stride[0]) {
+ return AsVariant(NonDecoderResult::AlphaYSizeMismatch);
+ }
+ }
+
+ MOZ_ASSERT_IF(!alphaPic, !aAVIFInfo.premultiplied_alpha);
+ mDecodedData = Dav1dPictureToDecodedData(
+ aAVIFInfo.nclx_colour_information, std::move(colorPic),
+ std::move(alphaPic), aAVIFInfo.premultiplied_alpha);
+
+ return AsVariant(r);
+ }
+
+ private:
+ explicit Dav1dDecoder() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Create Dav1dDecoder=%p", this));
+ }
+
+ Dav1dResult Init(bool aHasAlpha) {
+ MOZ_ASSERT(!mColorContext);
+ MOZ_ASSERT(!mAlphaContext);
+
+ Dav1dSettings settings;
+ dav1d_default_settings(&settings);
+ settings.all_layers = 0;
+ settings.max_frame_delay = 1;
+ // TODO: tune settings a la DAV1DDecoder for AV1 (Bug 1681816)
+
+ Dav1dResult r = dav1d_open(&mColorContext, &settings);
+ if (r != 0) {
+ return r;
+ }
+ MOZ_ASSERT(mColorContext);
+
+ if (aHasAlpha) {
+ r = dav1d_open(&mAlphaContext, &settings);
+ if (r != 0) {
+ return r;
+ }
+ MOZ_ASSERT(mAlphaContext);
+ }
+
+ return 0;
+ }
+
+ static Dav1dResult GetPicture(Dav1dContext& aContext,
+ const MediaRawData& aBytes,
+ Dav1dPicture* aPicture,
+ bool aShouldSendTelemetry) {
+ MOZ_ASSERT(aPicture);
+
+ Dav1dData dav1dData;
+ Dav1dResult r = dav1d_data_wrap(&dav1dData, aBytes.Data(), aBytes.Size(),
+ Dav1dFreeCallback_s, nullptr);
+
+ MOZ_LOG(
+ sAVIFLog, r == 0 ? LogLevel::Verbose : LogLevel::Error,
+ ("dav1d_data_wrap(%p, %zu) -> %d", dav1dData.data, dav1dData.sz, r));
+
+ if (r != 0) {
+ return r;
+ }
+
+ r = dav1d_send_data(&aContext, &dav1dData);
+
+ MOZ_LOG(sAVIFLog, r == 0 ? LogLevel::Debug : LogLevel::Error,
+ ("dav1d_send_data -> %d", r));
+
+ if (r != 0) {
+ return r;
+ }
+
+ r = dav1d_get_picture(&aContext, aPicture);
+
+ MOZ_LOG(sAVIFLog, r == 0 ? LogLevel::Debug : LogLevel::Error,
+ ("dav1d_get_picture -> %d", r));
+
+ // We already have the AVIF_DECODE_RESULT histogram to record all the
+ // successful calls, so only bother recording what type of errors we see
+ // via events. Unlike AOM, dav1d returns an int, not an enum, so this is
+ // the easiest way to see if we're getting unexpected behavior to
+ // investigate.
+ if (aShouldSendTelemetry && r != 0) {
+ // Uncomment once bug 1691156 is fixed
+ // mozilla::Telemetry::SetEventRecordingEnabled("avif"_ns, true);
+
+ mozilla::Telemetry::RecordEvent(
+ mozilla::Telemetry::EventID::Avif_Dav1dGetPicture_ReturnValue,
+ Some(nsPrintfCString("%d", r)), Nothing());
+ }
+
+ return r;
+ }
+
+ // A dummy callback for dav1d_data_wrap
+ static void Dav1dFreeCallback_s(const uint8_t* aBuf, void* aCookie) {
+ // The buf is managed by the mParser inside Dav1dDecoder itself. Do
+ // nothing here.
+ }
+
+ static UniquePtr<AVIFDecodedData> Dav1dPictureToDecodedData(
+ const Mp4parseNclxColourInformation* aNclx, OwnedDav1dPicture aPicture,
+ OwnedDav1dPicture aAlphaPlane, bool aPremultipliedAlpha);
+
+ Dav1dContext* mColorContext = nullptr;
+ Dav1dContext* mAlphaContext = nullptr;
+};
+
+OwnedAOMImage::OwnedAOMImage() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Create OwnedAOMImage=%p", this));
+}
+
+OwnedAOMImage::~OwnedAOMImage() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Destroy OwnedAOMImage=%p", this));
+}
+
+bool OwnedAOMImage::CloneFrom(aom_image_t* aImage, bool aIsAlpha) {
+ MOZ_ASSERT(aImage);
+ MOZ_ASSERT(!mImage);
+ MOZ_ASSERT(!mBuffer);
+
+ uint8_t* srcY = aImage->planes[AOM_PLANE_Y];
+ int yStride = aImage->stride[AOM_PLANE_Y];
+ int yHeight = aom_img_plane_height(aImage, AOM_PLANE_Y);
+ size_t yBufSize = yStride * yHeight;
+
+ // If aImage is alpha plane. The data is located in Y channel.
+ if (aIsAlpha) {
+ mBuffer = MakeUniqueFallible<uint8_t[]>(yBufSize);
+ if (!mBuffer) {
+ return false;
+ }
+ uint8_t* destY = mBuffer.get();
+ memcpy(destY, srcY, yBufSize);
+ mImage.emplace(*aImage);
+ mImage->planes[AOM_PLANE_Y] = destY;
+
+ return true;
+ }
+
+ uint8_t* srcCb = aImage->planes[AOM_PLANE_U];
+ int cbStride = aImage->stride[AOM_PLANE_U];
+ int cbHeight = aom_img_plane_height(aImage, AOM_PLANE_U);
+ size_t cbBufSize = cbStride * cbHeight;
+
+ uint8_t* srcCr = aImage->planes[AOM_PLANE_V];
+ int crStride = aImage->stride[AOM_PLANE_V];
+ int crHeight = aom_img_plane_height(aImage, AOM_PLANE_V);
+ size_t crBufSize = crStride * crHeight;
+
+ mBuffer = MakeUniqueFallible<uint8_t[]>(yBufSize + cbBufSize + crBufSize);
+ if (!mBuffer) {
+ return false;
+ }
+
+ uint8_t* destY = mBuffer.get();
+ uint8_t* destCb = destY + yBufSize;
+ uint8_t* destCr = destCb + cbBufSize;
+
+ memcpy(destY, srcY, yBufSize);
+ memcpy(destCb, srcCb, cbBufSize);
+ memcpy(destCr, srcCr, crBufSize);
+
+ mImage.emplace(*aImage);
+ mImage->planes[AOM_PLANE_Y] = destY;
+ mImage->planes[AOM_PLANE_U] = destCb;
+ mImage->planes[AOM_PLANE_V] = destCr;
+
+ return true;
+}
+
+/* static */
+OwnedAOMImage* OwnedAOMImage::CopyFrom(aom_image_t* aImage, bool aIsAlpha) {
+ MOZ_ASSERT(aImage);
+ UniquePtr<OwnedAOMImage> img(new OwnedAOMImage());
+ if (!img->CloneFrom(aImage, aIsAlpha)) {
+ return nullptr;
+ }
+ return img.release();
+}
+
+class AOMDecoder final : AVIFDecoderInterface {
+ public:
+ ~AOMDecoder() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Destroy AOMDecoder=%p", this));
+
+ if (mColorContext.isSome()) {
+ aom_codec_err_t r = aom_codec_destroy(mColorContext.ptr());
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] aom_codec_destroy -> %d", this, r));
+ }
+
+ if (mAlphaContext.isSome()) {
+ aom_codec_err_t r = aom_codec_destroy(mAlphaContext.ptr());
+ MOZ_LOG(sAVIFLog, LogLevel::Debug,
+ ("[this=%p] aom_codec_destroy -> %d", this, r));
+ }
+ }
+
+ static DecodeResult Create(UniquePtr<AVIFDecoderInterface>& aDecoder,
+ bool aHasAlpha) {
+ UniquePtr<AOMDecoder> d(new AOMDecoder());
+ aom_codec_err_t e = d->Init(aHasAlpha);
+ if (e == AOM_CODEC_OK) {
+ aDecoder.reset(d.release());
+ }
+ return AsVariant(AOMResult(e));
+ }
+
+ DecodeResult Decode(bool aShouldSendTelemetry,
+ const Mp4parseAvifInfo& aAVIFInfo,
+ const AVIFImage& aSamples) override {
+ MOZ_ASSERT(mColorContext.isSome());
+ MOZ_ASSERT(!mDecodedData);
+ MOZ_ASSERT(aSamples.mColorImage);
+
+ aom_image_t* aomImg = nullptr;
+ DecodeResult r = GetImage(*mColorContext, *aSamples.mColorImage, &aomImg,
+ aShouldSendTelemetry);
+ if (!IsDecodeSuccess(r)) {
+ return r;
+ }
+ MOZ_ASSERT(aomImg);
+
+ // The aomImg will be released in next GetImage call (aom_codec_decode
+ // actually). The GetImage could be called again immediately if parsedImg
+ // contains alpha data. Therefore, we need to copy the image and manage it
+ // by AOMDecoder itself.
+ OwnedAOMImage* clonedImg = OwnedAOMImage::CopyFrom(aomImg, false);
+ if (!clonedImg) {
+ return AsVariant(NonDecoderResult::OutOfMemory);
+ }
+ mOwnedImage.reset(clonedImg);
+
+ if (aSamples.mAlphaImage) {
+ MOZ_ASSERT(mAlphaContext.isSome());
+
+ aom_image_t* alphaImg = nullptr;
+ r = GetImage(*mAlphaContext, *aSamples.mAlphaImage, &alphaImg,
+ aShouldSendTelemetry);
+ if (!IsDecodeSuccess(r)) {
+ return r;
+ }
+ MOZ_ASSERT(alphaImg);
+
+ OwnedAOMImage* clonedAlphaImg = OwnedAOMImage::CopyFrom(alphaImg, true);
+ if (!clonedAlphaImg) {
+ return AsVariant(NonDecoderResult::OutOfMemory);
+ }
+ mOwnedAlphaPlane.reset(clonedAlphaImg);
+
+ // Per § 4 of the AVIF spec
+ // https://aomediacodec.github.io/av1-avif/#auxiliary-images: An AV1
+ // Alpha Image Item […] shall be encoded with the same bit depth as the
+ // associated master AV1 Image Item
+ MOZ_ASSERT(mOwnedImage->GetImage() && mOwnedAlphaPlane->GetImage());
+ if (mOwnedImage->GetImage()->bit_depth !=
+ mOwnedAlphaPlane->GetImage()->bit_depth) {
+ return AsVariant(NonDecoderResult::AlphaYColorDepthMismatch);
+ }
+
+ if (mOwnedImage->GetImage()->stride[AOM_PLANE_Y] !=
+ mOwnedAlphaPlane->GetImage()->stride[AOM_PLANE_Y]) {
+ return AsVariant(NonDecoderResult::AlphaYSizeMismatch);
+ }
+ }
+
+ MOZ_ASSERT_IF(!mOwnedAlphaPlane, !aAVIFInfo.premultiplied_alpha);
+ mDecodedData = AOMImageToToDecodedData(
+ aAVIFInfo.nclx_colour_information, std::move(mOwnedImage),
+ std::move(mOwnedAlphaPlane), aAVIFInfo.premultiplied_alpha);
+
+ return r;
+ }
+
+ private:
+ explicit AOMDecoder() {
+ MOZ_LOG(sAVIFLog, LogLevel::Verbose, ("Create AOMDecoder=%p", this));
+ }
+
+ aom_codec_err_t Init(bool aHasAlpha) {
+ MOZ_ASSERT(mColorContext.isNothing());
+ MOZ_ASSERT(mAlphaContext.isNothing());
+
+ aom_codec_iface_t* iface = aom_codec_av1_dx();
+
+ // Init color decoder context
+ mColorContext.emplace();
+ aom_codec_err_t r = aom_codec_dec_init(
+ mColorContext.ptr(), iface, /* cfg = */ nullptr, /* flags = */ 0);
+
+ MOZ_LOG(sAVIFLog, r == AOM_CODEC_OK ? LogLevel::Verbose : LogLevel::Error,
+ ("[this=%p] color decoder: aom_codec_dec_init -> %d, name = %s",
+ this, r, mColorContext->name));
+
+ if (r != AOM_CODEC_OK) {
+ mColorContext.reset();
+ return r;
+ }
+
+ if (aHasAlpha) {
+ // Init alpha decoder context
+ mAlphaContext.emplace();
+ r = aom_codec_dec_init(mAlphaContext.ptr(), iface, /* cfg = */ nullptr,
+ /* flags = */ 0);
+
+ MOZ_LOG(sAVIFLog, r == AOM_CODEC_OK ? LogLevel::Verbose : LogLevel::Error,
+ ("[this=%p] color decoder: aom_codec_dec_init -> %d, name = %s",
+ this, r, mAlphaContext->name));
+
+ if (r != AOM_CODEC_OK) {
+ mAlphaContext.reset();
+ return r;
+ }
+ }
+
+ return r;
+ }
+
+  // Feed one sample's payload to |aContext| via aom_codec_decode and fetch
+  // the resulting frame with aom_codec_get_frame. On success, stores the
+  // frame pointer in |*aImage|; the frame memory is owned by the codec
+  // context. Error-code telemetry is recorded only when
+  // |aShouldSendTelemetry| is true, so a metadata pass and a full decode
+  // pass don't double-count.
+  static DecodeResult GetImage(aom_codec_ctx_t& aContext,
+                               const MediaRawData& aData, aom_image_t** aImage,
+                               bool aShouldSendTelemetry) {
+    aom_codec_err_t r =
+        aom_codec_decode(&aContext, aData.Data(), aData.Size(), nullptr);
+
+    MOZ_LOG(sAVIFLog, r == AOM_CODEC_OK ? LogLevel::Verbose : LogLevel::Error,
+            ("aom_codec_decode -> %d", r));
+
+    if (aShouldSendTelemetry) {
+      // Map each libaom error code onto its telemetry label.
+      switch (r) {
+        case AOM_CODEC_OK:
+          // No need to record any telemetry for the common case
+          break;
+        case AOM_CODEC_ERROR:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::error);
+          break;
+        case AOM_CODEC_MEM_ERROR:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::mem_error);
+          break;
+        case AOM_CODEC_ABI_MISMATCH:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::abi_mismatch);
+          break;
+        case AOM_CODEC_INCAPABLE:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::incapable);
+          break;
+        case AOM_CODEC_UNSUP_BITSTREAM:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::unsup_bitstream);
+          break;
+        case AOM_CODEC_UNSUP_FEATURE:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::unsup_feature);
+          break;
+        case AOM_CODEC_CORRUPT_FRAME:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::corrupt_frame);
+          break;
+        case AOM_CODEC_INVALID_PARAM:
+          AccumulateCategorical(LABELS_AVIF_AOM_DECODE_ERROR::invalid_param);
+          break;
+        default:
+          MOZ_ASSERT_UNREACHABLE(
+              "Unknown aom_codec_err_t value from aom_codec_decode");
+      }
+    }
+
+    if (r != AOM_CODEC_OK) {
+      return AsVariant(AOMResult(r));
+    }
+
+    // Exactly one frame is expected per decode call for still AVIF data;
+    // a null frame here is treated as an error.
+    aom_codec_iter_t iter = nullptr;
+    aom_image_t* img = aom_codec_get_frame(&aContext, &iter);
+
+    MOZ_LOG(sAVIFLog, img == nullptr ? LogLevel::Error : LogLevel::Verbose,
+            ("aom_codec_get_frame -> %p", img));
+
+    if (img == nullptr) {
+      return AsVariant(AOMResult(NonAOMCodecError::NoFrame));
+    }
+
+    // d_w/d_h are unsigned; reject dimensions that can't be represented as
+    // int, which downstream IntSize/IntRect code requires.
+    const CheckedInt<int> decoded_width = img->d_w;
+    const CheckedInt<int> decoded_height = img->d_h;
+
+    if (!decoded_height.isValid() || !decoded_width.isValid()) {
+      MOZ_LOG(sAVIFLog, LogLevel::Debug,
+              ("image dimensions can't be stored in int: d_w: %u, "
+               "d_h: %u",
+               img->d_w, img->d_h));
+      return AsVariant(AOMResult(NonAOMCodecError::SizeOverflow));
+    }
+
+    *aImage = img;
+    return AsVariant(AOMResult(r));
+  }
+
+  // Convert decoded AOM image planes (color + optional alpha) into the
+  // shared AVIFDecodedData representation; defined out-of-line below.
+  // NOTE(review): the doubled "ToTo" in the name looks like a typo, but it
+  // is the established identifier — renaming would break callers.
+  static UniquePtr<AVIFDecodedData> AOMImageToToDecodedData(
+      const Mp4parseNclxColourInformation* aNclx,
+      UniquePtr<OwnedAOMImage> aImage, UniquePtr<OwnedAOMImage> aAlphaPlane,
+      bool aPremultipliedAlpha);
+
+  // Decoder contexts; Nothing() until Init() succeeds for each.
+  Maybe<aom_codec_ctx_t> mColorContext;
+  Maybe<aom_codec_ctx_t> mAlphaContext;
+  // Keep decoded frames alive while plane pointers handed out via
+  // AVIFDecodedData are still in use.
+  UniquePtr<OwnedAOMImage> mOwnedImage;
+  UniquePtr<OwnedAOMImage> mOwnedAlphaPlane;
+};
+
+/* static */
+// Wrap a decoded dav1d picture (plus optional alpha picture) in the common
+// AVIFDecodedData structure: plane pointers and strides, chroma subsampling,
+// CICP colorimetry resolved against the colr box (aNclx), and color range.
+// Ownership of the pictures moves into the returned data so the plane
+// pointers remain valid for its lifetime.
+UniquePtr<AVIFDecodedData> Dav1dDecoder::Dav1dPictureToDecodedData(
+    const Mp4parseNclxColourInformation* aNclx, OwnedDav1dPicture aPicture,
+    OwnedDav1dPicture aAlphaPlane, bool aPremultipliedAlpha) {
+  MOZ_ASSERT(aPicture);
+
+  // IntRect math below assumes dav1d reports dimensions as int.
+  static_assert(std::is_same<int, decltype(aPicture->p.w)>::value);
+  static_assert(std::is_same<int, decltype(aPicture->p.h)>::value);
+
+  UniquePtr<AVIFDecodedData> data = MakeUnique<AVIFDecodedData>();
+
+  data->mRenderSize.emplace(aPicture->frame_hdr->render_width,
+                            aPicture->frame_hdr->render_height);
+
+  data->mYChannel = static_cast<uint8_t*>(aPicture->data[0]);
+  data->mYStride = aPicture->stride[0];
+  data->mYSkip = aPicture->stride[0] - aPicture->p.w;
+  data->mCbChannel = static_cast<uint8_t*>(aPicture->data[1]);
+  data->mCrChannel = static_cast<uint8_t*>(aPicture->data[2]);
+  data->mCbCrStride = aPicture->stride[1];
+
+  // Map dav1d's pixel layout onto our subsampling enum; I400 and I444 keep
+  // the default (no subsampling).
+  switch (aPicture->p.layout) {
+    case DAV1D_PIXEL_LAYOUT_I400:  // Monochrome, so no Cb or Cr channels
+      break;
+    case DAV1D_PIXEL_LAYOUT_I420:
+      data->mChromaSubsampling = ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+      break;
+    case DAV1D_PIXEL_LAYOUT_I422:
+      data->mChromaSubsampling = ChromaSubsampling::HALF_WIDTH;
+      break;
+    case DAV1D_PIXEL_LAYOUT_I444:
+      break;
+    default:
+      MOZ_ASSERT_UNREACHABLE("Unknown pixel layout");
+  }
+
+  // NOTE(review): Cb/Cr skip is derived from the luma width (p.w); for
+  // subsampled layouts chroma rows are narrower — confirm consumers derive
+  // the effective chroma width from mChromaSubsampling.
+  data->mCbSkip = aPicture->stride[1] - aPicture->p.w;
+  data->mCrSkip = aPicture->stride[1] - aPicture->p.w;
+  data->mPictureRect = IntRect(0, 0, aPicture->p.w, aPicture->p.h);
+  data->mStereoMode = StereoMode::MONO;
+  data->mColorDepth = ColorDepthForBitDepth(aPicture->p.bpc);
+
+  MOZ_ASSERT(aPicture->p.bpc == BitDepthForColorDepth(data->mColorDepth));
+
+  // Prefer the colr box; fall back to the AV1 sequence header only when the
+  // box doesn't determine a color space.
+  data->mYUVColorSpace = GetAVIFColorSpace(aNclx, [&]() {
+    MOZ_LOG(sAVIFLog, LogLevel::Info,
+            ("YUVColorSpace cannot be determined from colr box, using AV1 "
+             "sequence header"));
+    return DAV1DDecoder::GetColorSpace(*aPicture, sAVIFLog);
+  });
+
+  auto av1ColourPrimaries = CICP::ColourPrimaries::CP_UNSPECIFIED;
+  auto av1TransferCharacteristics =
+      CICP::TransferCharacteristics::TC_UNSPECIFIED;
+  auto av1MatrixCoefficients = CICP::MatrixCoefficients::MC_UNSPECIFIED;
+
+  MOZ_ASSERT(aPicture->seq_hdr);
+  auto& seq_hdr = *aPicture->seq_hdr;
+
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("seq_hdr.color_description_present: %d",
+           seq_hdr.color_description_present));
+  if (seq_hdr.color_description_present) {
+    av1ColourPrimaries = static_cast<CICP::ColourPrimaries>(seq_hdr.pri);
+    av1TransferCharacteristics =
+        static_cast<CICP::TransferCharacteristics>(seq_hdr.trc);
+    av1MatrixCoefficients = static_cast<CICP::MatrixCoefficients>(seq_hdr.mtrx);
+  }
+
+  data->SetCicpValues(aNclx, av1ColourPrimaries, av1TransferCharacteristics,
+                      av1MatrixCoefficients);
+
+  gfx::ColorRange av1ColorRange =
+      seq_hdr.color_range ? gfx::ColorRange::FULL : gfx::ColorRange::LIMITED;
+  data->mColorRange = GetAVIFColorRange(aNclx, av1ColorRange);
+
+  auto colorPrimaries =
+      gfxUtils::CicpToColorPrimaries(data->mColourPrimaries, sAVIFLog);
+  if (colorPrimaries.isSome()) {
+    data->mColorPrimaries = *colorPrimaries;
+  }
+
+  if (aAlphaPlane) {
+    // Alpha is carried as the luma plane of a separate monochrome picture;
+    // its stride must match the color picture's luma stride.
+    MOZ_ASSERT(aAlphaPlane->stride[0] == data->mYStride);
+    data->mAlpha.emplace();
+    data->mAlpha->mChannel = static_cast<uint8_t*>(aAlphaPlane->data[0]);
+    data->mAlpha->mSize = gfx::IntSize(aAlphaPlane->p.w, aAlphaPlane->p.h);
+    data->mAlpha->mPremultiplied = aPremultipliedAlpha;
+  }
+
+  // Transfer picture ownership so plane pointers above stay valid.
+  data->mColorDav1d = std::move(aPicture);
+  data->mAlphaDav1d = std::move(aAlphaPlane);
+
+  return data;
+}
+
+/* static */
+// Wrap decoded AOM images (color + optional alpha) in the common
+// AVIFDecodedData structure: plane pointers and strides, chroma subsampling
+// derived from the chroma shifts, CICP colorimetry resolved against the colr
+// box (aNclx), and color range. Ownership of the images moves into the
+// returned data so the plane pointers stay valid.
+UniquePtr<AVIFDecodedData> AOMDecoder::AOMImageToToDecodedData(
+    const Mp4parseNclxColourInformation* aNclx, UniquePtr<OwnedAOMImage> aImage,
+    UniquePtr<OwnedAOMImage> aAlphaPlane, bool aPremultipliedAlpha) {
+  aom_image_t* colorImage = aImage->GetImage();
+  aom_image_t* alphaImage = aAlphaPlane ? aAlphaPlane->GetImage() : nullptr;
+
+  // Sanity-check plane geometry: strides must cover each plane's width and
+  // the two chroma planes must agree in both stride and size.
+  MOZ_ASSERT(colorImage);
+  MOZ_ASSERT(colorImage->stride[AOM_PLANE_Y] ==
+             colorImage->stride[AOM_PLANE_ALPHA]);
+  MOZ_ASSERT(colorImage->stride[AOM_PLANE_Y] >=
+             aom_img_plane_width(colorImage, AOM_PLANE_Y));
+  MOZ_ASSERT(colorImage->stride[AOM_PLANE_U] ==
+             colorImage->stride[AOM_PLANE_V]);
+  MOZ_ASSERT(colorImage->stride[AOM_PLANE_U] >=
+             aom_img_plane_width(colorImage, AOM_PLANE_U));
+  MOZ_ASSERT(colorImage->stride[AOM_PLANE_V] >=
+             aom_img_plane_width(colorImage, AOM_PLANE_V));
+  MOZ_ASSERT(aom_img_plane_width(colorImage, AOM_PLANE_U) ==
+             aom_img_plane_width(colorImage, AOM_PLANE_V));
+  MOZ_ASSERT(aom_img_plane_height(colorImage, AOM_PLANE_U) ==
+             aom_img_plane_height(colorImage, AOM_PLANE_V));
+
+  UniquePtr<AVIFDecodedData> data = MakeUnique<AVIFDecodedData>();
+
+  data->mRenderSize.emplace(colorImage->r_w, colorImage->r_h);
+
+  // Per-plane skips are stride minus the actual plane width (unlike the
+  // dav1d path, chroma widths are queried per plane here).
+  data->mYChannel = colorImage->planes[AOM_PLANE_Y];
+  data->mYStride = colorImage->stride[AOM_PLANE_Y];
+  data->mYSkip = colorImage->stride[AOM_PLANE_Y] -
+                 aom_img_plane_width(colorImage, AOM_PLANE_Y);
+  data->mCbChannel = colorImage->planes[AOM_PLANE_U];
+  data->mCrChannel = colorImage->planes[AOM_PLANE_V];
+  data->mCbCrStride = colorImage->stride[AOM_PLANE_U];
+  data->mCbSkip = colorImage->stride[AOM_PLANE_U] -
+                  aom_img_plane_width(colorImage, AOM_PLANE_U);
+  data->mCrSkip = colorImage->stride[AOM_PLANE_V] -
+                  aom_img_plane_width(colorImage, AOM_PLANE_V);
+  data->mPictureRect = gfx::IntRect(0, 0, colorImage->d_w, colorImage->d_h);
+  data->mStereoMode = StereoMode::MONO;
+  data->mColorDepth = ColorDepthForBitDepth(colorImage->bit_depth);
+
+  // Derive subsampling from the chroma shifts; (0,0) means 4:4:4 or mono,
+  // anything else is unexpected.
+  if (colorImage->x_chroma_shift == 1 && colorImage->y_chroma_shift == 1) {
+    data->mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+  } else if (colorImage->x_chroma_shift == 1 &&
+             colorImage->y_chroma_shift == 0) {
+    data->mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH;
+  } else if (colorImage->x_chroma_shift != 0 ||
+             colorImage->y_chroma_shift != 0) {
+    MOZ_ASSERT_UNREACHABLE("unexpected chroma shifts");
+  }
+
+  MOZ_ASSERT(colorImage->bit_depth == BitDepthForColorDepth(data->mColorDepth));
+
+  auto av1ColourPrimaries = static_cast<CICP::ColourPrimaries>(colorImage->cp);
+  auto av1TransferCharacteristics =
+      static_cast<CICP::TransferCharacteristics>(colorImage->tc);
+  auto av1MatrixCoefficients =
+      static_cast<CICP::MatrixCoefficients>(colorImage->mc);
+
+  // Prefer the colr box; fall back to the bitstream's CICP values only when
+  // the box doesn't determine a color space.
+  data->mYUVColorSpace = GetAVIFColorSpace(aNclx, [=]() {
+    MOZ_LOG(sAVIFLog, LogLevel::Info,
+            ("YUVColorSpace cannot be determined from colr box, using AV1 "
+             "sequence header"));
+    return gfxUtils::CicpToColorSpace(av1MatrixCoefficients, av1ColourPrimaries,
+                                      sAVIFLog);
+  });
+
+  gfx::ColorRange av1ColorRange;
+  if (colorImage->range == AOM_CR_STUDIO_RANGE) {
+    av1ColorRange = gfx::ColorRange::LIMITED;
+  } else {
+    MOZ_ASSERT(colorImage->range == AOM_CR_FULL_RANGE);
+    av1ColorRange = gfx::ColorRange::FULL;
+  }
+  data->mColorRange = GetAVIFColorRange(aNclx, av1ColorRange);
+
+  data->SetCicpValues(aNclx, av1ColourPrimaries, av1TransferCharacteristics,
+                      av1MatrixCoefficients);
+
+  auto colorPrimaries =
+      gfxUtils::CicpToColorPrimaries(data->mColourPrimaries, sAVIFLog);
+  if (colorPrimaries.isSome()) {
+    data->mColorPrimaries = *colorPrimaries;
+  }
+
+  if (alphaImage) {
+    // Alpha rides in the luma plane of a separate monochrome image; its
+    // stride must match the color image's luma stride.
+    MOZ_ASSERT(alphaImage->stride[AOM_PLANE_Y] == data->mYStride);
+    data->mAlpha.emplace();
+    data->mAlpha->mChannel = alphaImage->planes[AOM_PLANE_Y];
+    data->mAlpha->mSize = gfx::IntSize(alphaImage->d_w, alphaImage->d_h);
+    data->mAlpha->mPremultiplied = aPremultipliedAlpha;
+  }
+
+  // Transfer image ownership so the plane pointers above stay valid.
+  data->mColorAOM = std::move(aImage);
+  data->mAlphaAOM = std::move(aAlphaPlane);
+
+  return data;
+}
+
+// Wrapper to allow rust to call our read adaptor.
+// C-style callback handed to mp4parse (via Mp4parseIo): copies up to
+// |aDestBufSize| bytes from the decoder's locally-buffered source data into
+// |aDestBuf|, advancing mReadCursor. Returns the number of bytes copied
+// (0 at end of buffer). |aUserData| is the nsAVIFDecoder that registered
+// the callback.
+intptr_t nsAVIFDecoder::ReadSource(uint8_t* aDestBuf, uintptr_t aDestBufSize,
+                                   void* aUserData) {
+  MOZ_ASSERT(aDestBuf);
+  MOZ_ASSERT(aUserData);
+
+  MOZ_LOG(sAVIFLog, LogLevel::Verbose,
+          ("AVIF ReadSource, aDestBufSize: %zu", aDestBufSize));
+
+  auto* decoder = reinterpret_cast<nsAVIFDecoder*>(aUserData);
+
+  // mReadCursor is only set once the whole source has been buffered
+  // (see DoDecodeInternal), so reads never race with appends.
+  MOZ_ASSERT(decoder->mReadCursor);
+
+  // Clamp the copy to the bytes remaining in the buffer.
+  size_t bufferLength = decoder->mBufferedData.end() - decoder->mReadCursor;
+  size_t n_bytes = std::min(aDestBufSize, bufferLength);
+
+  MOZ_LOG(
+      sAVIFLog, LogLevel::Verbose,
+      ("AVIF ReadSource, %zu bytes ready, copying %zu", bufferLength, n_bytes));
+
+  memcpy(aDestBuf, decoder->mReadCursor, n_bytes);
+  decoder->mReadCursor += n_bytes;
+
+  return n_bytes;
+}
+
+// Constructor: all members use in-class defaults; only logs creation.
+nsAVIFDecoder::nsAVIFDecoder(RasterImage* aImage) : Decoder(aImage) {
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] nsAVIFDecoder::nsAVIFDecoder", this));
+}
+
+// Destructor: logs destruction; member smart pointers clean up themselves.
+nsAVIFDecoder::~nsAVIFDecoder() {
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] nsAVIFDecoder::~nsAVIFDecoder", this));
+}
+
+// Decoder entry point: runs DoDecodeInternal, records result telemetry,
+// and maps the internal DecodeResult variant onto the LexerResult protocol
+// (NEED_MORE_DATA / OUTPUT_AVAILABLE / SUCCESS / FAILURE).
+LexerResult nsAVIFDecoder::DoDecode(SourceBufferIterator& aIterator,
+                                    IResumable* aOnResume) {
+  MOZ_LOG(sAVIFLog, LogLevel::Info,
+          ("[this=%p] nsAVIFDecoder::DoDecode start", this));
+
+  DecodeResult result = DoDecodeInternal(aIterator, aOnResume);
+
+  RecordDecodeResultTelemetry(result);
+
+  if (result.is<NonDecoderResult>()) {
+    NonDecoderResult r = result.as<NonDecoderResult>();
+    if (r == NonDecoderResult::NeedMoreData) {
+      return LexerResult(Yield::NEED_MORE_DATA);
+    }
+    // OutputAvailable/Complete imply a size was posted; everything else in
+    // NonDecoderResult is a failure.
+    if (r == NonDecoderResult::OutputAvailable) {
+      MOZ_ASSERT(HasSize());
+      return LexerResult(Yield::OUTPUT_AVAILABLE);
+    }
+    if (r == NonDecoderResult::Complete) {
+      MOZ_ASSERT(HasSize());
+      return LexerResult(TerminalState::SUCCESS);
+    }
+    return LexerResult(TerminalState::FAILURE);
+  }
+
+  MOZ_ASSERT(result.is<Dav1dResult>() || result.is<AOMResult>() ||
+             result.is<Mp4parseStatus>());
+  // If IsMetadataDecode(), a successful parse should return
+  // NonDecoderResult::MetadataOk or else continue to the decode stage
+  MOZ_ASSERT_IF(result.is<Mp4parseStatus>(),
+                result.as<Mp4parseStatus>() != MP4PARSE_STATUS_OK);
+  auto rv = LexerResult(IsDecodeSuccess(result) ? TerminalState::SUCCESS
+                                                : TerminalState::FAILURE);
+  MOZ_LOG(sAVIFLog, LogLevel::Info,
+          ("[this=%p] nsAVIFDecoder::DoDecode end", this));
+  return rv;
+}
+
+// Lazily create the AVIF container parser over the locally buffered data.
+// On first success caches animation/alpha facts from the parsed info;
+// subsequent calls are no-ops returning MP4PARSE_STATUS_OK.
+Mp4parseStatus nsAVIFDecoder::CreateParser() {
+  if (!mParser) {
+    // ReadSource pulls from mBufferedData via mReadCursor.
+    Mp4parseIo io = {nsAVIFDecoder::ReadSource, this};
+    mBufferStream = new AVIFDecoderStream(&mBufferedData);
+
+    Mp4parseStatus status = AVIFParser::Create(
+        &io, mBufferStream.get(), mParser,
+        bool(GetDecoderFlags() & DecoderFlags::AVIF_SEQUENCES_ENABLED),
+        bool(GetDecoderFlags() & DecoderFlags::AVIF_ANIMATE_AVIF_MAJOR));
+
+    if (status != MP4PARSE_STATUS_OK) {
+      return status;
+    }
+
+    // Animated files carry alpha as a track; still images as an item.
+    const Mp4parseAvifInfo& info = mParser->GetInfo();
+    mIsAnimated = mParser->IsAnimated();
+    mHasAlpha = mIsAnimated ? !!info.alpha_track_id : info.has_alpha_item;
+  }
+
+  return MP4PARSE_STATUS_OK;
+}
+
+// Lazily create the AV1 decoder backend, choosing dav1d or libaom based on
+// the image.avif.use-dav1d pref. If a decoder already exists, returns the
+// matching backend's success sentinel without re-creating it.
+nsAVIFDecoder::DecodeResult nsAVIFDecoder::CreateDecoder() {
+  if (!mDecoder) {
+    DecodeResult r = StaticPrefs::image_avif_use_dav1d()
+                         ? Dav1dDecoder::Create(mDecoder, mHasAlpha)
+                         : AOMDecoder::Create(mDecoder, mHasAlpha);
+
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] Create %sDecoder %ssuccessfully", this,
+             StaticPrefs::image_avif_use_dav1d() ? "Dav1d" : "AOM",
+             IsDecodeSuccess(r) ? "" : "un"));
+
+    return r;
+  }
+
+  return StaticPrefs::image_avif_use_dav1d()
+             ? DecodeResult(Dav1dResult(0))
+             : DecodeResult(AOMResult(AOM_CODEC_OK));
+}
+
+// Records all telemetry available in the AVIF metadata, called only once
+// during the metadata decode to avoid multiple counts.
+static void RecordMetadataTelem(const Mp4parseAvifInfo& aInfo) {
+  // Pixel aspect ratio (pasp box): classify as absent/invalid/square/other.
+  if (aInfo.pixel_aspect_ratio) {
+    const uint32_t& h_spacing = aInfo.pixel_aspect_ratio->h_spacing;
+    const uint32_t& v_spacing = aInfo.pixel_aspect_ratio->v_spacing;
+
+    if (h_spacing == 0 || v_spacing == 0) {
+      AccumulateCategorical(LABELS_AVIF_PASP::invalid);
+    } else if (h_spacing == v_spacing) {
+      AccumulateCategorical(LABELS_AVIF_PASP::square);
+    } else {
+      AccumulateCategorical(LABELS_AVIF_PASP::nonsquare);
+    }
+  } else {
+    AccumulateCategorical(LABELS_AVIF_PASP::absent);
+  }
+
+  // ftyp major brand: avif (still) vs avis (sequence) vs other.
+  const auto& major_brand = aInfo.major_brand;
+  if (!memcmp(major_brand, "avif", sizeof(major_brand))) {
+    AccumulateCategorical(LABELS_AVIF_MAJOR_BRAND::avif);
+  } else if (!memcmp(major_brand, "avis", sizeof(major_brand))) {
+    AccumulateCategorical(LABELS_AVIF_MAJOR_BRAND::avis);
+  } else {
+    AccumulateCategorical(LABELS_AVIF_MAJOR_BRAND::other);
+  }
+
+  AccumulateCategorical(aInfo.has_sequence ? LABELS_AVIF_SEQUENCE::present
+                                           : LABELS_AVIF_SEQUENCE::absent);
+
+// Record presence of each (currently unsupported) feature flagged by the
+// parser's bitfield.
+#define FEATURE_TELEMETRY(fourcc)                                           \
+  AccumulateCategorical(                                                    \
+      (aInfo.unsupported_features_bitfield & (1 << MP4PARSE_FEATURE_##fourcc)) \
+          ? LABELS_AVIF_##fourcc::present                                   \
+          : LABELS_AVIF_##fourcc::absent)
+  FEATURE_TELEMETRY(A1LX);
+  FEATURE_TELEMETRY(A1OP);
+  FEATURE_TELEMETRY(CLAP);
+  FEATURE_TELEMETRY(GRID);
+  FEATURE_TELEMETRY(IPRO);
+  FEATURE_TELEMETRY(LSEL);
+
+  // colr box contents: nclx (CICP), ICC profile, both, or neither.
+  if (aInfo.nclx_colour_information && aInfo.icc_colour_information.data) {
+    AccumulateCategorical(LABELS_AVIF_COLR::both);
+  } else if (aInfo.nclx_colour_information) {
+    AccumulateCategorical(LABELS_AVIF_COLR::nclx);
+  } else if (aInfo.icc_colour_information.data) {
+    AccumulateCategorical(LABELS_AVIF_COLR::icc);
+  } else {
+    AccumulateCategorical(LABELS_AVIF_COLR::absent);
+  }
+}
+
+// Compare the bit depth declared in the pixi box (|aPixiBitDepth|; 0 means
+// the box was absent) against the depth actually found in the bitstream,
+// recording absent/valid/mismatch telemetry. |aItemName| ("color"/"alpha")
+// is used only for the error log.
+static void RecordPixiTelemetry(uint8_t aPixiBitDepth,
+                                uint8_t aBitstreamBitDepth,
+                                const char* aItemName) {
+  if (aPixiBitDepth == 0) {
+    AccumulateCategorical(LABELS_AVIF_PIXI::absent);
+  } else if (aPixiBitDepth == aBitstreamBitDepth) {
+    AccumulateCategorical(LABELS_AVIF_PIXI::valid);
+  } else {
+    MOZ_LOG(sAVIFLog, LogLevel::Error,
+            ("%s item pixi bit depth (%hhu) doesn't match "
+             "bitstream (%hhu)",
+             aItemName, aPixiBitDepth, aBitstreamBitDepth));
+    AccumulateCategorical(LABELS_AVIF_PIXI::bitstream_mismatch);
+  }
+}
+
+// This telemetry depends on the results of decoding.
+// These data must be recorded only on the first frame decoded after metadata
+// decode finishes.
+static void RecordFrameTelem(bool aAnimated, const Mp4parseAvifInfo& aInfo,
+                             const AVIFDecodedData& aData) {
+  AccumulateCategorical(
+      gColorSpaceLabel[static_cast<size_t>(aData.mYUVColorSpace)]);
+  AccumulateCategorical(
+      gColorDepthLabel[static_cast<size_t>(aData.mColorDepth)]);
+
+  RecordPixiTelemetry(
+      aAnimated ? aInfo.color_track_bit_depth : aInfo.primary_item_bit_depth,
+      BitDepthForColorDepth(aData.mColorDepth), "color");
+
+  if (aData.mAlpha) {
+    AccumulateCategorical(LABELS_AVIF_ALPHA::present);
+    // NOTE(review): the alpha pixi check reuses the color depth as the
+    // bitstream depth — assumes alpha shares the color bit depth; confirm.
+    RecordPixiTelemetry(
+        aAnimated ? aInfo.alpha_track_bit_depth : aInfo.alpha_item_bit_depth,
+        BitDepthForColorDepth(aData.mColorDepth), "alpha");
+  } else {
+    AccumulateCategorical(LABELS_AVIF_ALPHA::absent);
+  }
+
+  // CICP values: reserved codes collapse onto a single RESERVED label, all
+  // others map directly onto their enum's label.
+  if (CICP::IsReserved(aData.mColourPrimaries)) {
+    AccumulateCategorical(LABELS_AVIF_CICP_CP::RESERVED_REST);
+  } else {
+    AccumulateCategorical(
+        static_cast<LABELS_AVIF_CICP_CP>(aData.mColourPrimaries));
+  }
+
+  if (CICP::IsReserved(aData.mTransferCharacteristics)) {
+    AccumulateCategorical(LABELS_AVIF_CICP_TC::RESERVED);
+  } else {
+    AccumulateCategorical(
+        static_cast<LABELS_AVIF_CICP_TC>(aData.mTransferCharacteristics));
+  }
+
+  if (CICP::IsReserved(aData.mMatrixCoefficients)) {
+    AccumulateCategorical(LABELS_AVIF_CICP_MC::RESERVED);
+  } else {
+    AccumulateCategorical(
+        static_cast<LABELS_AVIF_CICP_MC>(aData.mMatrixCoefficients));
+  }
+}
+
+// Core decode pipeline: (1) buffer the entire source contiguously,
+// (2) parse the AVIF container, (3) record metadata telemetry and post
+// size/animation/transparency metadata, (4) decode the AV1 payload(s),
+// (5) resolve the color profile (ICC or CICP) and build a qcms transform,
+// (6) convert YUV(A) to RGB(A) and push rows through a SurfacePipe.
+nsAVIFDecoder::DecodeResult nsAVIFDecoder::DoDecodeInternal(
+    SourceBufferIterator& aIterator, IResumable* aOnResume) {
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] nsAVIFDecoder::DoDecodeInternal", this));
+
+  // Since the SourceBufferIterator doesn't guarantee a contiguous buffer,
+  // but the current mp4parse-rust implementation requires it, always buffer
+  // locally. This keeps the code simpler at the cost of some performance, but
+  // this implementation is only experimental, so we don't want to spend time
+  // optimizing it prematurely.
+  while (!mReadCursor) {
+    SourceBufferIterator::State state =
+        aIterator.AdvanceOrScheduleResume(SIZE_MAX, aOnResume);
+
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] After advance, iterator state is %d", this, state));
+
+    switch (state) {
+      case SourceBufferIterator::WAITING:
+        return AsVariant(NonDecoderResult::NeedMoreData);
+
+      case SourceBufferIterator::COMPLETE:
+        // All data buffered; set the cursor so ReadSource can serve reads.
+        mReadCursor = mBufferedData.begin();
+        break;
+
+      case SourceBufferIterator::READY: {  // copy new data to buffer
+        MOZ_LOG(sAVIFLog, LogLevel::Debug,
+                ("[this=%p] SourceBufferIterator ready, %zu bytes available",
+                 this, aIterator.Length()));
+
+        bool appendSuccess =
+            mBufferedData.append(aIterator.Data(), aIterator.Length());
+
+        // NOTE(review): append failure (OOM) is only logged here; the loop
+        // continues — confirm the failure surfaces later as intended.
+        if (!appendSuccess) {
+          MOZ_LOG(sAVIFLog, LogLevel::Error,
+                  ("[this=%p] Failed to append %zu bytes to buffer", this,
+                   aIterator.Length()));
+        }
+
+        break;
+      }
+
+      default:
+        MOZ_ASSERT_UNREACHABLE("unexpected SourceBufferIterator state");
+    }
+  }
+
+  Mp4parseStatus parserStatus = CreateParser();
+
+  if (parserStatus != MP4PARSE_STATUS_OK) {
+    return AsVariant(parserStatus);
+  }
+
+  const Mp4parseAvifInfo& parsedInfo = mParser->GetInfo();
+
+  if (parsedInfo.icc_colour_information.data) {
+    const auto& icc = parsedInfo.icc_colour_information;
+    MOZ_LOG(
+        sAVIFLog, LogLevel::Debug,
+        ("[this=%p] colr type ICC: %zu bytes %p", this, icc.length, icc.data));
+  }
+
+  // Metadata telemetry is recorded once, during the metadata decode pass.
+  if (IsMetadataDecode()) {
+    RecordMetadataTelem(parsedInfo);
+  }
+
+  if (parsedInfo.nclx_colour_information) {
+    const auto& nclx = *parsedInfo.nclx_colour_information;
+    MOZ_LOG(
+        sAVIFLog, LogLevel::Debug,
+        ("[this=%p] colr type CICP: cp/tc/mc/full-range %u/%u/%u/%s", this,
+         nclx.colour_primaries, nclx.transfer_characteristics,
+         nclx.matrix_coefficients, nclx.full_range_flag ? "true" : "false"));
+  }
+
+  if (!parsedInfo.icc_colour_information.data &&
+      !parsedInfo.nclx_colour_information) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] colr box not present", this));
+  }
+
+  // Pull the next frame's samples out of the parser.
+  AVIFImage parsedImage;
+  DecodeResult r = mParser->GetImage(parsedImage);
+  if (!IsDecodeSuccess(r)) {
+    return r;
+  }
+  // Complete here means this is the final frame of an animation.
+  bool isDone =
+      !IsMetadataDecode() && r == DecodeResult(NonDecoderResult::Complete);
+
+  if (mIsAnimated) {
+    PostIsAnimated(parsedImage.mDuration);
+  }
+  if (mHasAlpha) {
+    PostHasTransparency();
+  }
+
+  Orientation orientation = StaticPrefs::image_avif_apply_transforms()
+                                ? GetImageOrientation(parsedInfo)
+                                : Orientation{};
+  // TODO: Orientation should probably also apply to animated AVIFs.
+  if (mIsAnimated) {
+    orientation = Orientation{};
+  }
+
+  MaybeIntSize ispeImageSize = GetImageSize(parsedInfo);
+
+  // Decode-error telemetry is normally sent during the metadata pass; if we
+  // get past the early-return below it's sent on the full pass instead.
+  bool sendDecodeTelemetry = IsMetadataDecode();
+  if (ispeImageSize.isSome()) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] Parser returned image size %d x %d (%d/%d bit)", this,
+             ispeImageSize->width, ispeImageSize->height,
+             mIsAnimated ? parsedInfo.color_track_bit_depth
+                         : parsedInfo.primary_item_bit_depth,
+             mIsAnimated ? parsedInfo.alpha_track_bit_depth
+                         : parsedInfo.alpha_item_bit_depth));
+    PostSize(ispeImageSize->width, ispeImageSize->height, orientation);
+    // With a size from the ispe box, a metadata decode can stop here.
+    if (IsMetadataDecode()) {
+      MOZ_LOG(
+          sAVIFLog, LogLevel::Debug,
+          ("[this=%p] Finishing metadata decode without image decode", this));
+      return AsVariant(NonDecoderResult::Complete);
+    }
+    // If we're continuing to decode here, this means we skipped decode
+    // telemetry for the metadata decode pass. Send it this time.
+    sendDecodeTelemetry = true;
+  } else {
+    MOZ_LOG(sAVIFLog, LogLevel::Error,
+            ("[this=%p] Parser returned no image size, decoding...", this));
+  }
+
+  r = CreateDecoder();
+  if (!IsDecodeSuccess(r)) {
+    return r;
+  }
+  MOZ_ASSERT(mDecoder);
+  r = mDecoder->Decode(sendDecodeTelemetry, parsedInfo, parsedImage);
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] Decoder%s->Decode() %s", this,
+           StaticPrefs::image_avif_use_dav1d() ? "Dav1d" : "AOM",
+           IsDecodeSuccess(r) ? "succeeds" : "fails"));
+
+  if (!IsDecodeSuccess(r)) {
+    return r;
+  }
+
+  UniquePtr<AVIFDecodedData> decodedData = mDecoder->GetDecodedData();
+
+  MOZ_ASSERT_IF(mHasAlpha, decodedData->mAlpha.isSome());
+
+  // By this point SetCicpValues has resolved unspecified CICP values.
+  MOZ_ASSERT(decodedData->mColourPrimaries !=
+             CICP::ColourPrimaries::CP_UNSPECIFIED);
+  MOZ_ASSERT(decodedData->mTransferCharacteristics !=
+             CICP::TransferCharacteristics::TC_UNSPECIFIED);
+  MOZ_ASSERT(decodedData->mColorRange <= gfx::ColorRange::_Last);
+  MOZ_ASSERT(decodedData->mYUVColorSpace <= gfx::YUVColorSpace::_Last);
+
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] decodedData.mColorRange: %hhd", this,
+           static_cast<uint8_t>(decodedData->mColorRange)));
+
+  // Technically it's valid but we don't handle it now (Bug 1682318).
+  if (decodedData->mAlpha &&
+      decodedData->mAlpha->mSize != decodedData->YDataSize()) {
+    return AsVariant(NonDecoderResult::AlphaYSizeMismatch);
+  }
+
+  bool isFirstFrame = GetFrameCount() == 0;
+
+  if (!HasSize()) {
+    // No ispe box: fall back to the size decoded from the bitstream.
+    MOZ_ASSERT(isFirstFrame);
+    MOZ_LOG(
+        sAVIFLog, LogLevel::Error,
+        ("[this=%p] Using decoded image size: %d x %d", this,
+         decodedData->mPictureRect.width, decodedData->mPictureRect.height));
+    PostSize(decodedData->mPictureRect.width, decodedData->mPictureRect.height,
+             orientation);
+    AccumulateCategorical(LABELS_AVIF_ISPE::absent);
+  } else {
+    // Verify that the bitstream hasn't changed the image size compared to
+    // either the ispe box or the previous frames.
+    IntSize expectedSize = GetImageMetadata()
+                               .GetOrientation()
+                               .ToUnoriented(Size())
+                               .ToUnknownSize();
+    if (decodedData->mPictureRect.width != expectedSize.width ||
+        decodedData->mPictureRect.height != expectedSize.height) {
+      if (isFirstFrame) {
+        MOZ_LOG(
+            sAVIFLog, LogLevel::Error,
+            ("[this=%p] Metadata image size doesn't match decoded image size: "
+             "(%d x %d) != (%d x %d)",
+             this, ispeImageSize->width, ispeImageSize->height,
+             decodedData->mPictureRect.width,
+             decodedData->mPictureRect.height));
+        AccumulateCategorical(LABELS_AVIF_ISPE::bitstream_mismatch);
+        return AsVariant(NonDecoderResult::MetadataImageSizeMismatch);
+      }
+
+      MOZ_LOG(
+          sAVIFLog, LogLevel::Error,
+          ("[this=%p] Frame size has changed in the bitstream: "
+           "(%d x %d) != (%d x %d)",
+           this, expectedSize.width, expectedSize.height,
+           decodedData->mPictureRect.width, decodedData->mPictureRect.height));
+      return AsVariant(NonDecoderResult::FrameSizeChanged);
+    }
+
+    if (isFirstFrame) {
+      AccumulateCategorical(LABELS_AVIF_ISPE::valid);
+    }
+  }
+
+  // Metadata decodes that had no ispe box needed a real decode to learn the
+  // size; they are finished now.
+  if (IsMetadataDecode()) {
+    return AsVariant(NonDecoderResult::Complete);
+  }
+
+  IntSize rgbSize = decodedData->mPictureRect.Size();
+
+  if (parsedImage.mFrameNum == 0) {
+    RecordFrameTelem(mIsAnimated, parsedInfo, *decodedData);
+  }
+
+  if (decodedData->mRenderSize &&
+      decodedData->mRenderSize->ToUnknownSize() != rgbSize) {
+    // This may be supported by allowing all metadata decodes to decode a frame
+    // and get the render size from the bitstream. However it's unlikely to be
+    // used often.
+    return AsVariant(NonDecoderResult::RenderSizeMismatch);
+  }
+
+  // Read color profile
+  if (mCMSMode != CMSMode::Off) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] Processing color profile", this));
+
+    // See comment on AVIFDecodedData
+    if (parsedInfo.icc_colour_information.data) {
+      // same profile for every frame of image, only create it once
+      if (!mInProfile) {
+        const auto& icc = parsedInfo.icc_colour_information;
+        mInProfile = qcms_profile_from_memory(icc.data, icc.length);
+      }
+    } else {
+      // potentially different profile every frame, destroy the old one
+      if (mInProfile) {
+        if (mTransform) {
+          qcms_transform_release(mTransform);
+          mTransform = nullptr;
+        }
+        qcms_profile_release(mInProfile);
+        mInProfile = nullptr;
+      }
+
+      const auto& cp = decodedData->mColourPrimaries;
+      const auto& tc = decodedData->mTransferCharacteristics;
+
+      // Reserved CICP values cannot be turned into a profile; fail decode.
+      if (CICP::IsReserved(cp)) {
+        MOZ_LOG(sAVIFLog, LogLevel::Error,
+                ("[this=%p] colour_primaries reserved value (%hhu) is invalid; "
+                 "failing",
+                 this, cp));
+        return AsVariant(NonDecoderResult::InvalidCICP);
+      }
+
+      if (CICP::IsReserved(tc)) {
+        MOZ_LOG(sAVIFLog, LogLevel::Error,
+                ("[this=%p] transfer_characteristics reserved value (%hhu) is "
+                 "invalid; failing",
+                 this, tc));
+        return AsVariant(NonDecoderResult::InvalidCICP);
+      }
+
+      MOZ_ASSERT(cp != CICP::ColourPrimaries::CP_UNSPECIFIED &&
+                 !CICP::IsReserved(cp));
+      MOZ_ASSERT(tc != CICP::TransferCharacteristics::TC_UNSPECIFIED &&
+                 !CICP::IsReserved(tc));
+
+      mInProfile = qcms_profile_create_cicp(cp, tc);
+    }
+
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] mInProfile %p", this, mInProfile));
+  } else {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] CMSMode::Off, skipping color profile", this));
+  }
+
+  // Build the qcms transform once we have both input and output profiles.
+  if (mInProfile && GetCMSOutputProfile() && !mTransform) {
+    auto intent = static_cast<qcms_intent>(gfxPlatform::GetRenderingIntent());
+    qcms_data_type inType;
+    qcms_data_type outType;
+
+    // If we're not mandating an intent, use the one from the image.
+    if (gfxPlatform::GetRenderingIntent() == -1) {
+      intent = qcms_profile_get_rendering_intent(mInProfile);
+    }
+
+    uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
+    if (profileSpace != icSigGrayData) {
+      // If the transform happens with SurfacePipe, it will be in RGBA if we
+      // have an alpha channel, because the swizzle and premultiplication
+      // happens after color management. Otherwise it will be in BGRA because
+      // the swizzle happens at the start.
+      if (mHasAlpha) {
+        inType = QCMS_DATA_RGBA_8;
+        outType = QCMS_DATA_RGBA_8;
+      } else {
+        inType = gfxPlatform::GetCMSOSRGBAType();
+        outType = inType;
+      }
+    } else {
+      if (mHasAlpha) {
+        inType = QCMS_DATA_GRAYA_8;
+        outType = gfxPlatform::GetCMSOSRGBAType();
+      } else {
+        inType = QCMS_DATA_GRAY_8;
+        outType = gfxPlatform::GetCMSOSRGBAType();
+      }
+    }
+
+    mTransform = qcms_transform_create(mInProfile, inType,
+                                       GetCMSOutputProfile(), outType, intent);
+  }
+
+  // Get suggested format and size. Note that GetYCbCrToRGBDestFormatAndSize
+  // force format to be B8G8R8X8 if it's not.
+  gfx::SurfaceFormat format = SurfaceFormat::OS_RGBX;
+  gfx::GetYCbCrToRGBDestFormatAndSize(*decodedData, format, rgbSize);
+  if (mHasAlpha) {
+    // We would use libyuv to do the YCbCrA -> ARGB convertion, which only
+    // works for B8G8R8A8.
+    format = SurfaceFormat::B8G8R8A8;
+  }
+
+  const int bytesPerPixel = BytesPerPixel(format);
+
+  // Guard the RGB buffer size computation against integer overflow.
+  const CheckedInt rgbStride = CheckedInt<int>(rgbSize.width) * bytesPerPixel;
+  const CheckedInt rgbBufLength = rgbStride * rgbSize.height;
+
+  if (!rgbStride.isValid() || !rgbBufLength.isValid()) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] overflow calculating rgbBufLength: rbgSize.width: %d, "
+             "rgbSize.height: %d, "
+             "bytesPerPixel: %u",
+             this, rgbSize.width, rgbSize.height, bytesPerPixel));
+    return AsVariant(NonDecoderResult::SizeOverflow);
+  }
+
+  UniquePtr<uint8_t[]> rgbBuf =
+      MakeUniqueFallible<uint8_t[]>(rgbBufLength.value());
+  if (!rgbBuf) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] allocation of %u-byte rgbBuf failed", this,
+             rgbBufLength.value()));
+    return AsVariant(NonDecoderResult::OutOfMemory);
+  }
+
+  if (decodedData->mAlpha) {
+    // Reconcile the surface's premultiplication request with what the
+    // bitstream delivered; attenuate/unattenuate only when they differ.
+    const auto wantPremultiply =
+        !bool(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
+    const bool& hasPremultiply = decodedData->mAlpha->mPremultiplied;
+
+    PremultFunc premultOp = nullptr;
+    if (wantPremultiply && !hasPremultiply) {
+      premultOp = libyuv::ARGBAttenuate;
+    } else if (!wantPremultiply && hasPremultiply) {
+      premultOp = libyuv::ARGBUnattenuate;
+    }
+
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] calling gfx::ConvertYCbCrAToARGB premultOp: %p", this,
+             premultOp));
+    gfx::ConvertYCbCrAToARGB(*decodedData, *decodedData->mAlpha, format,
+                             rgbSize, rgbBuf.get(), rgbStride.value(),
+                             premultOp);
+  } else {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] calling gfx::ConvertYCbCrToRGB", this));
+    gfx::ConvertYCbCrToRGB(*decodedData, format, rgbSize, rgbBuf.get(),
+                           rgbStride.value());
+  }
+
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] calling SurfacePipeFactory::CreateSurfacePipe", this));
+
+  Maybe<SurfacePipe> pipe = Nothing();
+
+  if (mIsAnimated) {
+    SurfaceFormat outFormat =
+        decodedData->mAlpha ? SurfaceFormat::OS_RGBA : SurfaceFormat::OS_RGBX;
+    Maybe<AnimationParams> animParams;
+    if (!IsFirstFrameDecode()) {
+      animParams.emplace(FullFrame().ToUnknownRect(), parsedImage.mDuration,
+                         parsedImage.mFrameNum, BlendMethod::SOURCE,
+                         DisposalMethod::CLEAR_ALL);
+    }
+    pipe = SurfacePipeFactory::CreateSurfacePipe(
+        this, Size(), OutputSize(), FullFrame(), format, outFormat, animParams,
+        mTransform, SurfacePipeFlags());
+  } else {
+    // Still images get the reorienting pipe so EXIF/irot/imir apply.
+    pipe = SurfacePipeFactory::CreateReorientSurfacePipe(
+        this, Size(), OutputSize(), format, mTransform, GetOrientation());
+  }
+
+  if (pipe.isNothing()) {
+    MOZ_LOG(sAVIFLog, LogLevel::Debug,
+            ("[this=%p] could not initialize surface pipe", this));
+    return AsVariant(NonDecoderResult::PipeInitError);
+  }
+
+  // Push the converted RGB rows through the pipe, posting invalidations as
+  // regions become available.
+  MOZ_LOG(sAVIFLog, LogLevel::Debug, ("[this=%p] writing to surface", this));
+  const uint8_t* endOfRgbBuf = {rgbBuf.get() + rgbBufLength.value()};
+  WriteState writeBufferResult = WriteState::NEED_MORE_DATA;
+  for (uint8_t* rowPtr = rgbBuf.get(); rowPtr < endOfRgbBuf;
+       rowPtr += rgbStride.value()) {
+    writeBufferResult = pipe->WriteBuffer(reinterpret_cast<uint32_t*>(rowPtr));
+
+    Maybe<SurfaceInvalidRect> invalidRect = pipe->TakeInvalidRect();
+    if (invalidRect) {
+      PostInvalidation(invalidRect->mInputSpaceRect,
+                       Some(invalidRect->mOutputSpaceRect));
+    }
+
+    if (writeBufferResult == WriteState::FAILURE) {
+      MOZ_LOG(sAVIFLog, LogLevel::Debug,
+              ("[this=%p] error writing rowPtr to surface pipe", this));
+
+    } else if (writeBufferResult == WriteState::FINISHED) {
+      MOZ_ASSERT(rowPtr + rgbStride.value() == endOfRgbBuf);
+    }
+  }
+
+  MOZ_LOG(sAVIFLog, LogLevel::Debug,
+          ("[this=%p] writing to surface complete", this));
+
+  if (writeBufferResult == WriteState::FINISHED) {
+    PostFrameStop(mHasAlpha ? Opacity::SOME_TRANSPARENCY
+                            : Opacity::FULLY_OPAQUE);
+
+    if (!mIsAnimated || IsFirstFrameDecode()) {
+      PostDecodeDone(0);
+      return DecodeResult(NonDecoderResult::Complete);
+    }
+
+    // Final frame of an animation: translate the container's loop mode into
+    // the imagelib loop count (-1 = infinite).
+    if (isDone) {
+      switch (mParser->GetInfo().loop_mode) {
+        case MP4PARSE_AVIF_LOOP_MODE_LOOP_BY_COUNT: {
+          auto loopCount = mParser->GetInfo().loop_count;
+          PostDecodeDone(
+              loopCount > INT32_MAX ? -1 : static_cast<int32_t>(loopCount));
+          break;
+        }
+        case MP4PARSE_AVIF_LOOP_MODE_LOOP_INFINITELY:
+        case MP4PARSE_AVIF_LOOP_MODE_NO_EDITS:
+        default:
+          PostDecodeDone(-1);
+          break;
+      }
+      return DecodeResult(NonDecoderResult::Complete);
+    }
+
+    return DecodeResult(NonDecoderResult::OutputAvailable);
+  }
+
+  return AsVariant(NonDecoderResult::WriteBufferError);
+}
+
+/* static */
+bool nsAVIFDecoder::IsDecodeSuccess(const DecodeResult& aResult) {
+  // A result counts as success when it is one of the codec-level "OK" values
+  // (dav1d reports 0, libaom reports AOM_CODEC_OK) or one of our own
+  // non-decoder results that represent forward progress.
+  if (aResult == DecodeResult(Dav1dResult(0)) ||
+      aResult == DecodeResult(AOMResult(AOM_CODEC_OK))) {
+    return true;
+  }
+  return aResult == DecodeResult(NonDecoderResult::OutputAvailable) ||
+         aResult == DecodeResult(NonDecoderResult::Complete);
+}
+
+// Records AVIF decode-outcome telemetry for aResult. Parser statuses and
+// NonDecoderResults are mapped onto AVIF_DECODE_RESULT labels; codec results
+// additionally record which decoder (dav1d or aom) produced them.
+//
+// Fix: the original version had a MOZ_LOG/MOZ_ASSERT/invalid_parse_status
+// block after the first switch, but every case (including default) returns,
+// so that code was unreachable dead code; it has been removed. Unknown
+// parser statuses are recorded as "uncategorized" by the default case.
+void nsAVIFDecoder::RecordDecodeResultTelemetry(
+    const nsAVIFDecoder::DecodeResult& aResult) {
+  if (aResult.is<Mp4parseStatus>()) {
+    switch (aResult.as<Mp4parseStatus>()) {
+      case MP4PARSE_STATUS_OK:
+        // Successful parses should surface as one of the other variants.
+        MOZ_ASSERT_UNREACHABLE(
+            "Expect NonDecoderResult, Dav1dResult or AOMResult");
+        return;
+      case MP4PARSE_STATUS_BAD_ARG:
+      case MP4PARSE_STATUS_INVALID:
+      case MP4PARSE_STATUS_UNSUPPORTED:
+      case MP4PARSE_STATUS_EOF:
+      case MP4PARSE_STATUS_IO:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::parse_error);
+        return;
+      case MP4PARSE_STATUS_OOM:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::out_of_memory);
+        return;
+      case MP4PARSE_STATUS_MISSING_AVIF_OR_AVIS_BRAND:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::missing_brand);
+        return;
+      case MP4PARSE_STATUS_FTYP_NOT_FIRST:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::ftyp_not_first);
+        return;
+      case MP4PARSE_STATUS_NO_IMAGE:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::no_image);
+        return;
+      case MP4PARSE_STATUS_MOOV_BAD_QUANTITY:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::multiple_moov);
+        return;
+      case MP4PARSE_STATUS_MOOV_MISSING:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::no_moov);
+        return;
+      case MP4PARSE_STATUS_LSEL_NO_ESSENTIAL:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::lsel_no_essential);
+        return;
+      case MP4PARSE_STATUS_A1OP_NO_ESSENTIAL:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::a1op_no_essential);
+        return;
+      case MP4PARSE_STATUS_A1LX_ESSENTIAL:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::a1lx_essential);
+        return;
+      case MP4PARSE_STATUS_TXFORM_NO_ESSENTIAL:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::txform_no_essential);
+        return;
+      case MP4PARSE_STATUS_PITM_MISSING:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::no_primary_item);
+        return;
+      case MP4PARSE_STATUS_IMAGE_ITEM_TYPE:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::image_item_type);
+        return;
+      case MP4PARSE_STATUS_ITEM_TYPE_MISSING:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::item_type_missing);
+        return;
+      case MP4PARSE_STATUS_CONSTRUCTION_METHOD:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::construction_method);
+        return;
+      case MP4PARSE_STATUS_PITM_NOT_FOUND:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::item_loc_not_found);
+        return;
+      case MP4PARSE_STATUS_IDAT_MISSING:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::no_item_data_box);
+        return;
+      default:
+        // Any Mp4parseStatus value not listed above.
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::uncategorized);
+        return;
+    }
+  } else if (aResult.is<NonDecoderResult>()) {
+    switch (aResult.as<NonDecoderResult>()) {
+      case NonDecoderResult::NeedMoreData:
+        return;
+      case NonDecoderResult::OutputAvailable:
+        return;
+      case NonDecoderResult::Complete:
+        return;
+      case NonDecoderResult::SizeOverflow:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::size_overflow);
+        return;
+      case NonDecoderResult::OutOfMemory:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::out_of_memory);
+        return;
+      case NonDecoderResult::PipeInitError:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::pipe_init_error);
+        return;
+      case NonDecoderResult::WriteBufferError:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::write_buffer_error);
+        return;
+      case NonDecoderResult::AlphaYSizeMismatch:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::alpha_y_sz_mismatch);
+        return;
+      case NonDecoderResult::AlphaYColorDepthMismatch:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::alpha_y_bpc_mismatch);
+        return;
+      case NonDecoderResult::MetadataImageSizeMismatch:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::ispe_mismatch);
+        return;
+      case NonDecoderResult::RenderSizeMismatch:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::render_size_mismatch);
+        return;
+      case NonDecoderResult::FrameSizeChanged:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::frame_size_changed);
+        return;
+      case NonDecoderResult::InvalidCICP:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::invalid_cicp);
+        return;
+      case NonDecoderResult::NoSamples:
+        AccumulateCategorical(LABELS_AVIF_DECODE_RESULT::no_samples);
+        return;
+    }
+    MOZ_ASSERT_UNREACHABLE("unknown NonDecoderResult");
+  } else {
+    // Codec results: record which decoder ran, then success/failure.
+    MOZ_ASSERT(aResult.is<Dav1dResult>() || aResult.is<AOMResult>());
+    AccumulateCategorical(aResult.is<Dav1dResult>() ? LABELS_AVIF_DECODER::dav1d
+                                                    : LABELS_AVIF_DECODER::aom);
+    AccumulateCategorical(IsDecodeSuccess(aResult)
+                              ? LABELS_AVIF_DECODE_RESULT::success
+                              : LABELS_AVIF_DECODE_RESULT::decode_error);
+  }
+}
+
+// Telemetry histogram used to record AVIF image decode speed.
+Maybe<Telemetry::HistogramID> nsAVIFDecoder::SpeedHistogram() const {
+  return Some(Telemetry::IMAGE_DECODE_SPEED_AVIF);
+}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsAVIFDecoder.h b/image/decoders/nsAVIFDecoder.h
new file mode 100644
index 0000000000..59f6498202
--- /dev/null
+++ b/image/decoders/nsAVIFDecoder.h
@@ -0,0 +1,289 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsAVIFDecoder_h
+#define mozilla_image_decoders_nsAVIFDecoder_h
+
+#include "Decoder.h"
+#include "mozilla/gfx/Types.h"
+#include "MP4Metadata.h"
+#include "mp4parse.h"
+#include "SampleIterator.h"
+#include "SurfacePipe.h"
+
+#include "aom/aom_decoder.h"
+#include "dav1d/dav1d.h"
+
+#include "mozilla/Telemetry.h"
+
+namespace mozilla {
+namespace image {
+class RasterImage;
+class AVIFDecoderStream;
+class AVIFParser;
+class AVIFDecoderInterface;
+
+// Decoder for AVIF (AV1 Image File Format) images. Pairs an AVIFParser for
+// the container with an AVIFDecoderInterface (dav1d or libaom) for the AV1
+// payload; supports still images and animated sequences.
+class nsAVIFDecoder final : public Decoder {
+ public:
+  virtual ~nsAVIFDecoder();
+
+  DecoderType GetType() const override { return DecoderType::AVIF; }
+
+ protected:
+  LexerResult DoDecode(SourceBufferIterator& aIterator,
+                       IResumable* aOnResume) override;
+  Maybe<Telemetry::HistogramID> SpeedHistogram() const override;
+
+ private:
+  friend class DecoderFactory;
+  friend class AVIFDecoderInterface;
+  friend class AVIFParser;
+
+  // Decoders should only be instantiated via DecoderFactory.
+  explicit nsAVIFDecoder(RasterImage* aImage);
+
+  // Static read callback used to feed source bytes to the parser
+  // (aUserData is expected to identify the decoder instance; see the .cpp).
+  static intptr_t ReadSource(uint8_t* aDestBuf, uintptr_t aDestBufSize,
+                             void* aUserData);
+
+  // Raw dav1d return code (0 indicates success).
+  typedef int Dav1dResult;
+  // AOM-path failures that have no aom_codec_err_t equivalent.
+  enum class NonAOMCodecError { NoFrame, SizeOverflow };
+  typedef Variant<aom_codec_err_t, NonAOMCodecError> AOMResult;
+  // Outcomes produced by this decoder itself rather than by a codec.
+  enum class NonDecoderResult {
+    NeedMoreData,
+    OutputAvailable,
+    Complete,
+    SizeOverflow,
+    OutOfMemory,
+    PipeInitError,
+    WriteBufferError,
+    AlphaYSizeMismatch,
+    AlphaYColorDepthMismatch,
+    MetadataImageSizeMismatch,
+    RenderSizeMismatch,
+    FrameSizeChanged,
+    InvalidCICP,
+    NoSamples,
+  };
+  // Aggregate result type covering parser, decoder-local and codec outcomes.
+  using DecodeResult =
+      Variant<Mp4parseStatus, NonDecoderResult, Dav1dResult, AOMResult>;
+  Mp4parseStatus CreateParser();
+  DecodeResult CreateDecoder();
+  DecodeResult DoDecodeInternal(SourceBufferIterator& aIterator,
+                                IResumable* aOnResume);
+
+  // True when aResult is one of the success values (see the .cpp).
+  static bool IsDecodeSuccess(const DecodeResult& aResult);
+
+  void RecordDecodeResultTelemetry(const DecodeResult& aResult);
+
+  // Accumulated source bytes (presumably wrapped by mBufferStream — see .cpp).
+  Vector<uint8_t> mBufferedData;
+  RefPtr<AVIFDecoderStream> mBufferStream;
+
+  /// Pointer to the next place to read from mBufferedData
+  const uint8_t* mReadCursor = nullptr;
+
+  UniquePtr<AVIFParser> mParser = nullptr;
+  UniquePtr<AVIFDecoderInterface> mDecoder = nullptr;
+
+  bool mIsAnimated = false;
+  bool mHasAlpha = false;
+};
+
+// ByteStream adapter exposing an externally-owned Vector<uint8_t> to the MP4
+// parsing code. Does not take ownership of the buffer.
+class AVIFDecoderStream : public ByteStream {
+ public:
+  explicit AVIFDecoderStream(Vector<uint8_t>* aBuffer) { mBuffer = aBuffer; }
+
+  virtual bool ReadAt(int64_t offset, void* data, size_t size,
+                      size_t* bytes_read) override;
+  // Cached reads are serviced identically to plain reads.
+  virtual bool CachedReadAt(int64_t offset, void* data, size_t size,
+                            size_t* bytes_read) override {
+    return ReadAt(offset, data, size, bytes_read);
+  };
+  virtual bool Length(int64_t* size) override;
+  virtual const uint8_t* GetContiguousAccess(int64_t aOffset,
+                                             size_t aSize) override;
+
+ private:
+  Vector<uint8_t>* mBuffer;  // non-owning; must outlive this stream
+};
+
+// One frame's worth of parsed (still-compressed) AVIF data: frame index,
+// its duration, and the color plus optional alpha AV1 samples.
+struct AVIFImage {
+  uint32_t mFrameNum = 0;
+  FrameTimeout mDuration = FrameTimeout::Zero();
+  RefPtr<MediaRawData> mColorImage = nullptr;
+  RefPtr<MediaRawData> mAlphaImage = nullptr;
+};
+
+// Thin wrapper around the Rust mp4parse AVIF parser. Created via the static
+// Create() factory; yields per-frame samples through GetImage().
+class AVIFParser {
+ public:
+  // Factory: constructs and initializes a parser over aBuffer, returning the
+  // parse status and, on success, the parser in aParserOut.
+  static Mp4parseStatus Create(const Mp4parseIo* aIo, ByteStream* aBuffer,
+                               UniquePtr<AVIFParser>& aParserOut,
+                               bool aAllowSequences, bool aAnimateAVIFMajor);
+
+  ~AVIFParser();
+
+  const Mp4parseAvifInfo& GetInfo() const { return mInfo; }
+
+  // Fills aImage with the next frame's samples; result conveys success,
+  // need-more-data, or an error (see nsAVIFDecoder::DecodeResult).
+  nsAVIFDecoder::DecodeResult GetImage(AVIFImage& aImage);
+
+  bool IsAnimated() const;
+
+ private:
+  explicit AVIFParser(const Mp4parseIo* aIo);
+
+  Mp4parseStatus Init(ByteStream* aBuffer, bool aAllowSequences,
+                      bool aAnimateAVIFMajor);
+
+  // Deleter releasing the underlying mp4parse parser handle.
+  struct FreeAvifParser {
+    void operator()(Mp4parseAvifParser* aPtr) { mp4parse_avif_free(aPtr); }
+  };
+
+  const Mp4parseIo* mIo;
+  UniquePtr<Mp4parseAvifParser, FreeAvifParser> mParser = nullptr;
+  Mp4parseAvifInfo mInfo = {};
+
+  // Iterators over the color and (optional) alpha tracks of a sequence.
+  UniquePtr<SampleIterator> mColorSampleIter = nullptr;
+  UniquePtr<SampleIterator> mAlphaSampleIter = nullptr;
+  uint32_t mFrameNum = 0;
+};
+
+// Deleter for heap-allocated Dav1dPicture: drops the dav1d reference, then
+// frees the wrapper object itself.
+struct Dav1dPictureUnref {
+  void operator()(Dav1dPicture* aPtr) {
+    dav1d_picture_unref(aPtr);
+    delete aPtr;
+  }
+};
+
+// Owning pointer to a Dav1dPicture that unrefs on destruction.
+using OwnedDav1dPicture = UniquePtr<Dav1dPicture, Dav1dPictureUnref>;
+
+// Deep-copied snapshot of an aom_image_t whose plane data lives in mBuffer,
+// decoupling the image's lifetime from the AOM decoder's internal buffers.
+class OwnedAOMImage {
+ public:
+  ~OwnedAOMImage();
+
+  // Returns a heap-allocated copy of aImage, or (presumably) null on
+  // failure — confirm against the .cpp implementation.
+  static OwnedAOMImage* CopyFrom(aom_image_t* aImage, bool aIsAlpha);
+
+  aom_image_t* GetImage() { return mImage.isSome() ? mImage.ptr() : nullptr; }
+
+ private:
+  OwnedAOMImage();
+
+  bool CloneFrom(aom_image_t* aImage, bool aIsAlpha);
+
+  // The mImage's planes are referenced to mBuffer
+  Maybe<aom_image_t> mImage;
+  UniquePtr<uint8_t[]> mBuffer;
+};
+
+// Decoded YCbCr (plus optional alpha) pixel data together with the color
+// metadata needed to convert it to RGB. Holds the backing dav1d/AOM images
+// alive for as long as the plane pointers are in use.
+struct AVIFDecodedData : layers::PlanarYCbCrData {
+ public:
+  Maybe<OrientedIntSize> mRenderSize = Nothing();
+  gfx::CICP::ColourPrimaries mColourPrimaries = gfx::CICP::CP_UNSPECIFIED;
+  gfx::CICP::TransferCharacteristics mTransferCharacteristics =
+      gfx::CICP::TC_UNSPECIFIED;
+  gfx::CICP::MatrixCoefficients mMatrixCoefficients = gfx::CICP::MC_UNSPECIFIED;
+
+  OwnedDav1dPicture mColorDav1d;
+  OwnedDav1dPicture mAlphaDav1d;
+  UniquePtr<OwnedAOMImage> mColorAOM;
+  UniquePtr<OwnedAOMImage> mAlphaAOM;
+
+  // CICP values (either from the BMFF container or the AV1 sequence header) are
+  // used to create the colorspace transform. CICP::MatrixCoefficients is only
+  // stored for the sake of telemetry, since the relevant information for YUV ->
+  // RGB conversion is stored in mYUVColorSpace.
+  //
+  // There are three potential sources of color information for an AVIF:
+  // 1. ICC profile via a ColourInformationBox (colr) defined in [ISOBMFF]
+  //    § 12.1.5 "Colour information" and [MIAF] § 7.3.6.4 "Colour information
+  //    property"
+  // 2. NCLX (AKA CICP, see [ITU-T H.273]) values in the same
+  //    ColourInformationBox (which can have an ICC profile or NCLX values,
+  //    not both).
+  // 3. NCLX values in the AV1 bitstream
+  //
+  // The 'colr' box is optional, but there are always CICP values in the AV1
+  // bitstream, so it is possible to have both. Per ISOBMFF § 12.1.5.1
+  // > If colour information is supplied in both this box, and also in the
+  // > video bitstream, this box takes precedence, and over-rides the
+  // > information in the bitstream.
+  //
+  // If present, the ICC profile takes precedence over CICP values, but only
+  // specifies the color space, not the matrix coefficients necessary to convert
+  // YCbCr data (as most AVIF are encoded) to RGB. The matrix coefficients are
+  // always derived from the CICP values for matrix_coefficients (and
+  // potentially colour_primaries, but in that case only the CICP values for
+  // colour_primaries will be used, not anything harvested from the ICC
+  // profile).
+  //
+  // If there is no ICC profile, the color space transform will be based on the
+  // CICP values either from the 'colr' box, or if absent/unspecified, the
+  // decoded AV1 sequence header.
+  //
+  // For values that are 2 (meaning unspecified) after trying both, the
+  // fallback values are:
+  // - CP: 1 (BT.709/sRGB)
+  // - TC: 13 (sRGB)
+  // - MC: 6 (BT.601)
+  // - Range: Full
+  //
+  // Additional details here:
+  // <https://github.com/AOMediaCodec/libavif/wiki/CICP#unspecified>. Note
+  // that this contradicts the current version of [MIAF] § 7.3.6.4 which
+  // specifies MC=1 (BT.709). This is revised in [MIAF DAMD2] and confirmed by
+  // <https://github.com/AOMediaCodec/av1-avif/issues/77#issuecomment-676526097>
+  //
+  // The precedence for applying the various values and defaults in the event
+  // no valid values are found are managed by the following functions.
+  //
+  // References:
+  // [ISOBMFF]: ISO/IEC 14496-12:2020 <https://www.iso.org/standard/74428.html>
+  // [MIAF]: ISO/IEC 23000-22:2019 <https://www.iso.org/standard/74417.html>
+  // [MIAF DAMD2]: ISO/IEC 23000-22:2019/FDAmd 2
+  // <https://www.iso.org/standard/81634.html>
+  // [ITU-T H.273]: Rec. ITU-T H.273 (12/2016)
+  // <https://www.itu.int/rec/T-REC-H.273-201612-I/en>
+  void SetCicpValues(
+      const Mp4parseNclxColourInformation* aNclx,
+      const gfx::CICP::ColourPrimaries aAv1ColourPrimaries,
+      const gfx::CICP::TransferCharacteristics aAv1TransferCharacteristics,
+      const gfx::CICP::MatrixCoefficients aAv1MatrixCoefficients);
+};
+
+// An interface to do decode and get the decoded data. Concrete
+// implementations wrap a specific AV1 codec (dav1d or libaom).
+class AVIFDecoderInterface {
+ public:
+  using Dav1dResult = nsAVIFDecoder::Dav1dResult;
+  using NonAOMCodecError = nsAVIFDecoder::NonAOMCodecError;
+  using AOMResult = nsAVIFDecoder::AOMResult;
+  using NonDecoderResult = nsAVIFDecoder::NonDecoderResult;
+  using DecodeResult = nsAVIFDecoder::DecodeResult;
+
+  virtual ~AVIFDecoderInterface() = default;
+
+  // Set the mDecodedData if Decode() succeeds
+  virtual DecodeResult Decode(bool aShouldSendTelemetry,
+                              const Mp4parseAvifInfo& aAVIFInfo,
+                              const AVIFImage& aSamples) = 0;
+  // Must be called only once after Decode() succeeds; transfers ownership of
+  // the decoded data to the caller.
+  UniquePtr<AVIFDecodedData> GetDecodedData() {
+    MOZ_ASSERT(mDecodedData);
+    return std::move(mDecodedData);
+  }
+
+ protected:
+  explicit AVIFDecoderInterface() = default;
+
+  // Convenience forwarder to nsAVIFDecoder's success predicate.
+  inline static bool IsDecodeSuccess(const DecodeResult& aResult) {
+    return nsAVIFDecoder::IsDecodeSuccess(aResult);
+  }
+
+  // The mDecodedData is valid after Decode() succeeds
+  UniquePtr<AVIFDecodedData> mDecodedData;
+};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsAVIFDecoder_h
diff --git a/image/decoders/nsBMPDecoder.cpp b/image/decoders/nsBMPDecoder.cpp
new file mode 100644
index 0000000000..da971e054f
--- /dev/null
+++ b/image/decoders/nsBMPDecoder.cpp
@@ -0,0 +1,1275 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This is a cross-platform BMP Decoder, which should work everywhere,
+// including big-endian machines like the PowerPC.
+//
+// BMP is a format that has been extended multiple times. To understand the
+// decoder you need to understand this history. The summary of the history
+// below was determined from the following documents.
+//
+// - http://www.fileformat.info/format/bmp/egff.htm
+// - http://www.fileformat.info/format/os2bmp/egff.htm
+// - http://fileformats.archiveteam.org/wiki/BMP
+// - http://fileformats.archiveteam.org/wiki/OS/2_BMP
+// - https://en.wikipedia.org/wiki/BMP_file_format
+// - https://upload.wikimedia.org/wikipedia/commons/c/c4/BMPfileFormat.png
+//
+// WINDOWS VERSIONS OF THE BMP FORMAT
+// ----------------------------------
+// WinBMPv1.
+// - This version is no longer used and can be ignored.
+//
+// WinBMPv2.
+// - First is a 14 byte file header that includes: the magic number ("BM"),
+// file size, and offset to the pixel data (|mDataOffset|).
+// - Next is a 12 byte info header which includes: the info header size
+// (mBIHSize), width, height, number of color planes, and bits-per-pixel
+// (|mBpp|) which must be 1, 4, 8 or 24.
+// - Next is the semi-optional color table, which has length 2^|mBpp| and has 3
+// bytes per value (BGR). The color table is required if |mBpp| is 1, 4, or 8.
+// - Next is an optional gap.
+// - Next is the pixel data, which is pointed to by |mDataOffset|.
+//
+// WinBMPv3. This is the most widely used version.
+// - It changed the info header to 40 bytes by taking the WinBMPv2 info
+// header, enlarging its width and height fields, and adding more fields
+// including: a compression type (|mCompression|) and number of colors
+// (|mNumColors|).
+// - The semi-optional color table is now 4 bytes per value (BGR0), and its
+// length is |mNumColors|, or 2^|mBpp| if |mNumColors| is zero.
+// - |mCompression| can be RGB (i.e. no compression), RLE4 (if |mBpp|==4) or
+// RLE8 (if |mBpp|==8) values.
+//
+// WinBMPv3-NT. A variant of WinBMPv3.
+// - It did not change the info header layout from WinBMPv3.
+// - |mBpp| can now be 16 or 32, in which case |mCompression| can be RGB or the
+// new BITFIELDS value; in the latter case an additional 12 bytes of color
+// bitfields follow the info header.
+//
+// WinBMPv4.
+// - It extended the info header to 108 bytes, including the 12 bytes of color
+// mask data from WinBMPv3-NT, plus alpha mask data, and also color-space and
+// gamma correction fields.
+//
+// WinBMPv5.
+// - It extended the info header to 124 bytes, adding color profile data.
+// - It also added an optional color profile table after the pixel data (and
+// another optional gap).
+//
+// WinBMPv3-ICO. This is a variant of WinBMPv3.
+// - It's the BMP format used for BMP images within ICO files.
+// - The only difference with WinBMPv3 is that if an image is 32bpp and has no
+// compression, then instead of treating the pixel data as 0RGB it is treated
+// as ARGB, but only if one or more of the A values are non-zero.
+//
+// Clipboard variants.
+// - It's the BMP format used for BMP images captured from the clipboard.
+// - It is missing the file header, containing the BM signature and the data
+// offset. Instead the data begins after the header.
+// - If it uses BITFIELDS compression, then there is always an additional 12
+// bytes of data after the header that must be read. In WinBMPv4+, the masks
+// are supposed to be included in the header size, which are the values we use
+// for decoding purposes, but there is additional three masks following the
+// header which must be skipped to get to the pixel data.
+//
+// OS/2 VERSIONS OF THE BMP FORMAT
+// -------------------------------
+// OS2-BMPv1.
+// - Almost identical to WinBMPv2; the differences are basically ignorable.
+//
+// OS2-BMPv2.
+// - Similar to WinBMPv3.
+// - The info header is 64 bytes but can be reduced to as little as 16; any
+// omitted fields are treated as zero. The first 40 bytes of these fields are
+// nearly identical to the WinBMPv3 info header; the remaining 24 bytes are
+// different.
+// - Also adds compression types "Huffman 1D" and "RLE24", which we don't
+// support.
+// - We treat OS2-BMPv2 files as if they are WinBMPv3 (i.e. ignore the extra 24
+// bytes in the info header), which in practice is good enough.
+
+#include "ImageLogging.h"
+#include "nsBMPDecoder.h"
+
+#include <stdlib.h>
+
+#include "mozilla/Attributes.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/Likely.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+#include "RasterImage.h"
+#include "SurfacePipeFactory.h"
+#include "gfxPlatform.h"
+#include <algorithm>
+
+using namespace mozilla::gfx;
+
+namespace mozilla {
+namespace image {
+namespace bmp {
+
+// Compression types from the BMP info header's biCompression field.
+struct Compression {
+  enum { RGB = 0, RLE8 = 1, RLE4 = 2, BITFIELDS = 3 };
+};
+
+// RLE escape codes and constants.
+struct RLE {
+  enum {
+    ESCAPE = 0,        // a count of 0 introduces an escape sequence
+    ESCAPE_EOL = 0,    // escape: end of line
+    ESCAPE_EOF = 1,    // escape: end of file
+    ESCAPE_DELTA = 2,  // escape: move the current position
+
+    SEGMENT_LENGTH = 2,  // bytes per RLE segment (count, value)
+    DELTA_LENGTH = 2     // bytes per delta payload (x, y)
+  };
+};
+
+} // namespace bmp
+
+using namespace bmp;
+
+// Converts an unsigned 2.30 fixed-point value to a double.
+static double FixedPoint2Dot30_To_Double(uint32_t aFixed) {
+  constexpr double kScale = 1.0 / 1073741824.0;  // 2^-30
+  return double(aFixed) * kScale;
+}
+
+// Converts an unsigned 16.16 fixed-point value to a float.
+// The multiply is done in double precision and then narrowed explicitly;
+// the implicit double->float conversion in the original triggered
+// -Wimplicit-float-conversion-style warnings without changing the value.
+static float FixedPoint16Dot16_To_Float(uint32_t aFixed) {
+  constexpr double factor = 1.0 / 65536.0;  // 2^-16
+  return static_cast<float>(double(aFixed) * factor);
+}
+
+// Converts a BMP CalRgbEndpoint (2.30 fixed-point chromaticity plus 16.16
+// fixed-point gamma) into a qcms_CIE_xyY value; returns the gamma as float.
+// NOTE(review): the name transposes "Rgb" as "Rbg", and mZ is stored into the
+// xyY 'Y' member — looks intentional given the field layout, but worth
+// confirming against the qcms API.
+static float CalRbgEndpointToQcms(const CalRgbEndpoint& aIn,
+                                  qcms_CIE_xyY& aOut) {
+  aOut.x = FixedPoint2Dot30_To_Double(aIn.mX);
+  aOut.y = FixedPoint2Dot30_To_Double(aIn.mY);
+  aOut.Y = FixedPoint2Dot30_To_Double(aIn.mZ);
+  return FixedPoint16Dot16_To_Float(aIn.mGamma);
+}
+
+// Reads one CalRGB endpoint (X, Y, Z: 12 little-endian bytes starting at
+// aEndpointOffset) and its gamma (4 bytes at aGammaOffset) out of raw header
+// data into aOut.
+static void ReadCalRgbEndpoint(const char* aData, uint32_t aEndpointOffset,
+                               uint32_t aGammaOffset, CalRgbEndpoint& aOut) {
+  const char* endpoint = aData + aEndpointOffset;
+  aOut.mX = LittleEndian::readUint32(endpoint);
+  aOut.mY = LittleEndian::readUint32(endpoint + 4);
+  aOut.mZ = LittleEndian::readUint32(endpoint + 8);
+  aOut.mGamma = LittleEndian::readUint32(aData + aGammaOffset);
+}
+
+/// Sets the pixel data in aDecoded to the given values.
+/// @param aDecoded pointer to pixel to be set, will be incremented to point to
+/// the next pixel.
+/// @param aRed/aGreen/aBlue the color components.
+/// @param aAlpha the alpha component; defaults to fully opaque.
+static void SetPixel(uint32_t*& aDecoded, uint8_t aRed, uint8_t aGreen,
+                     uint8_t aBlue, uint8_t aAlpha = 0xFF) {
+  *aDecoded++ = gfxPackedPixelNoPreMultiply(aAlpha, aRed, aGreen, aBlue);
+}
+
+/// Sets a pixel by looking up the color table entry at index |idx|.
+static void SetPixel(uint32_t*& aDecoded, uint8_t idx,
+                     const UniquePtr<ColorTableEntry[]>& aColors) {
+  SetPixel(aDecoded, aColors[idx].mRed, aColors[idx].mGreen,
+           aColors[idx].mBlue);
+}
+
+/// Sets two (or one if aCount = 1) pixels from a packed 4bpp byte; the high
+/// nibble is the first (left) pixel, the low nibble the second.
+/// @param aDecoded where the data is stored. Will be moved 4 resp 8 bytes
+/// depending on whether one or two pixels are written.
+/// @param aData The values for the two pixels
+/// @param aCount Current count. Is decremented by one or two.
+static void Set4BitPixel(uint32_t*& aDecoded, uint8_t aData, uint32_t& aCount,
+                         const UniquePtr<ColorTableEntry[]>& aColors) {
+  uint8_t idx = aData >> 4;
+  SetPixel(aDecoded, idx, aColors);
+  if (--aCount > 0) {
+    idx = aData & 0xF;
+    SetPixel(aDecoded, idx, aColors);
+    --aCount;
+  }
+}
+
+// Log module for MOZ_LOG output from this decoder ("BMPDecoder").
+static mozilla::LazyLogModule sBMPLog("BMPDecoder");
+
+// The length of the mBIHSize field in the info header.
+static const uint32_t BIHSIZE_FIELD_LENGTH = 4;
+
+// Shared delegated-to constructor: seeds the lexer with the initial state and
+// the number of bytes that state needs, and zeroes all decoding state.
+nsBMPDecoder::nsBMPDecoder(RasterImage* aImage, State aState, size_t aLength,
+                           bool aForClipboard)
+    : Decoder(aImage),
+      mLexer(Transition::To(aState, aLength), Transition::TerminateSuccess()),
+      mIsWithinICO(false),
+      mIsForClipboard(aForClipboard),
+      mMayHaveTransparency(false),
+      mDoesHaveTransparency(false),
+      mNumColors(0),
+      mColors(nullptr),
+      mBytesPerColor(0),
+      mPreGapLength(0),
+      mPixelRowSize(0),
+      mCurrentRow(0),
+      mCurrentPos(0),
+      mAbsoluteModeNumPixels(0) {}
+
+// Constructor for normal BMP files or from the clipboard. Clipboard BMPs
+// have no "BM" file header, so lexing starts at the info-header size field.
+nsBMPDecoder::nsBMPDecoder(RasterImage* aImage, bool aForClipboard)
+    : nsBMPDecoder(aImage,
+                   aForClipboard ? State::INFO_HEADER_SIZE : State::FILE_HEADER,
+                   aForClipboard ? BIHSIZE_FIELD_LENGTH : FILE_HEADER_LENGTH,
+                   aForClipboard) {}
+
+// Constructor used for WinBMPv3-ICO files, which lack a file header.
+// (See the WinBMPv3-ICO notes in the file-level comment above.)
+nsBMPDecoder::nsBMPDecoder(RasterImage* aImage, uint32_t aDataOffset)
+    : nsBMPDecoder(aImage, State::INFO_HEADER_SIZE, BIHSIZE_FIELD_LENGTH,
+                   /* aForClipboard */ false) {
+  SetIsWithinICO();
+
+  // Even though the file header isn't present in this case, the dataOffset
+  // field is set as if it is, and so we must increment mPreGapLength
+  // accordingly.
+  mPreGapLength += FILE_HEADER_LENGTH;
+
+  // This is the one piece of data we normally get from a BMP file header, so
+  // it must be provided via an argument.
+  mH.mDataOffset = aDataOffset;
+}
+
+nsBMPDecoder::~nsBMPDecoder() {}
+
+// Obtains the size of the compressed image resource. For uncompressed (RGB)
+// images mImageSize may legitimately be zero, so the size is computed from
+// the row size and height instead.
+int32_t nsBMPDecoder::GetCompressedImageSize() const {
+  // In the RGB case mImageSize might not be set, so compute it manually.
+  MOZ_ASSERT(mPixelRowSize != 0);
+  return mH.mCompression == Compression::RGB ? mPixelRowSize * AbsoluteHeight()
+                                             : mH.mImageSize;
+}
+
+// Called before FinishInternal(): fails the decode if we were supposed to
+// produce pixels but never allocated an output surface.
+nsresult nsBMPDecoder::BeforeFinishInternal() {
+  if (IsMetadataDecode() || mImageData) {
+    return NS_OK;
+  }
+  return NS_ERROR_FAILURE;  // No image; something went wrong.
+}
+
+// Finalizes the decode: pads out any truncated rows with black, then posts
+// the frame-stop (with the computed opacity) and decode-done notifications.
+nsresult nsBMPDecoder::FinishInternal() {
+  // We shouldn't be called in error cases.
+  MOZ_ASSERT(!HasError(), "Can't call FinishInternal on error!");
+
+  // We should never make multiple frames.
+  MOZ_ASSERT(GetFrameCount() <= 1, "Multiple BMP frames?");
+
+  // Send notifications if appropriate.
+  if (!IsMetadataDecode() && HasSize()) {
+    // We should have image data.
+    MOZ_ASSERT(mImageData);
+
+    // If it was truncated, fill in the missing pixels as black.
+    while (mCurrentRow > 0) {
+      uint32_t* dst = RowBuffer();
+      while (mCurrentPos < mH.mWidth) {
+        SetPixel(dst, 0, 0, 0);
+        mCurrentPos++;
+      }
+      mCurrentPos = 0;
+      FinishRow();
+    }
+
+    MOZ_ASSERT_IF(mDoesHaveTransparency, mMayHaveTransparency);
+
+    // We have transparency if we either detected some in the image itself
+    // (i.e., |mDoesHaveTransparency| is true) or we're in an ICO, which could
+    // mean we have an AND mask that provides transparency (i.e., |mIsWithinICO|
+    // is true).
+    // XXX(seth): We can tell when we create the decoder if the AND mask is
+    // present, so we could be more precise about this.
+    const Opacity opacity = mDoesHaveTransparency || mIsWithinICO
+                                ? Opacity::SOME_TRANSPARENCY
+                                : Opacity::FULLY_OPAQUE;
+
+    PostFrameStop(opacity);
+    PostDecodeDone();
+  }
+
+  return NS_OK;
+}
+
+// ----------------------------------------
+// Actual Data Processing
+// ----------------------------------------
+
+// Computes mRightShift (position of the lowest set bit) and mBitWidth (length
+// of the lowest run of 1s) for the given bitfield mask.
+// Fix: the shifts now use an unsigned operand — the original `1 << i` is
+// undefined behavior when i == 31 (signed integer overflow).
+void BitFields::Value::Set(uint32_t aMask) {
+  mMask = aMask;
+
+  // Handle this exceptional case first. The chosen values don't matter
+  // (because a mask of zero will always give a value of zero) except that
+  // mBitWidth:
+  // - shouldn't be zero, because that would cause an infinite loop in Get();
+  // - shouldn't be 5 or 8, because that could cause a false positive match in
+  //   IsR5G5B5() or IsR8G8B8().
+  if (mMask == 0x0) {
+    mRightShift = 0;
+    mBitWidth = 1;
+    return;
+  }
+
+  // Find the rightmost 1.
+  uint8_t i;
+  for (i = 0; i < 32; i++) {
+    if (mMask & (uint32_t(1) << i)) {
+      break;
+    }
+  }
+  mRightShift = i;
+
+  // Now find the leftmost 1 in the same run of 1s. (If there are multiple runs
+  // of 1s -- which isn't valid -- we'll behave as if only the lowest run was
+  // present, which seems reasonable.)
+  for (i = i + 1; i < 32; i++) {
+    if (!(mMask & (uint32_t(1) << i))) {
+      break;
+    }
+  }
+  mBitWidth = i - mRightShift;
+}
+
+// Extracts this field from aValue and upscales it to 8 bits by replicating
+// the field's bits (see the table below for how each width maps).
+MOZ_ALWAYS_INLINE uint8_t BitFields::Value::Get(uint32_t aValue) const {
+  // Extract the unscaled value.
+  uint32_t v = (aValue & mMask) >> mRightShift;
+
+  // Idea: to upscale v precisely we need to duplicate its bits, possibly
+  // repeatedly, possibly partially in the last case, from bit 7 down to bit 0
+  // in v2. For example:
+  //
+  // - mBitWidth=1: v2 = v<<7 | v<<6 | ... | v<<1 | v>>0 k -> kkkkkkkk
+  // - mBitWidth=2: v2 = v<<6 | v<<4 | v<<2 | v>>0 jk -> jkjkjkjk
+  // - mBitWidth=3: v2 = v<<5 | v<<2 | v>>1 ijk -> ijkijkij
+  // - mBitWidth=4: v2 = v<<4 | v>>0 hijk -> hijkhijk
+  // - mBitWidth=5: v2 = v<<3 | v>>2 ghijk -> ghijkghi
+  // - mBitWidth=6: v2 = v<<2 | v>>4 fghijk -> fghijkfg
+  // - mBitWidth=7: v2 = v<<1 | v>>6 efghijk -> efghijke
+  // - mBitWidth=8: v2 = v>>0 defghijk -> defghijk
+  // - mBitWidth=9: v2 = v>>1 cdefghijk -> cdefghij
+  // - mBitWidth=10: v2 = v>>2 bcdefghijk -> bcdefghi
+  // - mBitWidth=11: v2 = v>>3 abcdefghijk -> abcdefgh
+  // - etc.
+  //
+  uint8_t v2 = 0;
+  int32_t i;  // must be a signed integer
+  for (i = 8 - mBitWidth; i > 0; i -= mBitWidth) {
+    v2 |= v << uint32_t(i);
+  }
+  // Final (possibly partial) copy: when i <= 0 the remaining bits come from
+  // shifting v right by the overshoot amount.
+  v2 |= v >> uint32_t(-i);
+  return v2;
+}
+
+// Returns the alpha component upscaled to 8 bits, or fully opaque (0xff) when
+// no alpha mask exists. Sets aHasAlphaOut to true only when alpha is present.
+MOZ_ALWAYS_INLINE uint8_t BitFields::Value::GetAlpha(uint32_t aValue,
+                                                     bool& aHasAlphaOut) const {
+  if (mMask == 0x0) {
+    return 0xff;
+  }
+  aHasAlphaOut = true;
+  return Get(aValue);
+}
+
+// Fast path for 5-bit fields: upscale to 8 bits by bit replication
+// (ghijk -> ghijkghi).
+MOZ_ALWAYS_INLINE uint8_t BitFields::Value::Get5(uint32_t aValue) const {
+  MOZ_ASSERT(mBitWidth == 5);
+  const uint32_t field = (aValue & mMask) >> mRightShift;
+  return (field << 3u) | (field >> 2u);
+}
+
+// Fast path for 8-bit fields: the extracted value is already full precision.
+MOZ_ALWAYS_INLINE uint8_t BitFields::Value::Get8(uint32_t aValue) const {
+  MOZ_ASSERT(mBitWidth == 8);
+  return uint8_t((aValue & mMask) >> mRightShift);
+}
+
+// Installs the standard 5-5-5 masks used by 16bpp BMPs without BITFIELDS.
+void BitFields::SetR5G5B5() {
+  mRed.Set(0x7c00);
+  mGreen.Set(0x03e0);
+  mBlue.Set(0x001f);
+}
+
+// Installs the standard 8-8-8 masks used by 32bpp BMPs without BITFIELDS.
+void BitFields::SetR8G8B8() {
+  mRed.Set(0xff0000);
+  mGreen.Set(0xff00);
+  mBlue.Set(0x00ff);
+}
+
+// True if the masks are plain 5-5-5 color with no alpha (enables Get5 path).
+bool BitFields::IsR5G5B5() const {
+  return mRed.mBitWidth == 5 && mGreen.mBitWidth == 5 && mBlue.mBitWidth == 5 &&
+         mAlpha.mMask == 0x0;
+}
+
+// True if the masks are plain 8-8-8 color with no alpha (enables Get8 path).
+bool BitFields::IsR8G8B8() const {
+  return mRed.mBitWidth == 8 && mGreen.mBitWidth == 8 && mBlue.mBitWidth == 8 &&
+         mAlpha.mMask == 0x0;
+}
+
+uint32_t* nsBMPDecoder::RowBuffer() { return mRowBuffer.get() + mCurrentPos; }
+
+// Fills the rest of the current row: transparent (0x00 bytes) when the image
+// may have transparency, opaque white (0xFF bytes) otherwise.
+void nsBMPDecoder::ClearRowBufferRemainder() {
+  int32_t len = mH.mWidth - mCurrentPos;
+  memset(RowBuffer(), mMayHaveTransparency ? 0 : 0xFF, len * sizeof(uint32_t));
+}
+
+// Writes the completed row buffer into the surface pipe, posts any resulting
+// invalidation rect, and decrements the remaining-row counter.
+void nsBMPDecoder::FinishRow() {
+  mPipe.WriteBuffer(mRowBuffer.get());
+  Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+  if (invalidRect) {
+    PostInvalidation(invalidRect->mInputSpaceRect,
+                     Some(invalidRect->mOutputSpaceRect));
+  }
+  mCurrentRow--;
+}
+
+// Lexer driver: dispatches each state of the BMP state machine to its
+// handler. The lexer supplies exactly the number of bytes each state asked
+// for via Transition::To().
+LexerResult nsBMPDecoder::DoDecode(SourceBufferIterator& aIterator,
+                                   IResumable* aOnResume) {
+  MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+  return mLexer.Lex(
+      aIterator, aOnResume,
+      [=](State aState, const char* aData, size_t aLength) {
+        switch (aState) {
+          case State::FILE_HEADER:
+            return ReadFileHeader(aData, aLength);
+          case State::INFO_HEADER_SIZE:
+            return ReadInfoHeaderSize(aData, aLength);
+          case State::INFO_HEADER_REST:
+            return ReadInfoHeaderRest(aData, aLength);
+          case State::BITFIELDS:
+            return ReadBitfields(aData, aLength);
+          case State::SKIP_TO_COLOR_PROFILE:
+            return Transition::ContinueUnbuffered(State::SKIP_TO_COLOR_PROFILE);
+          case State::FOUND_COLOR_PROFILE:
+            return Transition::To(State::COLOR_PROFILE,
+                                  mH.mColorSpace.mProfile.mLength);
+          case State::COLOR_PROFILE:
+            return ReadColorProfile(aData, aLength);
+          case State::ALLOCATE_SURFACE:
+            return AllocateSurface();
+          case State::COLOR_TABLE:
+            return ReadColorTable(aData, aLength);
+          case State::GAP:
+            return SkipGap();
+          case State::AFTER_GAP:
+            return AfterGap();
+          case State::PIXEL_ROW:
+            return ReadPixelRow(aData);
+          case State::RLE_SEGMENT:
+            return ReadRLESegment(aData);
+          case State::RLE_DELTA:
+            return ReadRLEDelta(aData);
+          case State::RLE_ABSOLUTE:
+            return ReadRLEAbsolute(aData, aLength);
+          default:
+            MOZ_CRASH("Unknown State");
+        }
+      });
+}
+
+// Parses the BMP file header: validates the "BM" signature and records the
+// offset to the pixel data.
+LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadFileHeader(
+    const char* aData, size_t aLength) {
+  mPreGapLength += aLength;
+
+  bool signatureOk = aData[0] == 'B' && aData[1] == 'M';
+  if (!signatureOk) {
+    return Transition::TerminateFailure();
+  }
+
+  // We ignore the filesize (aData + 2) and reserved (aData + 6) fields.
+
+  mH.mDataOffset = LittleEndian::readUint32(aData + 10);
+
+  return Transition::To(State::INFO_HEADER_SIZE, BIHSIZE_FIELD_LENGTH);
+}
+
+// We read the info header in two steps: (a) read the mBIHSize field to
+// determine how long the header is; (b) read the rest of the header.
+LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadInfoHeaderSize(
+    const char* aData, size_t aLength) {
+  mH.mBIHSize = LittleEndian::readUint32(aData);
+
+  // Data offset can be wrong so fix it using the BIH size. (Clipboard BMPs
+  // have no real data-offset field, so they are exempt.)
+  if (!mIsForClipboard && mH.mDataOffset < mPreGapLength + mH.mBIHSize) {
+    mH.mDataOffset = mPreGapLength + mH.mBIHSize;
+  }
+
+  mPreGapLength += aLength;
+
+  // Only the known Windows and OS/2 header sizes are accepted.
+  bool bihSizeOk = mH.mBIHSize == InfoHeaderLength::WIN_V2 ||
+                   mH.mBIHSize == InfoHeaderLength::WIN_V3 ||
+                   mH.mBIHSize == InfoHeaderLength::WIN_V4 ||
+                   mH.mBIHSize == InfoHeaderLength::WIN_V5 ||
+                   (mH.mBIHSize >= InfoHeaderLength::OS2_V2_MIN &&
+                    mH.mBIHSize <= InfoHeaderLength::OS2_V2_MAX);
+  if (!bihSizeOk) {
+    return Transition::TerminateFailure();
+  }
+  // ICO BMPs must have a WinBMPv3 header. nsICODecoder should have already
+  // terminated decoding if this isn't the case.
+  MOZ_ASSERT_IF(mIsWithinICO, mH.mBIHSize == InfoHeaderLength::WIN_V3);
+
+  return Transition::To(State::INFO_HEADER_REST,
+                        mH.mBIHSize - BIHSIZE_FIELD_LENGTH);
+}
+
// Parses the remainder of the info header (everything after the 4-byte size
// field), validates the dimensions/bpp/compression combination, and decides
// whether explicit bitfields still need to be read. Note that aData points
// just past the mBIHSize field, so every offset below is 4 less than the
// field's offset from the start of the header.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadInfoHeaderRest(
    const char* aData, size_t aLength) {
  mPreGapLength += aLength;

  // |mWidth| and |mHeight| may be signed (Windows) or unsigned (OS/2). We just
  // read as unsigned because in practice that's good enough.
  if (mH.mBIHSize == InfoHeaderLength::WIN_V2) {
    mH.mWidth = LittleEndian::readUint16(aData + 0);
    mH.mHeight = LittleEndian::readUint16(aData + 2);
    // We ignore the planes (aData + 4) field; it should always be 1.
    mH.mBpp = LittleEndian::readUint16(aData + 6);
  } else {
    mH.mWidth = LittleEndian::readUint32(aData + 0);
    mH.mHeight = LittleEndian::readUint32(aData + 4);
    // We ignore the planes (aData + 8) field; it should always be 1.
    mH.mBpp = LittleEndian::readUint16(aData + 10);

    // For OS2-BMPv2 the info header may be as little as 16 bytes, so be
    // careful for these fields.
    mH.mCompression = aLength >= 16 ? LittleEndian::readUint32(aData + 12) : 0;
    mH.mImageSize = aLength >= 20 ? LittleEndian::readUint32(aData + 16) : 0;
    // We ignore the xppm (aData + 20) and yppm (aData + 24) fields.
    mH.mNumColors = aLength >= 32 ? LittleEndian::readUint32(aData + 28) : 0;
    // We ignore the important_colors (aData + 36) field.

    // Read color management properties we may need later. Headers too short
    // to carry them default to sRGB / perceptual intent.
    mH.mCsType =
        aLength >= 56
            ? static_cast<InfoColorSpace>(LittleEndian::readUint32(aData + 52))
            : InfoColorSpace::SRGB;
    mH.mCsIntent = aLength >= 108 ? static_cast<InfoColorIntent>(
                                        LittleEndian::readUint32(aData + 104))
                                  : InfoColorIntent::IMAGES;

    switch (mH.mCsType) {
      case InfoColorSpace::CALIBRATED_RGB:
        // Endpoint XYZ coordinates and per-channel gammas; fall back to sRGB
        // if the header is too short to contain them.
        if (aLength >= 104) {
          ReadCalRgbEndpoint(aData, 56, 92, mH.mColorSpace.mCalibrated.mRed);
          ReadCalRgbEndpoint(aData, 68, 96, mH.mColorSpace.mCalibrated.mGreen);
          ReadCalRgbEndpoint(aData, 80, 100, mH.mColorSpace.mCalibrated.mBlue);
        } else {
          mH.mCsType = InfoColorSpace::SRGB;
        }
        break;
      case InfoColorSpace::EMBEDDED:
        // Offset/length of an embedded ICC profile; fall back to sRGB if the
        // header is too short to contain them.
        if (aLength >= 116) {
          mH.mColorSpace.mProfile.mOffset =
              LittleEndian::readUint32(aData + 108);
          mH.mColorSpace.mProfile.mLength =
              LittleEndian::readUint32(aData + 112);
        } else {
          mH.mCsType = InfoColorSpace::SRGB;
        }
        break;
      case InfoColorSpace::LINKED:
      case InfoColorSpace::SRGB:
      case InfoColorSpace::WIN:
      default:
        // Nothing to be done at this time.
        break;
    }

    // For WinBMPv4, WinBMPv5 and (possibly) OS2-BMPv2 there are additional
    // fields in the info header which we ignore, with the possible exception
    // of the color bitfields (see below).
  }

  // The height for BMPs embedded inside an ICO includes spaces for the AND
  // mask even if it is not present, thus we need to adjust for that here.
  if (mIsWithinICO) {
    // XXX(seth): Should we really be writing the absolute value from
    // the BIH below? Seems like this could be problematic for inverted BMPs.
    mH.mHeight = abs(mH.mHeight) / 2;
  }

  // Run with MOZ_LOG=BMPDecoder:5 set to see this output.
  MOZ_LOG(sBMPLog, LogLevel::Debug,
          ("BMP: bihsize=%u, %d x %d, bpp=%u, compression=%u, colors=%u, "
           "data-offset=%u\n",
           mH.mBIHSize, mH.mWidth, mH.mHeight, uint32_t(mH.mBpp),
           mH.mCompression, mH.mNumColors, mH.mDataOffset));

  // BMPs with negative width are invalid. Also, reject extremely wide images
  // to keep the math sane. And reject INT_MIN as a height because you can't
  // get its absolute value (because -INT_MIN is one more than INT_MAX).
  const int32_t k64KWidth = 0x0000FFFF;
  bool sizeOk =
      0 <= mH.mWidth && mH.mWidth <= k64KWidth && mH.mHeight != INT_MIN;
  if (!sizeOk) {
    return Transition::TerminateFailure();
  }

  // Check mBpp and mCompression. Only certain combinations are valid.
  bool bppCompressionOk =
      (mH.mCompression == Compression::RGB &&
       (mH.mBpp == 1 || mH.mBpp == 4 || mH.mBpp == 8 || mH.mBpp == 16 ||
        mH.mBpp == 24 || mH.mBpp == 32)) ||
      (mH.mCompression == Compression::RLE8 && mH.mBpp == 8) ||
      (mH.mCompression == Compression::RLE4 && mH.mBpp == 4) ||
      (mH.mCompression == Compression::BITFIELDS &&
       // For BITFIELDS compression we require an exact match for one of the
       // WinBMP BIH sizes; this clearly isn't an OS2 BMP.
       (mH.mBIHSize == InfoHeaderLength::WIN_V3 ||
        mH.mBIHSize == InfoHeaderLength::WIN_V4 ||
        mH.mBIHSize == InfoHeaderLength::WIN_V5) &&
       (mH.mBpp == 16 || mH.mBpp == 32));
  if (!bppCompressionOk) {
    return Transition::TerminateFailure();
  }

  // Initialize our current row to the top of the image.
  mCurrentRow = AbsoluteHeight();

  // Round it up to the nearest byte count, then pad to 4-byte boundary.
  // Compute this even for a metadata decode because GetCompressedImageSize()
  // relies on it.
  mPixelRowSize = (mH.mBpp * mH.mWidth + 7) / 8;
  uint32_t surplus = mPixelRowSize % 4;
  if (surplus != 0) {
    mPixelRowSize += 4 - surplus;
  }

  size_t bitFieldsLengthStillToRead = 0;
  if (mH.mCompression == Compression::BITFIELDS) {
    // Need to read bitfields.
    if (mH.mBIHSize >= InfoHeaderLength::WIN_V4) {
      // Bitfields are present in the info header, so we can read them
      // immediately.
      mBitFields.ReadFromHeader(aData + 36, /* aReadAlpha = */ true);

      // If this came from the clipboard, then we know that even if the header
      // explicitly includes the bitfield masks, we need to add an additional
      // offset for the start of the RGB data.
      if (mIsForClipboard) {
        mH.mDataOffset += BitFields::LENGTH;
      }
    } else {
      // Bitfields are present after the info header, so we will read them in
      // ReadBitfields().
      bitFieldsLengthStillToRead = BitFields::LENGTH;
    }
  } else if (mH.mBpp == 16) {
    // No bitfields specified; use the default 5-5-5 values.
    mBitFields.SetR5G5B5();
  } else if (mH.mBpp == 32) {
    // No bitfields specified; use the default 8-8-8 values.
    mBitFields.SetR8G8B8();
  }

  return Transition::To(State::BITFIELDS, bitFieldsLengthStillToRead);
}
+
+void BitFields::ReadFromHeader(const char* aData, bool aReadAlpha) {
+ mRed.Set(LittleEndian::readUint32(aData + 0));
+ mGreen.Set(LittleEndian::readUint32(aData + 4));
+ mBlue.Set(LittleEndian::readUint32(aData + 8));
+ if (aReadAlpha) {
+ mAlpha.Set(LittleEndian::readUint32(aData + 12));
+ }
+}
+
// Reads explicit bitfield masks if any remain to be read, posts transparency
// and size metadata, sets up the color table allocation, and (when color
// management is enabled) selects the input color profile.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadBitfields(
    const char* aData, size_t aLength) {
  mPreGapLength += aLength;

  // If aLength is zero there are no bitfields to read, or we already read them
  // in ReadInfoHeaderRest().
  if (aLength != 0) {
    mBitFields.ReadFromHeader(aData, /* aReadAlpha = */ false);
  }

  // Note that RLE-encoded BMPs might be transparent because the 'delta' mode
  // can skip pixels and cause implicit transparency.
  mMayHaveTransparency = mIsWithinICO || mH.mCompression == Compression::RLE8 ||
                         mH.mCompression == Compression::RLE4 ||
                         (mH.mCompression == Compression::BITFIELDS &&
                          mBitFields.mAlpha.IsPresent());
  if (mMayHaveTransparency) {
    PostHasTransparency();
  }

  // Post our size to the superclass.
  PostSize(mH.mWidth, AbsoluteHeight());
  if (HasError()) {
    return Transition::TerminateFailure();
  }

  // We've now read all the headers. If we're doing a metadata decode, we're
  // done.
  if (IsMetadataDecode()) {
    return Transition::TerminateSuccess();
  }

  // Set up the color table, if present; it'll be filled in by ReadColorTable().
  if (mH.mBpp <= 8) {
    // At most 2^bpp entries; the header may declare fewer used colors.
    mNumColors = 1 << mH.mBpp;
    if (0 < mH.mNumColors && mH.mNumColors < mNumColors) {
      mNumColors = mH.mNumColors;
    }

    // Always allocate and zero 256 entries, even though mNumColors might be
    // smaller, because the file might erroneously index past mNumColors.
    mColors = MakeUniqueFallible<ColorTableEntry[]>(256);
    if (NS_WARN_IF(!mColors)) {
      return Transition::TerminateFailure();
    }
    memset(mColors.get(), 0, 256 * sizeof(ColorTableEntry));

    // OS/2 Bitmaps have no padding byte.
    mBytesPerColor = (mH.mBIHSize == InfoHeaderLength::WIN_V2) ? 3 : 4;
  }

  if (mCMSMode != CMSMode::Off) {
    switch (mH.mCsType) {
      case InfoColorSpace::EMBEDDED:
        // Embedded ICC profile: may require skipping ahead in the stream.
        return SeekColorProfile(aLength);
      case InfoColorSpace::CALIBRATED_RGB:
        // Build a profile from the endpoints/gammas read from the header.
        PrepareCalibratedColorProfile();
        break;
      case InfoColorSpace::SRGB:
      case InfoColorSpace::WIN:
        MOZ_LOG(sBMPLog, LogLevel::Debug, ("using sRGB color profile\n"));
        if (mColors) {
          // We will transform the color table instead of the output pixels.
          mTransform = GetCMSsRGBTransform(SurfaceFormat::R8G8B8);
        } else {
          mTransform = GetCMSsRGBTransform(SurfaceFormat::OS_RGBA);
        }
        break;
      case InfoColorSpace::LINKED:
      default:
        // Not supported, no color management.
        MOZ_LOG(sBMPLog, LogLevel::Debug, ("color space type not provided\n"));
        break;
    }
  }

  return Transition::To(State::ALLOCATE_SURFACE, 0);
}
+
// Builds a qcms input profile from the CALIBRATED_RGB endpoints and gammas
// read from the header, validating it and falling back to sRGB if the header
// values turn out to be garbage.
void nsBMPDecoder::PrepareCalibratedColorProfile() {
  // BMP does not define a white point. Use the same as sRGB. This matches what
  // Chrome does as well.
  qcms_CIE_xyY white_point = qcms_white_point_sRGB();

  qcms_CIE_xyYTRIPLE primaries;
  float redGamma =
      CalRbgEndpointToQcms(mH.mColorSpace.mCalibrated.mRed, primaries.red);
  float greenGamma =
      CalRbgEndpointToQcms(mH.mColorSpace.mCalibrated.mGreen, primaries.green);
  float blueGamma =
      CalRbgEndpointToQcms(mH.mColorSpace.mCalibrated.mBlue, primaries.blue);

  // Explicitly verify the profile because sometimes the values from the BMP
  // header are just garbage.
  mInProfile = qcms_profile_create_rgb_with_gamma_set(
      white_point, primaries, redGamma, greenGamma, blueGamma);
  if (mInProfile && qcms_profile_is_bogus(mInProfile)) {
    // Bad profile, just use sRGB instead. Release the profile here, so that
    // our destructor doesn't assume we are the owner for the transform.
    qcms_profile_release(mInProfile);
    mInProfile = nullptr;
  }

  if (mInProfile) {
    MOZ_LOG(sBMPLog, LogLevel::Debug, ("using calibrated RGB color profile\n"));
    PrepareColorProfileTransform();
  } else {
    MOZ_LOG(sBMPLog, LogLevel::Debug,
            ("failed to create calibrated RGB color profile, using sRGB\n"));
    if (mColors) {
      // We will transform the color table instead of the output pixels.
      mTransform = GetCMSsRGBTransform(SurfaceFormat::R8G8B8);
    } else {
      mTransform = GetCMSsRGBTransform(SurfaceFormat::OS_RGBA);
    }
  }
}
+
+void nsBMPDecoder::PrepareColorProfileTransform() {
+ if (!mInProfile || !GetCMSOutputProfile()) {
+ return;
+ }
+
+ qcms_data_type inType;
+ qcms_data_type outType;
+ if (mColors) {
+ // We will transform the color table instead of the output pixels.
+ inType = QCMS_DATA_RGB_8;
+ outType = QCMS_DATA_RGB_8;
+ } else {
+ inType = gfxPlatform::GetCMSOSRGBAType();
+ outType = inType;
+ }
+
+ qcms_intent intent;
+ switch (mH.mCsIntent) {
+ case InfoColorIntent::BUSINESS:
+ intent = QCMS_INTENT_SATURATION;
+ break;
+ case InfoColorIntent::GRAPHICS:
+ intent = QCMS_INTENT_RELATIVE_COLORIMETRIC;
+ break;
+ case InfoColorIntent::ABS_COLORIMETRIC:
+ intent = QCMS_INTENT_ABSOLUTE_COLORIMETRIC;
+ break;
+ case InfoColorIntent::IMAGES:
+ default:
+ intent = QCMS_INTENT_PERCEPTUAL;
+ break;
+ }
+
+ mTransform = qcms_transform_create(mInProfile, inType, GetCMSOutputProfile(),
+ outType, intent);
+ if (!mTransform) {
+ MOZ_LOG(sBMPLog, LogLevel::Debug,
+ ("failed to create color profile transform\n"));
+ }
+}
+
// Decides whether to skip ahead in the stream to an embedded ICC profile.
// aLength is the number of bitfields bytes just consumed by ReadBitfields().
// The profile offset appears to be relative to the start of the info header
// (hence the subtraction below) -- NOTE(review): confirm against the
// BITMAPV5HEADER spec.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::SeekColorProfile(
    size_t aLength) {
  // The offset needs to be at least after the color table. An offset inside
  // the headers/color table, or a zero-length profile, is silently ignored
  // rather than treated as an error.
  uint32_t offset = mH.mColorSpace.mProfile.mOffset;
  if (offset <= mH.mBIHSize + aLength + mNumColors * mBytesPerColor ||
      mH.mColorSpace.mProfile.mLength == 0) {
    return Transition::To(State::ALLOCATE_SURFACE, 0);
  }

  // We have already read the header and bitfields.
  offset -= mH.mBIHSize + aLength;

  // We need to skip ahead to search for the embedded color profile. We want
  // to return to this point once we read it.
  mReturnIterator = mLexer.Clone(*mIterator, SIZE_MAX);
  if (!mReturnIterator) {
    return Transition::TerminateFailure();
  }

  return Transition::ToUnbuffered(State::FOUND_COLOR_PROFILE,
                                  State::SKIP_TO_COLOR_PROFILE, offset);
}
+
+LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadColorProfile(
+ const char* aData, size_t aLength) {
+ mInProfile = qcms_profile_from_memory(aData, aLength);
+ if (mInProfile) {
+ MOZ_LOG(sBMPLog, LogLevel::Debug, ("using embedded color profile\n"));
+ PrepareColorProfileTransform();
+ }
+
+ // Jump back to where we left off.
+ mIterator = std::move(mReturnIterator);
+ return Transition::To(State::ALLOCATE_SURFACE, 0);
+}
+
// Chooses the surface format and pipe flags based on the transparency and
// orientation determined earlier, allocates the row buffer and the
// SurfacePipe, then moves on to reading the color table.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::AllocateSurface() {
  SurfaceFormat format;
  SurfacePipeFlags pipeFlags = SurfacePipeFlags();

  if (mMayHaveTransparency) {
    format = SurfaceFormat::OS_RGBA;
    if (!(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA)) {
      pipeFlags |= SurfacePipeFlags::PREMULTIPLY_ALPHA;
    }
  } else {
    format = SurfaceFormat::OS_RGBX;
  }

  if (mH.mHeight >= 0) {
    // BMPs store their rows in reverse order, so we may need to flip.
    pipeFlags |= SurfacePipeFlags::FLIP_VERTICALLY;
  }

  // One scratch row of output pixels; rows are decoded into this buffer and
  // then committed via FinishRow().
  mRowBuffer.reset(new (fallible) uint32_t[mH.mWidth]);
  if (!mRowBuffer) {
    return Transition::TerminateFailure();
  }

  // Only give the color transform to the SurfacePipe if we are not transforming
  // the color table in advance.
  qcms_transform* transform = mColors ? nullptr : mTransform;

  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
      this, Size(), OutputSize(), FullFrame(), format, format, Nothing(),
      transform, pipeFlags);
  if (!pipe) {
    return Transition::TerminateFailure();
  }

  mPipe = std::move(*pipe);
  ClearRowBufferRemainder();
  return Transition::To(State::COLOR_TABLE, mNumColors * mBytesPerColor);
}
+
// Fills in the color table (palette) entries, optionally color-manages them,
// and computes the length of the gap (possibly zero) between the color table
// and the pixel data.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadColorTable(
    const char* aData, size_t aLength) {
  MOZ_ASSERT_IF(aLength != 0, mNumColors > 0 && mColors);

  mPreGapLength += aLength;

  for (uint32_t i = 0; i < mNumColors; i++) {
    // The format is BGR or BGR0 (mBytesPerColor is 3 or 4).
    mColors[i].mBlue = uint8_t(aData[0]);
    mColors[i].mGreen = uint8_t(aData[1]);
    mColors[i].mRed = uint8_t(aData[2]);
    aData += mBytesPerColor;
  }

  // If we have a color table and a transform, we can avoid transforming each
  // pixel by doing the table in advance. We color manage every entry in the
  // table, even if it is smaller in case the BMP is malformed and overruns
  // its stated color range.
  if (mColors && mTransform) {
    qcms_transform_data(mTransform, mColors.get(), mColors.get(), 256);
  }

  // If we are decoding a BMP from the clipboard, we did not know the data
  // offset in advance. It is just defined as after the header and color table.
  if (mIsForClipboard) {
    mH.mDataOffset += mPreGapLength;
  }

  // We know how many bytes we've read so far (mPreGapLength) and we know the
  // offset of the pixel data (mH.mDataOffset), so we can determine the length
  // of the gap (possibly zero) between the color table and the pixel data.
  //
  // If the gap is negative the file must be malformed (e.g. mH.mDataOffset
  // points into the middle of the color palette instead of past the end) and
  // we give up.
  if (mPreGapLength > mH.mDataOffset) {
    return Transition::TerminateFailure();
  }

  uint32_t gapLength = mH.mDataOffset - mPreGapLength;

  return Transition::ToUnbuffered(State::AFTER_GAP, State::GAP, gapLength);
}
+
// The gap's content is irrelevant; keep consuming bytes unbuffered until the
// lexer reaches the pixel data and transitions to State::AFTER_GAP.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::SkipGap() {
  return Transition::ContinueUnbuffered(State::GAP);
}
+
+LexerTransition<nsBMPDecoder::State> nsBMPDecoder::AfterGap() {
+ // If there are no pixels we can stop.
+ //
+ // XXX: normally, if there are no pixels we will have stopped decoding before
+ // now, outside of this decoder. However, if the BMP is within an ICO file,
+ // it's possible that the ICO claimed the image had a non-zero size while the
+ // BMP claims otherwise. This test is to catch that awkward case. If we ever
+ // come up with a more general solution to this ICO-and-BMP-disagree-on-size
+ // problem, this test can be removed.
+ if (mH.mWidth == 0 || mH.mHeight == 0) {
+ return Transition::TerminateSuccess();
+ }
+
+ bool hasRLE = mH.mCompression == Compression::RLE8 ||
+ mH.mCompression == Compression::RLE4;
+ return hasRLE ? Transition::To(State::RLE_SEGMENT, RLE::SEGMENT_LENGTH)
+ : Transition::To(State::PIXEL_ROW, mPixelRowSize);
+}
+
// Decodes one complete uncompressed pixel row (of mPixelRowSize bytes) into
// the row buffer and commits it. Handles every supported bpp, including the
// special 32bpp-in-ICO case where the image may retroactively turn out to be
// ARGB rather than 0RGB.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadPixelRow(
    const char* aData) {
  MOZ_ASSERT(mCurrentRow > 0);
  MOZ_ASSERT(mCurrentPos == 0);

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aData);
  uint32_t* dst = RowBuffer();
  uint32_t lpos = mH.mWidth;  // Pixels remaining in this row.
  switch (mH.mBpp) {
    case 1:
      // Each byte holds 8 palette indices, most significant bit first.
      while (lpos > 0) {
        int8_t bit;
        uint8_t idx;
        for (bit = 7; bit >= 0 && lpos > 0; bit--) {
          idx = (*src >> bit) & 1;
          SetPixel(dst, idx, mColors);
          --lpos;
        }
        ++src;
      }
      break;

    case 4:
      // Each byte holds two palette indices (high nibble first).
      while (lpos > 0) {
        Set4BitPixel(dst, *src, lpos, mColors);
        ++src;
      }
      break;

    case 8:
      // Each byte is a palette index.
      while (lpos > 0) {
        SetPixel(dst, *src, mColors);
        --lpos;
        ++src;
      }
      break;

    case 16:
      if (mBitFields.IsR5G5B5()) {
        // Specialize this common case.
        while (lpos > 0) {
          uint16_t val = LittleEndian::readUint16(src);
          SetPixel(dst, mBitFields.mRed.Get5(val), mBitFields.mGreen.Get5(val),
                   mBitFields.mBlue.Get5(val));
          --lpos;
          src += 2;
        }
      } else {
        // General bitfields path; track whether any pixel actually carried
        // alpha so we can set mDoesHaveTransparency afterwards.
        bool anyHasAlpha = false;
        while (lpos > 0) {
          uint16_t val = LittleEndian::readUint16(src);
          SetPixel(dst, mBitFields.mRed.Get(val), mBitFields.mGreen.Get(val),
                   mBitFields.mBlue.Get(val),
                   mBitFields.mAlpha.GetAlpha(val, anyHasAlpha));
          --lpos;
          src += 2;
        }
        if (anyHasAlpha) {
          MOZ_ASSERT(mMayHaveTransparency);
          mDoesHaveTransparency = true;
        }
      }
      break;

    case 24:
      // Packed BGR triples.
      while (lpos > 0) {
        SetPixel(dst, src[2], src[1], src[0]);
        --lpos;
        src += 3;
      }
      break;

    case 32:
      if (mH.mCompression == Compression::RGB && mIsWithinICO &&
          mH.mBpp == 32) {
        // This is a special case only used for 32bpp WinBMPv3-ICO files, which
        // could be in either 0RGB or ARGB format. We start by assuming it's
        // an 0RGB image. If we hit a non-zero alpha value, then we know it's
        // actually an ARGB image, and change tack accordingly.
        // (Note: a fully-transparent ARGB image is indistinguishable from a
        // 0RGB image, and we will render such an image as a 0RGB image, i.e.
        // opaquely. This is unlikely to be a problem in practice.)
        while (lpos > 0) {
          if (!mDoesHaveTransparency && src[3] != 0) {
            // Up until now this looked like an 0RGB image, but we now know
            // it's actually an ARGB image. Which means every pixel we've seen
            // so far has been fully transparent. So we go back and redo them.

            // Tell the SurfacePipe to go back to the start.
            mPipe.ResetToFirstRow();

            // Redo the complete rows we've already done.
            MOZ_ASSERT(mCurrentPos == 0);
            int32_t currentRow = mCurrentRow;
            mCurrentRow = AbsoluteHeight();
            ClearRowBufferRemainder();
            while (mCurrentRow > currentRow) {
              FinishRow();
            }

            // Reset the row pointer back to where we started.
            dst = RowBuffer() + (mH.mWidth - lpos);

            MOZ_ASSERT(mMayHaveTransparency);
            mDoesHaveTransparency = true;
          }

          // If mDoesHaveTransparency is false, treat this as an 0RGB image.
          // Otherwise, treat this as an ARGB image.
          SetPixel(dst, src[2], src[1], src[0],
                   mDoesHaveTransparency ? src[3] : 0xff);
          src += 4;
          --lpos;
        }
      } else if (mBitFields.IsR8G8B8()) {
        // Specialize this common case.
        while (lpos > 0) {
          uint32_t val = LittleEndian::readUint32(src);
          SetPixel(dst, mBitFields.mRed.Get8(val), mBitFields.mGreen.Get8(val),
                   mBitFields.mBlue.Get8(val));
          --lpos;
          src += 4;
        }
      } else {
        // General bitfields path, as in the 16bpp case above.
        bool anyHasAlpha = false;
        while (lpos > 0) {
          uint32_t val = LittleEndian::readUint32(src);
          SetPixel(dst, mBitFields.mRed.Get(val), mBitFields.mGreen.Get(val),
                   mBitFields.mBlue.Get(val),
                   mBitFields.mAlpha.GetAlpha(val, anyHasAlpha));
          --lpos;
          src += 4;
        }
        if (anyHasAlpha) {
          MOZ_ASSERT(mMayHaveTransparency);
          mDoesHaveTransparency = true;
        }
      }
      break;

    default:
      MOZ_CRASH("Unsupported color depth; earlier check didn't catch it?");
  }

  FinishRow();
  return mCurrentRow == 0 ? Transition::TerminateSuccess()
                          : Transition::To(State::PIXEL_ROW, mPixelRowSize);
}
+
// Processes one two-byte RLE segment: either an encoded run (count + color
// index), or an escape introducing end-of-line, end-of-file, a delta, or an
// absolute-mode run (whose payload is read in ReadRLEAbsolute()).
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadRLESegment(
    const char* aData) {
  if (mCurrentRow == 0) {
    return Transition::TerminateSuccess();
  }

  uint8_t byte1 = uint8_t(aData[0]);
  uint8_t byte2 = uint8_t(aData[1]);

  if (byte1 != RLE::ESCAPE) {
    // Encoded mode consists of two bytes: byte1 specifies the number of
    // consecutive pixels to be drawn using the color index contained in
    // byte2.
    //
    // Work around bitmaps that specify too many pixels.
    uint32_t pixelsNeeded = std::min<uint32_t>(mH.mWidth - mCurrentPos, byte1);
    if (pixelsNeeded) {
      uint32_t* dst = RowBuffer();
      mCurrentPos += pixelsNeeded;
      if (mH.mCompression == Compression::RLE8) {
        do {
          SetPixel(dst, byte2, mColors);
          pixelsNeeded--;
        } while (pixelsNeeded);
      } else {
        // RLE4: byte2 holds two alternating palette indices.
        do {
          Set4BitPixel(dst, byte2, pixelsNeeded, mColors);
        } while (pixelsNeeded);
      }
    }
    return Transition::To(State::RLE_SEGMENT, RLE::SEGMENT_LENGTH);
  }

  if (byte2 == RLE::ESCAPE_EOL) {
    // End of line: commit the row (padding any untouched pixels) and start
    // the next one.
    ClearRowBufferRemainder();
    mCurrentPos = 0;
    FinishRow();
    return mCurrentRow == 0
               ? Transition::TerminateSuccess()
               : Transition::To(State::RLE_SEGMENT, RLE::SEGMENT_LENGTH);
  }

  if (byte2 == RLE::ESCAPE_EOF) {
    return Transition::TerminateSuccess();
  }

  if (byte2 == RLE::ESCAPE_DELTA) {
    return Transition::To(State::RLE_DELTA, RLE::DELTA_LENGTH);
  }

  // Absolute mode. |byte2| gives the number of pixels. The length depends on
  // whether it's 4-bit or 8-bit RLE. Also, the length must be even (and zero
  // padding is used to achieve this when necessary).
  MOZ_ASSERT(mAbsoluteModeNumPixels == 0);
  mAbsoluteModeNumPixels = byte2;
  uint32_t length = byte2;
  if (mH.mCompression == Compression::RLE4) {
    length = (length + 1) / 2;  // halve, rounding up
  }
  if (length & 1) {
    length++;
  }
  return Transition::To(State::RLE_ABSOLUTE, length);
}
+
// Handles an RLE delta escape: aData[0]/aData[1] are the X/Y distances to
// jump forward in the image. Skipped pixels are left transparent.
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadRLEDelta(
    const char* aData) {
  // Delta encoding makes it possible to skip pixels making part of the image
  // transparent.
  MOZ_ASSERT(mMayHaveTransparency);
  mDoesHaveTransparency = true;

  // Clear the skipped pixels. (This clears to the end of the row,
  // which is perfect if there's a Y delta and harmless if not).
  ClearRowBufferRemainder();

  // Handle the XDelta, clamping to the row width.
  mCurrentPos += uint8_t(aData[0]);
  if (mCurrentPos > mH.mWidth) {
    mCurrentPos = mH.mWidth;
  }

  // Handle the Y Delta, clamping to the number of rows remaining.
  int32_t yDelta = std::min<int32_t>(uint8_t(aData[1]), mCurrentRow);
  if (yDelta > 0) {
    // Commit the current row (the first of the skipped rows).
    FinishRow();

    // Clear and commit the remaining skipped rows. We want to be careful not
    // to change mCurrentPos here.
    memset(mRowBuffer.get(), 0, mH.mWidth * sizeof(uint32_t));
    for (int32_t line = 1; line < yDelta; line++) {
      FinishRow();
    }
  }

  return mCurrentRow == 0
             ? Transition::TerminateSuccess()
             : Transition::To(State::RLE_SEGMENT, RLE::SEGMENT_LENGTH);
}
+
// Consumes the payload of an RLE absolute-mode run announced by
// ReadRLESegment(): mAbsoluteModeNumPixels literal palette indices (plus up
// to one byte of zero padding to keep the stream 2-byte aligned).
LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadRLEAbsolute(
    const char* aData, size_t aLength) {
  uint32_t n = mAbsoluteModeNumPixels;
  mAbsoluteModeNumPixels = 0;

  if (mCurrentPos + n > uint32_t(mH.mWidth)) {
    // Some DIB RLE8 encoders count a padding byte as the absolute mode
    // pixel number at the end of the row.
    if (mH.mCompression == Compression::RLE8 && n > 0 && (n & 1) == 0 &&
        mCurrentPos + n - uint32_t(mH.mWidth) == 1 && aLength > 0 &&
        aData[aLength - 1] == 0) {
      n--;
    } else {
      // Bad data. Stop decoding; at least part of the image may have been
      // decoded.
      return Transition::TerminateSuccess();
    }
  }

  // In absolute mode, n represents the number of pixels that follow, each of
  // which contains the color index of a single pixel.
  uint32_t* dst = RowBuffer();
  uint32_t iSrc = 0;
  uint32_t* oldPos = dst;  // Remember the start to advance mCurrentPos below.
  if (mH.mCompression == Compression::RLE8) {
    while (n > 0) {
      SetPixel(dst, aData[iSrc], mColors);
      n--;
      iSrc++;
    }
  } else {
    // RLE4: each payload byte holds two palette indices.
    while (n > 0) {
      Set4BitPixel(dst, aData[iSrc], n, mColors);
      iSrc++;
    }
  }
  mCurrentPos += dst - oldPos;

  // We should read all the data (unless the last byte is zero padding).
  MOZ_ASSERT(iSrc == aLength - 1 || iSrc == aLength);

  return Transition::To(State::RLE_SEGMENT, RLE::SEGMENT_LENGTH);
}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsBMPDecoder.h b/image/decoders/nsBMPDecoder.h
new file mode 100644
index 0000000000..c7990834b9
--- /dev/null
+++ b/image/decoders/nsBMPDecoder.h
@@ -0,0 +1,285 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsBMPDecoder_h
+#define mozilla_image_decoders_nsBMPDecoder_h
+
+#include "BMPHeaders.h"
+#include "Decoder.h"
+#include "gfxColor.h"
+#include "StreamingLexer.h"
+#include "SurfacePipe.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+namespace image {
+
+namespace bmp {
+
/// One endpoint of a CALIBRATED_RGB color space: a gamma value plus the
/// endpoint's CIE XYZ coordinates, all as raw fixed-point values read from
/// the header (converted to qcms form by the decoder).
struct CalRgbEndpoint {
  uint32_t mGamma;  // Channel gamma (raw header value).
  uint32_t mX;      // CIE X coordinate (raw header value).
  uint32_t mY;      // CIE Y coordinate (raw header value).
  uint32_t mZ;      // CIE Z coordinate (raw header value).
};
+
/// This struct contains the fields from the file header and info header that
/// we use during decoding. (Excluding bitfields fields, which are kept in
/// BitFields.)
struct Header {
  uint32_t mDataOffset;  // Offset to raster data.
  uint32_t mBIHSize;     // Header size.
  int32_t mWidth;        // Image width.
  int32_t mHeight;       // Image height.
  uint16_t mBpp;         // Bits per pixel.
  uint32_t mCompression;  // See struct Compression for valid values.
  uint32_t mImageSize;    // (compressed) image size. Can be 0 if
                          // mCompression==0.
  uint32_t mNumColors;        // Used colors.
  InfoColorSpace mCsType;     // Color space type.
  InfoColorIntent mCsIntent;  // Color space intent.

  // Color space details; which member is active depends on mCsType
  // (mCalibrated for CALIBRATED_RGB, mProfile for EMBEDDED).
  union {
    struct {
      CalRgbEndpoint mRed;
      CalRgbEndpoint mGreen;
      CalRgbEndpoint mBlue;
    } mCalibrated;

    struct {
      uint32_t mOffset;  // Offset of the embedded ICC profile.
      uint32_t mLength;  // Length of the embedded ICC profile in bytes.
    } mProfile;
  } mColorSpace;

  // Default to a zero-sized sRGB image with perceptual intent.
  Header()
      : mDataOffset(0),
        mBIHSize(0),
        mWidth(0),
        mHeight(0),
        mBpp(0),
        mCompression(0),
        mImageSize(0),
        mNumColors(0),
        mCsType(InfoColorSpace::SRGB),
        mCsIntent(InfoColorIntent::IMAGES) {}
};
+
/// An entry in the color table. Stored as packed RGB bytes so that a whole
/// table can be transformed in place by qcms as QCMS_DATA_RGB_8.
struct ColorTableEntry {
  uint8_t mRed;
  uint8_t mGreen;
  uint8_t mBlue;
};
+
/// All the color-related bitfields for 16bpp and 32bpp images. We use this
/// even for older format BMPs that don't have explicit bitfields.
class BitFields {
  /// One channel's mask plus derived shift/width, used to extract that
  /// channel from a packed 16- or 32-bit pixel value.
  class Value {
    friend class BitFields;

    uint32_t mMask;       // The mask for the value.
    uint8_t mRightShift;  // The amount to right-shift after masking.
    uint8_t mBitWidth;    // The width (in bits) of the value.

    /// Sets the mask (and thus the right-shift and bit-width as well).
    void Set(uint32_t aMask);

   public:
    Value() {
      mMask = 0;
      mRightShift = 0;
      mBitWidth = 0;
    }

    /// Returns true if this channel is used. Only used for alpha.
    bool IsPresent() const { return mMask != 0x0; }

    /// Extracts the single color value from the multi-color value.
    uint8_t Get(uint32_t aVal) const;

    /// Like Get(), but specially for alpha.
    uint8_t GetAlpha(uint32_t aVal, bool& aHasAlphaOut) const;

    /// Specialized versions of Get() for when the bit-width is 5 or 8.
    /// (They will assert if called and the bit-width is not 5 or 8.)
    uint8_t Get5(uint32_t aVal) const;
    uint8_t Get8(uint32_t aVal) const;
  };

 public:
  /// The individual color channels.
  Value mRed;
  Value mGreen;
  Value mBlue;
  Value mAlpha;

  /// Set bitfields to the standard 5-5-5 16bpp values.
  void SetR5G5B5();

  /// Set bitfields to the standard 8-8-8 32bpp values.
  void SetR8G8B8();

  /// Test if bitfields have the standard 5-5-5 16bpp values.
  bool IsR5G5B5() const;

  /// Test if bitfields have the standard 8-8-8 32bpp values.
  bool IsR8G8B8() const;

  /// Read the bitfields from a header. The reading of the alpha mask is
  /// optional.
  void ReadFromHeader(const char* aData, bool aReadAlpha);

  /// Length of the bitfields structure in the BMP file (three or four
  /// 4-byte masks; the alpha mask is counted separately by callers).
  static const size_t LENGTH = 12;
};
+
+} // namespace bmp
+
+class RasterImage;
+
/// Decoder for BMP-Files, as used by Windows and OS/2. Implemented as a
/// state machine driven by a StreamingLexer; see the State enum below for
/// the decoding stages.

class nsBMPDecoder : public Decoder {
 public:
  ~nsBMPDecoder();

  DecoderType GetType() const override { return DecoderType::BMP; }

  /// @return true if this BMP is a valid ICO resource.
  bool IsValidICOResource() const override { return true; }

  /// Obtains the internal output image buffer.
  uint32_t* GetImageData() { return reinterpret_cast<uint32_t*>(mImageData); }

  /// Obtains the length of the internal output image buffer.
  size_t GetImageDataLength() const { return mImageDataLength; }

  /// Obtains the size of the compressed image resource.
  int32_t GetCompressedImageSize() const;

  /// Mark this BMP as being within an ICO file. Only used for testing purposes
  /// because the ICO-specific constructor does this marking automatically.
  void SetIsWithinICO() { mIsWithinICO = true; }

  /// Did the BMP file have alpha data of any kind? (Only use this after the
  /// bitmap has been fully decoded.)
  bool HasTransparency() const { return mDoesHaveTransparency; }

  /// Main lexer entry point; dispatches each State to its Read*() handler.
  LexerResult DoDecode(SourceBufferIterator& aIterator,
                       IResumable* aOnResume) override;
  nsresult BeforeFinishInternal() override;
  nsresult FinishInternal() override;

 private:
  friend class DecoderFactory;

  // The stages of decoding, in the order they occur in the file.
  enum class State {
    FILE_HEADER,
    INFO_HEADER_SIZE,
    INFO_HEADER_REST,
    BITFIELDS,
    SKIP_TO_COLOR_PROFILE,
    FOUND_COLOR_PROFILE,
    COLOR_PROFILE,
    ALLOCATE_SURFACE,
    COLOR_TABLE,
    GAP,
    AFTER_GAP,
    PIXEL_ROW,
    RLE_SEGMENT,
    RLE_DELTA,
    RLE_ABSOLUTE
  };

  // This is the constructor used for normal and clipboard BMP images.
  explicit nsBMPDecoder(RasterImage* aImage, bool aForClipboard = false);

  // This is the constructor used for BMP resources in ICO images.
  nsBMPDecoder(RasterImage* aImage, uint32_t aDataOffset);

  // Helper constructor called by the other two.
  nsBMPDecoder(RasterImage* aImage, State aState, size_t aLength,
               bool aForClipboard);

  // The height is negative for top-to-bottom BMPs; this gives the magnitude.
  int32_t AbsoluteHeight() const { return abs(mH.mHeight); }

  uint32_t* RowBuffer();
  void ClearRowBufferRemainder();

  void FinishRow();

  void PrepareCalibratedColorProfile();
  void PrepareColorProfileTransform();

  // One handler per State; each consumes its input and returns the next
  // lexer transition.
  LexerTransition<State> ReadFileHeader(const char* aData, size_t aLength);
  LexerTransition<State> ReadInfoHeaderSize(const char* aData, size_t aLength);
  LexerTransition<State> ReadInfoHeaderRest(const char* aData, size_t aLength);
  LexerTransition<State> ReadBitfields(const char* aData, size_t aLength);
  LexerTransition<State> SeekColorProfile(size_t aLength);
  LexerTransition<State> ReadColorProfile(const char* aData, size_t aLength);
  LexerTransition<State> AllocateSurface();
  LexerTransition<State> ReadColorTable(const char* aData, size_t aLength);
  LexerTransition<State> SkipGap();
  LexerTransition<State> AfterGap();
  LexerTransition<State> ReadPixelRow(const char* aData);
  LexerTransition<State> ReadRLESegment(const char* aData);
  LexerTransition<State> ReadRLEDelta(const char* aData);
  LexerTransition<State> ReadRLEAbsolute(const char* aData, size_t aLength);

  SurfacePipe mPipe;

  StreamingLexer<State> mLexer;

  // Iterator to save return point.
  Maybe<SourceBufferIterator> mReturnIterator;

  // Scratch buffer holding one decoded output row.
  UniquePtr<uint32_t[]> mRowBuffer;

  bmp::Header mH;

  // If the BMP is within an ICO file our treatment of it differs slightly.
  bool mIsWithinICO;

  // If the BMP decoded from the clipboard, we don't start with a data offset.
  bool mIsForClipboard;

  bmp::BitFields mBitFields;

  // Might the image have transparency? Determined from the headers during
  // metadata decode. (Does not guarantee the image actually has transparency.)
  bool mMayHaveTransparency;

  // Does the image have transparency? Determined during full decoding, so only
  // use this after that has been completed.
  bool mDoesHaveTransparency;

  uint32_t mNumColors;  // The number of used colors, i.e. the number of
                        // entries in mColors, if it's present.
  UniquePtr<bmp::ColorTableEntry[]>
      mColors;              // The color table, if it's present.
  uint32_t mBytesPerColor;  // 3 or 4, depending on the format

  // The number of bytes prior to the optional gap that have been read. This
  // is used to find the start of the pixel data.
  uint32_t mPreGapLength;

  uint32_t mPixelRowSize;  // The number of bytes per pixel row.

  int32_t mCurrentRow;  // Index of the row of the image that's currently
                        // being decoded: [height,1].
  int32_t mCurrentPos;  // Index into the current line. Used when
                        // doing RLE decoding and when filling in pixels
                        // for truncated files.

  // Only used in RLE_ABSOLUTE state: the number of pixels to read.
  uint32_t mAbsoluteModeNumPixels;
};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsBMPDecoder_h
diff --git a/image/decoders/nsGIFDecoder2.cpp b/image/decoders/nsGIFDecoder2.cpp
new file mode 100644
index 0000000000..9b2de9124a
--- /dev/null
+++ b/image/decoders/nsGIFDecoder2.cpp
@@ -0,0 +1,1073 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+/*
+The Graphics Interchange Format(c) is the copyright property of CompuServe
+Incorporated. Only CompuServe Incorporated is authorized to define, redefine,
+enhance, alter, modify or change in any way the definition of the format.
+
+CompuServe Incorporated hereby grants a limited, non-exclusive, royalty-free
+license for the use of the Graphics Interchange Format(sm) in computer
+software; computer software utilizing GIF(sm) must acknowledge ownership of the
+Graphics Interchange Format and its Service Mark by CompuServe Incorporated, in
+User and Technical Documentation. Computer software utilizing GIF, which is
+distributed or may be distributed without User or Technical Documentation must
+display to the screen or printer a message acknowledging ownership of the
+Graphics Interchange Format and the Service Mark by CompuServe Incorporated; in
+this case, the acknowledgement may be displayed in an opening screen or leading
+banner, or a closing screen or trailing banner. A message such as the following
+may be used:
+
+ "The Graphics Interchange Format(c) is the Copyright property of
+ CompuServe Incorporated. GIF(sm) is a Service Mark property of
+ CompuServe Incorporated."
+
+For further information, please contact :
+
+ CompuServe Incorporated
+ Graphics Technology Department
+ 5000 Arlington Center Boulevard
+ Columbus, Ohio 43220
+ U. S. A.
+
+CompuServe Incorporated maintains a mailing list with all those individuals and
+organizations who wish to receive copies of this document when it is corrected
+or revised. This service is offered free of charge; please provide us with your
+mailing address.
+*/
+
+#include "nsGIFDecoder2.h"
+
+#include <stddef.h>
+
+#include "imgFrame.h"
+#include "mozilla/EndianUtils.h"
+#include "RasterImage.h"
+#include "SurfacePipeFactory.h"
+
+#include "gfxColor.h"
+#include "gfxPlatform.h"
+#include "qcms.h"
+#include <algorithm>
+#include "mozilla/Telemetry.h"
+
+using namespace mozilla::gfx;
+
+using std::max;
+
+namespace mozilla {
+namespace image {
+
+//////////////////////////////////////////////////////////////////////
+// GIF Decoder Implementation
+
// Fixed sizes (in bytes) of the GIF structures the lexer consumes in buffered
// mode. Color tables and LZW data are read unbuffered instead, since they can
// be large (see ReadScreenDescriptor / FinishImageDescriptor).
static const size_t GIF_HEADER_LEN = 6;
static const size_t GIF_SCREEN_DESCRIPTOR_LEN = 7;
static const size_t BLOCK_HEADER_LEN = 1;
static const size_t SUB_BLOCK_HEADER_LEN = 1;
static const size_t EXTENSION_HEADER_LEN = 2;
static const size_t GRAPHIC_CONTROL_EXTENSION_LEN = 4;
static const size_t APPLICATION_EXTENSION_LEN = 11;
static const size_t IMAGE_DESCRIPTOR_LEN = 9;

// Masks for reading color table information from packed fields in the screen
// descriptor and image descriptor blocks.
static const uint8_t PACKED_FIELDS_COLOR_TABLE_BIT = 0x80;
static const uint8_t PACKED_FIELDS_INTERLACED_BIT = 0x40;
static const uint8_t PACKED_FIELDS_TABLE_DEPTH_MASK = 0x07;
+
// Constructs a GIF decoder for aImage. The lexer is primed to read the
// 6-byte GIF signature first and terminates successfully at end of input.
nsGIFDecoder2::nsGIFDecoder2(RasterImage* aImage)
    : Decoder(aImage),
      mLexer(Transition::To(State::GIF_HEADER, GIF_HEADER_LEN),
             Transition::TerminateSuccess()),
      mOldColor(0),
      mCurrentFrameIndex(-1),
      mColorTablePos(0),
      mColormap(nullptr),
      mColormapSize(0),
      mColorMask('\0'),
      mGIFOpen(false),
      mSawTransparency(false),
      mSwizzleFn(nullptr) {
  // Clear out the structure, excluding the arrays. Ensure that the global
  // colormap is initialized as opaque.
  memset(&mGIFStruct, 0, sizeof(mGIFStruct));
  memset(mGIFStruct.global_colormap, 0xFF, sizeof(mGIFStruct.global_colormap));

  // Each color table will need to be unpacked (RGB -> OS_RGBA); cache the
  // swizzle routine once here rather than per color table.
  mSwizzleFn = SwizzleRow(SurfaceFormat::R8G8B8, SurfaceFormat::OS_RGBA);
  MOZ_ASSERT(mSwizzleFn);
}
+
+nsGIFDecoder2::~nsGIFDecoder2() { free(mGIFStruct.local_colormap); }
+
+nsresult nsGIFDecoder2::FinishInternal() {
+ MOZ_ASSERT(!HasError(), "Shouldn't call FinishInternal after error!");
+
+ // If the GIF got cut off, handle it anyway
+ if (!IsMetadataDecode() && mGIFOpen) {
+ if (mCurrentFrameIndex == mGIFStruct.images_decoded) {
+ EndImageFrame();
+ }
+ PostDecodeDone(mGIFStruct.loop_count);
+ mGIFOpen = false;
+ }
+
+ return NS_OK;
+}
+
+void nsGIFDecoder2::FlushImageData() {
+ Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+ if (!invalidRect) {
+ return;
+ }
+
+ PostInvalidation(invalidRect->mInputSpaceRect,
+ Some(invalidRect->mOutputSpaceRect));
+}
+
+//******************************************************************************
+// GIF decoder callback methods. Part of public API for GIF2
+//******************************************************************************
+
+//******************************************************************************
+void nsGIFDecoder2::BeginGIF() {
+ if (mGIFOpen) {
+ return;
+ }
+
+ mGIFOpen = true;
+
+ PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height);
+}
+
+bool nsGIFDecoder2::CheckForTransparency(const OrientedIntRect& aFrameRect) {
+ // Check if the image has a transparent color in its palette.
+ if (mGIFStruct.is_transparent) {
+ PostHasTransparency();
+ return true;
+ }
+
+ // This is a bit of a hack. Some sites will use a 1x1 gif that includes no
+ // header information indicating it is transparent, no palette, and no image
+ // data at all (so no pixels get written) to represent a transparent pixel
+ // using the absolute least number of bytes. Generally things are setup to
+ // detect transparency without decoding the image data. So to detect this kind
+ // of transparency without decoing the image data we would have to assume
+ // every gif is transparent, which we would like to avoid. Changing things so
+ // that we can detect transparency at any point of decoding is a bigger change
+ // and not worth it for one questionable 1x1 gif. Using this "trick" for
+ // anything but 1x1 transparent spacer gifs doesn't make sense, so it's
+ // reasonable to target 1x1 gifs just for this.
+ if (mGIFStruct.screen_width == 1 && mGIFStruct.screen_height == 1) {
+ PostHasTransparency();
+ return true;
+ }
+
+ if (mGIFStruct.images_decoded > 0) {
+ return false; // We only care about first frame padding below.
+ }
+
+ // If we need padding on the first frame, that means we don't draw into part
+ // of the image at all. Report that as transparency.
+ OrientedIntRect imageRect(0, 0, mGIFStruct.screen_width,
+ mGIFStruct.screen_height);
+ if (!imageRect.IsEqualEdges(aFrameRect)) {
+ PostHasTransparency();
+ mSawTransparency = true; // Make sure we don't optimize it away.
+ return true;
+ }
+
+ return false;
+}
+
+//******************************************************************************
+nsresult nsGIFDecoder2::BeginImageFrame(const OrientedIntRect& aFrameRect,
+ uint16_t aDepth, bool aIsInterlaced) {
+ MOZ_ASSERT(HasSize());
+
+ bool hasTransparency = CheckForTransparency(aFrameRect);
+
+ // Make sure there's no animation if we're downscaling.
+ MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());
+
+ Maybe<AnimationParams> animParams;
+ if (!IsFirstFrameDecode()) {
+ animParams.emplace(aFrameRect.ToUnknownRect(),
+ FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time),
+ uint32_t(mGIFStruct.images_decoded), BlendMethod::OVER,
+ DisposalMethod(mGIFStruct.disposal_method));
+ }
+
+ SurfacePipeFlags pipeFlags =
+ aIsInterlaced ? SurfacePipeFlags::DEINTERLACE : SurfacePipeFlags();
+
+ gfx::SurfaceFormat format;
+ if (mGIFStruct.images_decoded == 0) {
+ // The first frame may be displayed progressively.
+ pipeFlags |= SurfacePipeFlags::PROGRESSIVE_DISPLAY;
+
+ // Only allow opaque surfaces if we are decoding a single image without
+ // transparency. For an animation, there isn't much benefit to RGBX given
+ // the current frame is constantly changing, and there are many risks
+ // since BlendAnimationFilter is able to clear rows of data.
+ format = hasTransparency || animParams ? SurfaceFormat::OS_RGBA
+ : SurfaceFormat::OS_RGBX;
+ } else {
+ format = SurfaceFormat::OS_RGBA;
+ }
+
+ Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
+ this, Size(), OutputSize(), aFrameRect, format, format, animParams,
+ mTransform, pipeFlags);
+ mCurrentFrameIndex = mGIFStruct.images_decoded;
+
+ if (!pipe) {
+ mPipe = SurfacePipe();
+ return NS_ERROR_FAILURE;
+ }
+
+ mPipe = std::move(*pipe);
+ return NS_OK;
+}
+
+//******************************************************************************
+void nsGIFDecoder2::EndImageFrame() {
+ Opacity opacity = Opacity::SOME_TRANSPARENCY;
+
+ if (mGIFStruct.images_decoded == 0) {
+ // We need to send invalidations for the first frame.
+ FlushImageData();
+
+ // The first frame was preallocated with alpha; if it wasn't transparent, we
+ // should fix that. We can also mark it opaque unconditionally if we didn't
+ // actually see any transparent pixels - this test is only valid for the
+ // first frame.
+ if (!mGIFStruct.is_transparent && !mSawTransparency) {
+ opacity = Opacity::FULLY_OPAQUE;
+ }
+ }
+
+ // Unconditionally increment images_decoded, because we unconditionally
+ // append frames in BeginImageFrame(). This ensures that images_decoded
+ // always refers to the frame in mImage we're currently decoding,
+ // even if some of them weren't decoded properly and thus are blank.
+ mGIFStruct.images_decoded++;
+
+ // Reset graphic control extension parameters that we shouldn't reuse
+ // between frames.
+ mGIFStruct.delay_time = 0;
+
+ // Tell the superclass we finished a frame
+ PostFrameStop(opacity);
+
+ // Reset the transparent pixel
+ if (mOldColor) {
+ mColormap[mGIFStruct.tpixel] = mOldColor;
+ mOldColor = 0;
+ }
+
+ mColormap = nullptr;
+ mColormapSize = 0;
+ mCurrentFrameIndex = -1;
+}
+
+template <typename PixelSize>
+PixelSize nsGIFDecoder2::ColormapIndexToPixel(uint8_t aIndex) {
+ MOZ_ASSERT(sizeof(PixelSize) == sizeof(uint32_t));
+
+ // Retrieve the next color, clamping to the size of the colormap.
+ uint32_t color = mColormap[aIndex & mColorMask];
+
+ // Check for transparency.
+ if (mGIFStruct.is_transparent) {
+ mSawTransparency = mSawTransparency || color == 0;
+ }
+
+ return color;
+}
+
+template <>
+uint8_t nsGIFDecoder2::ColormapIndexToPixel<uint8_t>(uint8_t aIndex) {
+ return aIndex & mColorMask;
+}
+
// LZW-decodes GIF image data into pixels.
//
// Consumes bytes from aData starting at *aBytesReadOut (which is advanced as
// input is consumed) and writes up to aBlockSize decoded pixels into
// aPixelBlock through the colormap. Returns the number of pixels written,
// paired with Nothing() when the block is full, or a WriteState when decoding
// must pause or stop:
//   - NEED_MORE_DATA: input exhausted mid-symbol, or a clear code just reset
//     the dictionary.
//   - FAILURE: corrupt stream (out-of-range code, decode-stack overflow, or a
//     premature end-of-information code).
template <typename PixelSize>
std::tuple<int32_t, Maybe<WriteState>> nsGIFDecoder2::YieldPixels(
    const uint8_t* aData, size_t aLength, size_t* aBytesReadOut,
    PixelSize* aPixelBlock, int32_t aBlockSize) {
  MOZ_ASSERT(aData);
  MOZ_ASSERT(aBytesReadOut);
  MOZ_ASSERT(mGIFStruct.stackp >= mGIFStruct.stack);

  // Advance to the next byte we should read.
  const uint8_t* data = aData + *aBytesReadOut;

  int32_t written = 0;
  while (aBlockSize > written) {
    // If we don't have any decoded data to yield, try to read some input and
    // produce some.
    if (mGIFStruct.stackp == mGIFStruct.stack) {
      while (mGIFStruct.bits < mGIFStruct.codesize &&
             *aBytesReadOut < aLength) {
        // Feed the next byte into the decoder's 32-bit input buffer.
        mGIFStruct.datum += int32_t(*data) << mGIFStruct.bits;
        mGIFStruct.bits += 8;
        data += 1;
        *aBytesReadOut += 1;
      }

      if (mGIFStruct.bits < mGIFStruct.codesize) {
        return std::make_tuple(written, Some(WriteState::NEED_MORE_DATA));
      }

      // Get the leading variable-length symbol from the data stream.
      int code = mGIFStruct.datum & mGIFStruct.codemask;
      mGIFStruct.datum >>= mGIFStruct.codesize;
      mGIFStruct.bits -= mGIFStruct.codesize;

      const int clearCode = ClearCode();

      // Reset the dictionary to its original state, if requested
      if (code == clearCode) {
        mGIFStruct.codesize = mGIFStruct.datasize + 1;
        mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
        mGIFStruct.avail = clearCode + 2;
        mGIFStruct.oldcode = -1;
        return std::make_tuple(written, Some(WriteState::NEED_MORE_DATA));
      }

      // Check for explicit end-of-stream code. It should only appear after all
      // image data, but if that was the case we wouldn't be in this function,
      // so this is always an error condition.
      if (code == (clearCode + 1)) {
        return std::make_tuple(written, Some(WriteState::FAILURE));
      }

      if (mGIFStruct.oldcode == -1) {
        if (code >= MAX_BITS) {
          // The code's too big; something's wrong.
          return std::make_tuple(written, Some(WriteState::FAILURE));
        }

        mGIFStruct.firstchar = mGIFStruct.oldcode = code;

        // Yield a pixel at the appropriate index in the colormap.
        mGIFStruct.pixels_remaining--;
        aPixelBlock[written++] =
            ColormapIndexToPixel<PixelSize>(mGIFStruct.suffix[code]);
        continue;
      }

      int incode = code;
      if (code >= mGIFStruct.avail) {
        // Code not yet in the dictionary: emit firstchar and decode oldcode
        // instead (the standard LZW "KwKwK" case).
        *mGIFStruct.stackp++ = mGIFStruct.firstchar;
        code = mGIFStruct.oldcode;

        if (mGIFStruct.stackp >= mGIFStruct.stack + MAX_BITS) {
          // Stack overflow; something's wrong.
          return std::make_tuple(written, Some(WriteState::FAILURE));
        }
      }

      // Expand the code into its byte sequence (pushed in reverse order).
      while (code >= clearCode) {
        if ((code >= MAX_BITS) || (code == mGIFStruct.prefix[code])) {
          return std::make_tuple(written, Some(WriteState::FAILURE));
        }

        *mGIFStruct.stackp++ = mGIFStruct.suffix[code];
        code = mGIFStruct.prefix[code];

        if (mGIFStruct.stackp >= mGIFStruct.stack + MAX_BITS) {
          // Stack overflow; something's wrong.
          return std::make_tuple(written, Some(WriteState::FAILURE));
        }
      }

      *mGIFStruct.stackp++ = mGIFStruct.firstchar = mGIFStruct.suffix[code];

      // Define a new codeword in the dictionary.
      if (mGIFStruct.avail < 4096) {
        mGIFStruct.prefix[mGIFStruct.avail] = mGIFStruct.oldcode;
        mGIFStruct.suffix[mGIFStruct.avail] = mGIFStruct.firstchar;
        mGIFStruct.avail++;

        // If we've used up all the codewords of a given length increase the
        // length of codewords by one bit, but don't exceed the specified
        // maximum codeword size of 12 bits.
        if (((mGIFStruct.avail & mGIFStruct.codemask) == 0) &&
            (mGIFStruct.avail < 4096)) {
          mGIFStruct.codesize++;
          mGIFStruct.codemask += mGIFStruct.avail;
        }
      }

      mGIFStruct.oldcode = incode;
    }

    if (MOZ_UNLIKELY(mGIFStruct.stackp <= mGIFStruct.stack)) {
      MOZ_ASSERT_UNREACHABLE("No decoded data but we didn't return early?");
      return std::make_tuple(written, Some(WriteState::FAILURE));
    }

    // Yield a pixel at the appropriate index in the colormap.
    mGIFStruct.pixels_remaining--;
    aPixelBlock[written++] =
        ColormapIndexToPixel<PixelSize>(*--mGIFStruct.stackp);
  }

  return std::make_tuple(written, Maybe<WriteState>());
}
+
+/// Expand the colormap from RGB to Packed ARGB as needed by Cairo.
+/// And apply any LCMS transformation.
+void nsGIFDecoder2::ConvertColormap(uint32_t* aColormap, uint32_t aColors) {
+ if (!aColors) {
+ return;
+ }
+
+ // Apply CMS transformation if enabled and available
+ if (mCMSMode == CMSMode::All) {
+ qcms_transform* transform = GetCMSsRGBTransform(SurfaceFormat::R8G8B8);
+ if (transform) {
+ qcms_transform_data(transform, aColormap, aColormap, aColors);
+ }
+ }
+
+ // Expand color table from RGB to BGRA.
+ MOZ_ASSERT(mSwizzleFn);
+ uint8_t* data = reinterpret_cast<uint8_t*>(aColormap);
+ mSwizzleFn(data, data, aColors);
+}
+
// Main decode entry point: drives the streaming lexer, dispatching each state
// to its handler. The skip states are handled inline here since they only
// discard input or chain straight to the next state.
LexerResult nsGIFDecoder2::DoDecode(SourceBufferIterator& aIterator,
                                    IResumable* aOnResume) {
  MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");

  return mLexer.Lex(
      aIterator, aOnResume,
      [=](State aState, const char* aData, size_t aLength) {
        switch (aState) {
          case State::GIF_HEADER:
            return ReadGIFHeader(aData);
          case State::SCREEN_DESCRIPTOR:
            return ReadScreenDescriptor(aData);
          case State::GLOBAL_COLOR_TABLE:
            return ReadGlobalColorTable(aData, aLength);
          case State::FINISHED_GLOBAL_COLOR_TABLE:
            return FinishedGlobalColorTable();
          case State::BLOCK_HEADER:
            return ReadBlockHeader(aData);
          case State::EXTENSION_HEADER:
            return ReadExtensionHeader(aData);
          case State::GRAPHIC_CONTROL_EXTENSION:
            return ReadGraphicControlExtension(aData);
          case State::APPLICATION_IDENTIFIER:
            return ReadApplicationIdentifier(aData);
          case State::NETSCAPE_EXTENSION_SUB_BLOCK:
            return ReadNetscapeExtensionSubBlock(aData);
          case State::NETSCAPE_EXTENSION_DATA:
            return ReadNetscapeExtensionData(aData);
          case State::IMAGE_DESCRIPTOR:
            return ReadImageDescriptor(aData);
          case State::FINISH_IMAGE_DESCRIPTOR:
            return FinishImageDescriptor(aData);
          case State::LOCAL_COLOR_TABLE:
            return ReadLocalColorTable(aData, aLength);
          case State::FINISHED_LOCAL_COLOR_TABLE:
            return FinishedLocalColorTable();
          case State::IMAGE_DATA_BLOCK:
            return ReadImageDataBlock(aData);
          case State::IMAGE_DATA_SUB_BLOCK:
            return ReadImageDataSubBlock(aData);
          case State::LZW_DATA:
            return ReadLZWData(aData, aLength);
          case State::SKIP_LZW_DATA:
            // Discarding LZW data (e.g. after the frame is full).
            return Transition::ContinueUnbuffered(State::SKIP_LZW_DATA);
          case State::FINISHED_LZW_DATA:
            return Transition::To(State::IMAGE_DATA_SUB_BLOCK,
                                  SUB_BLOCK_HEADER_LEN);
          case State::SKIP_SUB_BLOCKS:
            return SkipSubBlocks(aData);
          case State::SKIP_DATA_THEN_SKIP_SUB_BLOCKS:
            // Discarding extension payload before skipping its sub-blocks.
            return Transition::ContinueUnbuffered(
                State::SKIP_DATA_THEN_SKIP_SUB_BLOCKS);
          case State::FINISHED_SKIPPING_DATA:
            return Transition::To(State::SKIP_SUB_BLOCKS, SUB_BLOCK_HEADER_LEN);
          default:
            MOZ_CRASH("Unknown State");
        }
      });
}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadGIFHeader(
+ const char* aData) {
+ // We retrieve the version here but because many GIF encoders set header
+ // fields incorrectly, we barely use it; features which should only appear in
+ // GIF89a are always accepted.
+ if (strncmp(aData, "GIF87a", GIF_HEADER_LEN) == 0) {
+ mGIFStruct.version = 87;
+ } else if (strncmp(aData, "GIF89a", GIF_HEADER_LEN) == 0) {
+ mGIFStruct.version = 89;
+ } else {
+ return Transition::TerminateFailure();
+ }
+
+ return Transition::To(State::SCREEN_DESCRIPTOR, GIF_SCREEN_DESCRIPTOR_LEN);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadScreenDescriptor(
+ const char* aData) {
+ mGIFStruct.screen_width = LittleEndian::readUint16(aData + 0);
+ mGIFStruct.screen_height = LittleEndian::readUint16(aData + 2);
+
+ const uint8_t packedFields = aData[4];
+
+ // XXX: Should we be capturing these values even if there is no global color
+ // table?
+ mGIFStruct.global_colormap_depth =
+ (packedFields & PACKED_FIELDS_TABLE_DEPTH_MASK) + 1;
+ mGIFStruct.global_colormap_count = 1 << mGIFStruct.global_colormap_depth;
+
+ // We ignore several fields in the header. We don't care about the 'sort
+ // flag', which indicates if the global color table's entries are sorted in
+ // order of importance - if we need to render this image for a device with a
+ // narrower color gamut than GIF supports we'll handle that at a different
+ // layer. We have no use for the pixel aspect ratio as well. Finally, we
+ // intentionally ignore the background color index, as implementing that
+ // feature would not be web compatible - when a GIF image frame doesn't cover
+ // the entire area of the image, the area that's not covered should always be
+ // transparent.
+
+ if (packedFields & PACKED_FIELDS_COLOR_TABLE_BIT) {
+ MOZ_ASSERT(mColorTablePos == 0);
+
+ // We read the global color table in unbuffered mode since it can be quite
+ // large and it'd be preferable to avoid unnecessary copies.
+ const size_t globalColorTableSize = 3 * mGIFStruct.global_colormap_count;
+ return Transition::ToUnbuffered(State::FINISHED_GLOBAL_COLOR_TABLE,
+ State::GLOBAL_COLOR_TABLE,
+ globalColorTableSize);
+ }
+
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadGlobalColorTable(
+ const char* aData, size_t aLength) {
+ uint8_t* dest =
+ reinterpret_cast<uint8_t*>(mGIFStruct.global_colormap) + mColorTablePos;
+ memcpy(dest, aData, aLength);
+ mColorTablePos += aLength;
+ return Transition::ContinueUnbuffered(State::GLOBAL_COLOR_TABLE);
+}
+
+LexerTransition<nsGIFDecoder2::State>
+nsGIFDecoder2::FinishedGlobalColorTable() {
+ ConvertColormap(mGIFStruct.global_colormap, mGIFStruct.global_colormap_count);
+ mColorTablePos = 0;
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadBlockHeader(
+ const char* aData) {
+ // Determine what type of block we're dealing with.
+ switch (aData[0]) {
+ case GIF_EXTENSION_INTRODUCER:
+ return Transition::To(State::EXTENSION_HEADER, EXTENSION_HEADER_LEN);
+
+ case GIF_IMAGE_SEPARATOR:
+ return Transition::To(State::IMAGE_DESCRIPTOR, IMAGE_DESCRIPTOR_LEN);
+
+ case GIF_TRAILER:
+ FinishInternal();
+ return Transition::TerminateSuccess();
+
+ default:
+ // If we get anything other than GIF_IMAGE_SEPARATOR,
+ // GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
+ // between blocks. The GIF87a spec tells us to keep reading until we find
+ // an image separator, but GIF89a says such a file is corrupt. We follow
+ // GIF89a and bail out.
+
+ if (mGIFStruct.images_decoded > 0) {
+ // The file is corrupt, but we successfully decoded some frames, so we
+ // may as well consider the decode successful and display them.
+ FinishInternal();
+ return Transition::TerminateSuccess();
+ }
+
+ // No images decoded; there is nothing to display.
+ return Transition::TerminateFailure();
+ }
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadExtensionHeader(
+ const char* aData) {
+ const uint8_t label = aData[0];
+ const uint8_t extensionHeaderLength = aData[1];
+
+ // If the extension header is zero length, just treat it as a block terminator
+ // and move on to the next block immediately.
+ if (extensionHeaderLength == 0) {
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+ }
+
+ switch (label) {
+ case GIF_GRAPHIC_CONTROL_LABEL:
+ // The GIF spec mandates that the Control Extension header block length is
+ // 4 bytes, and the parser for this block reads 4 bytes, so we must
+ // enforce that the buffer contains at least this many bytes. If the GIF
+ // specifies a different length, we allow that, so long as it's larger;
+ // the additional data will simply be ignored.
+ return Transition::To(
+ State::GRAPHIC_CONTROL_EXTENSION,
+ max<uint8_t>(extensionHeaderLength, GRAPHIC_CONTROL_EXTENSION_LEN));
+
+ case GIF_APPLICATION_EXTENSION_LABEL:
+ // Again, the spec specifies that an application extension header is 11
+ // bytes, but for compatibility with GIFs in the wild, we allow deviation
+ // from the spec. This is important for real-world compatibility, as GIFs
+ // in the wild exist with application extension headers that are both
+ // shorter and longer than 11 bytes. However, we only try to actually
+ // interpret the application extension if the length is correct;
+ // otherwise, we just skip the block unconditionally.
+ return extensionHeaderLength == APPLICATION_EXTENSION_LEN
+ ? Transition::To(State::APPLICATION_IDENTIFIER,
+ extensionHeaderLength)
+ : Transition::ToUnbuffered(
+ State::FINISHED_SKIPPING_DATA,
+ State::SKIP_DATA_THEN_SKIP_SUB_BLOCKS,
+ extensionHeaderLength);
+
+ default:
+ // Skip over any other type of extension block, including comment and
+ // plain text blocks.
+ return Transition::ToUnbuffered(State::FINISHED_SKIPPING_DATA,
+ State::SKIP_DATA_THEN_SKIP_SUB_BLOCKS,
+ extensionHeaderLength);
+ }
+}
+
+LexerTransition<nsGIFDecoder2::State>
+nsGIFDecoder2::ReadGraphicControlExtension(const char* aData) {
+ mGIFStruct.is_transparent = aData[0] & 0x1;
+ mGIFStruct.tpixel = uint8_t(aData[3]);
+ mGIFStruct.disposal_method = (aData[0] >> 2) & 0x7;
+
+ if (mGIFStruct.disposal_method == 4) {
+ // Some encoders (and apparently some specs) represent
+ // DisposalMethod::RESTORE_PREVIOUS as 4, but 3 is used in the canonical
+ // spec and is more popular, so we normalize to 3.
+ mGIFStruct.disposal_method = 3;
+ } else if (mGIFStruct.disposal_method > 4) {
+ // This GIF is using a disposal method which is undefined in the spec.
+ // Treat it as DisposalMethod::NOT_SPECIFIED.
+ mGIFStruct.disposal_method = 0;
+ }
+
+ DisposalMethod method = DisposalMethod(mGIFStruct.disposal_method);
+ if (method == DisposalMethod::CLEAR_ALL || method == DisposalMethod::CLEAR) {
+ // We may have to display the background under this image during animation
+ // playback, so we regard it as transparent.
+ PostHasTransparency();
+ }
+
+ mGIFStruct.delay_time = LittleEndian::readUint16(aData + 1) * 10;
+ if (!HasAnimation() && mGIFStruct.delay_time > 0) {
+ PostIsAnimated(FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time));
+ }
+
+ return Transition::To(State::SKIP_SUB_BLOCKS, SUB_BLOCK_HEADER_LEN);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadApplicationIdentifier(
+ const char* aData) {
+ if ((strncmp(aData, "NETSCAPE2.0", 11) == 0) ||
+ (strncmp(aData, "ANIMEXTS1.0", 11) == 0)) {
+ // This is a Netscape application extension block.
+ return Transition::To(State::NETSCAPE_EXTENSION_SUB_BLOCK,
+ SUB_BLOCK_HEADER_LEN);
+ }
+
+ // This is an application extension we don't care about. Just skip it.
+ return Transition::To(State::SKIP_SUB_BLOCKS, SUB_BLOCK_HEADER_LEN);
+}
+
+LexerTransition<nsGIFDecoder2::State>
+nsGIFDecoder2::ReadNetscapeExtensionSubBlock(const char* aData) {
+ const uint8_t blockLength = aData[0];
+ if (blockLength == 0) {
+ // We hit the block terminator.
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+ }
+
+ // We consume a minimum of 3 bytes in accordance with the specs for the
+ // Netscape application extension block, such as they are.
+ const size_t extensionLength = max<uint8_t>(blockLength, 3);
+ return Transition::To(State::NETSCAPE_EXTENSION_DATA, extensionLength);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadNetscapeExtensionData(
+ const char* aData) {
+ // Documentation for NETSCAPE2.0 / ANIMEXTS1.0 extensions can be found at:
+ // https://wiki.whatwg.org/wiki/GIF
+ static const uint8_t NETSCAPE_LOOPING_EXTENSION_SUB_BLOCK_ID = 1;
+ static const uint8_t NETSCAPE_BUFFERING_EXTENSION_SUB_BLOCK_ID = 2;
+
+ const uint8_t subBlockID = aData[0] & 7;
+ switch (subBlockID) {
+ case NETSCAPE_LOOPING_EXTENSION_SUB_BLOCK_ID:
+ // This is looping extension.
+ mGIFStruct.loop_count = LittleEndian::readUint16(aData + 1);
+ // Zero loop count is infinite animation loop request.
+ if (mGIFStruct.loop_count == 0) {
+ mGIFStruct.loop_count = -1;
+ }
+
+ return Transition::To(State::NETSCAPE_EXTENSION_SUB_BLOCK,
+ SUB_BLOCK_HEADER_LEN);
+
+ case NETSCAPE_BUFFERING_EXTENSION_SUB_BLOCK_ID:
+ // We allow, but ignore, this extension.
+ return Transition::To(State::NETSCAPE_EXTENSION_SUB_BLOCK,
+ SUB_BLOCK_HEADER_LEN);
+
+ default:
+ return Transition::TerminateFailure();
+ }
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadImageDescriptor(
+ const char* aData) {
+ // On the first frame, we don't need to yield, and none of the other checks
+ // below apply, so we can just jump right into FinishImageDescriptor().
+ if (mGIFStruct.images_decoded == 0) {
+ return FinishImageDescriptor(aData);
+ }
+
+ if (!HasAnimation()) {
+ // We should've already called PostIsAnimated(); this must be a corrupt
+ // animated image with a first frame timeout of zero. Signal that we're
+ // animated now, before the first-frame decode early exit below, so that
+ // RasterImage can detect that this happened.
+ PostIsAnimated(FrameTimeout::FromRawMilliseconds(0));
+ }
+
+ if (IsFirstFrameDecode()) {
+ // We're about to get a second frame, but we only want the first. Stop
+ // decoding now.
+ FinishInternal();
+ return Transition::TerminateSuccess();
+ }
+
+ MOZ_ASSERT(Size() == OutputSize(), "Downscaling an animated image?");
+
+ // Yield to allow access to the previous frame before we start a new one.
+ return Transition::ToAfterYield(State::FINISH_IMAGE_DESCRIPTOR);
+}
+
// Parses the 9-byte image descriptor (possibly after a yield from
// ReadImageDescriptor): establishes the frame rect (with workarounds for
// broken files), announces the image size on the first frame, creates the
// frame's SurfacePipe, and routes either to local color table reading or
// straight to image data. Terminates the decode on metadata-only decodes and
// on unrecoverable errors.
LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::FinishImageDescriptor(
    const char* aData) {
  OrientedIntRect frameRect;

  // Get image offsets with respect to the screen origin.
  frameRect.SetRect(
      LittleEndian::readUint16(aData + 0), LittleEndian::readUint16(aData + 2),
      LittleEndian::readUint16(aData + 4), LittleEndian::readUint16(aData + 6));

  if (!mGIFStruct.images_decoded) {
    // Work around GIF files where
    // * at least one of the logical screen dimensions is smaller than the
    //   same dimension in the first image, or
    // * GIF87a files where the first image's dimensions do not match the
    //   logical screen dimensions.
    if (mGIFStruct.screen_height < frameRect.Height() ||
        mGIFStruct.screen_width < frameRect.Width() ||
        mGIFStruct.version == 87) {
      mGIFStruct.screen_height = frameRect.Height();
      mGIFStruct.screen_width = frameRect.Width();
      frameRect.MoveTo(0, 0);
    }

    // Create the image container with the right size.
    BeginGIF();
    if (HasError()) {
      // Setting the size led to an error.
      return Transition::TerminateFailure();
    }

    // If we're doing a metadata decode, we're done.
    if (IsMetadataDecode()) {
      CheckForTransparency(frameRect);
      FinishInternal();
      return Transition::TerminateSuccess();
    }
  }

  // Work around broken GIF files that have zero frame width or height; in this
  // case, we'll treat the frame as having the same size as the overall image.
  if (frameRect.Height() == 0 || frameRect.Width() == 0) {
    frameRect.SetHeight(mGIFStruct.screen_height);
    frameRect.SetWidth(mGIFStruct.screen_width);

    // If that still resulted in zero frame width or height, give up.
    if (frameRect.Height() == 0 || frameRect.Width() == 0) {
      return Transition::TerminateFailure();
    }
  }

  // Determine |depth| (log base 2 of the number of colors in the palette).
  bool haveLocalColorTable = false;
  uint16_t depth = 0;
  uint8_t packedFields = aData[8];

  if (packedFields & PACKED_FIELDS_COLOR_TABLE_BIT) {
    // Get the palette depth from the local color table.
    depth = (packedFields & PACKED_FIELDS_TABLE_DEPTH_MASK) + 1;
    haveLocalColorTable = true;
  } else {
    // Get the palette depth from the global color table.
    depth = mGIFStruct.global_colormap_depth;
  }

  // If the transparent color index is greater than the number of colors in the
  // color table, we may need a higher color depth than |depth| would specify.
  // Our internal representation of the image will instead use |realDepth|,
  // which is the smallest color depth that can accommodate the existing palette
  // *and* the transparent color index.
  uint16_t realDepth = depth;
  while (mGIFStruct.tpixel >= (1 << realDepth) && realDepth < 8) {
    realDepth++;
  }

  // Create a mask used to ensure that color values fit within the colormap.
  mColorMask = 0xFF >> (8 - realDepth);

  // Determine if this frame is interlaced or not.
  const bool isInterlaced = packedFields & PACKED_FIELDS_INTERLACED_BIT;

  // Create the SurfacePipe we'll use to write output for this frame.
  if (NS_FAILED(BeginImageFrame(frameRect, realDepth, isInterlaced))) {
    return Transition::TerminateFailure();
  }

  // Clear state from last image.
  mGIFStruct.pixels_remaining =
      int64_t(frameRect.Width()) * int64_t(frameRect.Height());

  if (haveLocalColorTable) {
    // We have a local color table, so prepare to read it into the palette of
    // the current frame.
    mGIFStruct.local_colormap_size = 1 << depth;

    if (!mColormap) {
      // Ensure our current colormap buffer is large enough to hold the new one.
      // The buffer is sized for |realDepth| (which may exceed |depth|) so the
      // transparent index is always addressable.
      mColormapSize = sizeof(uint32_t) << realDepth;
      if (mGIFStruct.local_colormap_buffer_size < mColormapSize) {
        if (mGIFStruct.local_colormap) {
          free(mGIFStruct.local_colormap);
        }
        mGIFStruct.local_colormap_buffer_size = mColormapSize;
        mGIFStruct.local_colormap =
            static_cast<uint32_t*>(moz_xmalloc(mColormapSize));
        // Ensure the local colormap is initialized as opaque.
        memset(mGIFStruct.local_colormap, 0xFF, mColormapSize);
      } else {
        mColormapSize = mGIFStruct.local_colormap_buffer_size;
      }

      mColormap = mGIFStruct.local_colormap;
    }

    MOZ_ASSERT(mColormap);

    const size_t size = 3 << depth;
    if (mColormapSize > size) {
      // Clear the part of the colormap which will be unused with this palette.
      // If a GIF references an invalid palette entry, ensure the entry is
      // opaque white. This is needed for Skia as if it isn't, RGBX surfaces
      // will cause blending issues with Skia.
      memset(reinterpret_cast<uint8_t*>(mColormap) + size, 0xFF,
             mColormapSize - size);
    }

    MOZ_ASSERT(mColorTablePos == 0);

    // We read the local color table in unbuffered mode since it can be quite
    // large and it'd be preferable to avoid unnecessary copies.
    return Transition::ToUnbuffered(State::FINISHED_LOCAL_COLOR_TABLE,
                                    State::LOCAL_COLOR_TABLE, size);
  }

  // There's no local color table; copy the global color table into the palette
  // of the current frame.
  if (mColormap) {
    memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
  } else {
    mColormap = mGIFStruct.global_colormap;
  }

  return Transition::To(State::IMAGE_DATA_BLOCK, BLOCK_HEADER_LEN);
}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadLocalColorTable(
+ const char* aData, size_t aLength) {
+ uint8_t* dest = reinterpret_cast<uint8_t*>(mColormap) + mColorTablePos;
+ memcpy(dest, aData, aLength);
+ mColorTablePos += aLength;
+ return Transition::ContinueUnbuffered(State::LOCAL_COLOR_TABLE);
+}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::FinishedLocalColorTable() {
+ ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
+ mColorTablePos = 0;
+ return Transition::To(State::IMAGE_DATA_BLOCK, BLOCK_HEADER_LEN);
+}
+
// Prepares the LZW decompressor for a new frame. |aData| holds a single byte:
// the LZW minimum code size read from the stream.
LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadImageDataBlock(
    const char* aData) {
  // Make sure the transparent pixel is transparent in the colormap.
  if (mGIFStruct.is_transparent) {
    // Save the old value so we can restore it later.
    if (mColormap == mGIFStruct.global_colormap) {
      mOldColor = mColormap[mGIFStruct.tpixel];
    }
    mColormap[mGIFStruct.tpixel] = 0;
  }

  // Initialize the LZW decoder.
  mGIFStruct.datasize = uint8_t(aData[0]);
  if (mGIFStruct.datasize > MAX_LZW_BITS) {
    // A minimum code size this large can't be represented by our tables.
    return Transition::TerminateFailure();
  }
  const int clearCode = ClearCode();
  if (clearCode >= MAX_BITS) {
    return Transition::TerminateFailure();
  }

  // The two codes after the clear code are reserved (clear and end-of-data),
  // so the first dynamically-assigned code is clearCode + 2.
  mGIFStruct.avail = clearCode + 2;
  mGIFStruct.oldcode = -1;
  mGIFStruct.codesize = mGIFStruct.datasize + 1;
  mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
  mGIFStruct.datum = mGIFStruct.bits = 0;

  // Initialize the tables: every code below the clear code maps to itself,
  // i.e. a single literal palette index.
  for (int i = 0; i < clearCode; i++) {
    mGIFStruct.suffix[i] = i;
  }

  mGIFStruct.stackp = mGIFStruct.stack;

  // Begin reading image data sub-blocks.
  return Transition::To(State::IMAGE_DATA_SUB_BLOCK, SUB_BLOCK_HEADER_LEN);
}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadImageDataSubBlock(
+ const char* aData) {
+ const uint8_t subBlockLength = aData[0];
+ if (subBlockLength == 0) {
+ // We hit the block terminator.
+ EndImageFrame();
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+ }
+
+ if (mGIFStruct.pixels_remaining == 0) {
+ // We've already written to the entire image; we should've hit the block
+ // terminator at this point. This image is corrupt, but we'll tolerate it.
+
+ if (subBlockLength == GIF_TRAILER) {
+ // This GIF is missing the block terminator for the final block; we'll put
+ // up with it.
+ FinishInternal();
+ return Transition::TerminateSuccess();
+ }
+
+ // We're not at the end of the image, so just skip the extra data.
+ return Transition::ToUnbuffered(State::FINISHED_LZW_DATA,
+ State::SKIP_LZW_DATA, subBlockLength);
+ }
+
+ // Handle the standard case: there's data in the sub-block and pixels left to
+ // fill in the image. We read the sub-block unbuffered so we can get pixels on
+ // the screen as soon as possible.
+ return Transition::ToUnbuffered(State::FINISHED_LZW_DATA, State::LZW_DATA,
+ subBlockLength);
+}
+
// Decompresses LZW data and feeds the resulting pixels into mPipe. Called
// repeatedly in unbuffered mode as sub-block bytes arrive.
LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadLZWData(
    const char* aData, size_t aLength) {
  const uint8_t* data = reinterpret_cast<const uint8_t*>(aData);
  size_t length = aLength;

  // Keep yielding pixels while the frame is incomplete and we have either
  // fresh input bytes or at least one full buffered code left to consume.
  while (mGIFStruct.pixels_remaining > 0 &&
         (length > 0 || mGIFStruct.bits >= mGIFStruct.codesize)) {
    size_t bytesRead = 0;

    auto result = mPipe.WritePixelBlocks<uint32_t>(
        [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
          return YieldPixels<uint32_t>(data, length, &bytesRead, aPixelBlock,
                                       aBlockSize);
        });

    // Defensive clamp: the generator should never report consuming more input
    // than it was given.
    if (MOZ_UNLIKELY(bytesRead > length)) {
      MOZ_ASSERT_UNREACHABLE("Overread?");
      bytesRead = length;
    }

    // Advance our position in the input based upon what YieldPixel() consumed.
    data += bytesRead;
    length -= bytesRead;

    switch (result) {
      case WriteState::NEED_MORE_DATA:
        continue;

      case WriteState::FINISHED:
        NS_WARNING_ASSERTION(mGIFStruct.pixels_remaining <= 0,
                             "too many pixels");
        mGIFStruct.pixels_remaining = 0;
        break;

      case WriteState::FAILURE:
        return Transition::TerminateFailure();
    }
  }

  // We're done, but keep going until we consume all the data in the sub-block.
  return Transition::ContinueUnbuffered(State::LZW_DATA);
}
+
+LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::SkipSubBlocks(
+ const char* aData) {
+ // In the SKIP_SUB_BLOCKS state we skip over data sub-blocks that we're not
+ // interested in. Blocks consist of a block header (which can be up to 255
+ // bytes in length) and a series of data sub-blocks. Each data sub-block
+ // consists of a single byte length value, followed by the data itself. A data
+ // sub-block with a length of zero terminates the overall block.
+ // SKIP_SUB_BLOCKS reads a sub-block length value. If it's zero, we've arrived
+ // at the next block. Otherwise, we enter the SKIP_DATA_THEN_SKIP_SUB_BLOCKS
+ // state to skip over the sub-block data and return to SKIP_SUB_BLOCKS at the
+ // start of the next sub-block.
+
+ const uint8_t nextSubBlockLength = aData[0];
+ if (nextSubBlockLength == 0) {
+ // We hit the block terminator, so the sequence of data sub-blocks is over;
+ // begin processing another block.
+ return Transition::To(State::BLOCK_HEADER, BLOCK_HEADER_LEN);
+ }
+
+ // Skip to the next sub-block length value.
+ return Transition::ToUnbuffered(State::FINISHED_SKIPPING_DATA,
+ State::SKIP_DATA_THEN_SKIP_SUB_BLOCKS,
+ nextSubBlockLength);
+}
+
// Identifies the telemetry histogram that records decode speed for GIFs.
Maybe<Telemetry::HistogramID> nsGIFDecoder2::SpeedHistogram() const {
  return Some(Telemetry::IMAGE_DECODE_SPEED_GIF);
}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsGIFDecoder2.h b/image/decoders/nsGIFDecoder2.h
new file mode 100644
index 0000000000..5a6c501778
--- /dev/null
+++ b/image/decoders/nsGIFDecoder2.h
@@ -0,0 +1,166 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsGIFDecoder2_h
+#define mozilla_image_decoders_nsGIFDecoder2_h
+
+#include "Decoder.h"
+#include "GIF2.h"
+#include "StreamingLexer.h"
+#include "SurfacePipe.h"
+#include "mozilla/gfx/Swizzle.h"
+
+namespace mozilla {
+namespace image {
+class RasterImage;
+
+//////////////////////////////////////////////////////////////////////
+// nsGIFDecoder2 Definition
+
/// Decoder for GIF images. Implemented as a state machine driven by a
/// StreamingLexer; decoded pixels are written out through a SurfacePipe.
class nsGIFDecoder2 : public Decoder {
 public:
  ~nsGIFDecoder2();

  DecoderType GetType() const override { return DecoderType::GIF; }

 protected:
  LexerResult DoDecode(SourceBufferIterator& aIterator,
                       IResumable* aOnResume) override;
  nsresult FinishInternal() override;

  Maybe<Telemetry::HistogramID> SpeedHistogram() const override;

 private:
  friend class DecoderFactory;

  // Decoders should only be instantiated via DecoderFactory.
  explicit nsGIFDecoder2(RasterImage* aImage);

  /// Called when we begin decoding the image.
  void BeginGIF();

  /**
   * Called when we begin decoding a frame.
   *
   * @param aFrameRect The region of the image that contains data. The region
   *                   outside this rect is transparent.
   * @param aDepth The palette depth of this frame.
   * @param aIsInterlaced If true, this frame is an interlaced frame.
   */
  nsresult BeginImageFrame(const OrientedIntRect& aFrameRect, uint16_t aDepth,
                           bool aIsInterlaced);

  /// Called when we finish decoding a frame.
  void EndImageFrame();

  /// Called when we finish decoding the entire image.
  void FlushImageData();

  /// Convert color map to BGRA, applying any necessary CMS transforms.
  void ConvertColormap(uint32_t* aColormap, uint32_t aColors);

  /// Transforms a palette index into a pixel.
  template <typename PixelSize>
  PixelSize ColormapIndexToPixel(uint8_t aIndex);

  /// A generator function that performs LZW decompression and yields pixels.
  template <typename PixelSize>
  std::tuple<int32_t, Maybe<WriteState>> YieldPixels(const uint8_t* aData,
                                                     size_t aLength,
                                                     size_t* aBytesReadOut,
                                                     PixelSize* aPixelBlock,
                                                     int32_t aBlockSize);

  /// Checks if we have transparency, either because the header indicates that
  /// there's alpha, or because the frame rect doesn't cover the entire image.
  bool CheckForTransparency(const OrientedIntRect& aFrameRect);

  // @return the clear code used for LZW decompression.
  int ClearCode() const {
    MOZ_ASSERT(mGIFStruct.datasize <= MAX_LZW_BITS);
    return 1 << mGIFStruct.datasize;
  }

  // States of the decoding state machine. Decoding proceeds roughly:
  // header -> screen descriptor -> global color table -> a sequence of
  // blocks (extensions or image frames) until the trailer or EOF.
  enum class State {
    FAILURE,
    SUCCESS,
    GIF_HEADER,
    SCREEN_DESCRIPTOR,
    GLOBAL_COLOR_TABLE,
    FINISHED_GLOBAL_COLOR_TABLE,
    BLOCK_HEADER,
    EXTENSION_HEADER,
    GRAPHIC_CONTROL_EXTENSION,
    APPLICATION_IDENTIFIER,
    NETSCAPE_EXTENSION_SUB_BLOCK,
    NETSCAPE_EXTENSION_DATA,
    IMAGE_DESCRIPTOR,
    FINISH_IMAGE_DESCRIPTOR,
    LOCAL_COLOR_TABLE,
    FINISHED_LOCAL_COLOR_TABLE,
    IMAGE_DATA_BLOCK,
    IMAGE_DATA_SUB_BLOCK,
    LZW_DATA,
    SKIP_LZW_DATA,
    FINISHED_LZW_DATA,
    SKIP_SUB_BLOCKS,
    SKIP_DATA_THEN_SKIP_SUB_BLOCKS,
    FINISHED_SKIPPING_DATA
  };

  // One handler per state; each consumes the bytes the lexer buffered for its
  // state and returns the transition to the next state.
  LexerTransition<State> ReadGIFHeader(const char* aData);
  LexerTransition<State> ReadScreenDescriptor(const char* aData);
  LexerTransition<State> ReadGlobalColorTable(const char* aData,
                                              size_t aLength);
  LexerTransition<State> FinishedGlobalColorTable();
  LexerTransition<State> ReadBlockHeader(const char* aData);
  LexerTransition<State> ReadExtensionHeader(const char* aData);
  LexerTransition<State> ReadGraphicControlExtension(const char* aData);
  LexerTransition<State> ReadApplicationIdentifier(const char* aData);
  LexerTransition<State> ReadNetscapeExtensionSubBlock(const char* aData);
  LexerTransition<State> ReadNetscapeExtensionData(const char* aData);
  LexerTransition<State> ReadImageDescriptor(const char* aData);
  LexerTransition<State> FinishImageDescriptor(const char* aData);
  LexerTransition<State> ReadLocalColorTable(const char* aData, size_t aLength);
  LexerTransition<State> FinishedLocalColorTable();
  LexerTransition<State> ReadImageDataBlock(const char* aData);
  LexerTransition<State> ReadImageDataSubBlock(const char* aData);
  LexerTransition<State> ReadLZWData(const char* aData, size_t aLength);
  LexerTransition<State> SkipSubBlocks(const char* aData);

  // The StreamingLexer used to manage input. The initial size of the buffer is
  // chosen as a little larger than the maximum size of any fixed-length data we
  // have to read for a state. We read variable-length data in unbuffered mode
  // so the buffer shouldn't have to be resized during decoding.
  StreamingLexer<State, 16> mLexer;

  uint32_t mOldColor;  // The old value of the transparent pixel

  // The frame number of the currently-decoding frame when we're in the middle
  // of decoding it, and -1 otherwise.
  int32_t mCurrentFrameIndex;

  // When we're reading in the global or local color table, this records our
  // current position - i.e., the offset into which the next byte should be
  // written.
  size_t mColorTablePos;
  uint32_t* mColormap;  // Current colormap to be used in Cairo format
  uint32_t mColormapSize;

  uint8_t mColorMask;  // Apply this to the pixel to keep within colormap
  bool mGIFOpen;       // True once BeginGIF() has run for this image.
  bool mSawTransparency;

  gif_struct mGIFStruct;  // Parsed GIF header/frame/LZW state.

  gfx::SwizzleRowFn mSwizzleFn;  /// Method to unpack color tables from RGB.
  SurfacePipe mPipe;  /// The SurfacePipe used to write to the output surface.
};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsGIFDecoder2_h
diff --git a/image/decoders/nsICODecoder.cpp b/image/decoders/nsICODecoder.cpp
new file mode 100644
index 0000000000..ff37355429
--- /dev/null
+++ b/image/decoders/nsICODecoder.cpp
@@ -0,0 +1,709 @@
+/* vim:set tw=80 expandtab softtabstop=2 ts=2 sw=2: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is a Cross-Platform ICO Decoder, which should work everywhere, including
+ * Big-Endian machines like the PowerPC. */
+
+#include "nsICODecoder.h"
+
+#include <stdlib.h>
+
+#include <utility>
+
+#include "RasterImage.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/gfx/Swizzle.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+using namespace mozilla::gfx;
+
+namespace mozilla {
+namespace image {
+
+// Constants.
+static const uint32_t ICOHEADERSIZE = 6;
+static const uint32_t BITMAPINFOSIZE = bmp::InfoHeaderLength::WIN_ICO;
+
+// ----------------------------------------
+// Actual Data Processing
+// ----------------------------------------
+
+// Obtains the number of colors from the bits per pixel
+uint16_t nsICODecoder::GetNumColors() {
+ uint16_t numColors = 0;
+ if (mBPP <= 8) {
+ switch (mBPP) {
+ case 1:
+ numColors = 2;
+ break;
+ case 4:
+ numColors = 16;
+ break;
+ case 8:
+ numColors = 256;
+ break;
+ default:
+ numColors = (uint16_t)-1;
+ }
+ }
+ return numColors;
+}
+
// Decoders should only be instantiated via DecoderFactory (see header).
nsICODecoder::nsICODecoder(RasterImage* aImage)
    : Decoder(aImage),
      // Start lexing at the fixed-size ICONDIR header; hitting the end of the
      // data at a state boundary terminates successfully.
      mLexer(Transition::To(ICOState::HEADER, ICOHEADERSIZE),
             Transition::TerminateSuccess()),
      mDirEntry(nullptr),
      mNumIcons(0),
      mCurrIcon(0),
      mBPP(0),
      mMaskRowSize(0),
      mCurrMaskLine(0),
      mIsCursor(false),
      mHasMaskAlpha(false) {}
+
// Completes a successful decode by adopting the contained decoder's state.
nsresult nsICODecoder::FinishInternal() {
  // We shouldn't be called in error cases
  MOZ_ASSERT(!HasError(), "Shouldn't call FinishInternal after error!");

  return GetFinalStateFromContainedDecoder();
}
+
// Completes a failed decode; still syncs state with the contained decoder.
nsresult nsICODecoder::FinishWithErrorInternal() {
  // No need to assert !mInFrame here because this condition is enforced by
  // mContainedDecoder.
  return GetFinalStateFromContainedDecoder();
}
+
// Mirrors the contained PNG/BMP decoder's final state (progress, invalid
// rect, current frame) into this decoder and finalizes the output surface.
nsresult nsICODecoder::GetFinalStateFromContainedDecoder() {
  if (!mContainedDecoder) {
    return NS_OK;
  }

  // Let the contained decoder finish up if necessary.
  FlushContainedDecoder();

  // Make our state the same as the state of the contained decoder.
  mDecodeDone = mContainedDecoder->GetDecodeDone();
  mProgress |= mContainedDecoder->TakeProgress();
  mInvalidRect.UnionRect(mInvalidRect, mContainedDecoder->TakeInvalidRect());
  mCurrentFrame = mContainedDecoder->GetCurrentFrameRef();

  // Finalize the frame which we deferred to ensure we could modify the final
  // result (e.g. to apply the BMP mask).
  MOZ_ASSERT(!mContainedDecoder->GetFinalizeFrames());
  if (mCurrentFrame) {
    mCurrentFrame->FinalizeSurface();
  }

  // Propagate errors.
  nsresult rv =
      HasError() || mContainedDecoder->HasError() ? NS_ERROR_FAILURE : NS_OK;

  MOZ_ASSERT(NS_FAILED(rv) || !mCurrentFrame || mCurrentFrame->IsFinished());
  return rv;
}
+
+LexerTransition<ICOState> nsICODecoder::ReadHeader(const char* aData) {
+ // If the third byte is 1, this is an icon. If 2, a cursor.
+ if ((aData[2] != 1) && (aData[2] != 2)) {
+ return Transition::TerminateFailure();
+ }
+ mIsCursor = (aData[2] == 2);
+
+ // The fifth and sixth bytes specify the number of resources in the file.
+ mNumIcons = LittleEndian::readUint16(aData + 4);
+ if (mNumIcons == 0) {
+ return Transition::TerminateSuccess(); // Nothing to do.
+ }
+
+ // Downscale-during-decode can end up decoding different resources in the ICO
+ // file depending on the target size. Since the resources are not necessarily
+ // scaled versions of the same image, some may be transparent and some may not
+ // be. We could be precise about transparency if we decoded the metadata of
+ // every resource, but for now we don't and it's safest to assume that
+ // transparency could be present.
+ PostHasTransparency();
+
+ return Transition::To(ICOState::DIR_ENTRY, ICODIRENTRYSIZE);
+}
+
// Returns the lowest file offset at which resource data can legitimately
// begin; directory entries pointing before this are rejected.
size_t nsICODecoder::FirstResourceOffset() const {
  MOZ_ASSERT(mNumIcons > 0,
             "Calling FirstResourceOffset before processing header");

  // The first resource starts right after the directory, which starts right
  // after the ICO header.
  return ICOHEADERSIZE + mNumIcons * ICODIRENTRYSIZE;
}
+
+LexerTransition<ICOState> nsICODecoder::ReadDirEntry(const char* aData) {
+ mCurrIcon++;
+
+ // Ensure the resource has an offset past the ICO headers.
+ uint32_t offset = LittleEndian::readUint32(aData + 12);
+ if (offset >= FirstResourceOffset()) {
+ // Read the directory entry.
+ IconDirEntryEx e;
+ e.mWidth = aData[0];
+ e.mHeight = aData[1];
+ e.mColorCount = aData[2];
+ e.mReserved = aData[3];
+ e.mPlanes = LittleEndian::readUint16(aData + 4);
+ e.mBitCount = LittleEndian::readUint16(aData + 6);
+ e.mBytesInRes = LittleEndian::readUint32(aData + 8);
+ e.mImageOffset = offset;
+ e.mSize = OrientedIntSize(e.mWidth, e.mHeight);
+
+ // Only accept entries with sufficient resource data to actually contain
+ // some image data.
+ if (e.mBytesInRes > BITMAPINFOSIZE) {
+ if (e.mWidth == 0 || e.mHeight == 0) {
+ mUnsizedDirEntries.AppendElement(e);
+ } else {
+ mDirEntries.AppendElement(e);
+ }
+ }
+ }
+
+ if (mCurrIcon == mNumIcons) {
+ if (mUnsizedDirEntries.IsEmpty()) {
+ return Transition::To(ICOState::FINISHED_DIR_ENTRY, 0);
+ }
+ return Transition::To(ICOState::ITERATE_UNSIZED_DIR_ENTRY, 0);
+ }
+
+ return Transition::To(ICOState::DIR_ENTRY, ICODIRENTRYSIZE);
+}
+
// Runs a metadata decode for each directory entry that reported a zero width
// or height, so its real size can be read from the resource data itself. Each
// invocation either sets up iteration (first call), or harvests the result of
// the metadata decode that just finished, then dispatches the next entry.
LexerTransition<ICOState> nsICODecoder::IterateUnsizedDirEntry() {
  MOZ_ASSERT(!mUnsizedDirEntries.IsEmpty());

  if (!mDirEntry) {
    // The first time we are here, there is no entry selected. We must prepare a
    // new iterator for the contained decoder to advance as it wills. Cloning at
    // this point ensures it will begin at the end of the dir entries.
    mReturnIterator = mLexer.Clone(*mIterator, SIZE_MAX);
    if (mReturnIterator.isNothing()) {
      // If we cannot read further than this point, then there is no resource
      // data to read.
      return Transition::TerminateFailure();
    }
  } else {
    // We have already selected an entry which means a metadata decoder has
    // finished. Verify the size is valid and if so, add to the discovered
    // resources.
    if (mDirEntry->mSize.width > 0 && mDirEntry->mSize.height > 0) {
      mDirEntries.AppendElement(*mDirEntry);
    }

    // Remove the entry from the unsized list either way.
    mDirEntry = nullptr;
    mUnsizedDirEntries.RemoveElementAt(0);

    // Our iterator is at an unknown point, so reset it to the point that we
    // saved.
    mIterator = mLexer.Clone(*mReturnIterator, SIZE_MAX);
    if (mIterator.isNothing()) {
      MOZ_ASSERT_UNREACHABLE("Cannot re-clone return iterator");
      return Transition::TerminateFailure();
    }
  }

  // There are no more unsized entries, so we can finally decide which entry to
  // select for decoding.
  if (mUnsizedDirEntries.IsEmpty()) {
    mReturnIterator.reset();
    return Transition::To(ICOState::FINISHED_DIR_ENTRY, 0);
  }

  // Move to the resource data to start metadata decoding.
  mDirEntry = &mUnsizedDirEntries[0];
  size_t offsetToResource = mDirEntry->mImageOffset - FirstResourceOffset();
  return Transition::ToUnbuffered(ICOState::FOUND_RESOURCE,
                                  ICOState::SKIP_TO_RESOURCE, offsetToResource);
}
+
// All directory entries (including formerly-unsized ones) are now known.
// Chooses which resource to decode: the one best matching the explicit output
// size if one was requested, otherwise the biggest resource. The biggest
// resource always provides the intrinsic size and (for cursors) the hotspot.
LexerTransition<ICOState> nsICODecoder::FinishDirEntry() {
  MOZ_ASSERT(!mDirEntry);

  if (mDirEntries.IsEmpty()) {
    return Transition::TerminateFailure();
  }

  // If an explicit output size was specified, we'll try to select the resource
  // that matches it best below.
  const Maybe<OrientedIntSize> desiredSize = ExplicitOutputSize();

  // Determine the biggest resource. We always use the biggest resource for the
  // intrinsic size, and if we don't have a specific desired size, we select it
  // as the best resource as well.
  int32_t bestDelta = INT32_MIN;
  IconDirEntryEx* biggestEntry = nullptr;

  for (size_t i = 0; i < mDirEntries.Length(); ++i) {
    IconDirEntryEx& e = mDirEntries[i];
    mImageMetadata.AddNativeSize(e.mSize);

    // "Biggest" prefers higher bit depth first, then larger area; ties go to
    // the later entry.
    if (!biggestEntry ||
        (e.mBitCount >= biggestEntry->mBitCount &&
         e.mSize.width * e.mSize.height >=
             biggestEntry->mSize.width * biggestEntry->mSize.height)) {
      biggestEntry = &e;

      if (!desiredSize) {
        mDirEntry = &e;
      }
    }

    if (desiredSize) {
      // Calculate the delta between this resource's size and the desired size,
      // so we can see if it is better than our current-best option. In the
      // case of several equally-good resources, we use the last one. "Better"
      // in this case is determined by |delta|, a measure of the difference in
      // size between the entry we've found and the desired size. We will choose
      // the smallest resource that is greater than or equal to the desired size
      // (i.e. we assume it's better to downscale a larger icon than to upscale
      // a smaller one).
      int32_t delta = std::min(e.mSize.width - desiredSize->width,
                               e.mSize.height - desiredSize->height);
      if (!mDirEntry || (e.mBitCount >= mDirEntry->mBitCount &&
                         ((bestDelta < 0 && delta >= bestDelta) ||
                          (delta >= 0 && delta <= bestDelta)))) {
        mDirEntry = &e;
        bestDelta = delta;
      }
    }
  }

  MOZ_ASSERT(mDirEntry);
  MOZ_ASSERT(biggestEntry);

  // If this is a cursor, set the hotspot. We use the hotspot from the biggest
  // resource since we also use that resource for the intrinsic size.
  if (mIsCursor) {
    mImageMetadata.SetHotspot(biggestEntry->mXHotspot, biggestEntry->mYHotspot);
  }

  // We always report the biggest resource's size as the intrinsic size; this
  // is necessary for downscale-during-decode to work since we won't even
  // attempt to *upscale* while decoding.
  PostSize(biggestEntry->mSize.width, biggestEntry->mSize.height);
  if (HasError()) {
    return Transition::TerminateFailure();
  }

  if (IsMetadataDecode()) {
    return Transition::TerminateSuccess();
  }

  if (mDirEntry->mSize == OutputSize()) {
    // If the resource we selected matches the output size perfectly, we don't
    // need to do any downscaling.
    MOZ_ASSERT_IF(desiredSize, mDirEntry->mSize == *desiredSize);
    MOZ_ASSERT_IF(!desiredSize, mDirEntry->mSize == Size());
  } else if (OutputSize().width < mDirEntry->mSize.width ||
             OutputSize().height < mDirEntry->mSize.height) {
    // Create a downscaler if we need to downscale.
    //
    // TODO(aosmond): This is the last user of Downscaler. We should switch this
    // to SurfacePipe as well so we can remove the code from tree.
    mDownscaler.emplace(OutputSize().ToUnknownSize());
  }

  // Jump to the selected resource's data and start decoding it.
  size_t offsetToResource = mDirEntry->mImageOffset - FirstResourceOffset();
  return Transition::ToUnbuffered(ICOState::FOUND_RESOURCE,
                                  ICOState::SKIP_TO_RESOURCE, offsetToResource);
}
+
// Inspects the first BITMAPINFOSIZE bytes of the selected resource to decide
// whether it is an embedded PNG or BMP, and hands off to the matching
// contained decoder.
LexerTransition<ICOState> nsICODecoder::SniffResource(const char* aData) {
  MOZ_ASSERT(mDirEntry);

  // We have BITMAPINFOSIZE bytes buffered at this point. We know an embedded
  // BMP will have at least that many bytes by definition. We can also infer
  // that any valid embedded PNG will contain that many bytes as well because:
  // BITMAPINFOSIZE
  // <
  // signature (8 bytes) +
  // IHDR (12 bytes header + 13 bytes data)
  // IDAT (12 bytes header)

  // We use the first PNGSIGNATURESIZE bytes to determine whether this resource
  // is a PNG or a BMP.
  bool isPNG =
      !memcmp(aData, nsPNGDecoder::pngSignatureBytes, PNGSIGNATURESIZE);
  if (isPNG) {
    if (mDirEntry->mBytesInRes <= BITMAPINFOSIZE) {
      return Transition::TerminateFailure();
    }

    // Prepare a new iterator for the contained decoder to advance as it wills.
    // Cloning at the point ensures it will begin at the resource offset.
    Maybe<SourceBufferIterator> containedIterator =
        mLexer.Clone(*mIterator, mDirEntry->mBytesInRes);
    if (containedIterator.isNothing()) {
      return Transition::TerminateFailure();
    }

    // Create a PNG decoder which will do the rest of the work for us.
    // mReturnIterator is only set while resolving unsized entries, so its
    // presence means this is a metadata (size-only) decode.
    bool metadataDecode = mReturnIterator.isSome();
    Maybe<OrientedIntSize> expectedSize =
        metadataDecode ? Nothing() : Some(mDirEntry->mSize);
    mContainedDecoder = DecoderFactory::CreateDecoderForICOResource(
        DecoderType::PNG, std::move(containedIterator.ref()), WrapNotNull(this),
        metadataDecode, expectedSize);

    // Read in the rest of the PNG unbuffered.
    size_t toRead = mDirEntry->mBytesInRes - BITMAPINFOSIZE;
    return Transition::ToUnbuffered(ICOState::FINISHED_RESOURCE,
                                    ICOState::READ_RESOURCE, toRead);
  }

  // Make sure we have a sane size for the bitmap information header.
  int32_t bihSize = LittleEndian::readUint32(aData);
  if (bihSize != static_cast<int32_t>(BITMAPINFOSIZE)) {
    return Transition::TerminateFailure();
  }

  // Read in the rest of the bitmap information header.
  return ReadBIH(aData);
}
+
+LexerTransition<ICOState> nsICODecoder::ReadResource() {
+ if (!FlushContainedDecoder()) {
+ return Transition::TerminateFailure();
+ }
+
+ return Transition::ContinueUnbuffered(ICOState::READ_RESOURCE);
+}
+
// Parses the embedded BMP's bitmap info header and spins up a contained BMP
// decoder for the pixel data, determining whether an AND mask follows it.
LexerTransition<ICOState> nsICODecoder::ReadBIH(const char* aData) {
  MOZ_ASSERT(mDirEntry);

  // Extract the BPP from the BIH header; it should be trusted over the one
  // we have from the ICO header which is usually set to 0.
  mBPP = LittleEndian::readUint16(aData + 14);

  // Check to make sure we have valid color settings.
  uint16_t numColors = GetNumColors();
  if (numColors == uint16_t(-1)) {
    return Transition::TerminateFailure();
  }

  // The color table is present only if BPP is <= 8.
  MOZ_ASSERT_IF(mBPP > 8, numColors == 0);

  // The ICO format when containing a BMP does not include the 14 byte
  // bitmap file header. So we create the BMP decoder via the constructor that
  // tells it to skip this, and pass in the required data (dataOffset) that
  // would have been present in the header.
  uint32_t dataOffset =
      bmp::FILE_HEADER_LENGTH + BITMAPINFOSIZE + 4 * numColors;

  // Prepare a new iterator for the contained decoder to advance as it wills.
  // Cloning at the point ensures it will begin at the resource offset.
  Maybe<SourceBufferIterator> containedIterator =
      mLexer.Clone(*mIterator, mDirEntry->mBytesInRes);
  if (containedIterator.isNothing()) {
    return Transition::TerminateFailure();
  }

  // Create a BMP decoder which will do most of the work for us; the exception
  // is the AND mask, which isn't present in standalone BMPs.
  bool metadataDecode = mReturnIterator.isSome();
  Maybe<OrientedIntSize> expectedSize =
      metadataDecode ? Nothing() : Some(mDirEntry->mSize);
  mContainedDecoder = DecoderFactory::CreateDecoderForICOResource(
      DecoderType::BMP, std::move(containedIterator.ref()), WrapNotNull(this),
      metadataDecode, expectedSize, Some(dataOffset));

  RefPtr<nsBMPDecoder> bmpDecoder =
      static_cast<nsBMPDecoder*>(mContainedDecoder.get());

  // Ensure the decoder has parsed at least the BMP's bitmap info header.
  if (!FlushContainedDecoder()) {
    return Transition::TerminateFailure();
  }

  // If this is a metadata decode, FinishResource will do any necessary checks.
  if (mContainedDecoder->IsMetadataDecode()) {
    return Transition::To(ICOState::FINISHED_RESOURCE, 0);
  }

  // Do we have an AND mask on this BMP? If so, we need to read it after we read
  // the BMP data itself.
  uint32_t bmpDataLength = bmpDecoder->GetCompressedImageSize() + 4 * numColors;
  bool hasANDMask = (BITMAPINFOSIZE + bmpDataLength) < mDirEntry->mBytesInRes;
  ICOState afterBMPState =
      hasANDMask ? ICOState::PREPARE_FOR_MASK : ICOState::FINISHED_RESOURCE;

  // Read in the rest of the BMP unbuffered.
  return Transition::ToUnbuffered(afterBMPState, ICOState::READ_RESOURCE,
                                  bmpDataLength);
}
+
// The embedded BMP's pixel data is fully decoded; validates the AND mask that
// follows it and sets up either direct mask application or (when downscaling)
// a temporary mask buffer plus a downscaler pass.
LexerTransition<ICOState> nsICODecoder::PrepareForMask() {
  MOZ_ASSERT(mDirEntry);
  MOZ_ASSERT(mContainedDecoder->GetDecodeDone());

  // We have received all of the data required by the BMP decoder so flushing
  // here guarantees the decode has finished.
  if (!FlushContainedDecoder()) {
    return Transition::TerminateFailure();
  }

  MOZ_ASSERT(mContainedDecoder->GetDecodeDone());

  RefPtr<nsBMPDecoder> bmpDecoder =
      static_cast<nsBMPDecoder*>(mContainedDecoder.get());

  uint16_t numColors = GetNumColors();
  MOZ_ASSERT(numColors != uint16_t(-1));

  // Determine the length of the AND mask.
  uint32_t bmpLengthWithHeader =
      BITMAPINFOSIZE + bmpDecoder->GetCompressedImageSize() + 4 * numColors;
  MOZ_ASSERT(bmpLengthWithHeader < mDirEntry->mBytesInRes);
  uint32_t maskLength = mDirEntry->mBytesInRes - bmpLengthWithHeader;

  // If the BMP provides its own transparency, we ignore the AND mask.
  if (bmpDecoder->HasTransparency()) {
    return Transition::ToUnbuffered(ICOState::FINISHED_RESOURCE,
                                    ICOState::SKIP_MASK, maskLength);
  }

  // Compute the row size for the mask. Mask rows are 1 bit per pixel, padded
  // to a 32-bit boundary.
  mMaskRowSize = ((mDirEntry->mSize.width + 31) / 32) * 4;  // + 31 to round up

  // If the expected size of the AND mask is larger than its actual size, then
  // we must have a truncated (and therefore corrupt) AND mask.
  uint32_t expectedLength = mMaskRowSize * mDirEntry->mSize.height;
  if (maskLength < expectedLength) {
    return Transition::TerminateFailure();
  }

  // If we're downscaling, the mask is the wrong size for the surface we've
  // produced, so we need to downscale the mask into a temporary buffer and then
  // combine the mask's alpha values with the color values from the image.
  if (mDownscaler) {
    MOZ_ASSERT(bmpDecoder->GetImageDataLength() ==
               mDownscaler->TargetSize().width *
                   mDownscaler->TargetSize().height * sizeof(uint32_t));
    mMaskBuffer =
        MakeUniqueFallible<uint8_t[]>(bmpDecoder->GetImageDataLength());
    if (NS_WARN_IF(!mMaskBuffer)) {
      return Transition::TerminateFailure();
    }
    nsresult rv = mDownscaler->BeginFrame(mDirEntry->mSize.ToUnknownSize(),
                                          Nothing(), mMaskBuffer.get(),
                                          /* aHasAlpha = */ true,
                                          /* aFlipVertically = */ true);
    if (NS_FAILED(rv)) {
      return Transition::TerminateFailure();
    }
  }

  // Masks are stored bottom-up, so read rows counting down from the height.
  mCurrMaskLine = mDirEntry->mSize.height;
  return Transition::To(ICOState::READ_MASK_ROW, mMaskRowSize);
}
+
// Applies one row of the 1-bit AND mask: any set mask bit clears the
// corresponding pixel to fully transparent. Rows are processed bottom-up
// (mCurrMaskLine counts down to zero).
LexerTransition<ICOState> nsICODecoder::ReadMaskRow(const char* aData) {
  MOZ_ASSERT(mDirEntry);

  mCurrMaskLine--;

  uint8_t sawTransparency = 0;

  // Get the mask row we're reading.
  const uint8_t* mask = reinterpret_cast<const uint8_t*>(aData);
  const uint8_t* maskRowEnd = mask + mMaskRowSize;

  // Get the corresponding row of the mask buffer (if we're downscaling) or the
  // decoded image data (if we're not).
  uint32_t* decoded = nullptr;
  if (mDownscaler) {
    // Initialize the row to all white and fully opaque.
    memset(mDownscaler->RowBuffer(), 0xFF,
           mDirEntry->mSize.width * sizeof(uint32_t));

    decoded = reinterpret_cast<uint32_t*>(mDownscaler->RowBuffer());
  } else {
    RefPtr<nsBMPDecoder> bmpDecoder =
        static_cast<nsBMPDecoder*>(mContainedDecoder.get());
    uint32_t* imageData = bmpDecoder->GetImageData();
    if (!imageData) {
      return Transition::TerminateFailure();
    }

    decoded = imageData + mCurrMaskLine * mDirEntry->mSize.width;
  }

  MOZ_ASSERT(decoded);
  uint32_t* decodedRowEnd = decoded + mDirEntry->mSize.width;

  // Iterate simultaneously through the AND mask and the image data. Each mask
  // byte covers eight pixels, most significant bit first.
  while (mask < maskRowEnd) {
    uint8_t idx = *mask++;
    sawTransparency |= idx;
    for (uint8_t bit = 0x80; bit && decoded < decodedRowEnd; bit >>= 1) {
      // Clear pixel completely for transparency.
      if (idx & bit) {
        *decoded = 0;
      }
      decoded++;
    }
  }

  if (mDownscaler) {
    mDownscaler->CommitRow();
  }

  // If any bits are set in sawTransparency, then we know at least one pixel was
  // transparent.
  if (sawTransparency) {
    mHasMaskAlpha = true;
  }

  if (mCurrMaskLine == 0) {
    return Transition::To(ICOState::FINISH_MASK, 0);
  }

  return Transition::To(ICOState::READ_MASK_ROW, mMaskRowSize);
}
+
// Completes mask processing. When downscaling, the alpha values accumulated
// in mMaskBuffer are transferred into the image, which is then premultiplied.
LexerTransition<ICOState> nsICODecoder::FinishMask() {
  // If we're downscaling, we now have the appropriate alpha values in
  // mMaskBuffer. We just need to transfer them to the image.
  if (mDownscaler) {
    // Retrieve the image data.
    RefPtr<nsBMPDecoder> bmpDecoder =
        static_cast<nsBMPDecoder*>(mContainedDecoder.get());
    uint8_t* imageData = reinterpret_cast<uint8_t*>(bmpDecoder->GetImageData());
    if (!imageData) {
      return Transition::TerminateFailure();
    }

    // Iterate through the alpha values, copying from mask to image.
    // Alpha occupies byte offset 3 of each 4-byte OS_RGBA pixel, so we start
    // at index 3 and step by 4.
    MOZ_ASSERT(mMaskBuffer);
    MOZ_ASSERT(bmpDecoder->GetImageDataLength() > 0);
    for (size_t i = 3; i < bmpDecoder->GetImageDataLength(); i += 4) {
      imageData[i] = mMaskBuffer[i];
    }
    int32_t stride = mDownscaler->TargetSize().width * sizeof(uint32_t);
    DebugOnly<bool> ret =
        // We know the format is OS_RGBA because we always assume bmp's inside
        // ico's are transparent.
        PremultiplyData(imageData, stride, SurfaceFormat::OS_RGBA, imageData,
                        stride, SurfaceFormat::OS_RGBA,
                        mDownscaler->TargetSize());
    MOZ_ASSERT(ret);
  }

  return Transition::To(ICOState::FINISHED_RESOURCE, 0);
}
+
+LexerTransition<ICOState> nsICODecoder::FinishResource() {
+ MOZ_ASSERT(mDirEntry);
+
+ // We have received all of the data required by the PNG/BMP decoder so
+ // flushing here guarantees the decode has finished.
+ if (!FlushContainedDecoder()) {
+ return Transition::TerminateFailure();
+ }
+
+ MOZ_ASSERT(mContainedDecoder->GetDecodeDone());
+
+ // If it is a metadata decode, all we were trying to get was the size
+ // information missing from the dir entry.
+ if (mContainedDecoder->IsMetadataDecode()) {
+ if (mContainedDecoder->HasSize()) {
+ mDirEntry->mSize = mContainedDecoder->Size();
+ }
+ return Transition::To(ICOState::ITERATE_UNSIZED_DIR_ENTRY, 0);
+ }
+
+ // Raymond Chen says that 32bpp only are valid PNG ICOs
+ // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
+ if (!mContainedDecoder->IsValidICOResource()) {
+ return Transition::TerminateFailure();
+ }
+
+ // This size from the resource should match that from the dir entry.
+ MOZ_ASSERT_IF(mContainedDecoder->HasSize(),
+ mContainedDecoder->Size() == mDirEntry->mSize);
+
+ return Transition::TerminateSuccess();
+}
+
+LexerResult nsICODecoder::DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) {
+ MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+ return mLexer.Lex(
+ aIterator, aOnResume,
+ [=](ICOState aState, const char* aData, size_t aLength) {
+ switch (aState) {
+ case ICOState::HEADER:
+ return ReadHeader(aData);
+ case ICOState::DIR_ENTRY:
+ return ReadDirEntry(aData);
+ case ICOState::FINISHED_DIR_ENTRY:
+ return FinishDirEntry();
+ case ICOState::ITERATE_UNSIZED_DIR_ENTRY:
+ return IterateUnsizedDirEntry();
+ case ICOState::SKIP_TO_RESOURCE:
+ return Transition::ContinueUnbuffered(ICOState::SKIP_TO_RESOURCE);
+ case ICOState::FOUND_RESOURCE:
+ return Transition::To(ICOState::SNIFF_RESOURCE, BITMAPINFOSIZE);
+ case ICOState::SNIFF_RESOURCE:
+ return SniffResource(aData);
+ case ICOState::READ_RESOURCE:
+ return ReadResource();
+ case ICOState::PREPARE_FOR_MASK:
+ return PrepareForMask();
+ case ICOState::READ_MASK_ROW:
+ return ReadMaskRow(aData);
+ case ICOState::FINISH_MASK:
+ return FinishMask();
+ case ICOState::SKIP_MASK:
+ return Transition::ContinueUnbuffered(ICOState::SKIP_MASK);
+ case ICOState::FINISHED_RESOURCE:
+ return FinishResource();
+ default:
+ MOZ_CRASH("Unknown ICOState");
+ }
+ });
+}
+
// Drives the contained PNG/BMP decoder over all currently-available data and
// mirrors its progress, invalid rect, and error state into this decoder.
// Returns true if the contained decoder has not failed.
bool nsICODecoder::FlushContainedDecoder() {
  MOZ_ASSERT(mContainedDecoder);

  bool succeeded = true;

  // If we run out of data, the ICO decoder will get resumed when there's more
  // data available, as usual, so we don't need the contained decoder to get
  // resumed too. That's why no resume callback is passed here; all the caller
  // needs to do is flush again when there is new data.
  LexerResult result = mContainedDecoder->Decode();
  if (result == LexerResult(TerminalState::FAILURE)) {
    succeeded = false;
  }

  MOZ_ASSERT(result != LexerResult(Yield::OUTPUT_AVAILABLE),
             "Unexpected yield");

  // Make our state the same as the state of the contained decoder, and
  // propagate errors.
  mProgress |= mContainedDecoder->TakeProgress();
  mInvalidRect.UnionRect(mInvalidRect, mContainedDecoder->TakeInvalidRect());
  if (mContainedDecoder->HasError()) {
    succeeded = false;
  }

  return succeeded;
}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsICODecoder.h b/image/decoders/nsICODecoder.h
new file mode 100644
index 0000000000..4e2665334e
--- /dev/null
+++ b/image/decoders/nsICODecoder.h
@@ -0,0 +1,106 @@
+/* vim:set tw=80 expandtab softtabstop=4 ts=4 sw=2: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsICODecoder_h
+#define mozilla_image_decoders_nsICODecoder_h
+
+#include "StreamingLexer.h"
+#include "Decoder.h"
+#include "Downscaler.h"
+#include "imgFrame.h"
+#include "mozilla/gfx/2D.h"
+#include "nsBMPDecoder.h"
+#include "nsPNGDecoder.h"
+#include "ICOFileHeaders.h"
+
+namespace mozilla {
+namespace image {
+
+class RasterImage;
+
// States for the ICO decoder's streaming lexer; see nsICODecoder::DoDecode()
// for how each state is dispatched to its handler.
enum class ICOState {
  HEADER,                     // Reading the ICO file header.
  DIR_ENTRY,                  // Reading one directory entry.
  FINISHED_DIR_ENTRY,         // All directory entries have been read.
  ITERATE_UNSIZED_DIR_ENTRY,  // Visiting entries whose size is still unknown.
  SKIP_TO_RESOURCE,           // Skipping bytes up to the selected resource.
  FOUND_RESOURCE,             // Positioned at the start of the resource.
  SNIFF_RESOURCE,             // Determining the resource's type.
  READ_RESOURCE,              // Feeding bytes to the contained decoder.
  PREPARE_FOR_MASK,           // Setting up for the BMP AND mask.
  READ_MASK_ROW,              // Reading one row of the AND mask.
  FINISH_MASK,                // Applying the mask to the decoded image.
  SKIP_MASK,                  // Skipping over a mask we don't need.
  FINISHED_RESOURCE           // The resource has been fully decoded.
};
+
// Decoder for the ICO/CUR container format. The pixel data itself is decoded
// by a contained BMP or PNG decoder; this class parses the icon directory,
// selects a resource, hands its bytes to the contained decoder, and (for
// BMP resources) applies the trailing 1-bpp AND transparency mask.
class nsICODecoder : public Decoder {
 public:
  virtual ~nsICODecoder() {}

  /// @return The offset from the beginning of the ICO to the first resource.
  size_t FirstResourceOffset() const;

  DecoderType GetType() const override { return DecoderType::ICO; }
  LexerResult DoDecode(SourceBufferIterator& aIterator,
                       IResumable* aOnResume) override;
  nsresult FinishInternal() override;
  nsresult FinishWithErrorInternal() override;

 private:
  friend class DecoderFactory;

  // Decoders should only be instantiated via DecoderFactory.
  explicit nsICODecoder(RasterImage* aImage);

  // Flushes the contained decoder to read all available data and sets the
  // appropriate errors. Returns true if there are no errors.
  bool FlushContainedDecoder();

  // Gets decoder state from the contained decoder so it's visible externally.
  nsresult GetFinalStateFromContainedDecoder();

  // Obtains the number of colors from the BPP, mBPP must be filled in
  uint16_t GetNumColors();

  // Lexer state handlers, one per ICOState; dispatched from DoDecode().
  LexerTransition<ICOState> ReadHeader(const char* aData);
  LexerTransition<ICOState> ReadDirEntry(const char* aData);
  LexerTransition<ICOState> IterateUnsizedDirEntry();
  LexerTransition<ICOState> FinishDirEntry();
  LexerTransition<ICOState> SniffResource(const char* aData);
  LexerTransition<ICOState> ReadResource();
  LexerTransition<ICOState> ReadBIH(const char* aData);
  LexerTransition<ICOState> PrepareForMask();
  LexerTransition<ICOState> ReadMaskRow(const char* aData);
  LexerTransition<ICOState> FinishMask();
  LexerTransition<ICOState> FinishResource();

  // A dir entry extended with the size used for decoding.
  struct IconDirEntryEx : public IconDirEntry {
    OrientedIntSize mSize;
  };

  StreamingLexer<ICOState, 32> mLexer;  // The lexer.
  Maybe<Downscaler> mDownscaler;        // The downscaler used for the mask.
  RefPtr<Decoder> mContainedDecoder;    // Either a BMP or PNG decoder.
  Maybe<SourceBufferIterator>
      mReturnIterator;  // Iterator to save return point.
  UniquePtr<uint8_t[]> mMaskBuffer;  // A temporary buffer for the alpha mask.
  nsTArray<IconDirEntryEx> mDirEntries;  // Valid dir entries with a size.
  nsTArray<IconDirEntryEx> mUnsizedDirEntries;  // Dir entries without a size.
  IconDirEntryEx* mDirEntry;  // The dir entry for the selected resource.
  uint16_t mNumIcons;  // Stores the number of icons in the ICO file.
  uint16_t mCurrIcon;  // Stores the current dir entry index we are processing.
  uint16_t mBPP;       // The BPP of the resource we're decoding.
  uint32_t
      mMaskRowSize;  // The size in bytes of each row in the BMP alpha mask.
  uint32_t mCurrMaskLine;  // The line of the BMP alpha mask we're processing.
  bool mIsCursor;          // Is this ICO a cursor?
  bool mHasMaskAlpha;      // Did the BMP alpha mask have any transparency?
};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsICODecoder_h
diff --git a/image/decoders/nsIconDecoder.cpp b/image/decoders/nsIconDecoder.cpp
new file mode 100644
index 0000000000..c4ed8b7b06
--- /dev/null
+++ b/image/decoders/nsIconDecoder.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIconDecoder.h"
+#include "RasterImage.h"
+#include "SurfacePipeFactory.h"
+#include "gfxPlatform.h"
+
+using namespace mozilla::gfx;
+
+namespace mozilla {
+namespace image {
+
+static const uint32_t ICON_HEADER_SIZE = 4;
+
// Starts the lexer on the fixed-size header; the stream terminates
// successfully once all states complete.
nsIconDecoder::nsIconDecoder(RasterImage* aImage)
    : Decoder(aImage),
      mLexer(Transition::To(State::HEADER, ICON_HEADER_SIZE),
             Transition::TerminateSuccess()),
      mBytesPerRow()  // set by ReadHeader()
{
  // Nothing to do
}
+
+nsIconDecoder::~nsIconDecoder() {}
+
+LexerResult nsIconDecoder::DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) {
+ MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+ return mLexer.Lex(aIterator, aOnResume,
+ [=](State aState, const char* aData, size_t aLength) {
+ switch (aState) {
+ case State::HEADER:
+ return ReadHeader(aData);
+ case State::ROW_OF_PIXELS:
+ return ReadRowOfPixels(aData, aLength);
+ case State::FINISH:
+ return Finish();
+ default:
+ MOZ_CRASH("Unknown State");
+ }
+ });
+}
+
// Parses the 4-byte icon header — width, height, surface format byte, and a
// transform flag — posts metadata, and sets up the surface pipe for the
// pixel rows that follow.
LexerTransition<nsIconDecoder::State> nsIconDecoder::ReadHeader(
    const char* aData) {
  // Grab the width and height.
  uint8_t width = uint8_t(aData[0]);
  uint8_t height = uint8_t(aData[1]);
  SurfaceFormat format = SurfaceFormat(aData[2]);
  bool transform = bool(aData[3]);

  // FIXME(aosmond): On OSX we get the icon in device space and already
  // premultiplied, so we can't support the surface flags with icons right now.
  SurfacePipeFlags pipeFlags = SurfacePipeFlags();
  if (transform) {
    if (mCMSMode == CMSMode::All) {
      mTransform = GetCMSsRGBTransform(format);
    }

    if (!(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA)) {
      pipeFlags |= SurfacePipeFlags::PREMULTIPLY_ALPHA;
    }
  }

  // The input is 32bpp, so we expect 4 bytes of data per pixel.
  mBytesPerRow = width * 4;

  // Post our size to the superclass.
  PostSize(width, height);

  // Icons have alpha.
  PostHasTransparency();

  // If we're doing a metadata decode, we're done.
  if (IsMetadataDecode()) {
    return Transition::TerminateSuccess();
  }

  MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
      this, Size(), OutputSize(), FullFrame(), format, SurfaceFormat::OS_RGBA,
      /* aAnimParams */ Nothing(), mTransform, pipeFlags);
  if (!pipe) {
    return Transition::TerminateFailure();
  }

  mPipe = std::move(*pipe);

  MOZ_ASSERT(mImageData, "Should have a buffer now");

  return Transition::To(State::ROW_OF_PIXELS, mBytesPerRow);
}
+
+LexerTransition<nsIconDecoder::State> nsIconDecoder::ReadRowOfPixels(
+ const char* aData, size_t aLength) {
+ MOZ_ASSERT(aLength % 4 == 0, "Rows should contain a multiple of four bytes");
+
+ auto result = mPipe.WriteBuffer(reinterpret_cast<const uint32_t*>(aData));
+ MOZ_ASSERT(result != WriteState::FAILURE);
+
+ Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+ if (invalidRect) {
+ PostInvalidation(invalidRect->mInputSpaceRect,
+ Some(invalidRect->mOutputSpaceRect));
+ }
+
+ return result == WriteState::FINISHED
+ ? Transition::To(State::FINISH, 0)
+ : Transition::To(State::ROW_OF_PIXELS, mBytesPerRow);
+}
+
// All rows have been written; notify the superclass that the frame and the
// decode as a whole are complete.
LexerTransition<nsIconDecoder::State> nsIconDecoder::Finish() {
  PostFrameStop();
  PostDecodeDone();

  return Transition::TerminateSuccess();
}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsIconDecoder.h b/image/decoders/nsIconDecoder.h
new file mode 100644
index 0000000000..73bc1b5731
--- /dev/null
+++ b/image/decoders/nsIconDecoder.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsIconDecoder_h
+#define mozilla_image_decoders_nsIconDecoder_h
+
+#include "Decoder.h"
+#include "StreamingLexer.h"
+#include "SurfacePipe.h"
+
+namespace mozilla {
+namespace image {
+
+class RasterImage;
+
+////////////////////////////////////////////////////////////////////////////////
+// The icon decoder is a decoder specifically tailored for loading icons
+// from the OS. We've defined our own little format to represent these icons
+// and this decoder takes that format and converts it into 24-bit RGB with
+// alpha channel support. It was modeled a bit off the PPM decoder.
+//
+// The format of the incoming data is as follows:
+//
// The first four bytes form a header: the width and the height of the
// icon (one byte each), a byte identifying the surface format of the
// pixel data, and a flag byte indicating whether transforms (color
// management / premultiplication) should be applied.
// The remaining bytes contain the icon data, 4 bytes per pixel, in
// ARGB order (platform endianness, A in highest bits, B in lowest
// bits), row-primary, top-to-bottom, left-to-right, with
// premultiplied alpha.
+//
+////////////////////////////////////////////////////////////////////////////////
+
class nsIconDecoder : public Decoder {
 public:
  virtual ~nsIconDecoder();

  DecoderType GetType() const override { return DecoderType::ICON; }

  LexerResult DoDecode(SourceBufferIterator& aIterator,
                       IResumable* aOnResume) override;

 private:
  friend class DecoderFactory;

  // Decoders should only be instantiated via DecoderFactory.
  explicit nsIconDecoder(RasterImage* aImage);

  // Lexer states: read the 4-byte header, then one row of pixels at a time,
  // then finish.
  enum class State { HEADER, ROW_OF_PIXELS, FINISH };

  // Parses the header and sets up the surface pipe.
  LexerTransition<State> ReadHeader(const char* aData);
  // Feeds one row of 32bpp pixel data into the surface pipe.
  LexerTransition<State> ReadRowOfPixels(const char* aData, size_t aLength);
  // Posts frame-stop and decode-done notifications.
  LexerTransition<State> Finish();

  StreamingLexer<State> mLexer;  // Drives the state machine above.
  SurfacePipe mPipe;             // Destination for decoded pixels.
  uint32_t mBytesPerRow;         // Row stride (width * 4); set by ReadHeader().
};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsIconDecoder_h
diff --git a/image/decoders/nsJPEGDecoder.cpp b/image/decoders/nsJPEGDecoder.cpp
new file mode 100644
index 0000000000..0a9c2cc478
--- /dev/null
+++ b/image/decoders/nsJPEGDecoder.cpp
@@ -0,0 +1,999 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageLogging.h" // Must appear first.
+
+#include "nsJPEGDecoder.h"
+
+#include <cstdint>
+
+#include "imgFrame.h"
+#include "Orientation.h"
+#include "EXIF.h"
+#include "SurfacePipeFactory.h"
+
+#include "nspr.h"
+#include "nsCRT.h"
+#include "gfxColor.h"
+
+#include "jerror.h"
+
+#include "gfxPlatform.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/gfx/Types.h"
+#include "mozilla/Telemetry.h"
+
+extern "C" {
+#include "iccjpeg.h"
+}
+
+#if MOZ_BIG_ENDIAN()
+# define MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB JCS_EXT_XRGB
+#else
+# define MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB JCS_EXT_BGRX
+#endif
+
+static void cmyk_convert_bgra(uint32_t* aInput, uint32_t* aOutput,
+ int32_t aWidth);
+
+using mozilla::gfx::SurfaceFormat;
+
+namespace mozilla {
+namespace image {
+
+static mozilla::LazyLogModule sJPEGLog("JPEGDecoder");
+
+static mozilla::LazyLogModule sJPEGDecoderAccountingLog(
+ "JPEGDecoderAccounting");
+
+static qcms_profile* GetICCProfile(struct jpeg_decompress_struct& info) {
+ JOCTET* profilebuf;
+ uint32_t profileLength;
+ qcms_profile* profile = nullptr;
+
+ if (read_icc_profile(&info, &profilebuf, &profileLength)) {
+ profile = qcms_profile_from_memory(profilebuf, profileLength);
+ free(profilebuf);
+ }
+
+ return profile;
+}
+
+METHODDEF(void) init_source(j_decompress_ptr jd);
+METHODDEF(boolean) fill_input_buffer(j_decompress_ptr jd);
+METHODDEF(void) skip_input_data(j_decompress_ptr jd, long num_bytes);
+METHODDEF(void) term_source(j_decompress_ptr jd);
+METHODDEF(void) my_error_exit(j_common_ptr cinfo);
+METHODDEF(void) progress_monitor(j_common_ptr info);
+
+// Normal JFIF markers can't have more bytes than this.
+#define MAX_JPEG_MARKER_LENGTH (((uint32_t)1 << 16) - 1)
+
nsJPEGDecoder::nsJPEGDecoder(RasterImage* aImage,
                             Decoder::DecodeStyle aDecodeStyle)
    : Decoder(aImage),
      // Treat the entire stream as one unbuffered run of JPEG data.
      mLexer(Transition::ToUnbuffered(State::FINISHED_JPEG_DATA,
                                      State::JPEG_DATA, SIZE_MAX),
             Transition::TerminateSuccess()),
      mProfile(nullptr),
      mProfileLength(0),
      mCMSLine(nullptr),
      mDecodeStyle(aDecodeStyle) {
  // Zero the libjpeg error-manager fields explicitly; they are filled in
  // properly by jpeg_std_error() in InitInternal().
  this->mErr.pub.error_exit = nullptr;
  this->mErr.pub.emit_message = nullptr;
  this->mErr.pub.output_message = nullptr;
  this->mErr.pub.format_message = nullptr;
  this->mErr.pub.reset_error_mgr = nullptr;
  this->mErr.pub.msg_code = 0;
  this->mErr.pub.trace_level = 0;
  this->mErr.pub.num_warnings = 0;
  this->mErr.pub.jpeg_message_table = nullptr;
  this->mErr.pub.last_jpeg_message = 0;
  this->mErr.pub.addon_message_table = nullptr;
  this->mErr.pub.first_addon_message = 0;
  this->mErr.pub.last_addon_message = 0;
  mState = JPEG_HEADER;
  mReading = true;
  mImageData = nullptr;

  mBytesToSkip = 0;
  // Zero the libjpeg structs so destruction is safe even if InitInternal()
  // never runs.
  memset(&mInfo, 0, sizeof(jpeg_decompress_struct));
  memset(&mSourceMgr, 0, sizeof(mSourceMgr));
  memset(&mProgressMgr, 0, sizeof(mProgressMgr));
  // Let libjpeg callbacks find their way back to this decoder instance.
  mInfo.client_data = (void*)this;

  mSegment = nullptr;
  mSegmentLen = 0;

  mBackBuffer = nullptr;
  mBackBufferLen = mBackBufferSize = mBackBufferUnreadLen = 0;

  MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
          ("nsJPEGDecoder::nsJPEGDecoder: Creating JPEG decoder %p", this));
}
+
nsJPEGDecoder::~nsJPEGDecoder() {
  // Step 8: Release JPEG decompression object. Detach our source manager
  // before destroying the decompressor.
  mInfo.src = nullptr;
  jpeg_destroy_decompress(&mInfo);

  // mBackBuffer is allocated with malloc/realloc, so release it with free().
  free(mBackBuffer);
  mBackBuffer = nullptr;

  // mCMSLine is allocated with new[] in ReadJPEGData.
  delete[] mCMSLine;

  MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
          ("nsJPEGDecoder::~nsJPEGDecoder: Destroying JPEG decoder %p", this));
}
+
// Telemetry: JPEG decode times are recorded under IMAGE_DECODE_SPEED_JPEG.
Maybe<Telemetry::HistogramID> nsJPEGDecoder::SpeedHistogram() const {
  return Some(Telemetry::IMAGE_DECODE_SPEED_JPEG);
}
+
+nsresult nsJPEGDecoder::InitInternal() {
+ // We set up the normal JPEG error routines, then override error_exit.
+ mInfo.err = jpeg_std_error(&mErr.pub);
+ // mInfo.err = jpeg_std_error(&mErr.pub);
+ mErr.pub.error_exit = my_error_exit;
+ // Establish the setjmp return context for my_error_exit to use.
+ if (setjmp(mErr.setjmp_buffer)) {
+ // If we get here, the JPEG code has signaled an error, and initialization
+ // has failed.
+ return NS_ERROR_FAILURE;
+ }
+
+ // Step 1: allocate and initialize JPEG decompression object
+ jpeg_create_decompress(&mInfo);
+ // Set the source manager
+ mInfo.src = &mSourceMgr;
+
+ // Step 2: specify data source (eg, a file)
+
+ // Setup callback functions.
+ mSourceMgr.init_source = init_source;
+ mSourceMgr.fill_input_buffer = fill_input_buffer;
+ mSourceMgr.skip_input_data = skip_input_data;
+ mSourceMgr.resync_to_restart = jpeg_resync_to_restart;
+ mSourceMgr.term_source = term_source;
+
+ mInfo.mem->max_memory_to_use = static_cast<long>(
+ std::min<size_t>(SurfaceCache::MaximumCapacity(), LONG_MAX));
+
+ mProgressMgr.progress_monitor = &progress_monitor;
+ mInfo.progress = &mProgressMgr;
+
+ // Record app markers for ICC data
+ for (uint32_t m = 0; m < 16; m++) {
+ jpeg_save_markers(&mInfo, JPEG_APP0 + m, 0xFFFF);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsJPEGDecoder::FinishInternal() {
+ // If we're not in any sort of error case, force our state to JPEG_DONE.
+ if ((mState != JPEG_DONE && mState != JPEG_SINK_NON_JPEG_TRAILER) &&
+ (mState != JPEG_ERROR) && !IsMetadataDecode()) {
+ mState = JPEG_DONE;
+ }
+
+ return NS_OK;
+}
+
+LexerResult nsJPEGDecoder::DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) {
+ MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+ return mLexer.Lex(aIterator, aOnResume,
+ [=](State aState, const char* aData, size_t aLength) {
+ switch (aState) {
+ case State::JPEG_DATA:
+ return ReadJPEGData(aData, aLength);
+ case State::FINISHED_JPEG_DATA:
+ return FinishedJPEGData();
+ }
+ MOZ_CRASH("Unknown State");
+ });
+}
+
+LexerTransition<nsJPEGDecoder::State> nsJPEGDecoder::ReadJPEGData(
+ const char* aData, size_t aLength) {
+ mSegment = reinterpret_cast<const JOCTET*>(aData);
+ mSegmentLen = aLength;
+
+ // Return here if there is a error within libjpeg.
+ nsresult error_code;
+ // This cast to nsresult makes sense because setjmp() returns whatever we
+ // passed to longjmp(), which was actually an nsresult. These error codes
+ // have been translated from libjpeg error codes, like so:
+ // JERR_OUT_OF_MEMORY => NS_ERROR_OUT_OF_MEMORY
+ // JERR_UNKNOWN_MARKER => NS_ERROR_ILLEGAL_VALUE
+ // JERR_SOF_UNSUPPORTED => NS_ERROR_INVALID_CONTENT_ENCODING
+ // <any other error> => NS_ERROR_FAILURE
+ if ((error_code = static_cast<nsresult>(setjmp(mErr.setjmp_buffer))) !=
+ NS_OK) {
+ bool fatal = true;
+ if (error_code == NS_ERROR_FAILURE) {
+ // Error due to corrupt data. Make sure that we don't feed any more data
+ // to libjpeg-turbo.
+ mState = JPEG_SINK_NON_JPEG_TRAILER;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (setjmp returned NS_ERROR_FAILURE)"));
+ } else if (error_code == NS_ERROR_ILLEGAL_VALUE) {
+ // This is a recoverable error. Consume the marker and continue.
+ mInfo.unread_marker = 0;
+ fatal = false;
+ } else if (error_code == NS_ERROR_INVALID_CONTENT_ENCODING) {
+ // The content is encoding frames with a format that libjpeg can't handle.
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (setjmp returned NS_ERROR_INVALID_CONTENT_ENCODING)"));
+ // Check to see if we're in the done state, which indicates that we've
+ // already processed the main JPEG data.
+ bool inDoneState = (mState == JPEG_DONE);
+ // Whether we succeed or fail, we shouldn't send any more data.
+ mState = JPEG_SINK_NON_JPEG_TRAILER;
+
+ // If we're in the done state, we exit successfully and attempt to
+ // display the content we've already received. Otherwise, we fallthrough
+ // and treat this as a fatal error.
+ if (inDoneState) {
+ return Transition::TerminateSuccess();
+ }
+ } else {
+ // Error for another reason. (Possibly OOM.)
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (setjmp returned an error)"));
+ }
+
+ if (fatal) {
+ return Transition::TerminateFailure();
+ }
+ }
+
+ MOZ_LOG(sJPEGLog, LogLevel::Debug,
+ ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));
+
+ switch (mState) {
+ case JPEG_HEADER: {
+ LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
+ "nsJPEGDecoder::Write -- entering JPEG_HEADER"
+ " case");
+
+ // Step 3: read file parameters with jpeg_read_header()
+ if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (JPEG_SUSPENDED)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+
+ // Post our size to the superclass
+ EXIFData exif = ReadExifData();
+ PostSize(mInfo.image_width, mInfo.image_height, exif.orientation,
+ exif.resolution);
+ if (HasError()) {
+ // Setting the size led to an error.
+ mState = JPEG_ERROR;
+ return Transition::TerminateFailure();
+ }
+
+ // If we're doing a metadata decode, we're done.
+ if (IsMetadataDecode()) {
+ return Transition::TerminateSuccess();
+ }
+
+ // We're doing a full decode.
+ switch (mInfo.jpeg_color_space) {
+ case JCS_GRAYSCALE:
+ case JCS_RGB:
+ case JCS_YCbCr:
+ // By default, we will output directly to BGRA. If we need to apply
+ // special color transforms, this may change.
+ switch (SurfaceFormat::OS_RGBX) {
+ case SurfaceFormat::B8G8R8X8:
+ mInfo.out_color_space = JCS_EXT_BGRX;
+ break;
+ case SurfaceFormat::X8R8G8B8:
+ mInfo.out_color_space = JCS_EXT_XRGB;
+ break;
+ case SurfaceFormat::R8G8B8X8:
+ mInfo.out_color_space = JCS_EXT_RGBX;
+ break;
+ default:
+ mState = JPEG_ERROR;
+ return Transition::TerminateFailure();
+ }
+ break;
+ case JCS_CMYK:
+ case JCS_YCCK:
+ // libjpeg can convert from YCCK to CMYK, but not to XRGB.
+ mInfo.out_color_space = JCS_CMYK;
+ break;
+ default:
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (unknown colorspace (3))"));
+ return Transition::TerminateFailure();
+ }
+
+ if (mCMSMode != CMSMode::Off) {
+ if ((mInProfile = GetICCProfile(mInfo)) != nullptr &&
+ GetCMSOutputProfile()) {
+ uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
+
+ qcms_data_type outputType = gfxPlatform::GetCMSOSRGBAType();
+ Maybe<qcms_data_type> inputType;
+ if (profileSpace == icSigRgbData) {
+ // We can always color manage RGB profiles since it happens at the
+ // end of the pipeline.
+ inputType.emplace(outputType);
+ } else if (profileSpace == icSigGrayData &&
+ mInfo.jpeg_color_space == JCS_GRAYSCALE) {
+ // We can only color manage gray profiles if the original color
+ // space is grayscale. This means we must downscale after color
+ // management since the downscaler assumes BGRA.
+ mInfo.out_color_space = JCS_GRAYSCALE;
+ inputType.emplace(QCMS_DATA_GRAY_8);
+ }
+
+#if 0
+ // We don't currently support CMYK profiles. The following
+ // code dealt with lcms types. Add something like this
+ // back when we gain support for CMYK.
+
+ // Adobe Photoshop writes YCCK/CMYK files with inverted data
+ if (mInfo.out_color_space == JCS_CMYK) {
+ type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
+ }
+#endif
+
+ if (inputType) {
+ // Calculate rendering intent.
+ int intent = gfxPlatform::GetRenderingIntent();
+ if (intent == -1) {
+ intent = qcms_profile_get_rendering_intent(mInProfile);
+ }
+
+ // Create the color management transform.
+ mTransform = qcms_transform_create(mInProfile, *inputType,
+ GetCMSOutputProfile(),
+ outputType, (qcms_intent)intent);
+ }
+ } else if (mCMSMode == CMSMode::All) {
+ mTransform = GetCMSsRGBTransform(SurfaceFormat::OS_RGBX);
+ }
+ }
+
+ // We don't want to use the pipe buffers directly because we don't want
+ // any reads on non-BGRA formatted data.
+ if (mInfo.out_color_space == JCS_GRAYSCALE ||
+ mInfo.out_color_space == JCS_CMYK) {
+ mCMSLine = new (std::nothrow) uint32_t[mInfo.image_width];
+ if (!mCMSLine) {
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (could allocate buffer for color conversion)"));
+ return Transition::TerminateFailure();
+ }
+ }
+
+ // Don't allocate a giant and superfluous memory buffer
+ // when not doing a progressive decode.
+ mInfo.buffered_image =
+ mDecodeStyle == PROGRESSIVE && jpeg_has_multiple_scans(&mInfo);
+
+ /* Used to set up image size so arrays can be allocated */
+ jpeg_calc_output_dimensions(&mInfo);
+
+ // We handle the transform outside the pipeline if we are outputting in
+ // grayscale, because the pipeline wants BGRA pixels, particularly the
+ // downscaling filter, so we can't handle it after downscaling as would
+ // be optimal.
+ qcms_transform* pipeTransform =
+ mInfo.out_color_space != JCS_GRAYSCALE ? mTransform : nullptr;
+
+ Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateReorientSurfacePipe(
+ this, Size(), OutputSize(), SurfaceFormat::OS_RGBX, pipeTransform,
+ GetOrientation());
+ if (!pipe) {
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (could not initialize surface pipe)"));
+ return Transition::TerminateFailure();
+ }
+
+ mPipe = std::move(*pipe);
+
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ (" JPEGDecoderAccounting: nsJPEGDecoder::"
+ "Write -- created image frame with %ux%u pixels",
+ mInfo.image_width, mInfo.image_height));
+
+ mState = JPEG_START_DECOMPRESS;
+ [[fallthrough]]; // to start decompressing.
+ }
+
+ case JPEG_START_DECOMPRESS: {
+ LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
+ "nsJPEGDecoder::Write -- entering"
+ " JPEG_START_DECOMPRESS case");
+ // Step 4: set parameters for decompression
+
+ // FIXME -- Should reset dct_method and dither mode
+ // for final pass of progressive JPEG
+
+ mInfo.dct_method = JDCT_ISLOW;
+ mInfo.dither_mode = JDITHER_FS;
+ mInfo.do_fancy_upsampling = TRUE;
+ mInfo.enable_2pass_quant = FALSE;
+ mInfo.do_block_smoothing = TRUE;
+
+ // Step 5: Start decompressor
+ if (jpeg_start_decompress(&mInfo) == FALSE) {
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after jpeg_start_decompress())"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+
+ // If this is a progressive JPEG ...
+ mState = mInfo.buffered_image ? JPEG_DECOMPRESS_PROGRESSIVE
+ : JPEG_DECOMPRESS_SEQUENTIAL;
+ [[fallthrough]]; // to decompress sequential JPEG.
+ }
+
+ case JPEG_DECOMPRESS_SEQUENTIAL: {
+ if (mState == JPEG_DECOMPRESS_SEQUENTIAL) {
+ LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
+ "nsJPEGDecoder::Write -- "
+ "JPEG_DECOMPRESS_SEQUENTIAL case");
+
+ switch (OutputScanlines()) {
+ case WriteState::NEED_MORE_DATA:
+ MOZ_LOG(
+ sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ case WriteState::FINISHED:
+ NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
+ "We didn't process all of the data!");
+ mState = JPEG_DONE;
+ break;
+ case WriteState::FAILURE:
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (Error in pipeline from OutputScalines())"));
+ return Transition::TerminateFailure();
+ }
+ }
+ [[fallthrough]]; // to decompress progressive JPEG.
+ }
+
+ case JPEG_DECOMPRESS_PROGRESSIVE: {
+ if (mState == JPEG_DECOMPRESS_PROGRESSIVE) {
+ LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
+ "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");
+ auto AllComponentsSeen = [](jpeg_decompress_struct& info) {
+ bool all_components_seen = true;
+ if (info.coef_bits) {
+ for (int c = 0; c < info.num_components; ++c) {
+ bool current_component_seen = info.coef_bits[c][0] != -1;
+ all_components_seen &= current_component_seen;
+ }
+ }
+ return all_components_seen;
+ };
+ int status;
+ int scan_to_display_first = 0;
+ bool all_components_seen;
+ all_components_seen = AllComponentsSeen(mInfo);
+ if (all_components_seen) {
+ scan_to_display_first = mInfo.input_scan_number;
+ }
+
+ do {
+ status = jpeg_consume_input(&mInfo);
+
+ if (status == JPEG_REACHED_SOS || status == JPEG_REACHED_EOI ||
+ status == JPEG_SUSPENDED) {
+ // record the first scan where all components are present
+ all_components_seen = AllComponentsSeen(mInfo);
+ if (!scan_to_display_first && all_components_seen) {
+ scan_to_display_first = mInfo.input_scan_number;
+ }
+ }
+ } while ((status != JPEG_SUSPENDED) && (status != JPEG_REACHED_EOI));
+
+ if (!all_components_seen) {
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+ // make sure we never try to access the non-exsitent scan 0
+ if (!scan_to_display_first) {
+ scan_to_display_first = 1;
+ }
+ while (mState != JPEG_DONE) {
+ if (mInfo.output_scanline == 0) {
+ int scan = mInfo.input_scan_number;
+
+ // if we haven't displayed anything yet (output_scan_number==0)
+ // and we have enough data for a complete scan, force output
+ // of the last full scan, but only if this last scan has seen
+ // DC data from all components
+ if ((mInfo.output_scan_number == 0) &&
+ (scan > scan_to_display_first) &&
+ (status != JPEG_REACHED_EOI)) {
+ scan--;
+ }
+ MOZ_ASSERT(scan > 0, "scan number to small!");
+ if (!jpeg_start_output(&mInfo, scan)) {
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after jpeg_start_output() -"
+ " PROGRESSIVE)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+ }
+
+ if (mInfo.output_scanline == 0xffffff) {
+ mInfo.output_scanline = 0;
+ }
+
+ switch (OutputScanlines()) {
+ case WriteState::NEED_MORE_DATA:
+ if (mInfo.output_scanline == 0) {
+ // didn't manage to read any lines - flag so we don't call
+ // jpeg_start_output() multiple times for the same scan
+ mInfo.output_scanline = 0xffffff;
+ }
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after OutputScanlines() - "
+ "PROGRESSIVE)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ case WriteState::FINISHED:
+ NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
+ "We didn't process all of the data!");
+
+ if (!jpeg_finish_output(&mInfo)) {
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after jpeg_finish_output() -"
+ " PROGRESSIVE)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+
+ if (jpeg_input_complete(&mInfo) &&
+ (mInfo.input_scan_number == mInfo.output_scan_number)) {
+ mState = JPEG_DONE;
+ } else {
+ mInfo.output_scanline = 0;
+ mPipe.ResetToFirstRow();
+ }
+ break;
+ case WriteState::FAILURE:
+ mState = JPEG_ERROR;
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (Error in pipeline from OutputScalines())"));
+ return Transition::TerminateFailure();
+ }
+ }
+ }
+ [[fallthrough]]; // to finish decompressing.
+ }
+
+ case JPEG_DONE: {
+ LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
+ "nsJPEGDecoder::ProcessData -- entering"
+ " JPEG_DONE case");
+
+ // Step 7: Finish decompression
+
+ if (jpeg_finish_decompress(&mInfo) == FALSE) {
+ MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
+ ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
+ return Transition::ContinueUnbuffered(
+ State::JPEG_DATA); // I/O suspension
+ }
+
+ // Make sure we don't feed any more data to libjpeg-turbo.
+ mState = JPEG_SINK_NON_JPEG_TRAILER;
+
+ // We're done.
+ return Transition::TerminateSuccess();
+ }
+ case JPEG_SINK_NON_JPEG_TRAILER:
+ MOZ_LOG(sJPEGLog, LogLevel::Debug,
+ ("[this=%p] nsJPEGDecoder::ProcessData -- entering"
+ " JPEG_SINK_NON_JPEG_TRAILER case\n",
+ this));
+
+ MOZ_ASSERT_UNREACHABLE(
+ "Should stop getting data after entering state "
+ "JPEG_SINK_NON_JPEG_TRAILER");
+
+ return Transition::TerminateSuccess();
+
+ case JPEG_ERROR:
+ MOZ_ASSERT_UNREACHABLE(
+ "Should stop getting data after entering state "
+ "JPEG_ERROR");
+
+ return Transition::TerminateFailure();
+ }
+
+ MOZ_ASSERT_UNREACHABLE("Escaped the JPEG decoder state machine");
+ return Transition::TerminateFailure();
+} // end nsJPEGDecoder::ReadJPEGData
+
+// Terminal-state handler. The lexer was configured with an unbuffered read of
+// SIZE_MAX bytes, so actually arriving here would mean the entire address
+// space had been consumed as input -- clearly impossible.
+LexerTransition<nsJPEGDecoder::State> nsJPEGDecoder::FinishedJPEGData() {
+  MOZ_ASSERT_UNREACHABLE("Read the entire address space?");
+  return Transition::TerminateFailure();
+}
+
+// Extracts EXIF metadata from the JPEG's APP1 marker, if libjpeg saved one
+// while parsing the headers. Returns a default-constructed EXIFData when no
+// APP1 marker is present.
+EXIFData nsJPEGDecoder::ReadExifData() const {
+  // Walk the saved-marker list looking for APP1, where EXIF data is stored.
+  jpeg_saved_marker_ptr marker = mInfo.marker_list;
+  while (marker && marker->marker != JPEG_APP0 + 1) {
+    marker = marker->next;
+  }
+
+  // Fell off the end of the list: no EXIF data in this image.
+  if (!marker) {
+    return EXIFData();
+  }
+
+  return EXIFParser::Parse(marker->data,
+                           static_cast<uint32_t>(marker->data_length),
+                           gfx::IntSize(mInfo.image_width, mInfo.image_height));
+}
+
+// Called from term_source() once libjpeg has consumed all input. Posts the
+// final frame-stop and decode-done notifications. JPEG has no alpha channel,
+// so the frame is always reported as fully opaque.
+void nsJPEGDecoder::NotifyDone() {
+ PostFrameStop(Opacity::FULLY_OPAQUE);
+ PostDecodeDone();
+}
+
+// Reads decoded scanlines out of libjpeg and writes them into the
+// SurfacePipe, one row per callback invocation. Returns NEED_MORE_DATA when
+// libjpeg suspends for lack of input, FINISHED when the surface is complete,
+// or FAILURE on a pipeline error.
+WriteState nsJPEGDecoder::OutputScanlines() {
+ auto result = mPipe.WritePixelBlocks<uint32_t>(
+ [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
+ // When a conversion pass is needed (grayscale or CMYK input), decode
+ // into the scratch row mCMSLine; otherwise libjpeg writes BGRA
+ // directly into the pipe's pixel block.
+ JSAMPROW sampleRow = (JSAMPROW)(mCMSLine ? mCMSLine : aPixelBlock);
+ if (jpeg_read_scanlines(&mInfo, &sampleRow, 1) != 1) {
+ // libjpeg suspended mid-row; report that nothing was written.
+ return std::make_tuple(/* aWritten */ 0,
+ Some(WriteState::NEED_MORE_DATA));
+ }
+
+ switch (mInfo.out_color_space) {
+ default:
+ // Already outputted directly to aPixelBlock as BGRA.
+ MOZ_ASSERT(!mCMSLine);
+ break;
+ case JCS_GRAYSCALE:
+ // The transform here does both color management, and converts the
+ // pixels from grayscale to BGRA. This is why we do it here, instead
+ // of using ColorManagementFilter in the SurfacePipe, because the
+ // other filters (e.g. DownscalingFilter) require BGRA pixels.
+ MOZ_ASSERT(mCMSLine);
+ qcms_transform_data(mTransform, mCMSLine, aPixelBlock,
+ mInfo.output_width);
+ break;
+ case JCS_CMYK:
+ // Convert from CMYK to BGRA
+ MOZ_ASSERT(mCMSLine);
+ cmyk_convert_bgra(mCMSLine, aPixelBlock, aBlockSize);
+ break;
+ }
+
+ return std::make_tuple(aBlockSize, Maybe<WriteState>());
+ });
+
+ // Tell the image about any region the pipe invalidated so it can repaint.
+ Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+ if (invalidRect) {
+ PostInvalidation(invalidRect->mInputSpaceRect,
+ Some(invalidRect->mOutputSpaceRect));
+ }
+
+ return result;
+}
+
+// Override the standard error method in the IJG JPEG decoder code.
+METHODDEF(void)
+my_error_exit(j_common_ptr cinfo) {
+ decoder_error_mgr* err = (decoder_error_mgr*)cinfo->err;
+
+ // Convert error to a browser error code
+ nsresult error_code;
+ switch (err->pub.msg_code) {
+ case JERR_OUT_OF_MEMORY:
+ error_code = NS_ERROR_OUT_OF_MEMORY;
+ break;
+ case JERR_UNKNOWN_MARKER:
+ error_code = NS_ERROR_ILLEGAL_VALUE;
+ break;
+ case JERR_SOF_UNSUPPORTED:
+ error_code = NS_ERROR_INVALID_CONTENT_ENCODING;
+ break;
+ default:
+ error_code = NS_ERROR_FAILURE;
+ }
+
+#ifdef DEBUG
+ char buffer[JMSG_LENGTH_MAX];
+
+ // Create the message
+ (*err->pub.format_message)(cinfo, buffer);
+
+ fprintf(stderr, "JPEG decoding error:\n%s\n", buffer);
+#endif
+
+ // Return control to the setjmp point. We pass an nsresult masquerading as
+ // an int, which works because the setjmp() caller casts it back.
+ longjmp(err->setjmp_buffer, static_cast<int>(error_code));
+}
+
+// libjpeg progress callback. Progressive images with a very large number of
+// scans can hang the decoder, so abort once the scan count passes a generous
+// cap. 1000 is arbitrary, but far larger than the scan count of any normal
+// image.
+static void progress_monitor(j_common_ptr info) {
+  const int kMaxScans = 1000;
+  j_decompress_ptr decompress = (j_decompress_ptr)info;
+  if (decompress->input_scan_number >= kMaxScans) {
+    my_error_exit(info);
+  }
+}
+
+/*******************************************************************************
+ * This is the callback routine from the IJG JPEG library used to supply new
+ * data to the decompressor when its input buffer is exhausted. It juggles
+ * multiple buffers in an attempt to avoid unnecessary copying of input data.
+ *
+ * (A simpler scheme is possible: It's much easier to use only a single
+ * buffer; when fill_input_buffer() is called, move any unconsumed data
+ * (beyond the current pointer/count) down to the beginning of this buffer and
+ * then load new data into the remaining buffer space. This approach requires
+ * a little more data copying but is far easier to get right.)
+ *
+ * At any one time, the JPEG decompressor is either reading from the necko
+ * input buffer, which is volatile across top-level calls to the IJG library,
+ * or the "backtrack" buffer. The backtrack buffer contains the remaining
+ * unconsumed data from the necko buffer after parsing was suspended due
+ * to insufficient data in some previous call to the IJG library.
+ *
+ * When suspending, the decompressor will back up to a convenient restart
+ * point (typically the start of the current MCU). The variables
+ * next_input_byte & bytes_in_buffer indicate where the restart point will be
+ * if the current call returns FALSE. Data beyond this point must be
+ * rescanned after resumption, so it must be preserved in case the decompressor
+ * decides to backtrack.
+ *
+ * Returns:
+ * TRUE if additional data is available, FALSE if no data present and
+ * the JPEG library should therefore suspend processing of input stream
+ ******************************************************************************/
+
+/******************************************************************************/
+/* data source manager method */
+/******************************************************************************/
+
+/******************************************************************************/
+/* data source manager method
+ Initialize source. This is called by jpeg_read_header() before any
+ data is actually read. May leave
+ bytes_in_buffer set to 0 (in which case a fill_input_buffer() call
+ will occur immediately).
+*/
+// Nothing to initialize here: all buffer state lives on the nsJPEGDecoder
+// object and is managed by fill_input_buffer()/skip_input_data().
+METHODDEF(void)
+init_source(j_decompress_ptr jd) {}
+
+/******************************************************************************/
+/* data source manager method
+ Skip num_bytes worth of data. The buffer pointer and count should
+ be advanced over num_bytes input bytes, refilling the buffer as
+ needed. This is used to skip over a potentially large amount of
+ uninteresting data (such as an APPn marker). In some applications
+ it may be possible to optimize away the reading of the skipped data,
+ but it's not clear that being smart is worth much trouble; large
+ skips are uncommon. bytes_in_buffer may be zero on return.
+ A zero or negative skip count should be treated as a no-op.
+*/
+METHODDEF(void)
+skip_input_data(j_decompress_ptr jd, long num_bytes) {
+  struct jpeg_source_mgr* src = jd->src;
+  nsJPEGDecoder* decoder = (nsJPEGDecoder*)(jd->client_data);
+
+  // Per the source-manager contract (see the comment above), a zero or
+  // negative skip count must be treated as a no-op. Without this guard a
+  // negative count would be cast to a huge size_t below and corrupt
+  // bytes_in_buffer / next_input_byte.
+  if (num_bytes <= 0) {
+    return;
+  }
+
+  if (num_bytes > (long)src->bytes_in_buffer) {
+    // Can't skip it all right now until we get more data from the network
+    // stream. Record the shortfall so that fill_input_buffer() will discard
+    // the remaining amount from future segments.
+    decoder->mBytesToSkip = (size_t)num_bytes - src->bytes_in_buffer;
+    src->next_input_byte += src->bytes_in_buffer;
+    src->bytes_in_buffer = 0;
+  } else {
+    // Simple case: the whole skip is already buffered. Just advance the
+    // buffer pointer past it.
+    src->bytes_in_buffer -= (size_t)num_bytes;
+    src->next_input_byte += num_bytes;
+  }
+}
+
+/******************************************************************************/
+/* data source manager method
+ This is called whenever bytes_in_buffer has reached zero and more
+ data is wanted. In typical applications, it should read fresh data
+ into the buffer (ignoring the current state of next_input_byte and
+ bytes_in_buffer), reset the pointer & count to the start of the
+ buffer, and return TRUE indicating that the buffer has been reloaded.
+ It is not necessary to fill the buffer entirely, only to obtain at
+ least one more byte. bytes_in_buffer MUST be set to a positive value
+ if TRUE is returned. A FALSE return should only be used when I/O
+ suspension is desired.
+*/
+// See the long comment above: libjpeg is fed either directly from the
+// volatile necko segment (mSegment) or, after a suspension, from the
+// backtrack buffer (mBackBuffer) that preserves unconsumed bytes across
+// top-level calls. Returns true when fresh data was handed to libjpeg,
+// false to request I/O suspension.
+METHODDEF(boolean)
+fill_input_buffer(j_decompress_ptr jd) {
+ struct jpeg_source_mgr* src = jd->src;
+ nsJPEGDecoder* decoder = (nsJPEGDecoder*)(jd->client_data);
+
+ // First call since new network data arrived: hand the fresh segment to
+ // libjpeg directly, without copying.
+ if (decoder->mReading) {
+ const JOCTET* new_buffer = decoder->mSegment;
+ uint32_t new_buflen = decoder->mSegmentLen;
+
+ if (!new_buffer || new_buflen == 0) {
+ return false; // suspend
+ }
+
+ // Mark the segment as consumed so we don't feed it twice.
+ decoder->mSegmentLen = 0;
+
+ // Honor any outstanding skip requested by skip_input_data().
+ if (decoder->mBytesToSkip) {
+ if (decoder->mBytesToSkip < new_buflen) {
+ // All done skipping bytes; Return what's left.
+ new_buffer += decoder->mBytesToSkip;
+ new_buflen -= decoder->mBytesToSkip;
+ decoder->mBytesToSkip = 0;
+ } else {
+ // Still need to skip some more data in the future
+ decoder->mBytesToSkip -= (size_t)new_buflen;
+ return false; // suspend
+ }
+ }
+
+ // Whatever libjpeg left unread in the previous buffer must be rescanned
+ // later; remember how much that is.
+ decoder->mBackBufferUnreadLen = src->bytes_in_buffer;
+
+ src->next_input_byte = new_buffer;
+ src->bytes_in_buffer = (size_t)new_buflen;
+ decoder->mReading = false;
+
+ return true;
+ }
+
+ if (src->next_input_byte != decoder->mSegment) {
+ // Backtrack data has been permanently consumed.
+ decoder->mBackBufferUnreadLen = 0;
+ decoder->mBackBufferLen = 0;
+ }
+
+ // Save remainder of netlib buffer in backtrack buffer
+ const uint32_t new_backtrack_buflen =
+ src->bytes_in_buffer + decoder->mBackBufferLen;
+
+ // Make sure backtrack buffer is big enough to hold new data.
+ if (decoder->mBackBufferSize < new_backtrack_buflen) {
+ // Check for malformed MARKER segment lengths, before allocating space
+ // for it
+ if (new_backtrack_buflen > MAX_JPEG_MARKER_LENGTH) {
+ my_error_exit((j_common_ptr)(&decoder->mInfo));
+ }
+
+ // Round up to multiple of 256 bytes.
+ const size_t roundup_buflen = ((new_backtrack_buflen + 255) >> 8) << 8;
+ JOCTET* buf = (JOCTET*)realloc(decoder->mBackBuffer, roundup_buflen);
+ // Check for OOM
+ if (!buf) {
+ decoder->mInfo.err->msg_code = JERR_OUT_OF_MEMORY;
+ my_error_exit((j_common_ptr)(&decoder->mInfo));
+ }
+ decoder->mBackBuffer = buf;
+ decoder->mBackBufferSize = roundup_buflen;
+ }
+
+ // Ensure we actually have a backtrack buffer. Without it, then we know that
+ // there is no data to copy and bytes_in_buffer is already zero.
+ if (decoder->mBackBuffer) {
+ // Copy remainder of netlib segment into backtrack buffer.
+ memmove(decoder->mBackBuffer + decoder->mBackBufferLen,
+ src->next_input_byte, src->bytes_in_buffer);
+ } else {
+ MOZ_ASSERT(src->bytes_in_buffer == 0);
+ MOZ_ASSERT(decoder->mBackBufferLen == 0);
+ MOZ_ASSERT(decoder->mBackBufferUnreadLen == 0);
+ }
+
+ // Point to start of data to be rescanned.
+ src->next_input_byte = decoder->mBackBuffer + decoder->mBackBufferLen -
+ decoder->mBackBufferUnreadLen;
+ src->bytes_in_buffer += decoder->mBackBufferUnreadLen;
+ decoder->mBackBufferLen = (size_t)new_backtrack_buflen;
+ // Next segment delivered by the decoder should go down the mReading path.
+ decoder->mReading = true;
+
+ return false;
+}
+
+/******************************************************************************/
+/* data source manager method */
+/*
+ * Terminate source --- called by jpeg_finish_decompress() after all
+ * data has been read to clean up JPEG source manager. NOT called by
+ * jpeg_abort() or jpeg_destroy().
+ */
+// Posts the final decode notifications once libjpeg reports that all data
+// has been consumed (see comment above for when libjpeg invokes this).
+METHODDEF(void)
+term_source(j_decompress_ptr jd) {
+ nsJPEGDecoder* decoder = (nsJPEGDecoder*)(jd->client_data);
+
+ // This function shouldn't be called if we ran into an error we didn't
+ // recover from.
+ MOZ_ASSERT(decoder->mState != JPEG_ERROR,
+ "Calling term_source on a JPEG with mState == JPEG_ERROR!");
+
+ // Notify using a helper method to get around protectedness issues.
+ decoder->NotifyDone();
+}
+
+} // namespace image
+} // namespace mozilla
+
+///*************** Inverted CMYK -> BGRA conversion ************************
+/// Input is (Inverted) CMYK stored as 4 bytes per pixel.
+/// Output is BGRA stored as 4 bytes per pixel, alpha forced to opaque.
+/// @param aInput Points to row buffer containing the CMYK bytes for each pixel
+/// in the row.
+/// @param aOutput Points to row buffer to write BGRA to.
+/// @param aWidth Number of pixels in the row.
+static void cmyk_convert_bgra(uint32_t* aInput, uint32_t* aOutput,
+                              int32_t aWidth) {
+  const uint8_t* src = reinterpret_cast<const uint8_t*>(aInput);
+
+  for (int32_t px = 0; px < aWidth; ++px, src += 4) {
+    // The samples are stored inverted: iC = 255 - C, etc.
+    // See: http://www.easyrgb.com/math.php?MATH=M12#text12
+    // Or: http://www.ilkeratalay.com/colorspacesfaq.php#rgb
+    //
+    // CMYK -> CMY (0..1 range):   C' = C*(1-K) + K   (same for M, Y)
+    // CMY -> RGB (0..1 range):    R  = 1 - C'        (same for G, B)
+    // Substituting the inverted samples collapses both steps to
+    //    R = iC*iK, G = iM*iK, B = iY*iK
+    // which in 0..255 terms is (sample * iK) / 255 per channel.
+    const uint32_t iC = src[0];
+    const uint32_t iM = src[1];
+    const uint32_t iY = src[2];
+    const uint32_t iK = src[3];
+
+    const uint8_t r = static_cast<uint8_t>(iC * iK / 255);
+    const uint8_t g = static_cast<uint8_t>(iM * iK / 255);
+    const uint8_t b = static_cast<uint8_t>(iY * iK / 255);
+
+    // Pack the channels at the platform's BGRA bit positions, fully opaque.
+    *aOutput++ = (0xFF << mozilla::gfx::SurfaceFormatBit::OS_A) |
+                 (r << mozilla::gfx::SurfaceFormatBit::OS_R) |
+                 (g << mozilla::gfx::SurfaceFormatBit::OS_G) |
+                 (b << mozilla::gfx::SurfaceFormatBit::OS_B);
+  }
+}
diff --git a/image/decoders/nsJPEGDecoder.h b/image/decoders/nsJPEGDecoder.h
new file mode 100644
index 0000000000..fa010f9677
--- /dev/null
+++ b/image/decoders/nsJPEGDecoder.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsJPEGDecoder_h
+#define mozilla_image_decoders_nsJPEGDecoder_h
+
+#include "RasterImage.h"
+#include "SurfacePipe.h"
+#include "EXIF.h"
+
+// On Windows systems, RasterImage.h brings in 'windows.h', which defines INT32.
+// But the jpeg decoder has its own definition of INT32. To avoid build issues,
+// we need to undefine the version from 'windows.h'.
+#undef INT32
+
+#include "Decoder.h"
+
+extern "C" {
+#include "jpeglib.h"
+}
+
+#include <setjmp.h>
+
+namespace mozilla::image {
+
+// libjpeg error manager extended with a jmp_buf so that my_error_exit() can
+// longjmp back into the decoder instead of letting libjpeg call exit().
+typedef struct {
+ struct jpeg_error_mgr pub; // "public" fields for IJG library
+ jmp_buf setjmp_buffer; // For handling catastrophic errors
+} decoder_error_mgr;
+
+// Decoder state machine, driven by ReadJPEGData() in nsJPEGDecoder.cpp.
+typedef enum {
+ JPEG_HEADER, // Reading JFIF headers
+ JPEG_START_DECOMPRESS, // Headers read; starting decompression
+ JPEG_DECOMPRESS_PROGRESSIVE, // Output progressive pixels
+ JPEG_DECOMPRESS_SEQUENTIAL, // Output sequential pixels
+ JPEG_DONE, // Finishing decompression
+ JPEG_SINK_NON_JPEG_TRAILER, // Some image files have a
+ // non-JPEG trailer
+ JPEG_ERROR // Unrecoverable failure; stop consuming data
+} jstate;
+
+class RasterImage;
+struct Orientation;
+
+// Decoder for JPEG images, implemented on top of libjpeg(-turbo)'s
+// suspending data-source API. Many members are public because the C-style
+// libjpeg callbacks (fill_input_buffer, skip_input_data, ...) access the
+// decoder through jpeg_decompress_struct::client_data.
+class nsJPEGDecoder : public Decoder {
+ public:
+ virtual ~nsJPEGDecoder();
+
+ DecoderType GetType() const override { return DecoderType::JPEG; }
+
+ // Called from term_source() to post the final decode notifications;
+ // public so the C callback can reach it.
+ void NotifyDone();
+
+ protected:
+ nsresult InitInternal() override;
+ LexerResult DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) override;
+ nsresult FinishInternal() override;
+
+ Maybe<Telemetry::HistogramID> SpeedHistogram() const override;
+
+ protected:
+ // Parses EXIF metadata out of the saved APP1 marker, if any.
+ EXIFData ReadExifData() const;
+ // Feeds decoded scanlines into mPipe; see nsJPEGDecoder.cpp.
+ WriteState OutputScanlines();
+
+ private:
+ friend class DecoderFactory;
+
+ // Decoders should only be instantiated via DecoderFactory.
+ nsJPEGDecoder(RasterImage* aImage, Decoder::DecodeStyle aDecodeStyle);
+
+ enum class State { JPEG_DATA, FINISHED_JPEG_DATA };
+
+ void FinishRow(uint32_t aLastSourceRow);
+ LexerTransition<State> ReadJPEGData(const char* aData, size_t aLength);
+ LexerTransition<State> FinishedJPEGData();
+
+ StreamingLexer<State> mLexer;
+
+ public:
+ struct jpeg_decompress_struct mInfo;
+ struct jpeg_source_mgr mSourceMgr;
+ struct jpeg_progress_mgr mProgressMgr;
+ decoder_error_mgr mErr; // Error manager with longjmp escape hatch
+ jstate mState; // Current state-machine state
+
+ // Bytes still to be discarded, set by skip_input_data() when a skip
+ // request overruns the current buffer.
+ uint32_t mBytesToSkip;
+
+ const JOCTET* mSegment; // The current segment we are decoding from
+ uint32_t mSegmentLen; // amount of data in mSegment
+
+ // Backtrack buffer preserving unconsumed input across suspensions; see
+ // fill_input_buffer() in nsJPEGDecoder.cpp.
+ JOCTET* mBackBuffer;
+ uint32_t mBackBufferLen; // Offset of end of active backtrack data
+ uint32_t mBackBufferSize; // size in bytes what mBackBuffer was created with
+ uint32_t mBackBufferUnreadLen; // amount of data currently in mBackBuffer
+
+ // NOTE(review): presumably the ICC color profile read from the stream --
+ // set outside this excerpt; confirm against the rest of the file.
+ JOCTET* mProfile;
+ uint32_t mProfileLength;
+
+ // Scratch row used when a conversion pass (grayscale/CMYK -> BGRA) is
+ // needed before pixels enter the SurfacePipe.
+ uint32_t* mCMSLine;
+
+ // True when fill_input_buffer() should hand the fresh mSegment to libjpeg.
+ bool mReading;
+
+ const Decoder::DecodeStyle mDecodeStyle;
+
+ SurfacePipe mPipe; // Destination pipeline for decoded pixels
+};
+
+} // namespace mozilla::image
+
+#endif // mozilla_image_decoders_nsJPEGDecoder_h
diff --git a/image/decoders/nsJXLDecoder.cpp b/image/decoders/nsJXLDecoder.cpp
new file mode 100644
index 0000000000..b3610f9075
--- /dev/null
+++ b/image/decoders/nsJXLDecoder.cpp
@@ -0,0 +1,163 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageLogging.h" // Must appear first
+#include "gfxPlatform.h"
+#include "jxl/codestream_header.h"
+#include "jxl/decode_cxx.h"
+#include "jxl/types.h"
+#include "mozilla/TelemetryHistogramEnums.h"
+#include "mozilla/gfx/Point.h"
+#include "nsJXLDecoder.h"
+
+#include "RasterImage.h"
+#include "SurfacePipeFactory.h"
+
+using namespace mozilla::gfx;
+
+namespace mozilla::image {
+
+// Evaluates a libjxl call and bails out of the enclosing decoder state
+// function on failure. Uses the do/while(0) idiom WITHOUT a trailing
+// semicolon so that `JXL_TRY(e);` is a single statement and remains valid
+// inside an unbraced if/else.
+#define JXL_TRY(expr)                          \
+  do {                                         \
+    JxlDecoderStatus _status = (expr);         \
+    if (_status != JXL_DEC_SUCCESS) {          \
+      return Transition::TerminateFailure();   \
+    }                                          \
+  } while (0)
+
+// Same as JXL_TRY, but for expressions yielding a bool (e.g. Vector::append).
+#define JXL_TRY_BOOL(expr)                     \
+  do {                                         \
+    bool succeeded = (expr);                   \
+    if (!succeeded) {                          \
+      return Transition::TerminateFailure();   \
+    }                                          \
+  } while (0)
+
+static LazyLogModule sJXLLog("JXLDecoder");
+
+nsJXLDecoder::nsJXLDecoder(RasterImage* aImage)
+ : Decoder(aImage),
+ // Consume the stream unbuffered up to SIZE_MAX bytes, i.e. indefinitely;
+ // FINISHED_JXL_DATA is therefore effectively unreachable.
+ mLexer(Transition::ToUnbuffered(State::FINISHED_JXL_DATA, State::JXL_DATA,
+ SIZE_MAX),
+ Transition::TerminateSuccess()),
+ mDecoder(JxlDecoderMake(nullptr)),
+ mParallelRunner(
+ JxlThreadParallelRunnerMake(nullptr, PreferredThreadCount())) {
+ // We only care about the image header and the final pixels; intermediate
+ // events are not subscribed.
+ JxlDecoderSubscribeEvents(mDecoder.get(),
+ JXL_DEC_BASIC_INFO | JXL_DEC_FULL_IMAGE);
+ JxlDecoderSetParallelRunner(mDecoder.get(), JxlThreadParallelRunner,
+ mParallelRunner.get());
+
+ MOZ_LOG(sJXLLog, LogLevel::Debug,
+ ("[this=%p] nsJXLDecoder::nsJXLDecoder", this));
+}
+
+// Destructor only logs; the JxlDecoderPtr / JxlThreadParallelRunnerPtr RAII
+// wrappers release the libjxl resources automatically.
+nsJXLDecoder::~nsJXLDecoder() {
+ MOZ_LOG(sJXLLog, LogLevel::Debug,
+ ("[this=%p] nsJXLDecoder::~nsJXLDecoder", this));
+}
+
+// Number of worker threads for the libjxl parallel runner. Metadata decodes
+// don't produce pixels, so they get no additional worker threads.
+size_t nsJXLDecoder::PreferredThreadCount() {
+  return IsMetadataDecode() ? 0
+                            : JxlThreadParallelRunnerDefaultNumWorkerThreads();
+}
+
+// Main decode entry point: drives the two-state lexer. JXL_DATA consumes the
+// stream unbuffered; FINISHED_JXL_DATA is unreachable in practice (see
+// FinishedJXLData()). Removed a leftover commented-out debugging line and the
+// stray semicolon after the function body.
+LexerResult nsJXLDecoder::DoDecode(SourceBufferIterator& aIterator,
+                                   IResumable* aOnResume) {
+  MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+  return mLexer.Lex(aIterator, aOnResume,
+                    [=](State aState, const char* aData, size_t aLength) {
+                      switch (aState) {
+                        case State::JXL_DATA:
+                          return ReadJXLData(aData, aLength);
+                        case State::FINISHED_JXL_DATA:
+                          return FinishedJXLData();
+                      }
+                      MOZ_CRASH("Unknown State");
+                    });
+}
+
+// Feeds a segment of the stream to libjxl and reacts to its events.
+// Suspends (ContinueUnbuffered) when libjxl needs more input, terminates
+// successfully after the size is known (metadata decode) or the full image
+// has been written to the SurfacePipe.
+LexerTransition<nsJXLDecoder::State> nsJXLDecoder::ReadJXLData(
+    const char* aData, size_t aLength) {
+  // Decode either directly from the fresh segment or, if a previous call
+  // left unconsumed bytes in mBuffer, from the concatenation of those bytes
+  // and the fresh segment.
+  const uint8_t* input = (const uint8_t*)aData;
+  size_t length = aLength;
+  if (mBuffer.length() != 0) {
+    JXL_TRY_BOOL(mBuffer.append(aData, aLength));
+    input = mBuffer.begin();
+    length = mBuffer.length();
+  }
+  JXL_TRY(JxlDecoderSetInput(mDecoder.get(), input, length));
+
+  while (true) {
+    JxlDecoderStatus status = JxlDecoderProcessInput(mDecoder.get());
+    switch (status) {
+      case JXL_DEC_ERROR:
+      default:
+        return Transition::TerminateFailure();
+
+      case JXL_DEC_NEED_MORE_INPUT: {
+        size_t remaining = JxlDecoderReleaseInput(mDecoder.get());
+
+        // Preserve the unconsumed tail for the next call. The tail must be
+        // computed relative to the buffer we actually handed to libjxl
+        // (input/length): when decoding out of mBuffer, `remaining` can
+        // exceed aLength, and `aData + aLength - remaining` would point
+        // before aData. Copy through a temporary because appending mBuffer's
+        // own storage back into it after clear() is unsafe if append
+        // reallocates.
+        Vector<uint8_t> tail;
+        JXL_TRY_BOOL(tail.append(input + length - remaining, remaining));
+        mBuffer.clear();
+        JXL_TRY_BOOL(mBuffer.append(tail.begin(), tail.length()));
+        return Transition::ContinueUnbuffered(State::JXL_DATA);
+      }
+
+      case JXL_DEC_BASIC_INFO: {
+        JXL_TRY(JxlDecoderGetBasicInfo(mDecoder.get(), &mInfo));
+        PostSize(mInfo.xsize, mInfo.ysize);
+        if (mInfo.alpha_bits > 0) {
+          PostHasTransparency();
+        }
+        // A metadata decode only needs the dimensions; stop here.
+        if (IsMetadataDecode()) {
+          return Transition::TerminateSuccess();
+        }
+        break;
+      }
+
+      case JXL_DEC_NEED_IMAGE_OUT_BUFFER: {
+        // Allocate the buffer libjxl will decode 8-bit RGBA pixels into.
+        size_t size = 0;
+        JxlPixelFormat format{4, JXL_TYPE_UINT8, JXL_LITTLE_ENDIAN, 0};
+        JXL_TRY(JxlDecoderImageOutBufferSize(mDecoder.get(), &format, &size));
+
+        mOutBuffer.clear();
+        JXL_TRY_BOOL(mOutBuffer.growBy(size));
+        JXL_TRY(JxlDecoderSetImageOutBuffer(mDecoder.get(), &format,
+                                            mOutBuffer.begin(), size));
+        break;
+      }
+
+      case JXL_DEC_FULL_IMAGE: {
+        // Push the decoded pixels through a SurfacePipe one row at a time.
+        OrientedIntSize size(mInfo.xsize, mInfo.ysize);
+        Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
+            this, size, OutputSize(), FullFrame(), SurfaceFormat::R8G8B8A8,
+            SurfaceFormat::OS_RGBA, Nothing(), nullptr, SurfacePipeFlags());
+        // Surface allocation can fail; don't dereference a Nothing().
+        if (!pipe) {
+          return Transition::TerminateFailure();
+        }
+        for (uint8_t* rowPtr = mOutBuffer.begin(); rowPtr < mOutBuffer.end();
+             rowPtr += mInfo.xsize * 4) {
+          pipe->WriteBuffer(reinterpret_cast<uint32_t*>(rowPtr));
+        }
+
+        if (Maybe<SurfaceInvalidRect> invalidRect = pipe->TakeInvalidRect()) {
+          PostInvalidation(invalidRect->mInputSpaceRect,
+                           Some(invalidRect->mOutputSpaceRect));
+        }
+        PostFrameStop();
+        PostDecodeDone();
+        return Transition::TerminateSuccess();
+      }
+    }
+  }
+}
+
+// Terminal state: the unbuffered read was configured for SIZE_MAX bytes, so
+// this state can only be reached if the whole address space were consumed.
+LexerTransition<nsJXLDecoder::State> nsJXLDecoder::FinishedJXLData() {
+  MOZ_ASSERT_UNREACHABLE("Read the entire address space?");
+  return Transition::TerminateFailure();
+}
+
+} // namespace mozilla::image
diff --git a/image/decoders/nsJXLDecoder.h b/image/decoders/nsJXLDecoder.h
new file mode 100644
index 0000000000..6cde7456ca
--- /dev/null
+++ b/image/decoders/nsJXLDecoder.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsJXLDecoder_h
+#define mozilla_image_decoders_nsJXLDecoder_h
+
+#include "Decoder.h"
+#include "mp4parse.h"
+#include "SurfacePipe.h"
+
+#include "jxl/decode_cxx.h"
+#include "jxl/thread_parallel_runner_cxx.h"
+
+#include "mozilla/Telemetry.h"
+
+namespace mozilla::image {
+class RasterImage;
+
+// Decoder for JPEG XL images, built on the libjxl streaming decoder API.
+class nsJXLDecoder final : public Decoder {
+ public:
+ virtual ~nsJXLDecoder();
+
+ DecoderType GetType() const override { return DecoderType::JXL; }
+
+ protected:
+ LexerResult DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) override;
+
+ private:
+ friend class DecoderFactory;
+
+ // Decoders should only be instantiated via DecoderFactory.
+ explicit nsJXLDecoder(RasterImage* aImage);
+
+ // Worker-thread count for the parallel runner (0 for metadata decodes).
+ size_t PreferredThreadCount();
+
+ enum class State { JXL_DATA, FINISHED_JXL_DATA };
+
+ LexerTransition<State> ReadJXLData(const char* aData, size_t aLength);
+ LexerTransition<State> FinishedJXLData();
+
+ StreamingLexer<State> mLexer;
+ JxlDecoderPtr mDecoder; // RAII handle to the libjxl decoder
+ JxlThreadParallelRunnerPtr mParallelRunner; // libjxl thread pool
+ Vector<uint8_t> mBuffer; // Unconsumed input carried across calls
+ Vector<uint8_t> mOutBuffer; // RGBA output buffer handed to libjxl
+ JxlBasicInfo mInfo{}; // Image header (size, alpha) from libjxl
+};
+
+} // namespace mozilla::image
+
+#endif // mozilla_image_decoders_nsJXLDecoder_h
diff --git a/image/decoders/nsPNGDecoder.cpp b/image/decoders/nsPNGDecoder.cpp
new file mode 100644
index 0000000000..afc2762515
--- /dev/null
+++ b/image/decoders/nsPNGDecoder.cpp
@@ -0,0 +1,1035 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageLogging.h" // Must appear first
+#include "nsPNGDecoder.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "gfxColor.h"
+#include "gfxPlatform.h"
+#include "imgFrame.h"
+#include "nsColor.h"
+#include "nsRect.h"
+#include "nspr.h"
+#include "png.h"
+
+#include "RasterImage.h"
+#include "SurfaceCache.h"
+#include "SurfacePipeFactory.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Telemetry.h"
+
+using namespace mozilla::gfx;
+
+using std::min;
+
+namespace mozilla {
+namespace image {
+
+static LazyLogModule sPNGLog("PNGDecoder");
+static LazyLogModule sPNGDecoderAccountingLog("PNGDecoderAccounting");
+
+// limit image dimensions (bug #251381, #591822, #967656, and #1283961)
+#ifndef MOZ_PNG_MAX_WIDTH
+# define MOZ_PNG_MAX_WIDTH 0x7fffffff // Unlimited
+#endif
+#ifndef MOZ_PNG_MAX_HEIGHT
+# define MOZ_PNG_MAX_HEIGHT 0x7fffffff // Unlimited
+#endif
+
+/* Controls the maximum chunk size configuration for libpng. We set this to a
+ * very large number, 256MB specifically. */
+static constexpr png_alloc_size_t kPngMaxChunkSize = 0x10000000;
+
+nsPNGDecoder::AnimFrameInfo::AnimFrameInfo()
+ : mDispose(DisposalMethod::KEEP), mBlend(BlendMethod::OVER), mTimeout(0) {}
+
+#ifdef PNG_APNG_SUPPORTED
+
+int32_t GetNextFrameDelay(png_structp aPNG, png_infop aInfo) {
+ // Delay, in seconds, is delayNum / delayDen.
+ png_uint_16 delayNum = png_get_next_frame_delay_num(aPNG, aInfo);
+ png_uint_16 delayDen = png_get_next_frame_delay_den(aPNG, aInfo);
+
+ if (delayNum == 0) {
+ return 0; // SetFrameTimeout() will set to a minimum.
+ }
+
+ if (delayDen == 0) {
+ delayDen = 100; // So says the APNG spec.
+ }
+
+ // Need to cast delay_num to float to have a proper division and
+ // the result to int to avoid a compiler warning.
+ return static_cast<int32_t>(static_cast<double>(delayNum) * 1000 / delayDen);
+}
+
+nsPNGDecoder::AnimFrameInfo::AnimFrameInfo(png_structp aPNG, png_infop aInfo)
+ : mDispose(DisposalMethod::KEEP), mBlend(BlendMethod::OVER), mTimeout(0) {
+ png_byte dispose_op = png_get_next_frame_dispose_op(aPNG, aInfo);
+ png_byte blend_op = png_get_next_frame_blend_op(aPNG, aInfo);
+
+ if (dispose_op == PNG_DISPOSE_OP_PREVIOUS) {
+ mDispose = DisposalMethod::RESTORE_PREVIOUS;
+ } else if (dispose_op == PNG_DISPOSE_OP_BACKGROUND) {
+ mDispose = DisposalMethod::CLEAR;
+ } else {
+ mDispose = DisposalMethod::KEEP;
+ }
+
+ if (blend_op == PNG_BLEND_OP_SOURCE) {
+ mBlend = BlendMethod::SOURCE;
+ } else {
+ mBlend = BlendMethod::OVER;
+ }
+
+ mTimeout = GetNextFrameDelay(aPNG, aInfo);
+}
+#endif
+
+// First 8 bytes of a PNG file
+const uint8_t nsPNGDecoder::pngSignatureBytes[] = {137, 80, 78, 71,
+ 13, 10, 26, 10};
+
+nsPNGDecoder::nsPNGDecoder(RasterImage* aImage)
+ : Decoder(aImage),
+ mLexer(Transition::ToUnbuffered(State::FINISHED_PNG_DATA, State::PNG_DATA,
+ SIZE_MAX),
+ Transition::TerminateSuccess()),
+ mNextTransition(Transition::ContinueUnbuffered(State::PNG_DATA)),
+ mLastChunkLength(0),
+ mPNG(nullptr),
+ mInfo(nullptr),
+ mCMSLine(nullptr),
+ interlacebuf(nullptr),
+ mFormat(SurfaceFormat::UNKNOWN),
+ mChannels(0),
+ mPass(0),
+ mFrameIsHidden(false),
+ mDisablePremultipliedAlpha(false),
+ mGotInfoCallback(false),
+ mUsePipeTransform(false),
+ mNumFrames(0) {}
+
+nsPNGDecoder::~nsPNGDecoder() {
+ if (mPNG) {
+ png_destroy_read_struct(&mPNG, mInfo ? &mInfo : nullptr, nullptr);
+ }
+ if (mCMSLine) {
+ free(mCMSLine);
+ }
+ if (interlacebuf) {
+ free(interlacebuf);
+ }
+}
+
+nsPNGDecoder::TransparencyType nsPNGDecoder::GetTransparencyType(
+ const OrientedIntRect& aFrameRect) {
+ // Check if the image has a transparent color in its palette.
+ if (HasAlphaChannel()) {
+ return TransparencyType::eAlpha;
+ }
+ if (!aFrameRect.IsEqualEdges(FullFrame())) {
+ MOZ_ASSERT(HasAnimation());
+ return TransparencyType::eFrameRect;
+ }
+
+ return TransparencyType::eNone;
+}
+
+void nsPNGDecoder::PostHasTransparencyIfNeeded(
+ TransparencyType aTransparencyType) {
+ switch (aTransparencyType) {
+ case TransparencyType::eNone:
+ return;
+
+ case TransparencyType::eAlpha:
+ PostHasTransparency();
+ return;
+
+ case TransparencyType::eFrameRect:
+ // If the first frame of animated image doesn't draw into the whole image,
+ // then record that it is transparent. For subsequent frames, this doesn't
+ // affect transparency, because they're composited on top of all previous
+ // frames.
+ if (mNumFrames == 0) {
+ PostHasTransparency();
+ }
+ return;
+ }
+}
+
+// CreateFrame() is used for both simple and animated images.
+nsresult nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) {
+ MOZ_ASSERT(HasSize());
+ MOZ_ASSERT(!IsMetadataDecode());
+
+ // Check if we have transparency, and send notifications if needed.
+ auto transparency = GetTransparencyType(aFrameInfo.mFrameRect);
+ PostHasTransparencyIfNeeded(transparency);
+ mFormat = transparency == TransparencyType::eNone ? SurfaceFormat::OS_RGBX
+ : SurfaceFormat::OS_RGBA;
+
+ // Make sure there's no animation or padding if we're downscaling.
+ MOZ_ASSERT_IF(Size() != OutputSize(), mNumFrames == 0);
+ MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());
+ MOZ_ASSERT_IF(Size() != OutputSize(),
+ transparency != TransparencyType::eFrameRect);
+
+ Maybe<AnimationParams> animParams;
+#ifdef PNG_APNG_SUPPORTED
+ if (!IsFirstFrameDecode() && png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
+ mAnimInfo = AnimFrameInfo(mPNG, mInfo);
+
+ if (mAnimInfo.mDispose == DisposalMethod::CLEAR) {
+ // We may have to display the background under this image during
+ // animation playback, so we regard it as transparent.
+ PostHasTransparency();
+ }
+
+ animParams.emplace(
+ AnimationParams{aFrameInfo.mFrameRect.ToUnknownRect(),
+ FrameTimeout::FromRawMilliseconds(mAnimInfo.mTimeout),
+ mNumFrames, mAnimInfo.mBlend, mAnimInfo.mDispose});
+ }
+#endif
+
+ // If this image is interlaced, we can display better quality intermediate
+ // results to the user by post processing them with ADAM7InterpolatingFilter.
+ SurfacePipeFlags pipeFlags = aFrameInfo.mIsInterlaced
+ ? SurfacePipeFlags::ADAM7_INTERPOLATE
+ : SurfacePipeFlags();
+
+ if (mNumFrames == 0) {
+ // The first frame may be displayed progressively.
+ pipeFlags |= SurfacePipeFlags::PROGRESSIVE_DISPLAY;
+ }
+
+ SurfaceFormat inFormat;
+ if (mTransform && !mUsePipeTransform) {
+ // QCMS will output in the correct format.
+ inFormat = mFormat;
+ } else if (transparency == TransparencyType::eAlpha) {
+ // We are outputting directly as RGBA, so we need to swap at this step.
+ inFormat = SurfaceFormat::R8G8B8A8;
+ } else {
+ // We have no alpha channel, so we need to unpack from RGB to BGRA.
+ inFormat = SurfaceFormat::R8G8B8;
+ }
+
+ // Only apply premultiplication if the frame has true alpha. If we ever
+ // support downscaling animated images, we will need to premultiply for frame
+ // rect transparency when downscaling as well.
+ if (transparency == TransparencyType::eAlpha && !mDisablePremultipliedAlpha) {
+ pipeFlags |= SurfacePipeFlags::PREMULTIPLY_ALPHA;
+ }
+
+ qcms_transform* pipeTransform = mUsePipeTransform ? mTransform : nullptr;
+ Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
+ this, Size(), OutputSize(), aFrameInfo.mFrameRect, inFormat, mFormat,
+ animParams, pipeTransform, pipeFlags);
+
+ if (!pipe) {
+ mPipe = SurfacePipe();
+ return NS_ERROR_FAILURE;
+ }
+
+ mPipe = std::move(*pipe);
+
+ mFrameRect = aFrameInfo.mFrameRect;
+ mPass = 0;
+
+ MOZ_LOG(sPNGDecoderAccountingLog, LogLevel::Debug,
+ ("PNGDecoderAccounting: nsPNGDecoder::CreateFrame -- created "
+ "image frame with %dx%d pixels for decoder %p",
+ mFrameRect.Width(), mFrameRect.Height(), this));
+
+ return NS_OK;
+}
+
+// set timeout and frame disposal method for the current frame
+void nsPNGDecoder::EndImageFrame() {
+ if (mFrameIsHidden) {
+ return;
+ }
+
+ mNumFrames++;
+
+ Opacity opacity = mFormat == SurfaceFormat::OS_RGBX
+ ? Opacity::FULLY_OPAQUE
+ : Opacity::SOME_TRANSPARENCY;
+
+ PostFrameStop(opacity);
+}
+
+nsresult nsPNGDecoder::InitInternal() {
+ mDisablePremultipliedAlpha =
+ bool(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
+
+#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
+ static png_byte color_chunks[] = {99, 72, 82, 77, '\0', // cHRM
+ 105, 67, 67, 80, '\0'}; // iCCP
+ static png_byte unused_chunks[] = {98, 75, 71, 68, '\0', // bKGD
+ 101, 88, 73, 102, '\0', // eXIf
+ 104, 73, 83, 84, '\0', // hIST
+ 105, 84, 88, 116, '\0', // iTXt
+ 111, 70, 70, 115, '\0', // oFFs
+ 112, 67, 65, 76, '\0', // pCAL
+ 115, 67, 65, 76, '\0', // sCAL
+ 112, 72, 89, 115, '\0', // pHYs
+ 115, 66, 73, 84, '\0', // sBIT
+ 115, 80, 76, 84, '\0', // sPLT
+ 116, 69, 88, 116, '\0', // tEXt
+ 116, 73, 77, 69, '\0', // tIME
+ 122, 84, 88, 116, '\0'}; // zTXt
+#endif
+
+ // Initialize the container's source image header
+ // Always decode to 24 bit pixdepth
+
+ mPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr,
+ nsPNGDecoder::error_callback,
+ nsPNGDecoder::warning_callback);
+ if (!mPNG) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ mInfo = png_create_info_struct(mPNG);
+ if (!mInfo) {
+ png_destroy_read_struct(&mPNG, nullptr, nullptr);
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
+ // Ignore unused chunks
+ if (mCMSMode == CMSMode::Off || IsMetadataDecode()) {
+ png_set_keep_unknown_chunks(mPNG, 1, color_chunks, 2);
+ }
+
+ png_set_keep_unknown_chunks(mPNG, 1, unused_chunks,
+ (int)sizeof(unused_chunks) / 5);
+#endif
+
+#ifdef PNG_SET_USER_LIMITS_SUPPORTED
+ png_set_user_limits(mPNG, MOZ_PNG_MAX_WIDTH, MOZ_PNG_MAX_HEIGHT);
+ png_set_chunk_malloc_max(mPNG, kPngMaxChunkSize);
+#endif
+
+#ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED
+ // Disallow palette-index checking, for speed; we would ignore the warning
+ // anyhow. This feature was added at libpng version 1.5.10 and is disabled
+ // in the embedded libpng but enabled by default in the system libpng. This
+ // call also disables it in the system libpng, for decoding speed.
+ // Bug #745202.
+ png_set_check_for_invalid_index(mPNG, 0);
+#endif
+
+#ifdef PNG_SET_OPTION_SUPPORTED
+# if defined(PNG_sRGB_PROFILE_CHECKS) && PNG_sRGB_PROFILE_CHECKS >= 0
+ // Skip checking of sRGB ICC profiles
+ png_set_option(mPNG, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
+# endif
+
+# ifdef PNG_MAXIMUM_INFLATE_WINDOW
+ // Force a larger zlib inflate window as some images in the wild have
+ // incorrectly set metadata (specifically CMF bits) which prevent us from
+ // decoding them otherwise.
+ png_set_option(mPNG, PNG_MAXIMUM_INFLATE_WINDOW, PNG_OPTION_ON);
+# endif
+#endif
+
+ // use this as libpng "progressive pointer" (retrieve in callbacks)
+ png_set_progressive_read_fn(
+ mPNG, static_cast<png_voidp>(this), nsPNGDecoder::info_callback,
+ nsPNGDecoder::row_callback, nsPNGDecoder::end_callback);
+
+ return NS_OK;
+}
+
+LexerResult nsPNGDecoder::DoDecode(SourceBufferIterator& aIterator,
+ IResumable* aOnResume) {
+ MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+ return mLexer.Lex(aIterator, aOnResume,
+ [=](State aState, const char* aData, size_t aLength) {
+ switch (aState) {
+ case State::PNG_DATA:
+ return ReadPNGData(aData, aLength);
+ case State::FINISHED_PNG_DATA:
+ return FinishedPNGData();
+ }
+ MOZ_CRASH("Unknown State");
+ });
+}
+
+LexerTransition<nsPNGDecoder::State> nsPNGDecoder::ReadPNGData(
+ const char* aData, size_t aLength) {
+ // If we were waiting until after returning from a yield to call
+ // CreateFrame(), call it now.
+ if (mNextFrameInfo) {
+ if (NS_FAILED(CreateFrame(*mNextFrameInfo))) {
+ return Transition::TerminateFailure();
+ }
+
+ MOZ_ASSERT(mImageData, "Should have a buffer now");
+ mNextFrameInfo = Nothing();
+ }
+
+ // libpng uses setjmp/longjmp for error handling.
+ if (setjmp(png_jmpbuf(mPNG))) {
+ return Transition::TerminateFailure();
+ }
+
+ // Pass the data off to libpng.
+ mLastChunkLength = aLength;
+ mNextTransition = Transition::ContinueUnbuffered(State::PNG_DATA);
+ png_process_data(mPNG, mInfo,
+ reinterpret_cast<unsigned char*>(const_cast<char*>((aData))),
+ aLength);
+
+ // Make sure that we've reached a terminal state if decoding is done.
+ MOZ_ASSERT_IF(GetDecodeDone(), mNextTransition.NextStateIsTerminal());
+ MOZ_ASSERT_IF(HasError(), mNextTransition.NextStateIsTerminal());
+
+ // Continue with whatever transition the callback code requested. We
+ // initialized this to Transition::ContinueUnbuffered(State::PNG_DATA) above,
+ // so by default we just continue the unbuffered read.
+ return mNextTransition;
+}
+
+LexerTransition<nsPNGDecoder::State> nsPNGDecoder::FinishedPNGData() {
+ // Since we set up an unbuffered read for SIZE_MAX bytes, if we actually read
+ // all that data something is really wrong.
+ MOZ_ASSERT_UNREACHABLE("Read the entire address space?");
+ return Transition::TerminateFailure();
+}
+
+// Sets up gamma pre-correction in libpng before our callback gets called.
+// We need to do this if we don't end up with a CMS profile.
+static void PNGDoGammaCorrection(png_structp png_ptr, png_infop info_ptr) {
+ double aGamma;
+
+ if (png_get_gAMA(png_ptr, info_ptr, &aGamma)) {
+ if ((aGamma <= 0.0) || (aGamma > 21474.83)) {
+ aGamma = 0.45455;
+ png_set_gAMA(png_ptr, info_ptr, aGamma);
+ }
+ png_set_gamma(png_ptr, 2.2, aGamma);
+ } else {
+ png_set_gamma(png_ptr, 2.2, 0.45455);
+ }
+}
+
+// Adapted from http://www.littlecms.com/pngchrm.c example code
+uint32_t nsPNGDecoder::ReadColorProfile(png_structp png_ptr, png_infop info_ptr,
+ int color_type, bool* sRGBTag) {
+ // First try to see if iCCP chunk is present
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_iCCP)) {
+ png_uint_32 profileLen;
+ png_bytep profileData;
+ png_charp profileName;
+ int compression;
+
+ png_get_iCCP(png_ptr, info_ptr, &profileName, &compression, &profileData,
+ &profileLen);
+
+ mInProfile = qcms_profile_from_memory((char*)profileData, profileLen);
+ if (mInProfile) {
+ uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
+
+ bool mismatch = false;
+ if (color_type & PNG_COLOR_MASK_COLOR) {
+ if (profileSpace != icSigRgbData) {
+ mismatch = true;
+ }
+ } else {
+ if (profileSpace == icSigRgbData) {
+ png_set_gray_to_rgb(png_ptr);
+ } else if (profileSpace != icSigGrayData) {
+ mismatch = true;
+ }
+ }
+
+ if (mismatch) {
+ qcms_profile_release(mInProfile);
+ mInProfile = nullptr;
+ } else {
+ return qcms_profile_get_rendering_intent(mInProfile);
+ }
+ }
+ }
+
+ // Check sRGB chunk
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
+ *sRGBTag = true;
+
+ int fileIntent;
+ png_set_gray_to_rgb(png_ptr);
+ png_get_sRGB(png_ptr, info_ptr, &fileIntent);
+ uint32_t map[] = {QCMS_INTENT_PERCEPTUAL, QCMS_INTENT_RELATIVE_COLORIMETRIC,
+ QCMS_INTENT_SATURATION,
+ QCMS_INTENT_ABSOLUTE_COLORIMETRIC};
+ return map[fileIntent];
+ }
+
+ // Check gAMA/cHRM chunks
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
+ png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {
+ qcms_CIE_xyYTRIPLE primaries;
+ qcms_CIE_xyY whitePoint;
+
+ png_get_cHRM(png_ptr, info_ptr, &whitePoint.x, &whitePoint.y,
+ &primaries.red.x, &primaries.red.y, &primaries.green.x,
+ &primaries.green.y, &primaries.blue.x, &primaries.blue.y);
+ whitePoint.Y = primaries.red.Y = primaries.green.Y = primaries.blue.Y = 1.0;
+
+ double gammaOfFile;
+
+ png_get_gAMA(png_ptr, info_ptr, &gammaOfFile);
+
+ mInProfile = qcms_profile_create_rgb_with_gamma(whitePoint, primaries,
+ 1.0 / gammaOfFile);
+
+ if (mInProfile) {
+ png_set_gray_to_rgb(png_ptr);
+ }
+ }
+
+ return QCMS_INTENT_PERCEPTUAL; // Our default
+}
+
+void nsPNGDecoder::info_callback(png_structp png_ptr, png_infop info_ptr) {
+ png_uint_32 width, height;
+ int bit_depth, color_type, interlace_type, compression_type, filter_type;
+ unsigned int channels;
+
+ png_bytep trans = nullptr;
+ int num_trans = 0;
+
+ nsPNGDecoder* decoder =
+ static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));
+
+ if (decoder->mGotInfoCallback) {
+ MOZ_LOG(sPNGLog, LogLevel::Warning,
+ ("libpng called info_callback more than once\n"));
+ return;
+ }
+
+ decoder->mGotInfoCallback = true;
+
+ // Always decode to 24-bit RGB or 32-bit RGBA
+ png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, &color_type,
+ &interlace_type, &compression_type, &filter_type);
+
+ const OrientedIntRect frameRect(0, 0, width, height);
+
+ // Post our size to the superclass
+ decoder->PostSize(frameRect.Width(), frameRect.Height());
+
+ if (width > SurfaceCache::MaximumCapacity() / (bit_depth > 8 ? 16 : 8)) {
+ // libpng needs space to allocate two row buffers
+ png_error(decoder->mPNG, "Image is too wide");
+ }
+
+ if (decoder->HasError()) {
+ // Setting the size led to an error.
+ png_error(decoder->mPNG, "Sizing error");
+ }
+
+ if (color_type == PNG_COLOR_TYPE_PALETTE) {
+ png_set_expand(png_ptr);
+ }
+
+ if (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8) {
+ png_set_expand(png_ptr);
+ }
+
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
+ png_color_16p trans_values;
+ png_get_tRNS(png_ptr, info_ptr, &trans, &num_trans, &trans_values);
+ // libpng doesn't reject a tRNS chunk with out-of-range samples
+ // so we check it here to avoid setting up a useless opacity
+ // channel or producing unexpected transparent pixels (bug #428045)
+ if (bit_depth < 16) {
+ png_uint_16 sample_max = (1 << bit_depth) - 1;
+ if ((color_type == PNG_COLOR_TYPE_GRAY &&
+ trans_values->gray > sample_max) ||
+ (color_type == PNG_COLOR_TYPE_RGB &&
+ (trans_values->red > sample_max ||
+ trans_values->green > sample_max ||
+ trans_values->blue > sample_max))) {
+ // clear the tRNS valid flag and release tRNS memory
+ png_free_data(png_ptr, info_ptr, PNG_FREE_TRNS, 0);
+ num_trans = 0;
+ }
+ }
+ if (num_trans != 0) {
+ png_set_expand(png_ptr);
+ }
+ }
+
+ if (bit_depth == 16) {
+ png_set_scale_16(png_ptr);
+ }
+
+ // We only need to extract the color profile for non-metadata decodes. It is
+ // fairly expensive to read the profile and create the transform so we should
+ // avoid it if not necessary.
+ uint32_t intent = -1;
+ bool sRGBTag = false;
+ if (!decoder->IsMetadataDecode()) {
+ if (decoder->mCMSMode != CMSMode::Off) {
+ intent = gfxPlatform::GetRenderingIntent();
+ uint32_t pIntent =
+ decoder->ReadColorProfile(png_ptr, info_ptr, color_type, &sRGBTag);
+ // If we're not mandating an intent, use the one from the image.
+ if (intent == uint32_t(-1)) {
+ intent = pIntent;
+ }
+ }
+ if (!decoder->mInProfile || !decoder->GetCMSOutputProfile()) {
+ png_set_gray_to_rgb(png_ptr);
+
+ // only do gamma correction if CMS isn't entirely disabled
+ if (decoder->mCMSMode != CMSMode::Off) {
+ PNGDoGammaCorrection(png_ptr, info_ptr);
+ }
+ }
+ }
+
+ // Let libpng expand interlaced images.
+ const bool isInterlaced = interlace_type == PNG_INTERLACE_ADAM7;
+ if (isInterlaced) {
+ png_set_interlace_handling(png_ptr);
+ }
+
+ // now all of those things we set above are used to update various struct
+ // members and whatnot, after which we can get channels, rowbytes, etc.
+ png_read_update_info(png_ptr, info_ptr);
+ decoder->mChannels = channels = png_get_channels(png_ptr, info_ptr);
+
+ //---------------------------------------------------------------//
+ // copy PNG info into imagelib structs (formerly png_set_dims()) //
+ //---------------------------------------------------------------//
+
+ if (channels < 1 || channels > 4) {
+ png_error(decoder->mPNG, "Invalid number of channels");
+ }
+
+#ifdef PNG_APNG_SUPPORTED
+ bool isAnimated = png_get_valid(png_ptr, info_ptr, PNG_INFO_acTL);
+ if (isAnimated) {
+ int32_t rawTimeout = GetNextFrameDelay(png_ptr, info_ptr);
+ decoder->PostIsAnimated(FrameTimeout::FromRawMilliseconds(rawTimeout));
+
+ if (decoder->Size() != decoder->OutputSize() &&
+ !decoder->IsFirstFrameDecode()) {
+ MOZ_ASSERT_UNREACHABLE(
+ "Doing downscale-during-decode "
+ "for an animated image?");
+ png_error(decoder->mPNG, "Invalid downscale attempt"); // Abort decode.
+ }
+ }
+#endif
+
+ auto transparency = decoder->GetTransparencyType(frameRect);
+ if (decoder->IsMetadataDecode()) {
+ // If we are animated then the first frame rect is either:
+ // 1) the whole image if the IDAT chunk is part of the animation
+ // 2) the frame rect of the first fDAT chunk otherwise.
+ // If we are not animated then we want to make sure to call
+ // PostHasTransparency in the metadata decode if we need to. So it's
+ // okay to pass IntRect(0, 0, width, height) here for animated images;
+ // they will call with the proper first frame rect in the full decode.
+ decoder->PostHasTransparencyIfNeeded(transparency);
+
+ // We have the metadata we're looking for, so stop here, before we allocate
+ // buffers below.
+ return decoder->DoTerminate(png_ptr, TerminalState::SUCCESS);
+ }
+
+ if (decoder->mInProfile && decoder->GetCMSOutputProfile()) {
+ qcms_data_type inType;
+ qcms_data_type outType;
+
+ uint32_t profileSpace = qcms_profile_get_color_space(decoder->mInProfile);
+ decoder->mUsePipeTransform = profileSpace != icSigGrayData;
+ if (decoder->mUsePipeTransform) {
+ // If the transform happens with SurfacePipe, it will be in RGBA if we
+ // have an alpha channel, because the swizzle and premultiplication
+ // happens after color management. Otherwise it will be in BGRA because
+ // the swizzle happens at the start.
+ if (transparency == TransparencyType::eAlpha) {
+ inType = QCMS_DATA_RGBA_8;
+ outType = QCMS_DATA_RGBA_8;
+ } else {
+ inType = gfxPlatform::GetCMSOSRGBAType();
+ outType = inType;
+ }
+ } else {
+ if (color_type & PNG_COLOR_MASK_ALPHA) {
+ inType = QCMS_DATA_GRAYA_8;
+ outType = gfxPlatform::GetCMSOSRGBAType();
+ } else {
+ inType = QCMS_DATA_GRAY_8;
+ outType = gfxPlatform::GetCMSOSRGBAType();
+ }
+ }
+
+ decoder->mTransform = qcms_transform_create(decoder->mInProfile, inType,
+ decoder->GetCMSOutputProfile(),
+ outType, (qcms_intent)intent);
+ } else if ((sRGBTag && decoder->mCMSMode == CMSMode::TaggedOnly) ||
+ decoder->mCMSMode == CMSMode::All) {
+ // If the transform happens with SurfacePipe, it will be in RGBA if we
+ // have an alpha channel, because the swizzle and premultiplication
+ // happens after color management. Otherwise it will be in OS_RGBA because
+ // the swizzle happens at the start.
+ if (transparency == TransparencyType::eAlpha) {
+ decoder->mTransform =
+ decoder->GetCMSsRGBTransform(SurfaceFormat::R8G8B8A8);
+ } else {
+ decoder->mTransform =
+ decoder->GetCMSsRGBTransform(SurfaceFormat::OS_RGBA);
+ }
+ decoder->mUsePipeTransform = true;
+ }
+
+#ifdef PNG_APNG_SUPPORTED
+ if (isAnimated) {
+ png_set_progressive_frame_fn(png_ptr, nsPNGDecoder::frame_info_callback,
+ nullptr);
+ }
+
+ if (png_get_first_frame_is_hidden(png_ptr, info_ptr)) {
+ decoder->mFrameIsHidden = true;
+ } else {
+#endif
+ nsresult rv = decoder->CreateFrame(FrameInfo{frameRect, isInterlaced});
+ if (NS_FAILED(rv)) {
+ png_error(decoder->mPNG, "CreateFrame failed");
+ }
+ MOZ_ASSERT(decoder->mImageData, "Should have a buffer now");
+#ifdef PNG_APNG_SUPPORTED
+ }
+#endif
+
+ if (decoder->mTransform && !decoder->mUsePipeTransform) {
+ decoder->mCMSLine =
+ static_cast<uint8_t*>(malloc(sizeof(uint32_t) * frameRect.Width()));
+ if (!decoder->mCMSLine) {
+ png_error(decoder->mPNG, "malloc of mCMSLine failed");
+ }
+ }
+
+ if (interlace_type == PNG_INTERLACE_ADAM7) {
+ if (frameRect.Height() <
+ INT32_MAX / (frameRect.Width() * int32_t(channels))) {
+ const size_t bufferSize =
+ channels * frameRect.Width() * frameRect.Height();
+
+ if (bufferSize > SurfaceCache::MaximumCapacity()) {
+ png_error(decoder->mPNG, "Insufficient memory to deinterlace image");
+ }
+
+ decoder->interlacebuf = static_cast<uint8_t*>(malloc(bufferSize));
+ }
+ if (!decoder->interlacebuf) {
+ png_error(decoder->mPNG, "malloc of interlacebuf failed");
+ }
+ }
+}
+
+void nsPNGDecoder::PostInvalidationIfNeeded() {
+ Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+ if (!invalidRect) {
+ return;
+ }
+
+ PostInvalidation(invalidRect->mInputSpaceRect,
+ Some(invalidRect->mOutputSpaceRect));
+}
+
+void nsPNGDecoder::row_callback(png_structp png_ptr, png_bytep new_row,
+ png_uint_32 row_num, int pass) {
+ /* libpng comments:
+ *
+ * This function is called for every row in the image. If the
+ * image is interlacing, and you turned on the interlace handler,
+ * this function will be called for every row in every pass.
+ * Some of these rows will not be changed from the previous pass.
+ * When the row is not changed, the new_row variable will be
+ * nullptr. The rows and passes are called in order, so you don't
+ * really need the row_num and pass, but I'm supplying them
+ * because it may make your life easier.
+ *
+ * For the non-nullptr rows of interlaced images, you must call
+ * png_progressive_combine_row() passing in the row and the
+ * old row. You can call this function for nullptr rows (it will
+ * just return) and for non-interlaced images (it just does the
+ * memcpy for you) if it will make the code easier. Thus, you
+ * can just do this for all cases:
+ *
+ * png_progressive_combine_row(png_ptr, old_row, new_row);
+ *
+ * where old_row is what was displayed for previous rows. Note
+ * that the first pass (pass == 0 really) will completely cover
+ * the old row, so the rows do not have to be initialized. After
+ * the first pass (and only for interlaced images), you will have
+ * to pass the current row, and the function will combine the
+ * old row and the new row.
+ */
+ nsPNGDecoder* decoder =
+ static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));
+
+ if (decoder->mFrameIsHidden) {
+ return; // Skip this frame.
+ }
+
+ MOZ_ASSERT_IF(decoder->IsFirstFrameDecode(), decoder->mNumFrames == 0);
+
+ while (pass > decoder->mPass) {
+ // Advance to the next pass. We may have to do this multiple times because
+ // libpng will skip passes if the image is so small that no pixels have
+ // changed on a given pass, but ADAM7InterpolatingFilter needs to be reset
+ // once for every pass to perform interpolation properly.
+ decoder->mPipe.ResetToFirstRow();
+ decoder->mPass++;
+ }
+
+ const png_uint_32 height =
+ static_cast<png_uint_32>(decoder->mFrameRect.Height());
+
+ if (row_num >= height) {
+ // Bail if we receive extra rows. This is especially important because if we
+ // didn't, we might overflow the deinterlacing buffer.
+ MOZ_ASSERT_UNREACHABLE("libpng producing extra rows?");
+ return;
+ }
+
+ // Note that |new_row| may be null here, indicating that this is an interlaced
+ // image and |row_callback| is being called for a row that hasn't changed.
+ MOZ_ASSERT_IF(!new_row, decoder->interlacebuf);
+
+ if (decoder->interlacebuf) {
+ uint32_t width = uint32_t(decoder->mFrameRect.Width());
+
+ // We'll output the deinterlaced version of the row.
+ uint8_t* rowToWrite =
+ decoder->interlacebuf + (row_num * decoder->mChannels * width);
+
+ // Update the deinterlaced version of this row with the new data.
+ png_progressive_combine_row(png_ptr, rowToWrite, new_row);
+
+ decoder->WriteRow(rowToWrite);
+ } else {
+ decoder->WriteRow(new_row);
+ }
+}
+
+void nsPNGDecoder::WriteRow(uint8_t* aRow) {
+ MOZ_ASSERT(aRow);
+
+ uint8_t* rowToWrite = aRow;
+ uint32_t width = uint32_t(mFrameRect.Width());
+
+ // Apply color management to the row, if necessary, before writing it out.
+ // This is only needed for grayscale images.
+ if (mTransform && !mUsePipeTransform) {
+ MOZ_ASSERT(mCMSLine);
+ qcms_transform_data(mTransform, rowToWrite, mCMSLine, width);
+ rowToWrite = mCMSLine;
+ }
+
+ // Write this row to the SurfacePipe.
+ DebugOnly<WriteState> result =
+ mPipe.WriteBuffer(reinterpret_cast<uint32_t*>(rowToWrite));
+ MOZ_ASSERT(WriteState(result) != WriteState::FAILURE);
+
+ PostInvalidationIfNeeded();
+}
+
+void nsPNGDecoder::DoTerminate(png_structp aPNGStruct, TerminalState aState) {
+ // Stop processing data. Note that we intentionally ignore the return value of
+ // png_process_data_pause(), which tells us how many bytes of the data that
+ // was passed to png_process_data() have not been consumed yet, because now
+ // that we've reached a terminal state, we won't do any more decoding or call
+ // back into libpng anymore.
+ png_process_data_pause(aPNGStruct, /* save = */ false);
+
+ mNextTransition = aState == TerminalState::SUCCESS
+ ? Transition::TerminateSuccess()
+ : Transition::TerminateFailure();
+}
+
+void nsPNGDecoder::DoYield(png_structp aPNGStruct) {
+ // Pause data processing. png_process_data_pause() returns how many bytes of
+ // the data that was passed to png_process_data() have not been consumed yet.
+ // We use this information to tell StreamingLexer where to place us in the
+ // input stream when we come back from the yield.
+ png_size_t pendingBytes = png_process_data_pause(aPNGStruct,
+ /* save = */ false);
+
+ MOZ_ASSERT(pendingBytes < mLastChunkLength);
+ size_t consumedBytes = mLastChunkLength - min(pendingBytes, mLastChunkLength);
+
+ mNextTransition =
+ Transition::ContinueUnbufferedAfterYield(State::PNG_DATA, consumedBytes);
+}
+
+nsresult nsPNGDecoder::FinishInternal() {
+ // We shouldn't be called in error cases.
+ MOZ_ASSERT(!HasError(), "Can't call FinishInternal on error!");
+
+ if (IsMetadataDecode()) {
+ return NS_OK;
+ }
+
+ int32_t loop_count = 0;
+#ifdef PNG_APNG_SUPPORTED
+ if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
+ int32_t num_plays = png_get_num_plays(mPNG, mInfo);
+ loop_count = num_plays - 1;
+ }
+#endif
+
+ if (InFrame()) {
+ EndImageFrame();
+ }
+ PostDecodeDone(loop_count);
+
+ return NS_OK;
+}
+
+#ifdef PNG_APNG_SUPPORTED
+// got the header of a new frame that's coming
+void nsPNGDecoder::frame_info_callback(png_structp png_ptr,
+ png_uint_32 frame_num) {
+ nsPNGDecoder* decoder =
+ static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));
+
+ // old frame is done
+ decoder->EndImageFrame();
+
+ const bool previousFrameWasHidden = decoder->mFrameIsHidden;
+
+ if (!previousFrameWasHidden && decoder->IsFirstFrameDecode()) {
+ // We're about to get a second non-hidden frame, but we only want the first.
+ // Stop decoding now. (And avoid allocating the unnecessary buffers below.)
+ return decoder->DoTerminate(png_ptr, TerminalState::SUCCESS);
+ }
+
+ // Only the first frame can be hidden, so unhide unconditionally here.
+ decoder->mFrameIsHidden = false;
+
+ // Save the information necessary to create the frame; we'll actually create
+ // it when we return from the yield.
+ const OrientedIntRect frameRect(
+ png_get_next_frame_x_offset(png_ptr, decoder->mInfo),
+ png_get_next_frame_y_offset(png_ptr, decoder->mInfo),
+ png_get_next_frame_width(png_ptr, decoder->mInfo),
+ png_get_next_frame_height(png_ptr, decoder->mInfo));
+ const bool isInterlaced = bool(decoder->interlacebuf);
+
+# ifndef MOZ_EMBEDDED_LIBPNG
+ // if using system library, check frame_width and height against 0
+ if (frameRect.width == 0) {
+ png_error(png_ptr, "Frame width must not be 0");
+ }
+ if (frameRect.height == 0) {
+ png_error(png_ptr, "Frame height must not be 0");
+ }
+# endif
+
+ const FrameInfo info{frameRect, isInterlaced};
+
+ // If the previous frame was hidden, skip the yield (which will mislead the
+ // caller, who will think the previous frame was real) and just allocate the
+ // new frame here.
+ if (previousFrameWasHidden) {
+ if (NS_FAILED(decoder->CreateFrame(info))) {
+ return decoder->DoTerminate(png_ptr, TerminalState::FAILURE);
+ }
+
+ MOZ_ASSERT(decoder->mImageData, "Should have a buffer now");
+ return; // No yield, so we'll just keep decoding.
+ }
+
+ // Yield to the caller to notify them that the previous frame is now complete.
+ decoder->mNextFrameInfo = Some(info);
+ return decoder->DoYield(png_ptr);
+}
+#endif
+
+void nsPNGDecoder::end_callback(png_structp png_ptr, png_infop info_ptr) {
+ /* libpng comments:
+ *
+ * this function is called when the whole image has been read,
+ * including any chunks after the image (up to and including
+ * the IEND). You will usually have the same info chunk as you
+ * had in the header, although some data may have been added
+ * to the comments and time fields.
+ *
+ * Most people won't do much here, perhaps setting a flag that
+ * marks the image as finished.
+ */
+
+ nsPNGDecoder* decoder =
+ static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));
+
+ // We shouldn't get here if we've hit an error
+ MOZ_ASSERT(!decoder->HasError(), "Finishing up PNG but hit error!");
+
+ return decoder->DoTerminate(png_ptr, TerminalState::SUCCESS);
+}
+
+void nsPNGDecoder::error_callback(png_structp png_ptr,
+ png_const_charp error_msg) {
+ MOZ_LOG(sPNGLog, LogLevel::Error, ("libpng error: %s\n", error_msg));
+ png_longjmp(png_ptr, 1);
+}
+
+void nsPNGDecoder::warning_callback(png_structp png_ptr,
+ png_const_charp warning_msg) {
+ MOZ_LOG(sPNGLog, LogLevel::Warning, ("libpng warning: %s\n", warning_msg));
+}
+
+Maybe<Telemetry::HistogramID> nsPNGDecoder::SpeedHistogram() const {
+ return Some(Telemetry::IMAGE_DECODE_SPEED_PNG);
+}
+
+bool nsPNGDecoder::IsValidICOResource() const {
+ // Only 32-bit RGBA PNGs are valid ICO resources; see here:
+ // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
+
+ // If there are errors in the call to png_get_IHDR, the error_callback in
+ // nsPNGDecoder.cpp is called. In this error callback we do a longjmp, so
+ // we need to save the jump buffer here. Otherwise we'll end up without a
+ // proper callstack.
+ if (setjmp(png_jmpbuf(mPNG))) {
+ // We got here from a longjmp call indirectly from png_get_IHDR
+ return false;
+ }
+
+ png_uint_32 png_width, // Unused
+ png_height; // Unused
+
+ int png_bit_depth, png_color_type;
+
+ if (png_get_IHDR(mPNG, mInfo, &png_width, &png_height, &png_bit_depth,
+ &png_color_type, nullptr, nullptr, nullptr)) {
+ return ((png_color_type == PNG_COLOR_TYPE_RGB_ALPHA ||
+ png_color_type == PNG_COLOR_TYPE_RGB) &&
+ png_bit_depth == 8);
+ } else {
+ return false;
+ }
+}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsPNGDecoder.h b/image/decoders/nsPNGDecoder.h
new file mode 100644
index 0000000000..89d66fa5eb
--- /dev/null
+++ b/image/decoders/nsPNGDecoder.h
@@ -0,0 +1,148 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsPNGDecoder_h
+#define mozilla_image_decoders_nsPNGDecoder_h
+
+#include "Decoder.h"
+#include "png.h"
+#include "StreamingLexer.h"
+#include "SurfacePipe.h"
+#include "mozilla/gfx/Swizzle.h"
+
+namespace mozilla {
+namespace image {
+class RasterImage;
+
+// Decoder for PNG (and APNG, when PNG_APNG_SUPPORTED) images, built on
+// libpng's progressive (callback-driven) reading API.
+class nsPNGDecoder : public Decoder {
+ public:
+  virtual ~nsPNGDecoder();
+
+  /// @return true if this PNG is a valid ICO resource.
+  bool IsValidICOResource() const override;
+
+  DecoderType GetType() const override { return DecoderType::PNG; }
+
+ protected:
+  nsresult InitInternal() override;
+  nsresult FinishInternal() override;
+  LexerResult DoDecode(SourceBufferIterator& aIterator,
+                       IResumable* aOnResume) override;
+
+  Maybe<Telemetry::HistogramID> SpeedHistogram() const override;
+
+ private:
+  friend class DecoderFactory;
+
+  // Decoders should only be instantiated via DecoderFactory.
+  explicit nsPNGDecoder(RasterImage* aImage);
+
+  /// The information necessary to create a frame.
+  struct FrameInfo {
+    OrientedIntRect mFrameRect;
+    bool mIsInterlaced;
+  };
+
+  nsresult CreateFrame(const FrameInfo& aFrameInfo);
+  void EndImageFrame();
+
+  uint32_t ReadColorProfile(png_structp png_ptr, png_infop info_ptr,
+                            int color_type, bool* sRGBTag);
+
+  // True when the channel count implies an alpha channel (gray+alpha or
+  // RGBA).
+  bool HasAlphaChannel() const { return mChannels == 2 || mChannels == 4; }
+
+  enum class TransparencyType { eNone, eAlpha, eFrameRect };
+
+  TransparencyType GetTransparencyType(const OrientedIntRect& aFrameRect);
+  void PostHasTransparencyIfNeeded(TransparencyType aTransparencyType);
+
+  void PostInvalidationIfNeeded();
+
+  void WriteRow(uint8_t* aRow);
+
+  // Convenience methods to make interacting with StreamingLexer from inside
+  // a libpng callback easier.
+  void DoTerminate(png_structp aPNGStruct, TerminalState aState);
+  void DoYield(png_structp aPNGStruct);
+
+  enum class State { PNG_DATA, FINISHED_PNG_DATA };
+
+  LexerTransition<State> ReadPNGData(const char* aData, size_t aLength);
+  LexerTransition<State> FinishedPNGData();
+
+  StreamingLexer<State> mLexer;
+
+  // The next lexer state transition. We need to store it here because we can't
+  // directly return arbitrary values from libpng callbacks.
+  LexerTransition<State> mNextTransition;
+
+  // We yield to the caller every time we finish decoding a frame. When this
+  // happens, we need to allocate the next frame after returning from the yield.
+  // |mNextFrameInfo| is used to store the information needed to allocate the
+  // next frame.
+  Maybe<FrameInfo> mNextFrameInfo;
+
+  // The length of the last chunk of data passed to ReadPNGData(). We use this
+  // to arrange to arrive back at the correct spot in the data after yielding.
+  size_t mLastChunkLength;
+
+ public:
+  png_structp mPNG;    // libpng read struct; owned by this decoder.
+  png_infop mInfo;     // libpng info struct paired with mPNG.
+  OrientedIntRect mFrameRect;
+  uint8_t* mCMSLine;       // Scratch row buffer for color management.
+  uint8_t* interlacebuf;   // Buffer for deinterlacing interlaced PNGs.
+  gfx::SurfaceFormat mFormat;
+
+  uint8_t mChannels;  // Channel count of the decoded data (see
+                      // HasAlphaChannel()).
+  uint8_t mPass;      // Current interlace pass.
+  bool mFrameIsHidden;
+  bool mDisablePremultipliedAlpha;
+  bool mGotInfoCallback;
+  bool mUsePipeTransform;
+
+  // Per-frame animation metadata extracted from the (A)PNG stream.
+  struct AnimFrameInfo {
+    AnimFrameInfo();
+#ifdef PNG_APNG_SUPPORTED
+    AnimFrameInfo(png_structp aPNG, png_infop aInfo);
+#endif
+
+    DisposalMethod mDispose;
+    BlendMethod mBlend;
+    int32_t mTimeout;
+  };
+
+  AnimFrameInfo mAnimInfo;
+
+  SurfacePipe mPipe;  /// The SurfacePipe used to write to the output surface.
+
+  // The number of frames we've finished.
+  uint32_t mNumFrames;
+
+  // libpng callbacks
+  // We put these in the class so that they can access protected members.
+  static void PNGAPI info_callback(png_structp png_ptr, png_infop info_ptr);
+  static void PNGAPI row_callback(png_structp png_ptr, png_bytep new_row,
+                                  png_uint_32 row_num, int pass);
+#ifdef PNG_APNG_SUPPORTED
+  static void PNGAPI frame_info_callback(png_structp png_ptr,
+                                         png_uint_32 frame_num);
+#endif
+  static void PNGAPI end_callback(png_structp png_ptr, png_infop info_ptr);
+  static void PNGAPI error_callback(png_structp png_ptr,
+                                    png_const_charp error_msg);
+  static void PNGAPI warning_callback(png_structp png_ptr,
+                                      png_const_charp warning_msg);
+
+  // This is defined in the PNG spec as an invariant. We use it to
+  // do manual validation without libpng.
+  static const uint8_t pngSignatureBytes[];
+};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsPNGDecoder_h
diff --git a/image/decoders/nsWebPDecoder.cpp b/image/decoders/nsWebPDecoder.cpp
new file mode 100644
index 0000000000..e7467f0066
--- /dev/null
+++ b/image/decoders/nsWebPDecoder.cpp
@@ -0,0 +1,605 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageLogging.h" // Must appear first
+#include "gfxPlatform.h"
+#include "mozilla/TelemetryHistogramEnums.h"
+#include "nsWebPDecoder.h"
+
+#include "RasterImage.h"
+#include "SurfacePipeFactory.h"
+
+using namespace mozilla::gfx;
+
+namespace mozilla {
+namespace image {
+
+static LazyLogModule sWebPLog("WebPDecoder");
+
+// Constructor: initializes all state to "nothing decoded yet". The libwebp
+// incremental decoder (mDecoder) is created lazily per frame in
+// CreateFrame().
+nsWebPDecoder::nsWebPDecoder(RasterImage* aImage)
+    : Decoder(aImage),
+      mDecoder(nullptr),
+      mBlend(BlendMethod::OVER),
+      mDisposal(DisposalMethod::KEEP),
+      mTimeout(FrameTimeout::Forever()),
+      mFormat(SurfaceFormat::OS_RGBX),
+      mLastRow(0),
+      mCurrentFrame(0),
+      mData(nullptr),
+      mLength(0),
+      mIteratorComplete(false),
+      mNeedDemuxer(true),
+      mGotColorProfile(false) {
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::nsWebPDecoder", this));
+}
+
+// Destructor: releases the libwebp incremental decoder and its output
+// buffer if a frame decode was still in progress.
+nsWebPDecoder::~nsWebPDecoder() {
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::~nsWebPDecoder", this));
+  if (mDecoder) {
+    WebPIDelete(mDecoder);
+    WebPFreeDecBuffer(&mBuffer);
+  }
+}
+
+// Parse whatever encoded data we currently have (mData/mLength). Creates a
+// short-lived WebPDemuxer over the buffered bytes when one is needed
+// (header parsing or animations), then dispatches to ReadHeader() or
+// ReadPayload() depending on whether the image size is known yet.
+LexerResult nsWebPDecoder::ReadData() {
+  MOZ_ASSERT(mData);
+  MOZ_ASSERT(mLength > 0);
+
+  WebPDemuxer* demuxer = nullptr;
+  bool complete = mIteratorComplete;
+
+  if (mNeedDemuxer) {
+    WebPDemuxState state;
+    WebPData fragment;
+    fragment.bytes = mData;
+    fragment.size = mLength;
+
+    // WebPDemuxPartial tolerates truncated input; `state` tells us how far
+    // parsing got.
+    demuxer = WebPDemuxPartial(&fragment, &state);
+    if (state == WEBP_DEMUX_PARSE_ERROR) {
+      MOZ_LOG(
+          sWebPLog, LogLevel::Error,
+          ("[this=%p] nsWebPDecoder::ReadData -- demux parse error\n", this));
+      WebPDemuxDelete(demuxer);
+      return LexerResult(TerminalState::FAILURE);
+    }
+
+    if (state == WEBP_DEMUX_PARSING_HEADER) {
+      // Not enough data to even finish the container header; wait for more.
+      WebPDemuxDelete(demuxer);
+      return LexerResult(Yield::NEED_MORE_DATA);
+    }
+
+    if (!demuxer) {
+      MOZ_LOG(sWebPLog, LogLevel::Error,
+              ("[this=%p] nsWebPDecoder::ReadData -- no demuxer\n", this));
+      return LexerResult(TerminalState::FAILURE);
+    }
+
+    complete = complete || state == WEBP_DEMUX_DONE;
+  }
+
+  LexerResult rv(TerminalState::FAILURE);
+  if (!HasSize()) {
+    rv = ReadHeader(demuxer, complete);
+  } else {
+    rv = ReadPayload(demuxer, complete);
+  }
+
+  // The demuxer is recreated on every pass; safe to call with nullptr.
+  WebPDemuxDelete(demuxer);
+  return rv;
+}
+
+// Main decode entry point. Pulls encoded data from the source buffer
+// iterator and feeds it to UpdateBuffer()/ReadData(), looping until we
+// either produce a terminal result, yield output, or genuinely need data
+// that has not arrived yet.
+LexerResult nsWebPDecoder::DoDecode(SourceBufferIterator& aIterator,
+                                    IResumable* aOnResume) {
+  while (true) {
+    SourceBufferIterator::State state = SourceBufferIterator::COMPLETE;
+    if (!mIteratorComplete) {
+      state = aIterator.AdvanceOrScheduleResume(SIZE_MAX, aOnResume);
+
+      // We need to remember since we can't advance a complete iterator.
+      mIteratorComplete = state == SourceBufferIterator::COMPLETE;
+    }
+
+    if (state == SourceBufferIterator::WAITING) {
+      return LexerResult(Yield::NEED_MORE_DATA);
+    }
+
+    LexerResult rv = UpdateBuffer(aIterator, state);
+    if (rv.is<Yield>() && rv.as<Yield>() == Yield::NEED_MORE_DATA) {
+      // We need to check the iterator to see if more is available before
+      // giving up unless we are already complete.
+      if (mIteratorComplete) {
+        // All input consumed but the decoder still wants more: truncated or
+        // corrupt file.
+        MOZ_LOG(sWebPLog, LogLevel::Error,
+                ("[this=%p] nsWebPDecoder::DoDecode -- read all data, "
+                 "but needs more\n",
+                 this));
+        return LexerResult(TerminalState::FAILURE);
+      }
+      continue;
+    }
+
+    return rv;
+  }
+}
+
+// Make the newly-arrived encoded bytes visible through mData/mLength. If
+// the source buffer is contiguous we point directly into it (zero copy);
+// otherwise we accumulate everything into mBufferedData.
+// NOTE(review): the log strings below say "DoDecode"; this logic appears to
+// have been factored out of DoDecode() and the messages kept as-is.
+LexerResult nsWebPDecoder::UpdateBuffer(SourceBufferIterator& aIterator,
+                                        SourceBufferIterator::State aState) {
+  MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!");
+
+  switch (aState) {
+    case SourceBufferIterator::READY:
+      if (!aIterator.IsContiguous()) {
+        // We need to buffer. This should be rare, but expensive.
+        break;
+      }
+      if (!mData) {
+        // For as long as we hold onto an iterator, we know the data pointers
+        // to the chunks cannot change underneath us, so save the pointer to
+        // the first block.
+        MOZ_ASSERT(mLength == 0);
+        mData = reinterpret_cast<const uint8_t*>(aIterator.Data());
+      }
+      mLength += aIterator.Length();
+      return ReadData();
+    case SourceBufferIterator::COMPLETE:
+      if (!mData) {
+        // We must have hit an error, such as an OOM, when buffering the
+        // first set of encoded data.
+        MOZ_LOG(
+            sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::DoDecode -- complete no data\n", this));
+        return LexerResult(TerminalState::FAILURE);
+      }
+      return ReadData();
+    default:
+      MOZ_LOG(sWebPLog, LogLevel::Error,
+              ("[this=%p] nsWebPDecoder::DoDecode -- bad state\n", this));
+      return LexerResult(TerminalState::FAILURE);
+  }
+
+  // We need to buffer. If we have no data buffered, we need to get everything
+  // from the first chunk of the source buffer before appending the new data.
+  if (mBufferedData.empty()) {
+    MOZ_ASSERT(mData);
+    MOZ_ASSERT(mLength > 0);
+
+    if (!mBufferedData.append(mData, mLength)) {
+      MOZ_LOG(sWebPLog, LogLevel::Error,
+              ("[this=%p] nsWebPDecoder::DoDecode -- oom, initialize %zu\n",
+               this, mLength));
+      return LexerResult(TerminalState::FAILURE);
+    }
+
+    MOZ_LOG(sWebPLog, LogLevel::Debug,
+            ("[this=%p] nsWebPDecoder::DoDecode -- buffered %zu bytes\n", this,
+             mLength));
+  }
+
+  // Append the incremental data from the iterator.
+  if (!mBufferedData.append(aIterator.Data(), aIterator.Length())) {
+    MOZ_LOG(sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::DoDecode -- oom, append %zu on %zu\n",
+             this, aIterator.Length(), mBufferedData.length()));
+    return LexerResult(TerminalState::FAILURE);
+  }
+
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::DoDecode -- buffered %zu -> %zu bytes\n",
+           this, aIterator.Length(), mBufferedData.length()));
+  // From here on, mData/mLength refer to our private copy. Vector growth may
+  // reallocate, so refresh the pointer after every append.
+  mData = mBufferedData.begin();
+  mLength = mBufferedData.length();
+  return ReadData();
+}
+
+// Set up libwebp's incremental decoder (mDecoder/mBuffer) and the output
+// SurfacePipe for one frame covering aFrameRect. Returns NS_ERROR_FAILURE
+// on any setup problem; NS_OK when ReadSingle() may start feeding data.
+nsresult nsWebPDecoder::CreateFrame(const OrientedIntRect& aFrameRect) {
+  MOZ_ASSERT(HasSize());
+  MOZ_ASSERT(!mDecoder);
+
+  MOZ_LOG(
+      sWebPLog, LogLevel::Debug,
+      ("[this=%p] nsWebPDecoder::CreateFrame -- frame %u, (%d, %d) %d x %d\n",
+       this, mCurrentFrame, aFrameRect.x, aFrameRect.y, aFrameRect.width,
+       aFrameRect.height));
+
+  if (aFrameRect.width <= 0 || aFrameRect.height <= 0) {
+    MOZ_LOG(sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::CreateFrame -- bad frame rect\n", this));
+    return NS_ERROR_FAILURE;
+  }
+
+  // If this is our first frame in an animation and it doesn't cover the
+  // full frame, then we are transparent even if there is no alpha
+  if (mCurrentFrame == 0 && !aFrameRect.IsEqualEdges(FullFrame())) {
+    MOZ_ASSERT(HasAnimation());
+    mFormat = SurfaceFormat::OS_RGBA;
+    PostHasTransparency();
+  }
+
+  WebPInitDecBuffer(&mBuffer);
+
+  // OS_RGBA is a platform-dependent alias for one of the concrete formats
+  // below; this switch on a constant resolves at compile time to pick the
+  // matching libwebp output colorspace.
+  switch (SurfaceFormat::OS_RGBA) {
+    case SurfaceFormat::B8G8R8A8:
+      mBuffer.colorspace = MODE_BGRA;
+      break;
+    case SurfaceFormat::A8R8G8B8:
+      mBuffer.colorspace = MODE_ARGB;
+      break;
+    case SurfaceFormat::R8G8B8A8:
+      mBuffer.colorspace = MODE_RGBA;
+      break;
+    default:
+      MOZ_ASSERT_UNREACHABLE("Unknown OS_RGBA");
+      return NS_ERROR_FAILURE;
+  }
+
+  mDecoder = WebPINewDecoder(&mBuffer);
+  if (!mDecoder) {
+    MOZ_LOG(sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::CreateFrame -- create decoder error\n",
+             this));
+    return NS_ERROR_FAILURE;
+  }
+
+  // WebP doesn't guarantee that the alpha generated matches the hint in the
+  // header, so we always need to claim the input is BGRA. If the output is
+  // BGRX, swizzling will mask off the alpha channel.
+  SurfaceFormat inFormat = SurfaceFormat::OS_RGBA;
+
+  SurfacePipeFlags pipeFlags = SurfacePipeFlags();
+  if (mFormat == SurfaceFormat::OS_RGBA &&
+      !(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA)) {
+    pipeFlags |= SurfacePipeFlags::PREMULTIPLY_ALPHA;
+  }
+
+  // Animation parameters are only supplied for full (non-first-frame-only)
+  // decodes of animated images.
+  Maybe<AnimationParams> animParams;
+  if (!IsFirstFrameDecode()) {
+    animParams.emplace(aFrameRect.ToUnknownRect(), mTimeout, mCurrentFrame,
+                       mBlend, mDisposal);
+  }
+
+  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
+      this, Size(), OutputSize(), aFrameRect, inFormat, mFormat, animParams,
+      mTransform, pipeFlags);
+  if (!pipe) {
+    MOZ_LOG(sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::CreateFrame -- no pipe\n", this));
+    return NS_ERROR_FAILURE;
+  }
+
+  mFrameRect = aFrameRect;
+  mPipe = std::move(*pipe);
+  return NS_OK;
+}
+
+// Finish the current frame: report it upstream, tear down the per-frame
+// libwebp decoder state, and advance the frame counter for the next one.
+void nsWebPDecoder::EndFrame() {
+  MOZ_ASSERT(HasSize());
+  MOZ_ASSERT(mDecoder);
+
+  // Frames decoded as OS_RGBA may contain transparency; OS_RGBX is opaque.
+  auto opacity = mFormat == SurfaceFormat::OS_RGBA ? Opacity::SOME_TRANSPARENCY
+                                                   : Opacity::FULLY_OPAQUE;
+
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::EndFrame -- frame %u, opacity %d, "
+           "disposal %d, timeout %d, blend %d\n",
+           this, mCurrentFrame, (int)opacity, (int)mDisposal,
+           mTimeout.AsEncodedValueDeprecated(), (int)mBlend));
+
+  PostFrameStop(opacity);
+  WebPIDelete(mDecoder);
+  WebPFreeDecBuffer(&mBuffer);
+  mDecoder = nullptr;
+  mLastRow = 0;
+  ++mCurrentFrame;
+}
+
+// Set up the color management transform (mTransform) once per image.
+// aProfile is the raw ICC profile bytes from the ICCP chunk, or nullptr if
+// the image is untagged. Called at most once; guarded by mGotColorProfile.
+void nsWebPDecoder::ApplyColorProfile(const char* aProfile, size_t aLength) {
+  MOZ_ASSERT(!mGotColorProfile);
+  mGotColorProfile = true;
+
+  // Skip color management entirely when it is disabled, there is no output
+  // profile, or we only manage tagged images and this one is untagged.
+  if (mCMSMode == CMSMode::Off || !GetCMSOutputProfile() ||
+      (mCMSMode == CMSMode::TaggedOnly && !aProfile)) {
+    return;
+  }
+
+  if (!aProfile) {
+    MOZ_LOG(sWebPLog, LogLevel::Debug,
+            ("[this=%p] nsWebPDecoder::ApplyColorProfile -- not tagged, use "
+             "sRGB transform\n",
+             this));
+    mTransform = GetCMSsRGBTransform(SurfaceFormat::OS_RGBA);
+    return;
+  }
+
+  mInProfile = qcms_profile_from_memory(aProfile, aLength);
+  if (!mInProfile) {
+    // Corrupt/unparseable profile: decode proceeds unmanaged.
+    MOZ_LOG(
+        sWebPLog, LogLevel::Error,
+        ("[this=%p] nsWebPDecoder::ApplyColorProfile -- bad color profile\n",
+         this));
+    return;
+  }
+
+  uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
+  if (profileSpace != icSigRgbData) {
+    // WebP doesn't produce grayscale data, this must be corrupt.
+    MOZ_LOG(sWebPLog, LogLevel::Error,
+            ("[this=%p] nsWebPDecoder::ApplyColorProfile -- ignoring non-rgb "
+             "color profile\n",
+             this));
+    return;
+  }
+
+  // Calculate rendering intent.
+  int intent = gfxPlatform::GetRenderingIntent();
+  if (intent == -1) {
+    // -1 means "use the intent embedded in the profile".
+    intent = qcms_profile_get_rendering_intent(mInProfile);
+  }
+
+  // Create the color management transform.
+  qcms_data_type type = gfxPlatform::GetCMSOSRGBAType();
+  mTransform = qcms_transform_create(mInProfile, type, GetCMSOutputProfile(),
+                                     type, (qcms_intent)intent);
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::ApplyColorProfile -- use tagged "
+           "transform\n",
+           this));
+}
+
+// Parse the WebP container header: color profile, animation flag (and first
+// frame timeout), canvas size, and alpha flag. Posts the metadata upstream
+// and, for full decodes, falls through into ReadPayload().
+LexerResult nsWebPDecoder::ReadHeader(WebPDemuxer* aDemuxer, bool aIsComplete) {
+  MOZ_ASSERT(aDemuxer);
+
+  MOZ_LOG(
+      sWebPLog, LogLevel::Debug,
+      ("[this=%p] nsWebPDecoder::ReadHeader -- %zu bytes\n", this, mLength));
+
+  uint32_t flags = WebPDemuxGetI(aDemuxer, WEBP_FF_FORMAT_FLAGS);
+
+  if (!IsMetadataDecode() && !mGotColorProfile) {
+    if (flags & WebPFeatureFlags::ICCP_FLAG) {
+      WebPChunkIterator iter;
+      if (WebPDemuxGetChunk(aDemuxer, "ICCP", 1, &iter)) {
+        ApplyColorProfile(reinterpret_cast<const char*>(iter.chunk.bytes),
+                          iter.chunk.size);
+        WebPDemuxReleaseChunkIterator(&iter);
+
+      } else {
+        // The ICCP chunk may simply not have arrived yet; only treat its
+        // absence as final once we have all the data.
+        if (!aIsComplete) {
+          return LexerResult(Yield::NEED_MORE_DATA);
+        }
+
+        MOZ_LOG(sWebPLog, LogLevel::Warning,
+                ("[this=%p] nsWebPDecoder::ReadHeader header specified ICCP "
+                 "but no ICCP chunk found, ignoring\n",
+                 this));
+
+        ApplyColorProfile(nullptr, 0);
+      }
+    } else {
+      ApplyColorProfile(nullptr, 0);
+    }
+  }
+
+  if (flags & WebPFeatureFlags::ANIMATION_FLAG) {
+    // A metadata decode expects to get the correct first frame timeout which
+    // sadly is not provided by the normal WebP header parsing.
+    WebPIterator iter;
+    if (!WebPDemuxGetFrame(aDemuxer, 1, &iter)) {
+      return aIsComplete ? LexerResult(TerminalState::FAILURE)
+                         : LexerResult(Yield::NEED_MORE_DATA);
+    }
+
+    PostIsAnimated(FrameTimeout::FromRawMilliseconds(iter.duration));
+    WebPDemuxReleaseIterator(&iter);
+  } else {
+    // Single frames don't need a demuxer to be created.
+    mNeedDemuxer = false;
+  }
+
+  uint32_t width = WebPDemuxGetI(aDemuxer, WEBP_FF_CANVAS_WIDTH);
+  uint32_t height = WebPDemuxGetI(aDemuxer, WEBP_FF_CANVAS_HEIGHT);
+  if (width > INT32_MAX || height > INT32_MAX) {
+    // Dimensions must fit in the signed 32-bit rects used downstream.
+    return LexerResult(TerminalState::FAILURE);
+  }
+
+  PostSize(width, height);
+
+  bool alpha = flags & WebPFeatureFlags::ALPHA_FLAG;
+  if (alpha) {
+    mFormat = SurfaceFormat::OS_RGBA;
+    PostHasTransparency();
+  }
+
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::ReadHeader -- %u x %u, alpha %d, "
+           "animation %d, metadata decode %d, first frame decode %d\n",
+           this, width, height, alpha, HasAnimation(), IsMetadataDecode(),
+           IsFirstFrameDecode()));
+
+  if (IsMetadataDecode()) {
+    return LexerResult(TerminalState::SUCCESS);
+  }
+
+  return ReadPayload(aDemuxer, aIsComplete);
+}
+
+// Decode pixel data: a single still frame goes straight to ReadSingle()
+// (posting decode-done on success); animations go through ReadMultiple().
+LexerResult nsWebPDecoder::ReadPayload(WebPDemuxer* aDemuxer,
+                                       bool aIsComplete) {
+  if (!HasAnimation()) {
+    auto rv = ReadSingle(mData, mLength, FullFrame());
+    if (rv.is<TerminalState>() &&
+        rv.as<TerminalState>() == TerminalState::SUCCESS) {
+      PostDecodeDone();
+    }
+    return rv;
+  }
+  return ReadMultiple(aDemuxer, aIsComplete);
+}
+
+// Incrementally decode one frame's bitstream (aData/aLength) into the
+// surface pipe. Creates the frame on first call, then repeatedly feeds
+// libwebp and copies any newly-available rows into mPipe, resuming from
+// mLastRow after a yield.
+LexerResult nsWebPDecoder::ReadSingle(const uint8_t* aData, size_t aLength,
+                                      const OrientedIntRect& aFrameRect) {
+  MOZ_ASSERT(!IsMetadataDecode());
+  MOZ_ASSERT(aData);
+  MOZ_ASSERT(aLength > 0);
+
+  MOZ_LOG(
+      sWebPLog, LogLevel::Debug,
+      ("[this=%p] nsWebPDecoder::ReadSingle -- %zu bytes\n", this, aLength));
+
+  if (!mDecoder && NS_FAILED(CreateFrame(aFrameRect))) {
+    return LexerResult(TerminalState::FAILURE);
+  }
+
+  bool complete;
+  do {
+    // WebPIUpdate expects the full buffer each call, not just the delta.
+    VP8StatusCode status = WebPIUpdate(mDecoder, aData, aLength);
+    switch (status) {
+      case VP8_STATUS_OK:
+        complete = true;
+        break;
+      case VP8_STATUS_SUSPENDED:
+        // More input required to finish the frame.
+        complete = false;
+        break;
+      default:
+        MOZ_LOG(sWebPLog, LogLevel::Error,
+                ("[this=%p] nsWebPDecoder::ReadSingle -- append error %d\n",
+                 this, status));
+        return LexerResult(TerminalState::FAILURE);
+    }
+
+    int lastRow = -1;
+    int width = 0;
+    int height = 0;
+    int stride = 0;
+    uint8_t* rowStart =
+        WebPIDecGetRGB(mDecoder, &lastRow, &width, &height, &stride);
+
+    MOZ_LOG(
+        sWebPLog, LogLevel::Debug,
+        ("[this=%p] nsWebPDecoder::ReadSingle -- complete %d, read %d rows, "
+         "has %d rows available\n",
+         this, complete, mLastRow, lastRow));
+
+    // No new rows decoded yet; yield until more input arrives.
+    if (!rowStart || lastRow == -1 || lastRow == mLastRow) {
+      return LexerResult(Yield::NEED_MORE_DATA);
+    }
+
+    // Sanity-check libwebp's reported geometry against the frame we set up.
+    if (width != mFrameRect.width || height != mFrameRect.height ||
+        stride < mFrameRect.width * 4 || lastRow > mFrameRect.height) {
+      MOZ_LOG(sWebPLog, LogLevel::Error,
+              ("[this=%p] nsWebPDecoder::ReadSingle -- bad (w,h,s) = (%d, %d, "
+               "%d)\n",
+               this, width, height, stride));
+      return LexerResult(TerminalState::FAILURE);
+    }
+
+    // Copy only the rows decoded since the last pass.
+    for (int row = mLastRow; row < lastRow; row++) {
+      uint32_t* src = reinterpret_cast<uint32_t*>(rowStart + row * stride);
+      WriteState result = mPipe.WriteBuffer(src);
+
+      Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
+      if (invalidRect) {
+        PostInvalidation(invalidRect->mInputSpaceRect,
+                         Some(invalidRect->mOutputSpaceRect));
+      }
+
+      if (result == WriteState::FAILURE) {
+        MOZ_LOG(sWebPLog, LogLevel::Error,
+                ("[this=%p] nsWebPDecoder::ReadSingle -- write pixels error\n",
+                 this));
+        return LexerResult(TerminalState::FAILURE);
+      }
+
+      if (result == WriteState::FINISHED) {
+        MOZ_ASSERT(row == lastRow - 1, "There was more data to read?");
+        complete = true;
+        break;
+      }
+    }
+
+    mLastRow = lastRow;
+  } while (!complete);
+
+  if (!complete) {
+    return LexerResult(Yield::NEED_MORE_DATA);
+  }
+
+  EndFrame();
+  return LexerResult(TerminalState::SUCCESS);
+}
+
+// Decode the next frame of an animated WebP. Pulls the current frame's
+// metadata (blend/dispose/timeout/rect) from the demuxer, decodes its
+// fragment via ReadSingle(), and either yields the finished frame or posts
+// decode-done when the last frame is reached.
+LexerResult nsWebPDecoder::ReadMultiple(WebPDemuxer* aDemuxer,
+                                        bool aIsComplete) {
+  MOZ_ASSERT(!IsMetadataDecode());
+  MOZ_ASSERT(aDemuxer);
+
+  MOZ_LOG(sWebPLog, LogLevel::Debug,
+          ("[this=%p] nsWebPDecoder::ReadMultiple\n", this));
+
+  bool complete = aIsComplete;
+  WebPIterator iter;
+  auto rv = LexerResult(Yield::NEED_MORE_DATA);
+  // WebP frame numbers are 1-based; mCurrentFrame counts finished frames.
+  if (WebPDemuxGetFrame(aDemuxer, mCurrentFrame + 1, &iter)) {
+    switch (iter.blend_method) {
+      case WEBP_MUX_BLEND:
+        mBlend = BlendMethod::OVER;
+        break;
+      case WEBP_MUX_NO_BLEND:
+        mBlend = BlendMethod::SOURCE;
+        break;
+      default:
+        MOZ_ASSERT_UNREACHABLE("Unhandled blend method");
+        break;
+    }
+
+    switch (iter.dispose_method) {
+      case WEBP_MUX_DISPOSE_NONE:
+        mDisposal = DisposalMethod::KEEP;
+        break;
+      case WEBP_MUX_DISPOSE_BACKGROUND:
+        mDisposal = DisposalMethod::CLEAR;
+        break;
+      default:
+        MOZ_ASSERT_UNREACHABLE("Unhandled dispose method");
+        break;
+    }
+
+    // Frames after the first always get an alpha format because they may
+    // only partially cover the canvas.
+    mFormat = iter.has_alpha || mCurrentFrame > 0 ? SurfaceFormat::OS_RGBA
+                                                  : SurfaceFormat::OS_RGBX;
+    mTimeout = FrameTimeout::FromRawMilliseconds(iter.duration);
+    OrientedIntRect frameRect(iter.x_offset, iter.y_offset, iter.width,
+                              iter.height);
+
+    rv = ReadSingle(iter.fragment.bytes, iter.fragment.size, frameRect);
+    // We are done only if all data arrived AND there is no frame after this
+    // one.
+    complete = complete && !WebPDemuxNextFrame(&iter);
+    WebPDemuxReleaseIterator(&iter);
+  }
+
+  if (rv.is<TerminalState>() &&
+      rv.as<TerminalState>() == TerminalState::SUCCESS) {
+    // If we extracted one frame, and it is not the last, we need to yield to
+    // the lexer to allow the upper layers to acknowledge the frame.
+    if (!complete && !IsFirstFrameDecode()) {
+      rv = LexerResult(Yield::OUTPUT_AVAILABLE);
+    } else {
+      uint32_t loopCount = WebPDemuxGetI(aDemuxer, WEBP_FF_LOOP_COUNT);
+
+      MOZ_LOG(sWebPLog, LogLevel::Debug,
+              ("[this=%p] nsWebPDecoder::ReadMultiple -- loop count %u\n", this,
+               loopCount));
+      // NOTE(review): a WebP loop count of 0 means "loop forever"; the
+      // unsigned subtraction then wraps, presumably matching the consumer's
+      // "infinite" encoding — confirm against PostDecodeDone's contract.
+      PostDecodeDone(loopCount - 1);
+    }
+  }
+
+  return rv;
+}
+
+// Telemetry histogram used to record WebP decode speed.
+Maybe<Telemetry::HistogramID> nsWebPDecoder::SpeedHistogram() const {
+  return Some(Telemetry::IMAGE_DECODE_SPEED_WEBP);
+}
+
+} // namespace image
+} // namespace mozilla
diff --git a/image/decoders/nsWebPDecoder.h b/image/decoders/nsWebPDecoder.h
new file mode 100644
index 0000000000..e69122d19a
--- /dev/null
+++ b/image/decoders/nsWebPDecoder.h
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_decoders_nsWebPDecoder_h
+#define mozilla_image_decoders_nsWebPDecoder_h
+
+#include "Decoder.h"
+#include "webp/demux.h"
+#include "StreamingLexer.h"
+#include "SurfacePipe.h"
+
+namespace mozilla {
+namespace image {
+class RasterImage;
+
+// Decoder for WebP images (still and animated), built on libwebp's demux
+// and incremental-decode APIs.
+class nsWebPDecoder final : public Decoder {
+ public:
+  virtual ~nsWebPDecoder();
+
+  DecoderType GetType() const override { return DecoderType::WEBP; }
+
+ protected:
+  LexerResult DoDecode(SourceBufferIterator& aIterator,
+                       IResumable* aOnResume) override;
+  Maybe<Telemetry::HistogramID> SpeedHistogram() const override;
+
+ private:
+  friend class DecoderFactory;
+
+  // Decoders should only be instantiated via DecoderFactory.
+  explicit nsWebPDecoder(RasterImage* aImage);
+
+  void ApplyColorProfile(const char* aProfile, size_t aLength);
+
+  LexerResult UpdateBuffer(SourceBufferIterator& aIterator,
+                           SourceBufferIterator::State aState);
+  LexerResult ReadData();
+  LexerResult ReadHeader(WebPDemuxer* aDemuxer, bool aIsComplete);
+  LexerResult ReadPayload(WebPDemuxer* aDemuxer, bool aIsComplete);
+
+  nsresult CreateFrame(const OrientedIntRect& aFrameRect);
+  void EndFrame();
+
+  LexerResult ReadSingle(const uint8_t* aData, size_t aLength,
+                         const OrientedIntRect& aFrameRect);
+
+  LexerResult ReadMultiple(WebPDemuxer* aDemuxer, bool aIsComplete);
+
+  /// The SurfacePipe used to write to the output surface.
+  SurfacePipe mPipe;
+
+  /// The buffer used to accumulate data until the complete WebP header is
+  /// received, if and only if the iterator is discontiguous.
+  Vector<uint8_t> mBufferedData;
+
+  /// The libwebp output buffer descriptor pointing to the decoded data.
+  WebPDecBuffer mBuffer;
+
+  /// The libwebp incremental decoder descriptor, wraps mBuffer.
+  WebPIDecoder* mDecoder;
+
+  /// Blend method for the current frame.
+  BlendMethod mBlend;
+
+  /// Disposal method for the current frame.
+  DisposalMethod mDisposal;
+
+  /// Frame timeout for the current frame.
+  FrameTimeout mTimeout;
+
+  /// Surface format for the current frame.
+  gfx::SurfaceFormat mFormat;
+
+  /// Frame rect for the current frame.
+  OrientedIntRect mFrameRect;
+
+  /// The last row of decoded pixels written to mPipe.
+  int mLastRow;
+
+  /// Number of decoded frames.
+  uint32_t mCurrentFrame;
+
+  /// Pointer to the start of the contiguous encoded image data.
+  const uint8_t* mData;
+
+  /// Length of data pointed to by mData.
+  size_t mLength;
+
+  /// True if the iterator has reached its end.
+  bool mIteratorComplete;
+
+  /// True if this decoding pass requires a WebPDemuxer.
+  bool mNeedDemuxer;
+
+  /// True if we have setup the color profile for the image.
+  bool mGotColorProfile;
+};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_decoders_nsWebPDecoder_h