summaryrefslogtreecommitdiffstats
path: root/third_party/rust/image/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/image/src
parentInitial commit. (diff)
downloadfirefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/image/src')
-rw-r--r--third_party/rust/image/src/animation.rs330
-rw-r--r--third_party/rust/image/src/bmp/decoder.rs1367
-rw-r--r--third_party/rust/image/src/bmp/encoder.rs348
-rw-r--r--third_party/rust/image/src/bmp/mod.rs14
-rw-r--r--third_party/rust/image/src/buffer.rs1348
-rw-r--r--third_party/rust/image/src/color.rs1276
-rw-r--r--third_party/rust/image/src/dds.rs170
-rw-r--r--third_party/rust/image/src/dxt.rs806
-rw-r--r--third_party/rust/image/src/dynimage.rs1157
-rw-r--r--third_party/rust/image/src/error.rs621
-rw-r--r--third_party/rust/image/src/flat.rs1551
-rw-r--r--third_party/rust/image/src/gif.rs425
-rw-r--r--third_party/rust/image/src/hdr/decoder.rs915
-rw-r--r--third_party/rust/image/src/hdr/encoder.rs431
-rw-r--r--third_party/rust/image/src/hdr/mod.rs15
-rw-r--r--third_party/rust/image/src/ico/decoder.rs284
-rw-r--r--third_party/rust/image/src/ico/encoder.rs113
-rw-r--r--third_party/rust/image/src/ico/mod.rs13
-rw-r--r--third_party/rust/image/src/image.rs1088
-rw-r--r--third_party/rust/image/src/imageops/affine.rs387
-rw-r--r--third_party/rust/image/src/imageops/colorops.rs325
-rw-r--r--third_party/rust/image/src/imageops/mod.rs219
-rw-r--r--third_party/rust/image/src/imageops/sample.rs873
-rw-r--r--third_party/rust/image/src/io/free_functions.rs289
-rw-r--r--third_party/rust/image/src/io/mod.rs5
-rw-r--r--third_party/rust/image/src/io/reader.rs210
-rw-r--r--third_party/rust/image/src/jpeg/decoder.rs136
-rw-r--r--third_party/rust/image/src/jpeg/encoder.rs917
-rw-r--r--third_party/rust/image/src/jpeg/entropy.rs61
-rw-r--r--third_party/rust/image/src/jpeg/mod.rs16
-rw-r--r--third_party/rust/image/src/jpeg/transform.rs196
-rw-r--r--third_party/rust/image/src/lib.rs141
-rw-r--r--third_party/rust/image/src/math/mod.rs6
-rw-r--r--third_party/rust/image/src/math/nq.rs409
-rw-r--r--third_party/rust/image/src/math/rect.rs12
-rw-r--r--third_party/rust/image/src/math/utils.rs24
-rw-r--r--third_party/rust/image/src/png.rs333
-rw-r--r--third_party/rust/image/src/pnm/autobreak.rs124
-rw-r--r--third_party/rust/image/src/pnm/decoder.rs1104
-rw-r--r--third_party/rust/image/src/pnm/encoder.rs653
-rw-r--r--third_party/rust/image/src/pnm/header.rs348
-rw-r--r--third_party/rust/image/src/pnm/mod.rs149
-rw-r--r--third_party/rust/image/src/tga/decoder.rs529
-rw-r--r--third_party/rust/image/src/tga/mod.rs13
-rw-r--r--third_party/rust/image/src/tiff.rs175
-rw-r--r--third_party/rust/image/src/traits.rs75
-rw-r--r--third_party/rust/image/src/utils/mod.rs127
-rw-r--r--third_party/rust/image/src/webp/decoder.rs138
-rw-r--r--third_party/rust/image/src/webp/mod.rs8
-rw-r--r--third_party/rust/image/src/webp/transform.rs77
-rw-r--r--third_party/rust/image/src/webp/vp8.rs2003
51 files changed, 22354 insertions, 0 deletions
diff --git a/third_party/rust/image/src/animation.rs b/third_party/rust/image/src/animation.rs
new file mode 100644
index 0000000000..a10c0f611f
--- /dev/null
+++ b/third_party/rust/image/src/animation.rs
@@ -0,0 +1,330 @@
+use std::iter::Iterator;
+use std::time::Duration;
+
+use num_rational::Ratio;
+
+use crate::buffer::RgbaImage;
+use crate::error::ImageResult;
+
/// An implementation dependent iterator, reading the frames as requested
pub struct Frames<'a> {
    // Boxed so each decoder can supply its own frame-producing iterator.
    iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>
}

impl<'a> Frames<'a> {
    /// Creates a new `Frames` from an implementation specific iterator.
    pub fn new(iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>) -> Self {
        Frames { iterator }
    }

    /// Steps through the iterator from the current frame until the end and pushes each frame into
    /// a `Vec`.
    /// If an error is encountered that error is returned instead.
    ///
    /// Note: This is equivalent to `Frames::collect::<ImageResult<Vec<Frame>>>()`
    pub fn collect_frames(self) -> ImageResult<Vec<Frame>> {
        self.collect()
    }
}

impl<'a> Iterator for Frames<'a> {
    type Item = ImageResult<Frame>;
    // Delegates straight to the decoder-provided iterator.
    fn next(&mut self) -> Option<ImageResult<Frame>> {
        self.iterator.next()
    }
}

/// A single animation frame
#[derive(Clone)]
pub struct Frame {
    /// Delay of this frame relative to the previous one
    delay: Delay,
    /// x offset of the frame within the animation canvas
    left: u32,
    /// y offset of the frame within the animation canvas
    top: u32,
    /// RGBA pixel data of this frame
    buffer: RgbaImage,
}

/// The delay of a frame relative to the previous one.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)]
pub struct Delay {
    // Stored as an exact ratio of milliseconds so sub-millisecond delays
    // (e.g. 1000/30 ms for 30 fps) do not lose precision.
    ratio: Ratio<u32>,
}
+
impl Frame {
    /// Constructs a new frame without any delay.
    pub fn new(buffer: RgbaImage) -> Frame {
        Frame {
            delay: Delay::from_ratio(Ratio::from_integer(0)),
            left: 0,
            top: 0,
            buffer,
        }
    }

    /// Constructs a new frame from its parts: pixel data, canvas offsets and delay.
    pub fn from_parts(buffer: RgbaImage, left: u32, top: u32, delay: Delay) -> Frame {
        Frame {
            delay,
            left,
            top,
            buffer,
        }
    }

    /// Delay of this frame
    pub fn delay(&self) -> Delay {
        self.delay
    }

    /// Returns a reference to the image buffer
    pub fn buffer(&self) -> &RgbaImage {
        &self.buffer
    }

    /// Consumes the frame and returns the owned image buffer
    pub fn into_buffer(self) -> RgbaImage {
        self.buffer
    }

    /// Returns the x offset
    pub fn left(&self) -> u32 {
        self.left
    }

    /// Returns the y offset
    pub fn top(&self) -> u32 {
        self.top
    }
}
+
impl Delay {
    /// Create a delay from a ratio of milliseconds.
    ///
    /// # Examples
    ///
    /// ```
    /// use image::Delay;
    /// let delay_10ms = Delay::from_numer_denom_ms(10, 1);
    /// ```
    pub fn from_numer_denom_ms(numerator: u32, denominator: u32) -> Self {
        // `new_raw` skips gcd reduction so `numer_denom_ms` can return the caller's
        // exact parts. NOTE(review): a zero denominator is not rejected here — confirm
        // callers never pass one.
        Delay { ratio: Ratio::new_raw(numerator, denominator) }
    }

    /// Convert from a duration, clamped between 0 and an implemented defined maximum.
    ///
    /// The maximum is *at least* `i32::MAX` milliseconds. It should be noted that the accuracy of
    /// the result may be relative and very large delays have a coarse resolution.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use image::Delay;
    ///
    /// let duration = Duration::from_millis(20);
    /// let delay = Delay::from_saturating_duration(duration);
    /// ```
    pub fn from_saturating_duration(duration: Duration) -> Self {
        // A few notes: The largest number we can represent as a ratio is u32::MAX but we can
        // sometimes represent much smaller numbers.
        //
        // We can represent duration as `millis+a/b` (where a < b, b > 0).
        // We must thus bound b with `b*millis + (b-1) <= u32::MAX` or
        // > `0 < b <= (u32::MAX + 1)/(millis + 1)`
        // Corollary: millis <= u32::MAX

        const MILLIS_BOUND: u128 = u32::max_value() as u128;

        // Saturate the whole-millisecond part; keep the sub-millisecond remainder in nanoseconds.
        let millis = duration.as_millis().min(MILLIS_BOUND);
        let submillis = (duration.as_nanos() % 1_000_000) as u32;

        // Largest denominator `b` such that the final numerator `a + b*millis` fits in u32.
        let max_b = if millis > 0 {
            ((MILLIS_BOUND + 1)/(millis + 1)) as u32
        } else {
            MILLIS_BOUND as u32
        };
        let millis = millis as u32;

        // Best rational approximation `a/b` (b <= max_b) of `submillis / 1_000_000` ms.
        let (a, b) = Self::closest_bounded_fraction(max_b, submillis, 1_000_000);
        Self::from_numer_denom_ms(a + b*millis, b)
    }

    /// The numerator and denominator of the delay in milliseconds.
    ///
    /// This is guaranteed to be an exact conversion if the `Delay` was previously created with the
    /// `from_numer_denom_ms` constructor.
    pub fn numer_denom_ms(self) -> (u32, u32) {
        (*self.ratio.numer(), *self.ratio.denom())
    }

    // Internal constructor used by decoders that already hold a ratio.
    pub(crate) fn from_ratio(ratio: Ratio<u32>) -> Self {
        Delay { ratio }
    }

    // Exposes the raw ratio to encoders / `From<Delay> for Duration`.
    pub(crate) fn into_ratio(self) -> Ratio<u32> {
        self.ratio
    }

    /// Given some fraction, compute an approximation with denominator bounded.
    ///
    /// Note that `denom_bound` bounds the numerator and denominator of all intermediate
    /// approximations and the end result.
    fn closest_bounded_fraction(denom_bound: u32, nom: u32, denom: u32) -> (u32, u32) {
        use std::cmp::Ordering::{self, *};
        assert!(0 < denom);
        assert!(0 < denom_bound);
        assert!(nom < denom);

        // Avoid a few type troubles. All intermediate results are bounded by `denom_bound` which
        // is in turn bounded by u32::MAX. Representing with u64 allows multiplication of any two
        // values without fears of overflow.

        // Compare two fractions whose parts fit into a u32 (cross-multiplication).
        fn compare_fraction((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> Ordering {
            (an*bd).cmp(&(bn*ad))
        }

        // Computes the numerator of the absolute difference between two such fractions
        // (the shared denominator ad*bd is implicit).
        fn abs_diff_nom((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> u64 {
            let c0 = an*bd;
            let c1 = ad*bn;

            let d0 = c0.max(c1);
            let d1 = c0.min(c1);
            d0 - d1
        }

        let exact = (u64::from(nom), u64::from(denom));
        // The lower bound fraction, numerator and denominator.
        let mut lower = (0u64, 1u64);
        // The upper bound fraction, numerator and denominator.
        let mut upper = (1u64, 1u64);
        // The closest approximation for now: 0/1 or 1/1, whichever is nearer to `exact`.
        let mut guess = (u64::from(nom*2 > denom), 1u64);

        // loop invariant: lower/upper denominators <= denom_bound
        // iterates the Farey sequence, narrowing [lower, upper] around `exact`.
        loop {
            // Break if we are done.
            if compare_fraction(guess, exact) == Equal {
                break;
            }

            // Break if next Farey number is out-of-range.
            if u64::from(denom_bound) - lower.1 < upper.1 {
                break;
            }

            // Next Farey approximation n between a and b (the mediant).
            let next = (lower.0 + upper.0, lower.1 + upper.1);
            // if F < n then replace the upper bound, else replace lower.
            if compare_fraction(exact, next) == Less {
                upper = next;
            } else {
                lower = next;
            }

            // Now correct the closest guess.
            // In other words, if |c - f| > |n - f| then replace it with the new guess.
            // This favors the guess with smaller denominator on equality.

            // |g - f| = |g_diff_nom|/(gd*fd);
            let g_diff_nom = abs_diff_nom(guess, exact);
            // |n - f| = |n_diff_nom|/(nd*fd);
            let n_diff_nom = abs_diff_nom(next, exact);

            // The difference |n - f| is smaller than |g - f| if either the integral part of the
            // fraction |n_diff_nom|/nd is smaller than the one of |g_diff_nom|/gd or if they are
            // the same but the fractional part is larger.
            if match (n_diff_nom/next.1).cmp(&(g_diff_nom/guess.1)) {
                Less => true,
                Greater => false,
                // Note that the numerator for the fractional part is smaller than its denominator
                // which is smaller than u32 and can't overflow the multiplication with the other
                // denominator, that is we can compare these fractions by multiplication with the
                // respective other denominator.
                Equal => compare_fraction((n_diff_nom%next.1, next.1), (g_diff_nom%guess.1, guess.1)) == Less,
            } {
                guess = next;
            }
        }

        // Values are bounded by denom_bound <= u32::MAX, so the casts are lossless.
        (guess.0 as u32, guess.1 as u32)
    }
}
+
+impl From<Delay> for Duration {
+ fn from(delay: Delay) -> Self {
+ let ratio = delay.into_ratio();
+ let ms = ratio.to_integer();
+ let rest = ratio.numer() % ratio.denom();
+ let nanos = (u64::from(rest) * 1_000_000) / u64::from(*ratio.denom());
+ Duration::from_millis(ms.into()) + Duration::from_nanos(nanos)
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::{Delay, Duration, Ratio};

    #[test]
    fn simple() {
        let second = Delay::from_numer_denom_ms(1000, 1);
        assert_eq!(Duration::from(second), Duration::from_secs(1));
    }

    #[test]
    fn fps_30() {
        // 1000/30 ms is not representable exactly in integer milliseconds.
        let thirtieth = Delay::from_numer_denom_ms(1000, 30);
        let duration = Duration::from(thirtieth);
        assert_eq!(duration.as_secs(), 0);
        assert_eq!(duration.subsec_millis(), 33);
        assert_eq!(duration.subsec_nanos(), 33_333_333);
    }

    #[test]
    fn duration_outlier() {
        // Durations beyond u32::MAX milliseconds saturate to u32::MAX.
        let oob = Duration::from_secs(0xFFFF_FFFF);
        let delay = Delay::from_saturating_duration(oob);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));
    }

    #[test]
    fn duration_approx() {
        let oob = Duration::from_millis(0xFFFF_FFFF) + Duration::from_micros(1);
        let delay = Delay::from_saturating_duration(oob);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));

        let inbounds = Duration::from_millis(0xFFFF_FFFF) - Duration::from_micros(1);
        let delay = Delay::from_saturating_duration(inbounds);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));

        let fine = Duration::from_millis(0xFFFF_FFFF/1000) + Duration::from_micros(0xFFFF_FFFF%1000);
        let delay = Delay::from_saturating_duration(fine);
        // Funnily, 0xFFFF_FFFF is divisible by 5, thus we compare with a `Ratio`.
        assert_eq!(delay.into_ratio(), Ratio::new(0xFFFF_FFFF, 1000));
    }

    #[test]
    fn precise() {
        // The ratio has only 32 bits in the numerator, too imprecise to get more than 11 digits
        // correct. But it may be expressed as 1_000_000/3 instead.
        let exceed = Duration::from_secs(333) + Duration::from_nanos(333_333_333);
        let delay = Delay::from_saturating_duration(exceed);
        assert_eq!(Duration::from(delay), exceed);
    }


    #[test]
    fn small() {
        // Not quite a delay of `1 ms`.
        let delay = Delay::from_numer_denom_ms(1 << 16, (1 << 16) + 1);
        let duration = Duration::from(delay);
        assert_eq!(duration.as_millis(), 0);
        // Not precisely the original, but the integer part should still be 0 ms.
        let delay = Delay::from_saturating_duration(duration);
        assert_eq!(delay.into_ratio().to_integer(), 0);
    }
}
diff --git a/third_party/rust/image/src/bmp/decoder.rs b/third_party/rust/image/src/bmp/decoder.rs
new file mode 100644
index 0000000000..3b32d0810c
--- /dev/null
+++ b/third_party/rust/image/src/bmp/decoder.rs
@@ -0,0 +1,1367 @@
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Seek, SeekFrom};
+use std::iter::{repeat, Iterator, Rev};
+use std::marker::PhantomData;
+use std::slice::ChunksMut;
+use std::{cmp, mem};
+use std::cmp::Ordering;
+
+use byteorder::{LittleEndian, ReadBytesExt};
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, ImageDecoder, ImageDecoderExt, Progress};
+
// Sizes (in bytes) of the supported DIB header variants; used to identify which
// header follows the file header.
const BITMAPCOREHEADER_SIZE: u32 = 12;
const BITMAPINFOHEADER_SIZE: u32 = 40;
const BITMAPV2HEADER_SIZE: u32 = 52;
const BITMAPV3HEADER_SIZE: u32 = 56;
const BITMAPV4HEADER_SIZE: u32 = 108;
const BITMAPV5HEADER_SIZE: u32 = 124;

// Scaling tables mapping n-bit channel values onto the full 0-255 range.
static LOOKUP_TABLE_3_BIT_TO_8_BIT: [u8; 8] = [0, 36, 73, 109, 146, 182, 219, 255];
static LOOKUP_TABLE_4_BIT_TO_8_BIT: [u8; 16] = [
    0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255,
];
static LOOKUP_TABLE_5_BIT_TO_8_BIT: [u8; 32] = [
    0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140, 148, 156, 165, 173,
    181, 189, 197, 206, 214, 222, 230, 239, 247, 255,
];
static LOOKUP_TABLE_6_BIT_TO_8_BIT: [u8; 64] = [
    0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93,
    97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 150, 154, 158, 162, 166, 170,
    174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247,
    251, 255,
];

// Default channel layout for 16-bit pixels: 5 bits per color channel, no alpha.
static R5_G5_B5_COLOR_MASK: Bitfields = Bitfields {
    r: Bitfield { len: 5, shift: 10 },
    g: Bitfield { len: 5, shift: 5 },
    b: Bitfield { len: 5, shift: 0 },
    a: Bitfield { len: 0, shift: 0 },
};
// Default channel layout for 32-bit "888" pixels: 8 bits per color channel in the
// upper three bytes, no alpha.
const R8_G8_B8_COLOR_MASK: Bitfields = Bitfields {
    r: Bitfield { len: 8, shift: 24 },
    g: Bitfield { len: 8, shift: 16 },
    b: Bitfield { len: 8, shift: 8 },
    a: Bitfield { len: 0, shift: 0 },
};

// RLE control bytes: a 0 control byte escapes into one of the operations below.
const RLE_ESCAPE: u8 = 0;
const RLE_ESCAPE_EOL: u8 = 0;
const RLE_ESCAPE_EOF: u8 = 1;
const RLE_ESCAPE_DELTA: u8 = 2;

/// The maximum width/height the decoder will process.
const MAX_WIDTH_HEIGHT: i32 = 0xFFFF;

/// The pixel formats this decoder distinguishes after reading the headers.
#[derive(PartialEq, Copy, Clone)]
enum ImageType {
    Palette,
    RGB16,
    RGB24,
    RGB32,
    RGBA32,
    RLE8,
    RLE4,
    Bitfields16,
    Bitfields32,
}

/// Which DIB header variant the file carries (see the *_SIZE constants above).
#[derive(PartialEq)]
enum BMPHeaderType {
    Core,
    Info,
    V2,
    V3,
    V4,
    V5,
}

#[derive(PartialEq)]
enum FormatFullBytes {
    RGB24,
    RGB32,
    RGBA32,
    Format888,
}

// Wraps a row iterator in either direction, since BMP rows are usually stored
// bottom-up but may be top-down.
enum Chunker<'a> {
    FromTop(ChunksMut<'a, u8>),
    FromBottom(Rev<ChunksMut<'a, u8>>),
}

/// Iterates over the rows of the output buffer in logical (top-to-bottom) order.
pub(crate) struct RowIterator<'a> {
    chunks: Chunker<'a>,
}
+
+impl<'a> Iterator for RowIterator<'a> {
+ type Item = &'a mut [u8];
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<&'a mut [u8]> {
+ match self.chunks {
+ Chunker::FromTop(ref mut chunks) => chunks.next(),
+ Chunker::FromBottom(ref mut chunks) => chunks.next(),
+ }
+ }
+}
+
+/// Convenience function to check if the combination of width, length and number of
+/// channels would result in a buffer that would overflow.
+fn check_for_overflow(width: i32, length: i32, channels: usize) -> ImageResult<()> {
+ num_bytes(width, length, channels)
+ .map(|_| ())
+ .ok_or_else(|| {
+ ImageError::FormatError(
+ "Image would require a buffer that is too large to be represented!".to_owned(),
+ )
+ })
+}
+
/// Calculate how many bytes a buffer holding a decoded image with these properties would
/// require. Returns `None` if the buffer size would overflow or if one of the sizes are negative.
fn num_bytes(width: i32, length: i32, channels: usize) -> Option<usize> {
    if width <= 0 || length <= 0 {
        return None;
    }
    // Overflow-checked `channels * width * length`.
    channels
        .checked_mul(width as usize)
        .and_then(|row_bytes| row_bytes.checked_mul(length as usize))
}
+
/// The maximum starting number of pixels in the pixel buffer, might want to tweak this.
///
/// For images that specify large sizes, we don't allocate the full buffer right away
/// to somewhat mitigate trying to make the decoder run out of memory by sending a bogus image.
/// This is somewhat of a workaround as ideally we would check against the expected file size
/// but that's not possible through the Read and Seek traits alone and would require the decoder
/// to be provided with it from the caller.
///
/// NOTE: This is multiplied by 3 or 4 depending on the number of channels to get the maximum
/// starting buffer size. This amounts to about 134 mb for a buffer with 4 channels.
const MAX_INITIAL_PIXELS: usize = 8192 * 4096;
+
/// Sets all bytes in a mutable iterator over slices of bytes to 0.
fn blank_bytes<'a, T: Iterator<Item = &'a mut [u8]>>(iterator: T) {
    iterator.for_each(|chunk| {
        for byte in chunk.iter_mut() {
            *byte = 0;
        }
    });
}
+
/// Extend the buffer to `full_size`, copying existing data to the end of the buffer. Returns slice
/// pointing to the part of the buffer that is not yet filled in.
///
/// If blank is true, the bytes in the new buffer that are not filled in are set to 0.
/// This is used for rle-encoded images as the decoding process for these may not fill in all the
/// pixels.
///
/// As BMP images are usually stored with the rows upside-down we have to write the image data
/// starting at the end of the buffer and thus we have to make sure the existing data is put at the
/// end of the buffer.
#[inline(never)]
#[cold]
fn extend_buffer(buffer: &mut Vec<u8>, full_size: usize, blank: bool) -> &mut [u8] {
    let old_size = buffer.len();
    let extend = full_size - buffer.len();

    // Grow to the final size; the 0xFF filler is either overwritten by the moved
    // data, decoded over, or zeroed below when `blank` is set.
    buffer.extend(repeat(0xFF).take(extend));
    assert_eq!(buffer.len(), full_size);

    let ret = if extend >= old_size {
        // If the full buffer length is more or equal to twice the initial one, we can simply
        // copy the data in the lower part of the buffer to the end of it and input from there.
        let (new, old) = buffer.split_at_mut(extend);
        old.copy_from_slice(&new[..old_size]);
        new
    } else {
        // If the full size is less than twice the initial buffer, we have to
        // copy in two steps because the source and destination ranges overlap.
        let overlap = old_size - extend;

        // First we copy the data that fits into the bit we extended.
        let (lower, upper) = buffer.split_at_mut(old_size);
        upper.copy_from_slice(&lower[overlap..]);

        // Then we slide the data that hasn't been copied yet to the top of the buffer
        let (new, old) = lower.split_at_mut(extend);
        old[..overlap].copy_from_slice(&new[..overlap]);
        new
    };
    if blank {
        // RLE decoding may leave gaps, so hand back a zeroed canvas instead of 0xFF filler.
        for b in ret.iter_mut() {
            *b = 0;
        }
    };
    ret
}
+
+/// Call the provided function on each row of the provided buffer, returning Err if the provided
+/// function returns an error, extends the buffer if it's not large enough.
+fn with_rows<F>(
+ buffer: &mut Vec<u8>,
+ width: i32,
+ height: i32,
+ channels: usize,
+ top_down: bool,
+ mut func: F,
+) -> io::Result<()>
+where
+ F: FnMut(&mut [u8]) -> io::Result<()>,
+{
+ // An overflow should already have been checked for when this is called,
+ // though we check anyhow, as it somehow seems to increase performance slightly.
+ let row_width = channels.checked_mul(width as usize).unwrap();
+ let full_image_size = row_width.checked_mul(height as usize).unwrap();
+
+ if !top_down {
+ for row in buffer.chunks_mut(row_width).rev() {
+ func(row)?;
+ }
+
+ // If we need more space, extend the buffer.
+ if buffer.len() < full_image_size {
+ let new_space = extend_buffer(buffer, full_image_size, false);
+ for row in new_space.chunks_mut(row_width).rev() {
+ func(row)?;
+ }
+ }
+ } else {
+ for row in buffer.chunks_mut(row_width) {
+ func(row)?;
+ }
+ if buffer.len() < full_image_size {
+ // If the image is stored in top-down order, we can simply use the extend function
+ // from vec to extend the buffer..
+ let extend = full_image_size - buffer.len();
+ buffer.extend(repeat(0xFF).take(extend));
+ let len = buffer.len();
+ for row in buffer[len - row_width..].chunks_mut(row_width) {
+ func(row)?;
+ }
+ };
+ }
+ Ok(())
+}
+
/// Writes up to `n_pixels` palette-indexed pixels into 3-byte RGB chunks from `pixel_iter`.
///
/// Out-of-range palette indices (possible in malformed files whose palette has
/// fewer entries than the bit depth allows) are rendered as black instead of
/// panicking with an out-of-bounds index.
///
/// Returns `false` if `pixel_iter` ran out of pixels before the run was written.
fn set_8bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
    pixel_iter: &mut ChunksMut<u8>,
    palette: &[(u8, u8, u8)],
    indices: T,
    n_pixels: usize,
) -> bool {
    for idx in indices.take(n_pixels) {
        if let Some(pixel) = pixel_iter.next() {
            // Defend against indices beyond the palette actually read from the file.
            let (r, g, b) = palette.get(*idx as usize).copied().unwrap_or((0, 0, 0));
            pixel[0] = r;
            pixel[1] = g;
            pixel[2] = b;
        } else {
            return false;
        }
    }
    true
}
+
/// Writes up to `n_pixels` pixels from 4-bit palette indices (two per input byte,
/// high nibble first) into 3-byte RGB chunks from `pixel_iter`.
///
/// Out-of-range palette indices render as black instead of panicking.
/// Returns `false` if `pixel_iter` ran out of pixels before the run was written.
fn set_4bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
    pixel_iter: &mut ChunksMut<u8>,
    palette: &[(u8, u8, u8)],
    indices: T,
    mut n_pixels: usize,
) -> bool {
    for idx in indices {
        macro_rules! set_pixel {
            ($i:expr) => {
                if n_pixels == 0 {
                    break;
                }
                if let Some(pixel) = pixel_iter.next() {
                    // Defend against indices beyond the palette actually read from the file.
                    let (r, g, b) = palette.get($i as usize).copied().unwrap_or((0, 0, 0));
                    pixel[0] = r;
                    pixel[1] = g;
                    pixel[2] = b;
                } else {
                    return false;
                }
                n_pixels -= 1;
            };
        }
        set_pixel!(idx >> 4);
        set_pixel!(idx & 0xf);
    }
    true
}
+
/// Writes up to `n_pixels` pixels from 2-bit palette indices (four per input byte,
/// most significant pair first) into 3-byte RGB chunks from `pixel_iter`.
///
/// Out-of-range palette indices render as black instead of panicking.
/// Returns `false` if `pixel_iter` ran out of pixels before the run was written.
#[rustfmt::skip]
fn set_2bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
    pixel_iter: &mut ChunksMut<u8>,
    palette: &[(u8, u8, u8)],
    indices: T,
    mut n_pixels: usize,
) -> bool {
    for idx in indices {
        macro_rules! set_pixel {
            ($i:expr) => {
                if n_pixels == 0 {
                    break;
                }
                if let Some(pixel) = pixel_iter.next() {
                    // Defend against indices beyond the palette actually read from the file.
                    let (r, g, b) = palette.get($i as usize).copied().unwrap_or((0, 0, 0));
                    pixel[0] = r;
                    pixel[1] = g;
                    pixel[2] = b;
                } else {
                    return false;
                }
                n_pixels -= 1;
            };
        }
        set_pixel!((idx >> 6) & 0x3u8);
        set_pixel!((idx >> 4) & 0x3u8);
        set_pixel!((idx >> 2) & 0x3u8);
        set_pixel!( idx       & 0x3u8);
    }
    true
}
+
/// Writes pixels from 1-bit palette indices (eight per input byte, most significant
/// bit first) into 3-byte RGB chunks from `pixel_iter`, stopping when the output
/// runs out of pixels.
///
/// A set bit with a single-entry palette renders as black instead of panicking.
fn set_1bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
    pixel_iter: &mut ChunksMut<u8>,
    palette: &[(u8, u8, u8)],
    indices: T,
) {
    for idx in indices {
        // Walk the bits of each byte from the most significant downwards.
        let mut bit = 0x80;
        loop {
            if let Some(pixel) = pixel_iter.next() {
                // Defend against a palette with fewer than two entries.
                let (r, g, b) = palette
                    .get(((idx & bit) != 0) as usize)
                    .copied()
                    .unwrap_or((0, 0, 0));
                pixel[0] = r;
                pixel[1] = g;
                pixel[2] = b;
            } else {
                return;
            }

            bit >>= 1;
            if bit == 0 {
                break;
            }
        }
    }
}
+
/// A single channel of a bitfields pixel format, described by the position
/// (`shift`) and width (`len`, in bits) of its mask within a pixel value.
#[derive(PartialEq, Eq)]
struct Bitfield {
    shift: u32,
    len: u32,
}

impl Bitfield {
    /// Derives shift/len from a raw channel mask.
    ///
    /// Rejects masks whose set bits are not contiguous and masks extending past
    /// `max_len` bits. Fields wider than 8 bits keep only their top 8 bits.
    /// A zero mask yields the empty field (`len == 0`), meaning "channel absent".
    fn from_mask(mask: u32, max_len: u32) -> ImageResult<Bitfield> {
        if mask == 0 {
            return Ok(Bitfield { shift: 0, len: 0 });
        }
        // Position of the lowest set bit, then the length of the contiguous run above it.
        let mut shift = mask.trailing_zeros();
        let mut len = (!(mask >> shift)).trailing_zeros();
        // If the run length differs from the popcount, the mask has holes.
        if len != mask.count_ones() {
            return Err(ImageError::FormatError(
                "Non-contiguous bitfield mask".to_string(),
            ));
        }
        if len + shift > max_len {
            return Err(ImageError::FormatError("Invalid bitfield mask".to_string()));
        }
        // Keep only the 8 most significant bits of very wide fields.
        if len > 8 {
            shift += len - 8;
            len = 8;
        }
        Ok(Bitfield { shift, len })
    }

    /// Extracts this channel from a pixel value and scales it to the full 0-255 range.
    fn read(&self, data: u32) -> u8 {
        let data = data >> self.shift;
        match self.len {
            // 1- and 2-bit values scale exactly by multiplication; 3-6 bit values use
            // the lookup tables; 7-bit values are approximated by bit replication.
            1 => ((data & 0b1) * 0xff) as u8,
            2 => ((data & 0b11) * 0x55) as u8,
            3 => LOOKUP_TABLE_3_BIT_TO_8_BIT[(data & 0b00_0111) as usize],
            4 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data & 0b00_1111) as usize],
            5 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data & 0b01_1111) as usize],
            6 => LOOKUP_TABLE_6_BIT_TO_8_BIT[(data & 0b11_1111) as usize],
            7 => ((data & 0x7f) << 1 | (data & 0x7f) >> 6) as u8,
            8 => (data & 0xff) as u8,
            // NOTE(review): a len-0 field (zero mask) would panic here; callers appear
            // to only read channels validated as present — confirm.
            _ => panic!(),
        }
    }
}

/// The per-channel bitfields of a BI_BITFIELDS pixel format.
#[derive(PartialEq, Eq)]
struct Bitfields {
    r: Bitfield,
    g: Bitfield,
    b: Bitfield,
    a: Bitfield,
}
+
+impl Bitfields {
+ fn from_mask(
+ r_mask: u32,
+ g_mask: u32,
+ b_mask: u32,
+ a_mask: u32,
+ max_len: u32,
+ ) -> ImageResult<Bitfields> {
+ let bitfields = Bitfields {
+ r: Bitfield::from_mask(r_mask, max_len)?,
+ g: Bitfield::from_mask(g_mask, max_len)?,
+ b: Bitfield::from_mask(b_mask, max_len)?,
+ a: Bitfield::from_mask(a_mask, max_len)?,
+ };
+ if bitfields.r.len == 0 || bitfields.g.len == 0 || bitfields.b.len == 0 {
+ return Err(ImageError::FormatError("Missing bitfield mask".to_string()));
+ }
+ Ok(bitfields)
+ }
+}
+
/// A bmp decoder
pub struct BmpDecoder<R> {
    reader: R,

    /// Which DIB header variant was found in the file.
    bmp_header_type: BMPHeaderType,

    /// Image width in pixels (validated non-negative).
    width: i32,
    /// Image height in pixels (stored positive; see `top_down`).
    height: i32,
    /// Offset of the pixel data from the start of the stream.
    data_offset: u64,
    /// True when rows are stored top-to-bottom (negative height in the header).
    top_down: bool,
    /// True for headerless data, e.g. BMPs embedded in ICO files.
    no_file_header: bool,
    /// True when the output should carry an alpha channel.
    add_alpha_channel: bool,
    /// Guards against reading the headers twice.
    has_loaded_metadata: bool,
    image_type: ImageType,

    /// Bits per pixel from the header.
    bit_count: u16,
    /// Number of palette entries used (0 means the format default).
    colors_used: u32,
    palette: Option<Vec<(u8, u8, u8)>>,
    bitfields: Option<Bitfields>,
}

/// One decoded instruction of an RLE4/RLE8 compressed stream.
enum RLEInsn {
    EndOfFile,
    EndOfRow,
    /// Move the output cursor right/down by the given amounts.
    Delta(u8, u8),
    /// A literal run: pixel count and the raw (padded) index bytes.
    Absolute(u8, Vec<u8>),
    /// Repeat the palette index (or index pair for RLE4) `count` times.
    PixelRun(u8, u8),
}

/// Lazily decodes RLE instructions from the underlying reader.
struct RLEInsnIterator<'a, R: 'a + Read> {
    r: &'a mut R,
    image_type: ImageType,
}
+
+impl<'a, R: Read> Iterator for RLEInsnIterator<'a, R> {
+ type Item = RLEInsn;
+
+ fn next(&mut self) -> Option<RLEInsn> {
+ let control_byte = match self.r.read_u8() {
+ Ok(b) => b,
+ Err(_) => return None,
+ };
+
+ match control_byte {
+ RLE_ESCAPE => {
+ let op = match self.r.read_u8() {
+ Ok(b) => b,
+ Err(_) => return None,
+ };
+
+ match op {
+ RLE_ESCAPE_EOL => Some(RLEInsn::EndOfRow),
+ RLE_ESCAPE_EOF => Some(RLEInsn::EndOfFile),
+ RLE_ESCAPE_DELTA => {
+ let xdelta = match self.r.read_u8() {
+ Ok(n) => n,
+ Err(_) => return None,
+ };
+ let ydelta = match self.r.read_u8() {
+ Ok(n) => n,
+ Err(_) => return None,
+ };
+ Some(RLEInsn::Delta(xdelta, ydelta))
+ }
+ _ => {
+ let mut length = op as usize;
+ if self.image_type == ImageType::RLE4 {
+ length = (length + 1) / 2;
+ }
+ length += length & 1;
+ let mut buffer = vec![0; length];
+ match self.r.read_exact(&mut buffer) {
+ Ok(()) => Some(RLEInsn::Absolute(op, buffer)),
+ Err(_) => None,
+ }
+ }
+ }
+ }
+ _ => match self.r.read_u8() {
+ Ok(palette_index) => Some(RLEInsn::PixelRun(control_byte, palette_index)),
+ Err(_) => None,
+ },
+ }
+ }
+}
+
+impl<R: Read + Seek> BmpDecoder<R> {
    /// Create a new decoder that decodes from the stream `r`
    pub fn new(reader: R) -> ImageResult<BmpDecoder<R>> {
        let mut decoder = BmpDecoder {
            reader,

            bmp_header_type: BMPHeaderType::Info,

            width: 0,
            height: 0,
            data_offset: 0,
            top_down: false,
            no_file_header: false,
            add_alpha_channel: false,
            has_loaded_metadata: false,
            image_type: ImageType::Palette,

            bit_count: 0,
            colors_used: 0,
            palette: None,
            bitfields: None,
        };

        // Headers are read eagerly so dimensions/color type are available immediately.
        decoder.read_metadata()?;
        Ok(decoder)
    }

    /// Create a decoder for the headerless BMP data embedded in an ICO file.
    /// Same defaults as `new`, but parses metadata with ICO-specific quirks.
    #[cfg(feature = "ico")]
    pub(crate) fn new_with_ico_format(reader: R) -> ImageResult<BmpDecoder<R>> {
        let mut decoder = BmpDecoder {
            reader,

            bmp_header_type: BMPHeaderType::Info,

            width: 0,
            height: 0,
            data_offset: 0,
            top_down: false,
            no_file_header: false,
            add_alpha_channel: false,
            has_loaded_metadata: false,
            image_type: ImageType::Palette,

            bit_count: 0,
            colors_used: 0,
            palette: None,
            bitfields: None,
        };

        decoder.read_metadata_in_ico_format()?;
        Ok(decoder)
    }

    /// Grants the ICO decoder access to the underlying reader (e.g. for the AND mask).
    #[cfg(feature = "ico")]
    pub(crate) fn reader(&mut self) -> &mut R {
        &mut self.reader
    }
+
    /// Reads and validates the BITMAPFILEHEADER ("BM" signature) and records the
    /// pixel-data offset. Skipped entirely for headerless (ICO-embedded) data.
    fn read_file_header(&mut self) -> ImageResult<()> {
        if self.no_file_header {
            return Ok(());
        }
        let mut signature = [0; 2];
        self.reader.read_exact(&mut signature)?;

        if signature != b"BM"[..] {
            return Err(ImageError::FormatError(
                "BMP signature not found".to_string(),
            ));
        }

        // The next 8 bytes are the file size (4 bytes) followed by 4 reserved bytes.
        // We're not interested in these values.
        self.reader.read_u32::<LittleEndian>()?;
        self.reader.read_u32::<LittleEndian>()?;

        self.data_offset = u64::from(self.reader.read_u32::<LittleEndian>()?);

        Ok(())
    }
+
    /// Read BITMAPCOREHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183372(v=vs.85).aspx
    ///
    /// returns Err if any of the values are invalid.
    fn read_bitmap_core_header(&mut self) -> ImageResult<()> {
        // As height/width values in BMP files with core headers are only 16 bits long,
        // they won't be larger than `MAX_WIDTH_HEIGHT`.
        self.width = i32::from(self.reader.read_u16::<LittleEndian>()?);
        self.height = i32::from(self.reader.read_u16::<LittleEndian>()?);

        check_for_overflow(self.width, self.height, self.num_channels())?;

        // Number of planes (format specifies that this should be 1).
        if self.reader.read_u16::<LittleEndian>()? != 1 {
            return Err(ImageError::FormatError("More than one plane".to_string()));
        }

        // Core headers support only uncompressed palette or 24-bit RGB data.
        self.bit_count = self.reader.read_u16::<LittleEndian>()?;
        self.image_type = match self.bit_count {
            1 | 4 | 8 => ImageType::Palette,
            24 => ImageType::RGB24,
            _ => return Err(ImageError::FormatError("Invalid bit count".to_string())),
        };

        Ok(())
    }
+
    /// Read BITMAPINFOHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183376(v=vs.85).aspx
    /// or BITMAPV{2|3|4|5}HEADER.
    ///
    /// returns Err if any of the values are invalid.
    fn read_bitmap_info_header(&mut self) -> ImageResult<()> {
        self.width = self.reader.read_i32::<LittleEndian>()?;
        self.height = self.reader.read_i32::<LittleEndian>()?;

        // Width can not be negative
        if self.width < 0 {
            return Err(ImageError::FormatError("Negative width".to_string()));
        } else if self.width > MAX_WIDTH_HEIGHT || self.height > MAX_WIDTH_HEIGHT {
            // Limit very large image sizes to avoid OOM issues. Images with these sizes are
            // unlikely to be valid anyhow.
            return Err(ImageError::FormatError("Image too large".to_string()));
        }

        // i32::MIN can't be negated below without overflow, so reject it explicitly.
        if self.height == i32::min_value() {
            return Err(ImageError::FormatError("Invalid height".to_string()));
        }

        // A negative height indicates a top-down DIB.
        if self.height < 0 {
            self.height *= -1;
            self.top_down = true;
        }

        check_for_overflow(self.width, self.height, self.num_channels())?;

        // Number of planes (format specifies that this should be 1).
        if self.reader.read_u16::<LittleEndian>()? != 1 {
            return Err(ImageError::FormatError("More than one plane".to_string()));
        }

        self.bit_count = self.reader.read_u16::<LittleEndian>()?;
        // Compression field: 0 = BI_RGB, 1 = BI_RLE8, 2 = BI_RLE4, 3 = BI_BITFIELDS.
        let image_type_u32 = self.reader.read_u32::<LittleEndian>()?;

        // Top-down dibs can not be compressed.
        if self.top_down && image_type_u32 != 0 && image_type_u32 != 3 {
            return Err(ImageError::FormatError(
                "Invalid image type for top-down image.".to_string(),
            ));
        }
        self.image_type = match image_type_u32 {
            0 => match self.bit_count {
                1 | 2 | 4 | 8 => ImageType::Palette,
                16 => ImageType::RGB16,
                24 => ImageType::RGB24,
                32 if self.add_alpha_channel => ImageType::RGBA32,
                32 => ImageType::RGB32,
                _ => {
                    return Err(ImageError::FormatError(format!(
                        "Invalid RGB bit count {}",
                        self.bit_count
                    )))
                }
            },
            1 => match self.bit_count {
                8 => ImageType::RLE8,
                _ => {
                    return Err(ImageError::FormatError(
                        "Invalid RLE8 bit count".to_string(),
                    ))
                }
            },
            2 => match self.bit_count {
                4 => ImageType::RLE4,
                _ => {
                    return Err(ImageError::FormatError(
                        "Invalid RLE4 bit count".to_string(),
                    ))
                }
            },
            3 => match self.bit_count {
                16 => ImageType::Bitfields16,
                32 => ImageType::Bitfields32,
                _ => {
                    return Err(ImageError::FormatError(
                        "Invalid bitfields bit count".to_string(),
                    ))
                }
            },
            // PNG and JPEG not implemented yet.
            _ => {
                return Err(ImageError::UnsupportedError(
                    "Unsupported image type".to_string(),
                ))
            }
        };

        // The next 12 bytes are the data array size in bytes followed by the
        // horizontal and vertical printing resolutions.
        // We will calculate the pixel array size using width & height of image;
        // we're not interested in the horizontal or vertical printing resolutions.
        self.reader.read_u32::<LittleEndian>()?;
        self.reader.read_u32::<LittleEndian>()?;
        self.reader.read_u32::<LittleEndian>()?;

        self.colors_used = self.reader.read_u32::<LittleEndian>()?;

        // The next 4 bytes represent number of "important" colors
        // We're not interested in this value, so we'll skip it
        self.reader.read_u32::<LittleEndian>()?;

        Ok(())
    }
+
+ fn read_bitmasks(&mut self) -> ImageResult<()> {
+ let r_mask = self.reader.read_u32::<LittleEndian>()?;
+ let g_mask = self.reader.read_u32::<LittleEndian>()?;
+ let b_mask = self.reader.read_u32::<LittleEndian>()?;
+
+ let a_mask = match self.bmp_header_type {
+ BMPHeaderType::V3 | BMPHeaderType::V4 | BMPHeaderType::V5 => {
+ self.reader.read_u32::<LittleEndian>()?
+ }
+ _ => 0,
+ };
+
+ self.bitfields = match self.image_type {
+ ImageType::Bitfields16 => {
+ Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 16)?)
+ }
+ ImageType::Bitfields32 => {
+ Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 32)?)
+ }
+ _ => None,
+ };
+
+ if self.bitfields.is_some() && a_mask != 0 {
+ self.add_alpha_channel = true;
+ }
+
+ Ok(())
+ }
+
+ fn read_metadata(&mut self) -> ImageResult<()> {
+ if !self.has_loaded_metadata {
+ self.read_file_header()?;
+ let bmp_header_offset = self.reader.seek(SeekFrom::Current(0))?;
+ let bmp_header_size = self.reader.read_u32::<LittleEndian>()?;
+ let bmp_header_end = bmp_header_offset + u64::from(bmp_header_size);
+
+ self.bmp_header_type = match bmp_header_size {
+ BITMAPCOREHEADER_SIZE => BMPHeaderType::Core,
+ BITMAPINFOHEADER_SIZE => BMPHeaderType::Info,
+ BITMAPV2HEADER_SIZE => BMPHeaderType::V2,
+ BITMAPV3HEADER_SIZE => BMPHeaderType::V3,
+ BITMAPV4HEADER_SIZE => BMPHeaderType::V4,
+ BITMAPV5HEADER_SIZE => BMPHeaderType::V5,
+ _ => {
+ return Err(ImageError::UnsupportedError(
+ "Unsupported Bitmap Header".to_string(),
+ ))
+ }
+ };
+
+ match self.bmp_header_type {
+ BMPHeaderType::Core => {
+ self.read_bitmap_core_header()?;
+ }
+ BMPHeaderType::Info
+ | BMPHeaderType::V2
+ | BMPHeaderType::V3
+ | BMPHeaderType::V4
+ | BMPHeaderType::V5 => {
+ self.read_bitmap_info_header()?;
+ }
+ };
+
+ match self.image_type {
+ ImageType::Bitfields16 | ImageType::Bitfields32 => self.read_bitmasks()?,
+ _ => {}
+ };
+
+ self.reader.seek(SeekFrom::Start(bmp_header_end))?;
+
+ match self.image_type {
+ ImageType::Palette | ImageType::RLE4 | ImageType::RLE8 => self.read_palette()?,
+ _ => {}
+ };
+
+ if self.no_file_header {
+ // Use the offset of the end of metadata instead of reading a BMP file header.
+ self.data_offset = self.reader.seek(SeekFrom::Current(0))?;
+ }
+
+ self.has_loaded_metadata = true;
+ }
+ Ok(())
+ }
+
+ #[cfg(feature = "ico")]
+ #[doc(hidden)]
+ pub fn read_metadata_in_ico_format(&mut self) -> ImageResult<()> {
+ self.no_file_header = true;
+ self.add_alpha_channel = true;
+ self.read_metadata()?;
+
+ // The height field in an ICO file is doubled to account for the AND mask
+ // (whether or not an AND mask is actually present).
+ self.height /= 2;
+ Ok(())
+ }
+
+ fn get_palette_size(&mut self) -> ImageResult<usize> {
+ match self.colors_used {
+ 0 => Ok(1 << self.bit_count),
+ _ => {
+ if self.colors_used > 1 << self.bit_count {
+ return Err(ImageError::FormatError(format!(
+ "Palette size {} exceeds maximum size for BMP with bit count of {}",
+ self.colors_used, self.bit_count
+ )));
+ }
+ Ok(self.colors_used as usize)
+ }
+ }
+ }
+
+ fn bytes_per_color(&self) -> usize {
+ match self.bmp_header_type {
+ BMPHeaderType::Core => 3,
+ _ => 4,
+ }
+ }
+
+ fn read_palette(&mut self) -> ImageResult<()> {
+ const MAX_PALETTE_SIZE: usize = 256; // Palette indices are u8.
+
+ let bytes_per_color = self.bytes_per_color();
+ let palette_size = self.get_palette_size()?;
+ let max_length = MAX_PALETTE_SIZE * bytes_per_color;
+
+ let length = palette_size * bytes_per_color;
+ let mut buf = Vec::with_capacity(max_length);
+
+ // Resize and read the palette entries to the buffer.
+ // We limit the buffer to at most 256 colours to avoid any oom issues as
+ // 8-bit images can't reference more than 256 indexes anyhow.
+ buf.resize(cmp::min(length, max_length), 0);
+ self.reader.by_ref().read_exact(&mut buf)?;
+
+ // Allocate 256 entries even if palette_size is smaller, to prevent corrupt files from
+ // causing an out-of-bounds array access.
+ match length.cmp(&max_length) {
+ Ordering::Greater => {
+ self.reader
+ .seek(SeekFrom::Current((length - max_length) as i64))?;
+ }
+ Ordering::Less => buf.resize(max_length, 0),
+ Ordering::Equal => (),
+ }
+
+ let p: Vec<(u8, u8, u8)> = (0..MAX_PALETTE_SIZE)
+ .map(|i| {
+ let b = buf[bytes_per_color * i];
+ let g = buf[bytes_per_color * i + 1];
+ let r = buf[bytes_per_color * i + 2];
+ (r, g, b)
+ })
+ .collect();
+
+ self.palette = Some(p);
+
+ Ok(())
+ }
+
+ fn num_channels(&self) -> usize {
+ if self.add_alpha_channel {
+ 4
+ } else {
+ 3
+ }
+ }
+
+ /// Create a buffer to hold the decoded pixel data.
+ ///
+ /// The buffer will be large enough to hold the whole image if it requires less than
+ /// `MAX_INITIAL_PIXELS` times the number of channels bytes (adjusted to line up with the
+ /// width of a row).
+ fn create_pixel_data(&self) -> Vec<u8> {
+ let row_width = self.num_channels() * self.width as usize;
+ let max_pixels = self.num_channels() * MAX_INITIAL_PIXELS;
+ // Make sure the maximum size is whole number of rows.
+ let max_starting_size = max_pixels + row_width - (max_pixels % row_width);
+ // The buffer has its bytes initially set to 0xFF as the ICO decoder relies on it.
+ vec![0xFF; cmp::min(row_width * self.height as usize, max_starting_size)]
+ }
+
+ fn rows<'a>(&self, pixel_data: &'a mut [u8]) -> RowIterator<'a> {
+ let stride = self.width as usize * self.num_channels();
+ if self.top_down {
+ RowIterator {
+ chunks: Chunker::FromTop(pixel_data.chunks_mut(stride)),
+ }
+ } else {
+ RowIterator {
+ chunks: Chunker::FromBottom(pixel_data.chunks_mut(stride).rev()),
+ }
+ }
+ }
+
+ fn read_palettized_pixel_data(&mut self) -> ImageResult<Vec<u8>> {
+ let mut pixel_data = self.create_pixel_data();
+ let num_channels = self.num_channels();
+ let row_byte_length = ((i32::from(self.bit_count) * self.width + 31) / 32 * 4) as usize;
+ let mut indices = vec![0; row_byte_length];
+ let palette = self.palette.as_ref().unwrap();
+ let bit_count = self.bit_count;
+ let reader = &mut self.reader;
+ let width = self.width as usize;
+
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ with_rows(
+ &mut pixel_data,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ reader.read_exact(&mut indices)?;
+ let mut pixel_iter = row.chunks_mut(num_channels);
+ match bit_count {
+ 1 => {
+ set_1bit_pixel_run(&mut pixel_iter, palette, indices.iter());
+ }
+ 2 => {
+ set_2bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ 4 => {
+ set_4bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ 8 => {
+ set_8bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ _ => panic!(),
+ };
+ Ok(())
+ },
+ )?;
+
+ Ok(pixel_data)
+ }
+
+ fn read_16_bit_pixel_data(&mut self, bitfields: Option<&Bitfields>) -> ImageResult<Vec<u8>> {
+ let mut pixel_data = self.create_pixel_data();
+ let num_channels = self.num_channels();
+ let row_padding_len = self.width as usize % 2 * 2;
+ let row_padding = &mut [0; 2][..row_padding_len];
+ let bitfields = match bitfields {
+ Some(b) => b,
+ None => self.bitfields.as_ref().unwrap(),
+ };
+ let reader = &mut self.reader;
+
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ with_rows(
+ &mut pixel_data,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ let data = u32::from(reader.read_u16::<LittleEndian>()?);
+
+ pixel[0] = bitfields.r.read(data);
+ pixel[1] = bitfields.g.read(data);
+ pixel[2] = bitfields.b.read(data);
+ if num_channels == 4 {
+ pixel[3] = bitfields.a.read(data);
+ }
+ }
+ reader.read_exact(row_padding)
+ },
+ )?;
+
+ Ok(pixel_data)
+ }
+
+ /// Read image data from a reader in 32-bit formats that use bitfields.
+ fn read_32_bit_pixel_data(&mut self) -> ImageResult<Vec<u8>> {
+ let mut pixel_data = self.create_pixel_data();
+ let num_channels = self.num_channels();
+
+ let bitfields = self.bitfields.as_ref().unwrap();
+
+ let reader = &mut self.reader;
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ with_rows(
+ &mut pixel_data,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ let data = reader.read_u32::<LittleEndian>()?;
+
+ pixel[0] = bitfields.r.read(data);
+ pixel[1] = bitfields.g.read(data);
+ pixel[2] = bitfields.b.read(data);
+ if num_channels == 4 {
+ pixel[3] = bitfields.a.read(data);
+ }
+ }
+ Ok(())
+ },
+ )?;
+
+ Ok(pixel_data)
+ }
+
+ /// Read image data from a reader where the colours are stored as 8-bit values (24 or 32-bit).
+ fn read_full_byte_pixel_data(&mut self, format: &FormatFullBytes) -> ImageResult<Vec<u8>> {
+ let mut pixel_data = self.create_pixel_data();
+ let num_channels = self.num_channels();
+ let row_padding_len = match *format {
+ FormatFullBytes::RGB24 => (4 - (self.width as usize * 3) % 4) % 4,
+ _ => 0,
+ };
+ let row_padding = &mut [0; 4][..row_padding_len];
+
+ self.reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ let reader = &mut self.reader;
+
+ with_rows(
+ &mut pixel_data,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ if *format == FormatFullBytes::Format888 {
+ reader.read_u8()?;
+ }
+
+ // Read the colour values (b, g, r).
+ // Reading 3 bytes and reversing them is significantly faster than reading one
+ // at a time.
+ reader.read_exact(&mut pixel[0..3])?;
+ pixel[0..3].reverse();
+
+ if *format == FormatFullBytes::RGB32 {
+ reader.read_u8()?;
+ }
+
+ // Read the alpha channel if present
+ if *format == FormatFullBytes::RGBA32 {
+ reader.read_exact(&mut pixel[3..4])?;
+ }
+ }
+ reader.read_exact(row_padding)
+ },
+ )?;
+
+ Ok(pixel_data)
+ }
+
+ fn read_rle_data(&mut self, image_type: ImageType) -> ImageResult<Vec<u8>> {
+ // Seek to the start of the actual image data.
+ self.reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ let full_image_size =
+ num_bytes(self.width, self.height, self.num_channels()).ok_or_else(|| {
+ ImageError::FormatError("Image buffer would be too large!".to_owned())
+ })?;
+ let mut pixel_data = self.create_pixel_data();
+ let (skip_pixels, skip_rows, eof_hit) =
+ self.read_rle_data_step(&mut pixel_data, image_type, 0, 0)?;
+ // Extend the buffer if there is still data left.
+ // If eof_hit is true, it means that we hit an end-of-file marker in the last step and
+ // we won't extend the buffer further to avoid small files with a large specified size causing memory issues.
+ // This is only a rudimentary check, a file could still create a large buffer, but the
+ // file would now have to at least have some data in it.
+ if pixel_data.len() < full_image_size && !eof_hit {
+ let new = extend_buffer(&mut pixel_data, full_image_size, true);
+ self.read_rle_data_step(new, image_type, skip_pixels, skip_rows)?;
+ }
+ Ok(pixel_data)
+ }
+
+ fn read_rle_data_step(
+ &mut self,
+ mut pixel_data: &mut [u8],
+ image_type: ImageType,
+ skip_pixels: u8,
+ skip_rows: u8,
+ ) -> ImageResult<(u8, u8, bool)> {
+ let num_channels = self.num_channels();
+
+ let mut delta_rows_left = 0;
+ let mut delta_pixels_left = skip_pixels;
+ let mut eof_hit = false;
+
+ // Scope the borrowing of pixel_data by the row iterator.
+ {
+ // Handling deltas in the RLE scheme means that we need to manually
+ // iterate through rows and pixels. Even if we didn't have to handle
+ // deltas, we have to ensure that a single runlength doesn't straddle
+ // two rows.
+ let mut row_iter = self.rows(&mut pixel_data);
+ // If we have previously hit a delta value,
+ // blank the rows that are to be skipped.
+ blank_bytes((&mut row_iter).take(skip_rows.into()));
+ let mut insns_iter = RLEInsnIterator {
+ r: &mut self.reader,
+ image_type,
+ };
+ let p = self.palette.as_ref().unwrap();
+
+ 'row_loop: while let Some(row) = row_iter.next() {
+ let mut pixel_iter = row.chunks_mut(num_channels);
+ // Blank delta skipped pixels if any.
+ blank_bytes((&mut pixel_iter).take(delta_pixels_left.into()));
+ delta_pixels_left = 0;
+
+ 'rle_loop: loop {
+ if let Some(insn) = insns_iter.next() {
+ match insn {
+ RLEInsn::EndOfFile => {
+ blank_bytes(pixel_iter);
+ blank_bytes(row_iter);
+ eof_hit = true;
+ break 'row_loop;
+ }
+ RLEInsn::EndOfRow => {
+ blank_bytes(pixel_iter);
+ break 'rle_loop;
+ }
+ RLEInsn::Delta(x_delta, y_delta) => {
+ if y_delta > 0 {
+ for n in 1..y_delta {
+ if let Some(row) = row_iter.next() {
+ // The msdn site on bitmap compression doesn't specify
+ // what happens to the values skipped when encountering
+ // a delta code, however IE and the windows image
+ // preview seems to replace them with black pixels,
+ // so we stick to that.
+ for b in row {
+ *b = 0;
+ }
+ } else {
+ delta_pixels_left = x_delta;
+ // We've reached the end of the buffer.
+ delta_rows_left = y_delta - n;
+ break 'row_loop;
+ }
+ }
+ }
+
+ for _ in 0..x_delta {
+ if let Some(pixel) = pixel_iter.next() {
+ for b in pixel {
+ *b = 0;
+ }
+ } else {
+ // We can't go any further in this row.
+ break;
+ }
+ }
+ }
+ RLEInsn::Absolute(length, indices) => {
+ // Absolute mode cannot span rows, so if we run
+ // out of pixels to process, we should stop
+ // processing the image.
+ match image_type {
+ ImageType::RLE8 => {
+ if !set_8bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ indices.iter(),
+ length as usize,
+ ) {
+ break 'row_loop;
+ }
+ }
+ ImageType::RLE4 => {
+ if !set_4bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ indices.iter(),
+ length as usize,
+ ) {
+ break 'row_loop;
+ }
+ }
+ _ => panic!(),
+ }
+ }
+ RLEInsn::PixelRun(n_pixels, palette_index) => {
+ // A pixel run isn't allowed to span rows, but we
+ // simply continue on to the next row if we run
+ // out of pixels to set.
+ match image_type {
+ ImageType::RLE8 => {
+ if !set_8bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ repeat(&palette_index),
+ n_pixels as usize,
+ ) {
+ break 'rle_loop;
+ }
+ }
+ ImageType::RLE4 => {
+ if !set_4bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ repeat(&palette_index),
+ n_pixels as usize,
+ ) {
+ break 'rle_loop;
+ }
+ }
+ _ => panic!(),
+ }
+ }
+ }
+ } else {
+ // We ran out of data while we still had rows to fill in.
+ return Err(ImageError::FormatError("Not enough RLE data".to_string()));
+ }
+ }
+ }
+ }
+ Ok((delta_pixels_left, delta_rows_left, eof_hit))
+ }
+
+ /// Read the actual data of the image. This function is deliberately not public because it
+ /// cannot be called multiple times without seeking back the underlying reader in between.
+ pub(crate) fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
+ let data = match self.image_type {
+ ImageType::Palette => self.read_palettized_pixel_data(),
+ ImageType::RGB16 => self.read_16_bit_pixel_data(Some(&R5_G5_B5_COLOR_MASK)),
+ ImageType::RGB24 => self.read_full_byte_pixel_data(&FormatFullBytes::RGB24),
+ ImageType::RGB32 => self.read_full_byte_pixel_data(&FormatFullBytes::RGB32),
+ ImageType::RGBA32 => self.read_full_byte_pixel_data(&FormatFullBytes::RGBA32),
+ ImageType::RLE8 => self.read_rle_data(ImageType::RLE8),
+ ImageType::RLE4 => self.read_rle_data(ImageType::RLE4),
+ ImageType::Bitfields16 => match self.bitfields {
+ Some(_) => self.read_16_bit_pixel_data(None),
+ None => Err(ImageError::FormatError(
+ "Missing 16-bit bitfield masks".to_string(),
+ )),
+ },
+ ImageType::Bitfields32 => match self.bitfields {
+ Some(R8_G8_B8_COLOR_MASK) => {
+ self.read_full_byte_pixel_data(&FormatFullBytes::Format888)
+ }
+ Some(_) => self.read_32_bit_pixel_data(),
+ None => Err(ImageError::FormatError(
+ "Missing 32-bit bitfield masks".to_string(),
+ )),
+ },
+ }?;
+
+ buf.copy_from_slice(&data);
+ Ok(())
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct BmpReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for BmpReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for BmpDecoder<R> {
+ type Reader = BmpReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.width as u32, self.height as u32)
+ }
+
+ fn color_type(&self) -> ColorType {
+ if self.add_alpha_channel {
+ ColorType::Rgba8
+ } else {
+ ColorType::Rgb8
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(BmpReader(Cursor::new(image::decoder_to_vec(self)?), PhantomData))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ self.read_image_data(buf)
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoderExt<'a> for BmpDecoder<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ let start = self.reader.seek(SeekFrom::Current(0))?;
+ image::load_rect(x, y, width, height, buf, progress_callback, self, |_, _| unreachable!(),
+ |s, buf| { s.read_image_data(buf).map(|_| buf.len()) })?;
+ self.reader.seek(SeekFrom::Start(start))?;
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::Bitfield;
+
+ #[test]
+ fn test_bitfield_len() {
+ for len in 1..9 {
+ let bitfield = Bitfield { shift: 0, len };
+ for i in 0..(1 << len) {
+ let read = bitfield.read(i);
+ let calc = (i as f64 / ((1 << len) - 1) as f64 * 255f64).round() as u8;
+ if read != calc {
+ println!("len:{} i:{} read:{} calc:{}", len, i, read, calc);
+ }
+ assert_eq!(read, calc);
+ }
+ }
+ }
+}
diff --git a/third_party/rust/image/src/bmp/encoder.rs b/third_party/rust/image/src/bmp/encoder.rs
new file mode 100644
index 0000000000..cceffef320
--- /dev/null
+++ b/third_party/rust/image/src/bmp/encoder.rs
@@ -0,0 +1,348 @@
+use byteorder::{LittleEndian, WriteBytesExt};
+use std::io::{self, Write};
+
+use crate::color;
+use crate::error::{ImageError, ImageResult};
+use crate::image::ImageEncoder;
+
+const BITMAPFILEHEADER_SIZE: u32 = 14;
+const BITMAPINFOHEADER_SIZE: u32 = 40;
+const BITMAPV4HEADER_SIZE: u32 = 108;
+
+/// The representation of a BMP encoder.
+pub struct BMPEncoder<'a, W: 'a> {
+ writer: &'a mut W,
+}
+
+impl<'a, W: Write + 'a> BMPEncoder<'a, W> {
+ /// Create a new encoder that writes its output to ```w```.
+ pub fn new(w: &'a mut W) -> Self {
+ BMPEncoder { writer: w }
+ }
+
+ /// Encodes the image ```image```
+ /// that has dimensions ```width``` and ```height```
+ /// and ```ColorType``` ```c```.
+ pub fn encode(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ c: color::ColorType,
+ ) -> ImageResult<()> {
+ let bmp_header_size = BITMAPFILEHEADER_SIZE;
+
+ let (dib_header_size, written_pixel_size, palette_color_count) = get_pixel_info(c)?;
+ let row_pad_size = (4 - (width * written_pixel_size) % 4) % 4; // each row must be padded to a multiple of 4 bytes
+ let image_size = width
+ .checked_mul(height)
+ .ok_or(ImageError::DimensionError)?
+ .checked_mul(written_pixel_size)
+ .ok_or(ImageError::DimensionError)?
+ .checked_add(height * row_pad_size)
+ .ok_or(ImageError::DimensionError)?;
+ let palette_size = palette_color_count * 4; // all palette colors are BGRA
+ let file_size = bmp_header_size + dib_header_size + palette_size + image_size;
+
+ // write BMP header
+ self.writer.write_u8(b'B')?;
+ self.writer.write_u8(b'M')?;
+ self.writer.write_u32::<LittleEndian>(file_size)?; // file size
+ self.writer.write_u16::<LittleEndian>(0)?; // reserved 1
+ self.writer.write_u16::<LittleEndian>(0)?; // reserved 2
+ self.writer
+ .write_u32::<LittleEndian>(bmp_header_size + dib_header_size + palette_size)?; // image data offset
+
+ // write DIB header
+ self.writer.write_u32::<LittleEndian>(dib_header_size)?;
+ self.writer.write_i32::<LittleEndian>(width as i32)?;
+ self.writer.write_i32::<LittleEndian>(height as i32)?;
+ self.writer.write_u16::<LittleEndian>(1)?; // color planes
+ self.writer
+ .write_u16::<LittleEndian>((written_pixel_size * 8) as u16)?; // bits per pixel
+ if dib_header_size >= BITMAPV4HEADER_SIZE {
+ // Assume BGRA32
+ self.writer.write_u32::<LittleEndian>(3)?; // compression method - bitfields
+ } else {
+ self.writer.write_u32::<LittleEndian>(0)?; // compression method - no compression
+ }
+ self.writer.write_u32::<LittleEndian>(image_size)?;
+ self.writer.write_i32::<LittleEndian>(0)?; // horizontal ppm
+ self.writer.write_i32::<LittleEndian>(0)?; // vertical ppm
+ self.writer.write_u32::<LittleEndian>(palette_color_count)?;
+ self.writer.write_u32::<LittleEndian>(0)?; // all colors are important
+ if dib_header_size >= BITMAPV4HEADER_SIZE {
+ // Assume BGRA32
+ self.writer.write_u32::<LittleEndian>(0xff << 16)?; // red mask
+ self.writer.write_u32::<LittleEndian>(0xff << 8)?; // green mask
+ self.writer.write_u32::<LittleEndian>(0xff << 0)?; // blue mask
+ self.writer.write_u32::<LittleEndian>(0xff << 24)?; // alpha mask
+ self.writer.write_u32::<LittleEndian>(0x73524742)?; // colorspace - sRGB
+
+ // endpoints (3x3) and gamma (3)
+ for _ in 0..12 {
+ self.writer.write_u32::<LittleEndian>(0)?;
+ }
+ }
+
+ // write image data
+ match c {
+ color::ColorType::Rgb8 => {
+ self.encode_rgb(image, width, height, row_pad_size, 3)?
+ }
+ color::ColorType::Rgba8 => {
+ self.encode_rgba(image, width, height, row_pad_size, 4)?
+ }
+ color::ColorType::L8 => {
+ self.encode_gray(image, width, height, row_pad_size, 1)?
+ }
+ color::ColorType::La8 => {
+ self.encode_gray(image, width, height, row_pad_size, 2)?
+ }
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ &get_unsupported_error_message(c)[..],
+ )))
+ }
+ }
+
+ Ok(())
+ }
+
+ fn encode_rgb(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ ) -> io::Result<()> {
+ let x_stride = bytes_per_pixel;
+ let y_stride = width * x_stride;
+ for row in 0..height {
+ // from the bottom up
+ let row_start = (height - row - 1) * y_stride;
+ for col in 0..width {
+ let pixel_start = (row_start + (col * x_stride)) as usize;
+ let r = image[pixel_start];
+ let g = image[pixel_start + 1];
+ let b = image[pixel_start + 2];
+ // written as BGR
+ self.writer.write_u8(b)?;
+ self.writer.write_u8(g)?;
+ self.writer.write_u8(r)?;
+ // alpha is never written as it's not widely supported
+ }
+
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn encode_rgba(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ ) -> io::Result<()> {
+ let x_stride = bytes_per_pixel;
+ let y_stride = width * x_stride;
+ for row in 0..height {
+ // from the bottom up
+ let row_start = (height - row - 1) * y_stride;
+ for col in 0..width {
+ let pixel_start = (row_start + (col * x_stride)) as usize;
+ let r = image[pixel_start];
+ let g = image[pixel_start + 1];
+ let b = image[pixel_start + 2];
+ let a = image[pixel_start + 3];
+ // written as BGRA
+ self.writer.write_u8(b)?;
+ self.writer.write_u8(g)?;
+ self.writer.write_u8(r)?;
+ self.writer.write_u8(a)?;
+ }
+
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn encode_gray(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ ) -> io::Result<()> {
+ // write grayscale palette
+ for val in 0..256 {
+ // each color is written as BGRA, where A is always 0 and since only grayscale is being written, B = G = R = index
+ let val = val as u8;
+ self.writer.write_u8(val)?;
+ self.writer.write_u8(val)?;
+ self.writer.write_u8(val)?;
+ self.writer.write_u8(0)?;
+ }
+
+ // write image data
+ let x_stride = bytes_per_pixel;
+ let y_stride = width * x_stride;
+ for row in 0..height {
+ // from the bottom up
+ let row_start = (height - row - 1) * y_stride;
+ for col in 0..width {
+ let pixel_start = (row_start + (col * x_stride)) as usize;
+ // color value is equal to the palette index
+ self.writer.write_u8(image[pixel_start])?;
+ // alpha is never written as it's not widely supported
+ }
+
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn write_row_pad(&mut self, row_pad_size: u32) -> io::Result<()> {
+ for _ in 0..row_pad_size {
+ self.writer.write_u8(0)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<'a, W: Write> ImageEncoder for BMPEncoder<'a, W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: color::ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+fn get_unsupported_error_message(c: color::ColorType) -> String {
+ format!(
+ "Unsupported color type {:?}. Supported types: RGB(8), RGBA(8), Gray(8), GrayA(8).",
+ c
+ )
+}
+
+/// Returns a tuple representing: (dib header size, written pixel size, palette color count).
+fn get_pixel_info(c: color::ColorType) -> io::Result<(u32, u32, u32)> {
+ let sizes = match c {
+ color::ColorType::Rgb8 => (BITMAPINFOHEADER_SIZE, 3, 0),
+ color::ColorType::Rgba8 => (BITMAPV4HEADER_SIZE, 4, 0),
+ color::ColorType::L8 => (BITMAPINFOHEADER_SIZE, 1, 256),
+ color::ColorType::La8 => (BITMAPINFOHEADER_SIZE, 1, 256),
+ _ => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ &get_unsupported_error_message(c)[..],
+ ))
+ }
+ };
+
+ Ok(sizes)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::super::BmpDecoder;
+ use super::BMPEncoder;
+ use crate::color::ColorType;
+ use crate::image::ImageDecoder;
+ use std::io::Cursor;
+
+ fn round_trip_image(image: &[u8], width: u32, height: u32, c: ColorType) -> Vec<u8> {
+ let mut encoded_data = Vec::new();
+ {
+ let mut encoder = BMPEncoder::new(&mut encoded_data);
+ encoder
+ .encode(&image, width, height, c)
+ .expect("could not encode image");
+ }
+
+ let decoder = BmpDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode");
+
+ let mut buf = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut buf).expect("failed to decode");
+ buf
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgb() {
+ let image = [255u8, 0, 0]; // single red pixel
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgb8);
+ assert_eq!(3, decoded.len());
+ assert_eq!(255, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ }
+
+ #[test]
+ fn huge_files_return_error() {
+ let mut encoded_data = Vec::new();
+ let image = vec![0u8; 3 * 40_000 * 40_000]; // 40_000x40_000 pixels, 3 bytes per pixel, allocated on the heap
+ let mut encoder = BMPEncoder::new(&mut encoded_data);
+ let result = encoder.encode(&image, 40_000, 40_000, ColorType::Rgb8);
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgba() {
+ let image = [1, 2, 3, 4];
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgba8);
+ assert_eq!(&decoded[..], &image[..]);
+ }
+
+ #[test]
+ fn round_trip_3px_rgb() {
+ let image = [0u8; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
+ let _decoded = round_trip_image(&image, 3, 3, ColorType::Rgb8);
+ }
+
+ #[test]
+ fn round_trip_gray() {
+ let image = [0u8, 1, 2]; // 3 pixels
+ let decoded = round_trip_image(&image, 3, 1, ColorType::L8);
+ // should be read back as 3 RGB pixels
+ assert_eq!(9, decoded.len());
+ assert_eq!(0, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ assert_eq!(1, decoded[3]);
+ assert_eq!(1, decoded[4]);
+ assert_eq!(1, decoded[5]);
+ assert_eq!(2, decoded[6]);
+ assert_eq!(2, decoded[7]);
+ assert_eq!(2, decoded[8]);
+ }
+
+ #[test]
+ fn round_trip_graya() {
+ let image = [0u8, 0, 1, 0, 2, 0]; // 3 pixels, each with an alpha channel
+ let decoded = round_trip_image(&image, 1, 3, ColorType::La8);
+ // should be read back as 3 RGB pixels
+ assert_eq!(9, decoded.len());
+ assert_eq!(0, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ assert_eq!(1, decoded[3]);
+ assert_eq!(1, decoded[4]);
+ assert_eq!(1, decoded[5]);
+ assert_eq!(2, decoded[6]);
+ assert_eq!(2, decoded[7]);
+ assert_eq!(2, decoded[8]);
+ }
+}
diff --git a/third_party/rust/image/src/bmp/mod.rs b/third_party/rust/image/src/bmp/mod.rs
new file mode 100644
index 0000000000..09f80ba3d2
--- /dev/null
+++ b/third_party/rust/image/src/bmp/mod.rs
@@ -0,0 +1,14 @@
+//! Decoding and Encoding of BMP Images
+//!
+//! A decoder and encoder for BMP (Windows Bitmap) images
+//!
+//! # Related Links
+//! * <https://msdn.microsoft.com/en-us/library/windows/desktop/dd183375%28v=vs.85%29.aspx>
+//! * <https://en.wikipedia.org/wiki/BMP_file_format>
+//!
+
+pub use self::decoder::BmpDecoder;
+pub use self::encoder::BMPEncoder;
+
+mod decoder;
+mod encoder;
diff --git a/third_party/rust/image/src/buffer.rs b/third_party/rust/image/src/buffer.rs
new file mode 100644
index 0000000000..051fb2af25
--- /dev/null
+++ b/third_party/rust/image/src/buffer.rs
@@ -0,0 +1,1348 @@
+use num_traits::Zero;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut, Index, IndexMut, Range};
+use std::path::Path;
+use std::slice::{Chunks, ChunksMut};
+
+use crate::color::{ColorType, FromColor, Luma, LumaA, Rgb, Rgba, Bgr, Bgra};
+use crate::flat::{FlatSamples, SampleLayout};
+use crate::dynimage::{save_buffer, save_buffer_with_format};
+use crate::error::ImageResult;
+use crate::image::{GenericImage, GenericImageView, ImageFormat};
+use crate::math::Rect;
+use crate::traits::{EncodableLayout, Primitive};
+use crate::utils::expand_packed;
+
/// A generalized pixel.
///
/// A pixel object is usually not used standalone but as a view into an image buffer.
pub trait Pixel: Copy + Clone {
    /// The underlying subpixel type.
    type Subpixel: Primitive;

    /// The number of channels of this pixel type.
    const CHANNEL_COUNT: u8;
    /// Returns the number of channels of this pixel type.
    #[deprecated(note="please use CHANNEL_COUNT associated constant")]
    fn channel_count() -> u8 {
        Self::CHANNEL_COUNT
    }

    /// Returns the components as a slice.
    fn channels(&self) -> &[Self::Subpixel];

    /// Returns the components as a mutable slice
    fn channels_mut(&mut self) -> &mut [Self::Subpixel];

    /// A string that can help to interpret the meaning of each channel
    /// See [gimp babl](http://gegl.org/babl/).
    const COLOR_MODEL: &'static str;
    /// Returns a string that can help to interpret the meaning of each channel
    /// See [gimp babl](http://gegl.org/babl/).
    #[deprecated(note="please use COLOR_MODEL associated constant")]
    fn color_model() -> &'static str {
        Self::COLOR_MODEL
    }

    /// ColorType for this pixel format
    const COLOR_TYPE: ColorType;
    /// Returns the ColorType for this pixel format
    #[deprecated(note="please use COLOR_TYPE associated constant")]
    fn color_type() -> ColorType {
        Self::COLOR_TYPE
    }

    /// Returns the channels of this pixel as a 4 tuple. If the pixel
    /// has less than 4 channels the remainder is filled with the maximum value
    ///
    /// TODO deprecate
    fn channels4(
        &self,
    ) -> (
        Self::Subpixel,
        Self::Subpixel,
        Self::Subpixel,
        Self::Subpixel,
    );

    /// Construct a pixel from the 4 channels a, b, c and d.
    /// If the pixel does not contain 4 channels the extra are ignored.
    ///
    /// TODO deprecate
    fn from_channels(
        a: Self::Subpixel,
        b: Self::Subpixel,
        c: Self::Subpixel,
        d: Self::Subpixel,
    ) -> Self;

    /// Returns a view into a slice.
    ///
    /// Note: The slice length is not checked on creation. Thus the caller has to ensure
    /// that the slice is long enough to prevent panics if the pixel is used later on.
    fn from_slice(slice: &[Self::Subpixel]) -> &Self;

    /// Returns mutable view into a mutable slice.
    ///
    /// Note: The slice length is not checked on creation. Thus the caller has to ensure
    /// that the slice is long enough to prevent panics if the pixel is used later on.
    fn from_slice_mut(slice: &mut [Self::Subpixel]) -> &mut Self;

    /// Convert this pixel to RGB
    fn to_rgb(&self) -> Rgb<Self::Subpixel>;

    /// Convert this pixel to RGB with an alpha channel
    fn to_rgba(&self) -> Rgba<Self::Subpixel>;

    /// Convert this pixel to luma
    fn to_luma(&self) -> Luma<Self::Subpixel>;

    /// Convert this pixel to luma with an alpha channel
    fn to_luma_alpha(&self) -> LumaA<Self::Subpixel>;

    /// Convert this pixel to BGR
    fn to_bgr(&self) -> Bgr<Self::Subpixel>;

    /// Convert this pixel to BGR with an alpha channel
    fn to_bgra(&self) -> Bgra<Self::Subpixel>;

    /// Apply the function ```f``` to each channel of this pixel.
    fn map<F>(&self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel of this pixel.
    fn apply<F>(&mut self, f: F)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Apply the function ```g``` to the alpha channel.
    fn map_with_alpha<F, G>(&self, f: F, g: G) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
        G: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Apply the function ```g``` to the alpha channel. Works in-place.
    fn apply_with_alpha<F, G>(&mut self, f: F, g: G)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
        G: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    fn map_without_alpha<F>(&self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
    {
        // Default impl: copy, then reuse the in-place alpha-preserving variant
        // with the identity function on the alpha channel.
        let mut this = *self;
        this.apply_with_alpha(f, |x| x);
        this
    }

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Works in place.
    fn apply_without_alpha<F>(&mut self, f: F)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
    {
        self.apply_with_alpha(f, |x| x);
    }

    /// Apply the function ```f``` to each channel of this pixel and
    /// ```other``` pairwise.
    fn map2<F>(&self, other: &Self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel of this pixel and
    /// ```other``` pairwise. Works in-place.
    fn apply2<F>(&mut self, other: &Self, f: F)
    where
        F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;

    /// Invert this pixel
    fn invert(&mut self);

    /// Blend the color of a given pixel into ourself, taking into account alpha channels
    fn blend(&mut self, other: &Self);
}
+
/// Iterate over pixel refs.
pub struct Pixels<'a, P: Pixel + 'a>
where
    P::Subpixel: 'a,
{
    // One chunk per pixel; chunk size is `P::CHANNEL_COUNT`, set by the
    // constructors (`ImageBuffer::pixels`, `Rows::next`).
    chunks: Chunks<'a, P::Subpixel>,
}

impl<'a, P: Pixel + 'a> Iterator for Pixels<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = &'a P;

    #[inline(always)]
    fn next(&mut self) -> Option<&'a P> {
        // Reinterpret each channel chunk as a pixel reference.
        self.chunks.next().map(|v| <P as Pixel>::from_slice(v))
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for Pixels<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.chunks.len()
    }
}

impl<'a, P: Pixel + 'a> DoubleEndedIterator for Pixels<'a, P>
where
    P::Subpixel: 'a,
{
    #[inline(always)]
    fn next_back(&mut self) -> Option<&'a P> {
        self.chunks.next_back().map(|v| <P as Pixel>::from_slice(v))
    }
}
+
/// Iterate over mutable pixel refs.
pub struct PixelsMut<'a, P: Pixel + 'a>
where
    P::Subpixel: 'a,
{
    // One chunk per pixel; chunk size is `P::CHANNEL_COUNT`
    // (see `ImageBuffer::pixels_mut`, `RowsMut::next`).
    chunks: ChunksMut<'a, P::Subpixel>,
}

impl<'a, P: Pixel + 'a> Iterator for PixelsMut<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = &'a mut P;

    #[inline(always)]
    fn next(&mut self) -> Option<&'a mut P> {
        // Reinterpret each channel chunk as a mutable pixel reference.
        self.chunks.next().map(|v| <P as Pixel>::from_slice_mut(v))
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for PixelsMut<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.chunks.len()
    }
}

impl<'a, P: Pixel + 'a> DoubleEndedIterator for PixelsMut<'a, P>
where
    P::Subpixel: 'a,
{
    #[inline(always)]
    fn next_back(&mut self) -> Option<&'a mut P> {
        self.chunks
            .next_back()
            .map(|v| <P as Pixel>::from_slice_mut(v))
    }
}
+
/// Iterate over rows of an image
pub struct Rows<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    // One chunk per row; chunk size is `CHANNEL_COUNT * width` subpixels
    // (see `ImageBuffer::rows`).
    chunks: Chunks<'a, P::Subpixel>,
}

impl<'a, P: Pixel + 'a> Iterator for Rows<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = Pixels<'a, P>;

    #[inline(always)]
    fn next(&mut self) -> Option<Pixels<'a, P>> {
        // Each yielded row is itself a pixel iterator, re-chunked per pixel.
        self.chunks.next().map(|row| Pixels {
            chunks: row.chunks(<P as Pixel>::CHANNEL_COUNT as usize),
        })
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for Rows<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.chunks.len()
    }
}

impl<'a, P: Pixel + 'a> DoubleEndedIterator for Rows<'a, P>
where
    P::Subpixel: 'a,
{
    #[inline(always)]
    fn next_back(&mut self) -> Option<Pixels<'a, P>> {
        self.chunks.next_back().map(|row| Pixels {
            chunks: row.chunks(<P as Pixel>::CHANNEL_COUNT as usize),
        })
    }
}
+
/// Iterate over mutable rows of an image
pub struct RowsMut<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    // One chunk per row; chunk size is `CHANNEL_COUNT * width` subpixels
    // (see `ImageBuffer::rows_mut`).
    chunks: ChunksMut<'a, P::Subpixel>,
}

impl<'a, P: Pixel + 'a> Iterator for RowsMut<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = PixelsMut<'a, P>;

    #[inline(always)]
    fn next(&mut self) -> Option<PixelsMut<'a, P>> {
        // Each yielded row is a mutable pixel iterator, re-chunked per pixel.
        self.chunks.next().map(|row| PixelsMut {
            chunks: row.chunks_mut(<P as Pixel>::CHANNEL_COUNT as usize),
        })
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for RowsMut<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.chunks.len()
    }
}

impl<'a, P: Pixel + 'a> DoubleEndedIterator for RowsMut<'a, P>
where
    P::Subpixel: 'a,
{
    #[inline(always)]
    fn next_back(&mut self) -> Option<PixelsMut<'a, P>> {
        self.chunks.next_back().map(|row| PixelsMut {
            chunks: row.chunks_mut(<P as Pixel>::CHANNEL_COUNT as usize),
        })
    }
}
+
/// Enumerate the pixels of an image.
pub struct EnumeratePixels<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    pixels: Pixels<'a, P>,
    // Coordinates of the next pixel to be yielded; `x` wraps to 0 and `y`
    // advances whenever `x` reaches `width`.
    x: u32,
    y: u32,
    width: u32,
}

impl<'a, P: Pixel + 'a> Iterator for EnumeratePixels<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = (u32, u32, &'a P);

    #[inline(always)]
    fn next(&mut self) -> Option<(u32, u32, &'a P)> {
        // Wrap to the next row *before* yielding: after the last pixel of a
        // row, `x` is left equal to `width` and is normalized here.
        if self.x >= self.width {
            self.x = 0;
            self.y += 1;
        }
        let (x, y) = (self.x, self.y);
        self.x += 1;
        // The underlying pixel iterator decides when iteration actually ends.
        self.pixels.next().map(|p| (x, y, p))
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumeratePixels<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.pixels.len()
    }
}
+
/// Enumerate the rows of an image.
pub struct EnumerateRows<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    rows: Rows<'a, P>,
    // y-coordinate of the next row to be yielded.
    y: u32,
    width: u32,
}

impl<'a, P: Pixel + 'a> Iterator for EnumerateRows<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = (u32, EnumeratePixels<'a, P>);

    #[inline(always)]
    fn next(&mut self) -> Option<(u32, EnumeratePixels<'a, P>)> {
        let y = self.y;
        self.y += 1;
        // Pair each row with an enumerating pixel iterator that already
        // carries its own y-coordinate.
        self.rows.next().map(|r| {
            (
                y,
                EnumeratePixels {
                    x: 0,
                    y,
                    width: self.width,
                    pixels: r,
                },
            )
        })
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumerateRows<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.rows.len()
    }
}
+
/// Enumerate the pixels of an image.
pub struct EnumeratePixelsMut<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    pixels: PixelsMut<'a, P>,
    // Coordinates of the next pixel to be yielded; `x` wraps to 0 and `y`
    // advances whenever `x` reaches `width` (same scheme as `EnumeratePixels`).
    x: u32,
    y: u32,
    width: u32,
}

impl<'a, P: Pixel + 'a> Iterator for EnumeratePixelsMut<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = (u32, u32, &'a mut P);

    #[inline(always)]
    fn next(&mut self) -> Option<(u32, u32, &'a mut P)> {
        // Normalize the wrapped x-coordinate before yielding.
        if self.x >= self.width {
            self.x = 0;
            self.y += 1;
        }
        let (x, y) = (self.x, self.y);
        self.x += 1;
        self.pixels.next().map(|p| (x, y, p))
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumeratePixelsMut<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.pixels.len()
    }
}
+
/// Enumerate the rows of an image.
pub struct EnumerateRowsMut<'a, P: Pixel + 'a>
where
    <P as Pixel>::Subpixel: 'a,
{
    rows: RowsMut<'a, P>,
    // y-coordinate of the next row to be yielded.
    y: u32,
    width: u32,
}

impl<'a, P: Pixel + 'a> Iterator for EnumerateRowsMut<'a, P>
where
    P::Subpixel: 'a,
{
    type Item = (u32, EnumeratePixelsMut<'a, P>);

    #[inline(always)]
    fn next(&mut self) -> Option<(u32, EnumeratePixelsMut<'a, P>)> {
        let y = self.y;
        self.y += 1;
        // Pair each row with a mutable enumerating pixel iterator that
        // already carries its own y-coordinate.
        self.rows.next().map(|r| {
            (
                y,
                EnumeratePixelsMut {
                    x: 0,
                    y,
                    width: self.width,
                    pixels: r,
                },
            )
        })
    }
}

impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumerateRowsMut<'a, P>
where
    P::Subpixel: 'a,
{
    fn len(&self) -> usize {
        self.rows.len()
    }
}
+
/// Generic image buffer
#[derive(Debug)]
pub struct ImageBuffer<P: Pixel, Container> {
    /// Image width in pixels.
    width: u32,
    /// Image height in pixels.
    height: u32,
    /// Zero-sized marker tying the buffer to its pixel type `P`.
    _phantom: PhantomData<P>,
    /// Flat, row-major sample storage; at least
    /// `CHANNEL_COUNT * width * height` subpixels (validated in `from_raw`).
    data: Container,
}
+
// generic implementation, shared along all image buffers
//
// TODO: Is the 'static bound on `I::Pixel` really required? Can we avoid it? Remember to remove
// the bounds on `imageops` in case this changes!
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]>,
{
    /// Constructs a buffer from a generic container
    /// (for example a `Vec` or a slice)
    ///
    /// Returns `None` if the container is not big enough (including when the image dimensions
    /// necessitate an allocation of more bytes than supported by the container).
    pub fn from_raw(width: u32, height: u32, buf: Container) -> Option<ImageBuffer<P, Container>> {
        if Self::check_image_fits(width, height, buf.len()) {
            Some(ImageBuffer {
                data: buf,
                width,
                height,
                _phantom: PhantomData,
            })
        } else {
            None
        }
    }

    /// Returns the underlying raw buffer
    pub fn into_raw(self) -> Container {
        self.data
    }

    /// The width and height of this image.
    pub fn dimensions(&self) -> (u32, u32) {
        (self.width, self.height)
    }

    /// The width of this image.
    pub fn width(&self) -> u32 {
        self.width
    }

    /// The height of this image.
    pub fn height(&self) -> u32 {
        self.height
    }

    /// Returns an iterator over the pixels of this image.
    pub fn pixels(&self) -> Pixels<P> {
        Pixels {
            // One chunk = one pixel's channels.
            chunks: self.data.chunks(<P as Pixel>::CHANNEL_COUNT as usize),
        }
    }

    /// Returns an iterator over the rows of this image.
    pub fn rows(&self) -> Rows<P> {
        Rows {
            // One chunk = one full row of samples.
            chunks: self
                .data
                .chunks(<P as Pixel>::CHANNEL_COUNT as usize * self.width as usize),
        }
    }

    /// Enumerates over the pixels of the image.
    /// The iterator yields the coordinates of each pixel
    /// along with a reference to them.
    pub fn enumerate_pixels(&self) -> EnumeratePixels<P> {
        EnumeratePixels {
            pixels: self.pixels(),
            x: 0,
            y: 0,
            width: self.width,
        }
    }

    /// Enumerates over the rows of the image.
    /// The iterator yields the y-coordinate of each row
    /// along with a reference to them.
    pub fn enumerate_rows(&self) -> EnumerateRows<P> {
        EnumerateRows {
            rows: self.rows(),
            y: 0,
            width: self.width,
        }
    }

    /// Gets a reference to the pixel at location `(x, y)`
    ///
    /// # Panics
    ///
    /// Panics if `(x, y)` is out of the bounds `(width, height)`.
    pub fn get_pixel(&self, x: u32, y: u32) -> &P {
        match self.pixel_indices(x, y) {
            None => panic!("Image index {:?} out of bounds {:?}", (x, y), (self.width, self.height)),
            Some(pixel_indices) => <P as Pixel>::from_slice(&self.data[pixel_indices]),
        }
    }

    /// Test that the image fits inside the buffer.
    ///
    /// Verifies that the maximum number of samples needed by an image of these bounds does not
    /// exceed the provided length. Note that as a corollary we also have that the index
    /// calculation of pixels inside the bounds will not overflow.
    fn check_image_fits(width: u32, height: u32, len: usize) -> bool {
        let checked_len = Self::image_buffer_len(width, height);
        checked_len.map(|min_len| min_len <= len).unwrap_or(false)
    }

    // Minimum number of subpixels needed for a `width`x`height` image, or
    // `None` if `CHANNEL_COUNT * width * height` overflows `usize`.
    fn image_buffer_len(width: u32, height: u32) -> Option<usize> {
        Some(<P as Pixel>::CHANNEL_COUNT as usize)
            .and_then(|size| size.checked_mul(width as usize))
            .and_then(|size| size.checked_mul(height as usize))
    }

    // Sample range of pixel `(x, y)`, or `None` when out of bounds.
    #[inline(always)]
    fn pixel_indices(&self, x: u32, y: u32) -> Option<Range<usize>> {
        if x >= self.width || y >= self.height {
            return None
        }

        Some(self.pixel_indices_unchecked(x, y))
    }

    // Sample range of pixel `(x, y)`; the caller is responsible for the
    // bounds check (see `check_image_fits` for the no-overflow argument).
    #[inline(always)]
    fn pixel_indices_unchecked(&self, x: u32, y: u32) -> Range<usize> {
        let no_channels = <P as Pixel>::CHANNEL_COUNT as usize;
        // If in bounds, this can't overflow as we have tested that at construction!
        let min_index = (y as usize*self.width as usize + x as usize)*no_channels;
        min_index..min_index+no_channels
    }

    /// Get the format of the buffer when viewed as a matrix of samples.
    pub fn sample_layout(&self) -> SampleLayout {
        // None of these can overflow, as all our memory is addressable.
        SampleLayout::row_major_packed(<P as Pixel>::CHANNEL_COUNT, self.width, self.height)
    }

    /// Return the raw sample buffer with its stride and dimension information.
    ///
    /// The returned buffer is guaranteed to be well formed in all cases. It is laid out by
    /// colors, width then height, meaning `channel_stride <= width_stride <= height_stride`. All
    /// strides are in numbers of elements but those are mostly `u8` in which case the strides are
    /// also byte strides.
    pub fn into_flat_samples(self) -> FlatSamples<Container>
    where Container: AsRef<[P::Subpixel]>
    {
        // None of these can overflow, as all our memory is addressable.
        let layout = self.sample_layout();
        FlatSamples {
            samples: self.data,
            layout,
            color_hint: Some(P::COLOR_TYPE),
        }
    }

    /// Return a view on the raw sample buffer.
    ///
    /// See `flattened` for more details.
    /// (NOTE(review): `flattened` looks like a stale name — probably refers to
    /// `into_flat_samples` above; confirm.)
    pub fn as_flat_samples(&self) -> FlatSamples<&[P::Subpixel]>
    where Container: AsRef<[P::Subpixel]>
    {
        let layout = self.sample_layout();
        FlatSamples {
            samples: self.data.as_ref(),
            layout,
            color_hint: Some(P::COLOR_TYPE),
        }
    }
}
+
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]> + DerefMut,
{
    /// Returns an iterator over the mutable pixels of this image.
    pub fn pixels_mut(&mut self) -> PixelsMut<P> {
        PixelsMut {
            chunks: self.data.chunks_mut(<P as Pixel>::CHANNEL_COUNT as usize),
        }
    }

    /// Returns an iterator over the mutable rows of this image.
    pub fn rows_mut(&mut self) -> RowsMut<P> {
        RowsMut {
            chunks: self
                .data
                .chunks_mut(<P as Pixel>::CHANNEL_COUNT as usize * self.width as usize),
        }
    }

    /// Enumerates over the pixels of the image.
    /// The iterator yields the coordinates of each pixel
    /// along with a mutable reference to them.
    pub fn enumerate_pixels_mut(&mut self) -> EnumeratePixelsMut<P> {
        // Copy `width` out first: `pixels_mut` takes `&mut self`, so `self`
        // cannot also be read while that borrow is live.
        let width = self.width;
        EnumeratePixelsMut {
            pixels: self.pixels_mut(),
            x: 0,
            y: 0,
            width,
        }
    }

    /// Enumerates over the rows of the image.
    /// The iterator yields the y-coordinate of each row
    /// along with a mutable reference to them.
    pub fn enumerate_rows_mut(&mut self) -> EnumerateRowsMut<P> {
        // Same borrow-ordering dance as `enumerate_pixels_mut`.
        let width = self.width;
        EnumerateRowsMut {
            rows: self.rows_mut(),
            y: 0,
            width,
        }
    }

    /// Gets a reference to the mutable pixel at location `(x, y)`
    ///
    /// # Panics
    ///
    /// Panics if `(x, y)` is out of the bounds `(width, height)`.
    pub fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut P {
        match self.pixel_indices(x, y) {
            None => panic!("Image index {:?} out of bounds {:?}", (x, y), (self.width, self.height)),
            Some(pixel_indices) => <P as Pixel>::from_slice_mut(&mut self.data[pixel_indices]),
        }
    }

    /// Puts a pixel at location `(x, y)`
    ///
    /// # Panics
    ///
    /// Panics if `(x, y)` is out of the bounds `(width, height)`.
    pub fn put_pixel(&mut self, x: u32, y: u32, pixel: P) {
        *self.get_pixel_mut(x, y) = pixel
    }
}
+
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    [P::Subpixel]: EncodableLayout,
    Container: Deref<Target = [P::Subpixel]>,
{
    /// Saves the buffer to a file at the path specified.
    ///
    /// The image format is derived from the file extension.
    /// Currently only jpeg and png files are supported.
    // NOTE(review): the "only jpeg and png" claim above may be stale — the
    // authoritative set of writable formats is whatever `save_buffer`
    // dispatches on; confirm against `io::free_functions`.
    pub fn save<Q>(&self, path: Q) -> ImageResult<()>
    where
        Q: AsRef<Path>,
    {
        // This is valid as the subpixel is u8.
        save_buffer(
            path,
            self.as_bytes(),
            self.width(),
            self.height(),
            <P as Pixel>::COLOR_TYPE,
        )
    }
}
+
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    [P::Subpixel]: EncodableLayout,
    Container: Deref<Target = [P::Subpixel]>,
{
    /// Saves the buffer to a file at the specified path in
    /// the specified format.
    ///
    /// See [`save_buffer_with_format`](fn.save_buffer_with_format.html) for
    /// supported types.
    pub fn save_with_format<Q>(&self, path: Q, format: ImageFormat) -> ImageResult<()>
    where
        Q: AsRef<Path>,
    {
        // This is valid as the subpixel is u8.
        save_buffer_with_format(
            path,
            self.as_bytes(),
            self.width(),
            self.height(),
            <P as Pixel>::COLOR_TYPE,
            format,
        )
    }
}
+
// Expose the raw subpixel storage as a flat slice (`&[P::Subpixel]`).
impl<P, Container> Deref for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]>,
{
    type Target = [P::Subpixel];

    fn deref(&self) -> &<Self as Deref>::Target {
        &*self.data
    }
}

// Mutable counterpart: expose the raw subpixel storage mutably.
impl<P, Container> DerefMut for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]> + DerefMut,
{
    fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
        &mut *self.data
    }
}

// `buffer[(x, y)]` pixel indexing; panics on out-of-bounds like `get_pixel`.
impl<P, Container> Index<(u32, u32)> for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]>,
{
    type Output = P;

    fn index(&self, (x, y): (u32, u32)) -> &P {
        self.get_pixel(x, y)
    }
}

// Mutable `buffer[(x, y)]` indexing; panics on out-of-bounds like `get_pixel_mut`.
impl<P, Container> IndexMut<(u32, u32)> for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    P::Subpixel: 'static,
    Container: Deref<Target = [P::Subpixel]> + DerefMut,
{
    fn index_mut(&mut self, (x, y): (u32, u32)) -> &mut P {
        self.get_pixel_mut(x, y)
    }
}
+
+impl<P, Container> Clone for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + Clone,
+{
+ fn clone(&self) -> ImageBuffer<P, Container> {
+ ImageBuffer {
+ data: self.data.clone(),
+ width: self.width,
+ height: self.height,
+ _phantom: PhantomData,
+ }
+ }
+}
+
impl<P, Container> GenericImageView for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    Container: Deref<Target = [P::Subpixel]> + Deref,
    P::Subpixel: 'static,
{
    type Pixel = P;
    type InnerImageView = Self;

    fn dimensions(&self) -> (u32, u32) {
        // Delegates to the inherent method of the same name.
        self.dimensions()
    }

    fn bounds(&self) -> (u32, u32, u32, u32) {
        (0, 0, self.width, self.height)
    }

    fn get_pixel(&self, x: u32, y: u32) -> P {
        // Returns by value; the inherent `get_pixel` panics on out-of-bounds.
        *self.get_pixel(x, y)
    }

    /// Returns the pixel located at (x, y), ignoring bounds checking.
    #[inline(always)]
    unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> P {
        // SAFETY contract (caller): `x < self.width && y < self.height`.
        // Under that precondition `pixel_indices_unchecked` yields a range
        // inside `data` — the buffer length was validated at construction
        // (`check_image_fits`), which also rules out index overflow.
        let indices = self.pixel_indices_unchecked(x, y);
        *<P as Pixel>::from_slice(self.data.get_unchecked(indices))
    }

    fn inner(&self) -> &Self::InnerImageView {
        self
    }
}
+
impl<P, Container> GenericImage for ImageBuffer<P, Container>
where
    P: Pixel + 'static,
    Container: Deref<Target = [P::Subpixel]> + DerefMut,
    P::Subpixel: 'static,
{
    type InnerImage = Self;

    fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut P {
        self.get_pixel_mut(x, y)
    }

    fn put_pixel(&mut self, x: u32, y: u32, pixel: P) {
        *self.get_pixel_mut(x, y) = pixel
    }

    /// Puts a pixel at location (x, y), ignoring bounds checking.
    #[inline(always)]
    unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: P) {
        // SAFETY contract (caller): `x < self.width && y < self.height`, which
        // keeps the computed sample range inside `data` (buffer length was
        // validated at construction).
        let indices = self.pixel_indices_unchecked(x, y);
        let p = <P as Pixel>::from_slice_mut(self.data.get_unchecked_mut(indices));
        *p = pixel
    }

    /// Put a pixel at location (x, y), taking into account alpha channels
    ///
    /// DEPRECATED: This method will be removed. Blend the pixel directly instead.
    fn blend_pixel(&mut self, x: u32, y: u32, p: P) {
        self.get_pixel_mut(x, y).blend(&p)
    }

    fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
        let Rect { x: sx, y: sy, width, height } = source;
        let dx = x;
        let dy = y;
        // Top-left corners of both rectangles must lie inside the image.
        assert!(sx < self.width() && dx < self.width());
        assert!(sy < self.height() && dy < self.height());
        // Reject rectangles that would run past the right or bottom edge from
        // either the source or the destination corner.
        if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
            return false;
        }

        if sy < dy {
            // Copying downwards: iterate rows bottom-up so each source row is
            // read before a later copy overwrites it (regions may overlap).
            for y in (0..height).rev() {
                let sy = sy + y;
                let dy = dy + y;
                // `start` of the first pixel .. `end` of the last pixel spans
                // the whole `width`-pixel row segment.
                let Range { start, .. } = self.pixel_indices_unchecked(sx, sy);
                let Range { end, .. } = self.pixel_indices_unchecked(sx + width - 1, sy);
                let dst = self.pixel_indices_unchecked(dx, dy).start;
                // Within-row overlap is handled by `slice_copy_within`'s
                // memmove semantics.
                slice_copy_within(self, start..end, dst);
            }
        } else {
            // Copying upwards (or within the same row): top-down order is safe.
            for y in 0..height {
                let sy = sy + y;
                let dy = dy + y;
                let Range { start, .. } = self.pixel_indices_unchecked(sx, sy);
                let Range { end, .. } = self.pixel_indices_unchecked(sx + width - 1, sy);
                let dst = self.pixel_indices_unchecked(dx, dy).start;
                slice_copy_within(self, start..end, dst);
            }
        }
        true
    }

    fn inner_mut(&mut self) -> &mut Self::InnerImage {
        self
    }
}
+
// FIXME non-generic stand-in for `core::slice::copy_within`, used by
// `ImageBuffer::copy_within`. Hand-rolled for MSRV reasons: image supports
// Rust 1.34, while `slice::copy_within` was only stabilized in 1.37.
#[inline(always)]
fn slice_copy_within<T: Copy>(slice: &mut [T], src: Range<usize>, dest: usize) {
    let Range { start, end } = src;
    assert!(start <= end, "src end is before src start");
    assert!(end <= slice.len(), "src is out of bounds");
    let count = end - start;
    assert!(dest <= slice.len() - count, "dest is out of bounds");
    // `ptr::copy` has memmove semantics, so overlapping source/destination
    // regions are handled correctly.
    // SAFETY: the asserts above guarantee `start..end` and `dest..dest + count`
    // both lie inside `slice`, and `T: Copy` makes a bitwise copy a valid
    // duplication of the elements.
    unsafe {
        let base = slice.as_mut_ptr();
        std::ptr::copy(base.add(start), base.add(dest), count);
    }
}
+
// concrete implementation for `Vec`-backed buffers
// TODO: I think that rustc does not "see" this impl any more: the impl with
// Container meets the same requirements. At least, I got compile errors that
// there is no such function as `into_vec`, whereas `into_raw` did work, and
// `into_vec` is redundant anyway, because `into_raw` will give you the vector,
// and it is more generic.
impl<P: Pixel + 'static> ImageBuffer<P, Vec<P::Subpixel>>
where
    P::Subpixel: 'static,
{
    /// Creates a new image buffer based on a `Vec<P::Subpixel>`.
    ///
    /// All samples are initialized to zero.
    ///
    /// # Panics
    ///
    /// Panics when the resulting image is larger than the maximum size of a vector.
    pub fn new(width: u32, height: u32) -> ImageBuffer<P, Vec<P::Subpixel>> {
        let size = Self::image_buffer_len(width, height)
            .expect("Buffer length in `ImageBuffer::new` overflows usize");
        ImageBuffer {
            data: vec![Zero::zero(); size],
            width,
            height,
            _phantom: PhantomData,
        }
    }

    /// Constructs a new ImageBuffer by copying a pixel
    ///
    /// # Panics
    ///
    /// Panics when the resulting image is larger than the maximum size of a vector.
    pub fn from_pixel(width: u32, height: u32, pixel: P) -> ImageBuffer<P, Vec<P::Subpixel>> {
        let mut buf = ImageBuffer::new(width, height);
        for p in buf.pixels_mut() {
            *p = pixel
        }
        buf
    }

    /// Constructs a new ImageBuffer by repeated application of the supplied function.
    ///
    /// The arguments to the function are the pixel's x and y coordinates.
    ///
    /// # Panics
    ///
    /// Panics when the resulting image is larger than the maximum size of a vector.
    pub fn from_fn<F>(width: u32, height: u32, mut f: F) -> ImageBuffer<P, Vec<P::Subpixel>>
    where
        F: FnMut(u32, u32) -> P,
    {
        let mut buf = ImageBuffer::new(width, height);
        for (x, y, p) in buf.enumerate_pixels_mut() {
            *p = f(x, y)
        }
        buf
    }

    /// Creates an image buffer out of an existing buffer.
    /// Returns None if the buffer is not big enough.
    pub fn from_vec(
        width: u32,
        height: u32,
        buf: Vec<P::Subpixel>,
    ) -> Option<ImageBuffer<P, Vec<P::Subpixel>>> {
        ImageBuffer::from_raw(width, height, buf)
    }

    /// Consumes the image buffer and returns the underlying data
    /// as an owned buffer
    pub fn into_vec(self) -> Vec<P::Subpixel> {
        self.into_raw()
    }
}
+
/// Provides color conversions for whole image buffers.
pub trait ConvertBuffer<T> {
    /// Converts `self` to a buffer of type T
    ///
    /// A generic implementation is provided to convert any image buffer to a image buffer
    /// based on a `Vec<T>`.
    ///
    /// This allocates a new buffer; the source is left untouched.
    fn convert(&self) -> T;
}
+
// concrete implementation Luma -> Rgba
impl GrayImage {
    /// Expands a color palette by re-using the existing buffer.
    /// Assumes 8 bit per pixel. Uses an optionally transparent index to
    /// adjust its alpha value accordingly.
    pub fn expand_palette(
        self,
        palette: &[(u8, u8, u8)],
        transparent_idx: Option<u8>,
    ) -> RgbaImage {
        let (width, height) = self.dimensions();
        let mut data = self.into_raw();
        let entries = data.len();
        // Grow the buffer in place: each input byte (a palette index) becomes
        // four output bytes (RGBA). `checked_mul(..).unwrap()` panics on
        // overflow rather than silently truncating.
        data.resize(entries.checked_mul(4).unwrap(), 0);
        let mut buffer = ImageBuffer::from_vec(width, height, data).unwrap();
        // `expand_packed` (in `utils`, not shown here) rewrites the buffer in
        // place; presumably it processes indices in an order that never
        // overwrites values it has not yet read — TODO confirm against its
        // implementation.
        expand_packed(&mut buffer, 4, 8, |idx, pixel| {
            let (r, g, b) = palette[idx as usize];
            // Fully transparent only for the designated palette index.
            let a = if let Some(t_idx) = transparent_idx {
                if t_idx == idx {
                    0
                } else {
                    255
                }
            } else {
                255
            };
            pixel[0] = r;
            pixel[1] = g;
            pixel[2] = b;
            pixel[3] = a;
        });
        buffer
    }
}
+
// TODO: Equality constraints are not yet supported in where clauses, when they
// are, the T parameter should be removed in favor of ToType::Subpixel, which
// will then be FromType::Subpixel.
impl<'a, 'b, Container, FromType: Pixel + 'static, ToType: Pixel + 'static>
    ConvertBuffer<ImageBuffer<ToType, Vec<ToType::Subpixel>>> for ImageBuffer<FromType, Container>
where
    Container: Deref<Target = [FromType::Subpixel]>,
    ToType: FromColor<FromType>,
    FromType::Subpixel: 'static,
    ToType::Subpixel: 'static,
{
    fn convert(&self) -> ImageBuffer<ToType, Vec<ToType::Subpixel>> {
        // Allocate a zeroed destination of the same dimensions, then convert
        // pixel-by-pixel via the destination type's `FromColor` impl.
        let mut buffer: ImageBuffer<ToType, Vec<ToType::Subpixel>> =
            ImageBuffer::new(self.width, self.height);
        for (to, from) in buffer.pixels_mut().zip(self.pixels()) {
            to.from_color(from)
        }
        buffer
    }
}
+
/// Sendable Rgb image buffer
pub type RgbImage = ImageBuffer<Rgb<u8>, Vec<u8>>;
/// Sendable Rgb + alpha channel image buffer
pub type RgbaImage = ImageBuffer<Rgba<u8>, Vec<u8>>;
/// Sendable grayscale image buffer
pub type GrayImage = ImageBuffer<Luma<u8>, Vec<u8>>;
/// Sendable grayscale + alpha channel image buffer
pub type GrayAlphaImage = ImageBuffer<LumaA<u8>, Vec<u8>>;
// The remaining aliases are `pub(crate)`: used internally (e.g. by the
// dynamic-image machinery) but not part of the public API.
/// Sendable Bgr image buffer
pub(crate) type BgrImage = ImageBuffer<Bgr<u8>, Vec<u8>>;
/// Sendable Bgr + alpha channel image buffer
pub(crate) type BgraImage = ImageBuffer<Bgra<u8>, Vec<u8>>;
/// Sendable 16-bit Rgb image buffer
pub(crate) type Rgb16Image = ImageBuffer<Rgb<u16>, Vec<u16>>;
/// Sendable 16-bit Rgb + alpha channel image buffer
pub(crate) type Rgba16Image = ImageBuffer<Rgba<u16>, Vec<u16>>;
/// Sendable 16-bit grayscale image buffer
pub(crate) type Gray16Image = ImageBuffer<Luma<u16>, Vec<u16>>;
/// Sendable 16-bit grayscale + alpha channel image buffer
pub(crate) type GrayAlpha16Image = ImageBuffer<LumaA<u16>, Vec<u16>>;
+
+#[cfg(test)]
+mod test {
+
+ use super::{GrayImage, ImageBuffer, RgbImage};
+ use crate::image::GenericImage;
+ use crate::color;
+ use crate::math::Rect;
+ #[cfg(feature = "benchmarks")]
+ use test;
+
+ #[test]
+ /// Tests if image buffers from slices work
+ fn slice_buffer() {
+ let data = [0; 9];
+ let buf: ImageBuffer<color::Luma<u8>, _> = ImageBuffer::from_raw(3, 3, &data[..]).unwrap();
+ assert_eq!(&*buf, &data[..])
+ }
+
+ #[test]
+ fn test_get_pixel() {
+ let mut a: RgbImage = ImageBuffer::new(10, 10);
+ {
+ let b = a.get_mut(3 * 10).unwrap();
+ *b = 255;
+ }
+ assert_eq!(a.get_pixel(0, 1)[0], 255)
+ }
+
+ #[test]
+ fn test_mut_iter() {
+ let mut a: RgbImage = ImageBuffer::new(10, 10);
+ {
+ let val = a.pixels_mut().next().unwrap();
+ *val = color::Rgb([42, 0, 0]);
+ }
+ assert_eq!(a.data[0], 42)
+ }
+
+ #[bench]
+ #[cfg(feature = "benchmarks")]
+ fn bench_conversion(b: &mut test::Bencher) {
+ use crate::buffer::{ConvertBuffer, GrayImage, Pixel};
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+ assert!(a.data[0] != 0);
+ b.iter(|| {
+ let b: GrayImage = a.convert();
+ assert!(0 != b.data[0]);
+ assert!(a.data[0] != b.data[0]);
+ test::black_box(b);
+ });
+ b.bytes = 1000 * 1000 * 3
+ }
+
+ #[bench]
+ #[cfg(feature = "benchmarks")]
+ fn bench_image_access_row_by_row(b: &mut test::Bencher) {
+ use crate::buffer::{ImageBuffer, Pixel};
+
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+
+ b.iter(move || {
+ let image: &RgbImage = test::black_box(&a);
+ let mut sum: usize = 0;
+ for y in 0..1000 {
+ for x in 0..1000 {
+ let pixel = image.get_pixel(x, y);
+ sum = sum.wrapping_add(pixel[0] as usize);
+ sum = sum.wrapping_add(pixel[1] as usize);
+ sum = sum.wrapping_add(pixel[2] as usize);
+ }
+ }
+ test::black_box(sum)
+ });
+
+ b.bytes = 1000 * 1000 * 3;
+ }
+
+ #[bench]
+ #[cfg(feature = "benchmarks")]
+ fn bench_image_access_col_by_col(b: &mut test::Bencher) {
+ use crate::buffer::{ImageBuffer, Pixel};
+
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+
+ b.iter(move || {
+ let image: &RgbImage = test::black_box(&a);
+ let mut sum: usize = 0;
+ for x in 0..1000 {
+ for y in 0..1000 {
+ let pixel = image.get_pixel(x, y);
+ sum = sum.wrapping_add(pixel[0] as usize);
+ sum = sum.wrapping_add(pixel[1] as usize);
+ sum = sum.wrapping_add(pixel[2] as usize);
+ }
+ }
+ test::black_box(sum)
+ });
+
+ b.bytes = 1000 * 1000 * 3;
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_oob() {
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 5, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 5 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 1, y: 0, width: 4, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 1, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 1, width: 4, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 0, 1));
+ assert!(!image.copy_within(Rect { x: 1, y: 1, width: 4, height: 4 }, 0, 0));
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_tl() {
+ let data = &[
+ 00, 01, 02, 03,
+ 04, 05, 06, 07,
+ 08, 09, 10, 11,
+ 12, 13, 14, 15
+ ];
+ let expected = [
+ 00, 01, 02, 03,
+ 04, 00, 01, 02,
+ 08, 04, 05, 06,
+ 12, 08, 09, 10,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(Rect { x: 0, y: 0, width: 3, height: 3 }, 1, 1));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_tr() {
+ let data = &[
+ 00, 01, 02, 03,
+ 04, 05, 06, 07,
+ 08, 09, 10, 11,
+ 12, 13, 14, 15
+ ];
+ let expected = [
+ 00, 01, 02, 03,
+ 01, 02, 03, 07,
+ 05, 06, 07, 11,
+ 09, 10, 11, 15
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(Rect { x: 1, y: 0, width: 3, height: 3 }, 0, 1));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_bl() {
+ let data = &[
+ 00, 01, 02, 03,
+ 04, 05, 06, 07,
+ 08, 09, 10, 11,
+ 12, 13, 14, 15
+ ];
+ let expected = [
+ 00, 04, 05, 06,
+ 04, 08, 09, 10,
+ 08, 12, 13, 14,
+ 12, 13, 14, 15
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(Rect { x: 0, y: 1, width: 3, height: 3 }, 1, 0));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_br() {
+ let data = &[
+ 00, 01, 02, 03,
+ 04, 05, 06, 07,
+ 08, 09, 10, 11,
+ 12, 13, 14, 15
+ ];
+ let expected = [
+ 05, 06, 07, 03,
+ 09, 10, 11, 07,
+ 13, 14, 15, 11,
+ 12, 13, 14, 15
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(Rect { x: 1, y: 1, width: 3, height: 3 }, 0, 0));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+}
diff --git a/third_party/rust/image/src/color.rs b/third_party/rust/image/src/color.rs
new file mode 100644
index 0000000000..63fd9fb33b
--- /dev/null
+++ b/third_party/rust/image/src/color.rs
@@ -0,0 +1,1276 @@
+use num_traits::{NumCast, ToPrimitive, Zero};
+use std::ops::{Index, IndexMut};
+
+use crate::buffer::Pixel;
+use crate::traits::Primitive;
+
+/// An enumeration over supported color types and bit depths
+#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
+pub enum ColorType {
+ /// Pixel is 8-bit luminance
+ L8,
+ /// Pixel is 8-bit luminance with an alpha channel
+ La8,
+ /// Pixel contains 8-bit R, G and B channels
+ Rgb8,
+ /// Pixel is 8-bit RGB with an alpha channel
+ Rgba8,
+
+ /// Pixel is 16-bit luminance
+ L16,
+ /// Pixel is 16-bit luminance with an alpha channel
+ La16,
+ /// Pixel is 16-bit RGB
+ Rgb16,
+ /// Pixel is 16-bit RGBA
+ Rgba16,
+
+ /// Pixel contains 8-bit B, G and R channels
+ Bgr8,
+ /// Pixel is 8-bit BGR with an alpha channel
+ Bgra8,
+
+ #[doc(hidden)]
+ __NonExhaustive(crate::utils::NonExhaustiveMarker),
+}
+
+impl ColorType {
+ /// Returns the number of bytes contained in a pixel of this `ColorType`
+ pub fn bytes_per_pixel(self) -> u8 {
+ match self {
+ ColorType::L8 => 1,
+ ColorType::L16 | ColorType::La8 => 2,
+ ColorType::Rgb8 | ColorType::Bgr8 => 3,
+ ColorType::Rgba8 | ColorType::Bgra8 | ColorType::La16 => 4,
+ ColorType::Rgb16 => 6,
+ ColorType::Rgba16 => 8,
+ ColorType::__NonExhaustive(marker) => match marker._private {},
+ }
+ }
+
+ /// Returns the number of bits contained in a pixel of this `ColorType`
+ /// (which will always be a multiple of 8).
+ pub fn bits_per_pixel(self) -> u16 {
+ <u16 as From<u8>>::from(self.bytes_per_pixel()) * 8
+ }
+
+ /// Returns the number of color channels that make up this pixel
+ pub fn channel_count(self) -> u8 {
+ let e: ExtendedColorType = self.into();
+ e.channel_count()
+ }
+}
+
+/// An enumeration of color types encountered in image formats.
+///
+/// This is not exhaustive over all existing image formats but should be granular enough to allow
+/// round tripping of decoding and encoding as much as possible. The variants will be extended as
+/// necessary to enable this.
+///
+/// Another purpose is to advise users of a rough estimate of the accuracy and effort of the
+/// decoding from and encoding to such an image format.
+#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
+pub enum ExtendedColorType {
+ L1,
+ La1,
+ Rgb1,
+ Rgba1,
+ L2,
+ La2,
+ Rgb2,
+ Rgba2,
+ L4,
+ La4,
+ Rgb4,
+ Rgba4,
+ L8,
+ La8,
+ Rgb8,
+ Rgba8,
+ L16,
+ La16,
+ Rgb16,
+ Rgba16,
+ Bgr8,
+ Bgra8,
+
+ /// Pixel is of unknown color type with the specified bits per pixel. This can apply to pixels
+ /// which are associated with an external palette. In that case, the pixel value is an index
+ /// into the palette.
+ Unknown(u8),
+
+ #[doc(hidden)]
+ __NonExhaustive(crate::utils::NonExhaustiveMarker),
+}
+
+impl ExtendedColorType {
+ /// Get the number of channels for colors of this type.
+ ///
+ /// Note that the `Unknown` variant returns a value of `1` since pixels can only be treated as
+ /// an opaque datum by the library.
+ pub fn channel_count(self) -> u8 {
+ match self {
+ ExtendedColorType::L1 |
+ ExtendedColorType::L2 |
+ ExtendedColorType::L4 |
+ ExtendedColorType::L8 |
+ ExtendedColorType::L16 |
+ ExtendedColorType::Unknown(_) => 1,
+ ExtendedColorType::La1 |
+ ExtendedColorType::La2 |
+ ExtendedColorType::La4 |
+ ExtendedColorType::La8 |
+ ExtendedColorType::La16 => 2,
+ ExtendedColorType::Rgb1 |
+ ExtendedColorType::Rgb2 |
+ ExtendedColorType::Rgb4 |
+ ExtendedColorType::Rgb8 |
+ ExtendedColorType::Rgb16 |
+ ExtendedColorType::Bgr8 => 3,
+ ExtendedColorType::Rgba1 |
+ ExtendedColorType::Rgba2 |
+ ExtendedColorType::Rgba4 |
+ ExtendedColorType::Rgba8 |
+ ExtendedColorType::Rgba16 |
+ ExtendedColorType::Bgra8 => 4,
+ ExtendedColorType::__NonExhaustive(marker) => match marker._private {},
+ }
+ }
+}
+impl From<ColorType> for ExtendedColorType {
+ fn from(c: ColorType) -> Self {
+ match c {
+ ColorType::L8 => ExtendedColorType::L8,
+ ColorType::La8 => ExtendedColorType::La8,
+ ColorType::Rgb8 => ExtendedColorType::Rgb8,
+ ColorType::Rgba8 => ExtendedColorType::Rgba8,
+ ColorType::L16 => ExtendedColorType::L16,
+ ColorType::La16 => ExtendedColorType::La16,
+ ColorType::Rgb16 => ExtendedColorType::Rgb16,
+ ColorType::Rgba16 => ExtendedColorType::Rgba16,
+ ColorType::Bgr8 => ExtendedColorType::Bgr8,
+ ColorType::Bgra8 => ExtendedColorType::Bgra8,
+ ColorType::__NonExhaustive(marker) => match marker._private {},
+ }
+ }
+}
+
+macro_rules! define_colors {
+ {$(
+ $ident:ident,
+ $channels: expr,
+ $alphas: expr,
+ $interpretation: expr,
+ $color_type_u8: expr,
+ $color_type_u16: expr,
+ #[$doc:meta];
+ )*} => {
+
+$( // START Structure definitions
+
+#[$doc]
+#[derive(PartialEq, Eq, Clone, Debug, Copy, Hash)]
+#[repr(C)]
+#[allow(missing_docs)]
+pub struct $ident<T: Primitive> (pub [T; $channels]);
+
+impl<T: Primitive + 'static> Pixel for $ident<T> {
+ type Subpixel = T;
+
+ const CHANNEL_COUNT: u8 = $channels;
+
+ const COLOR_MODEL: &'static str = $interpretation;
+
+ const COLOR_TYPE: ColorType =
+ [$color_type_u8, $color_type_u16][(std::mem::size_of::<T>() > 1) as usize];
+
+ #[inline(always)]
+ fn channels(&self) -> &[T] {
+ &self.0
+ }
+ #[inline(always)]
+ fn channels_mut(&mut self) -> &mut [T] {
+ &mut self.0
+ }
+
+ fn channels4(&self) -> (T, T, T, T) {
+ const CHANNELS: usize = $channels;
+ let mut channels = [T::max_value(); 4];
+ channels[0..CHANNELS].copy_from_slice(&self.0);
+ (channels[0], channels[1], channels[2], channels[3])
+ }
+
+ fn from_channels(a: T, b: T, c: T, d: T,) -> $ident<T> {
+ const CHANNELS: usize = $channels;
+ *<$ident<T> as Pixel>::from_slice(&[a, b, c, d][..CHANNELS])
+ }
+
+ fn from_slice(slice: &[T]) -> &$ident<T> {
+ assert_eq!(slice.len(), $channels);
+ unsafe { &*(slice.as_ptr() as *const $ident<T>) }
+ }
+ fn from_slice_mut(slice: &mut [T]) -> &mut $ident<T> {
+ assert_eq!(slice.len(), $channels);
+ unsafe { &mut *(slice.as_ptr() as *mut $ident<T>) }
+ }
+
+ fn to_rgb(&self) -> Rgb<T> {
+ let mut pix = Rgb([Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_bgr(&self) -> Bgr<T> {
+ let mut pix = Bgr([Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_rgba(&self) -> Rgba<T> {
+ let mut pix = Rgba([Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_bgra(&self) -> Bgra<T> {
+ let mut pix = Bgra([Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_luma(&self) -> Luma<T> {
+ let mut pix = Luma([Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_luma_alpha(&self) -> LumaA<T> {
+ let mut pix = LumaA([Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn map<F>(& self, f: F) -> $ident<T> where F: FnMut(T) -> T {
+ let mut this = (*self).clone();
+ this.apply(f);
+ this
+ }
+
+ fn apply<F>(&mut self, mut f: F) where F: FnMut(T) -> T {
+ for v in &mut self.0 {
+ *v = f(*v)
+ }
+ }
+
+ fn map_with_alpha<F, G>(&self, f: F, g: G) -> $ident<T> where F: FnMut(T) -> T, G: FnMut(T) -> T {
+ let mut this = (*self).clone();
+ this.apply_with_alpha(f, g);
+ this
+ }
+
+ fn apply_with_alpha<F, G>(&mut self, mut f: F, mut g: G) where F: FnMut(T) -> T, G: FnMut(T) -> T {
+ const ALPHA: usize = $channels - $alphas;
+ for v in self.0[..ALPHA].iter_mut() {
+ *v = f(*v)
+ }
+ // The branch of this match is `const`. This way ensures that no subexpression fails the
+ // `const_err` lint (the expression `self.0[ALPHA]` would).
+ if let Some(v) = self.0.get_mut(ALPHA) {
+ *v = g(*v)
+ }
+ }
+
+ fn map2<F>(&self, other: &Self, f: F) -> $ident<T> where F: FnMut(T, T) -> T {
+ let mut this = (*self).clone();
+ this.apply2(other, f);
+ this
+ }
+
+ fn apply2<F>(&mut self, other: &$ident<T>, mut f: F) where F: FnMut(T, T) -> T {
+ for (a, &b) in self.0.iter_mut().zip(other.0.iter()) {
+ *a = f(*a, b)
+ }
+ }
+
+ fn invert(&mut self) {
+ Invert::invert(self)
+ }
+
+ fn blend(&mut self, other: &$ident<T>) {
+ Blend::blend(self, other)
+ }
+}
+
+impl<T: Primitive> Index<usize> for $ident<T> {
+ type Output = T;
+ #[inline(always)]
+ fn index(&self, _index: usize) -> &T {
+ &self.0[_index]
+ }
+}
+
+impl<T: Primitive> IndexMut<usize> for $ident<T> {
+ #[inline(always)]
+ fn index_mut(&mut self, _index: usize) -> &mut T {
+ &mut self.0[_index]
+ }
+}
+
+impl<T: Primitive + 'static> From<[T; $channels]> for $ident<T> {
+ fn from(c: [T; $channels]) -> Self {
+ Self(c)
+ }
+}
+
+)* // END Structure definitions
+
+ }
+}
+
+define_colors! {
+ Rgb, 3, 0, "RGB", ColorType::Rgb8, ColorType::Rgb16, #[doc = "RGB colors"];
+ Bgr, 3, 0, "BGR", ColorType::Bgr8, ColorType::Bgr8, #[doc = "BGR colors"];
+ Luma, 1, 0, "Y", ColorType::L8, ColorType::L16, #[doc = "Grayscale colors"];
+ Rgba, 4, 1, "RGBA", ColorType::Rgba8, ColorType::Rgba16, #[doc = "RGB colors + alpha channel"];
+ Bgra, 4, 1, "BGRA", ColorType::Bgra8, ColorType::Bgra8, #[doc = "BGR colors + alpha channel"];
+ LumaA, 2, 1, "YA", ColorType::La8, ColorType::La16, #[doc = "Grayscale colors + alpha channel"];
+}
+
+/// Provides color conversions for the different pixel types.
+pub trait FromColor<Other> {
+ /// Changes `self` to represent `Other` in the color space of `Self`
+ fn from_color(&mut self, _: &Other);
+}
+
+// Self->Self: just copy
+impl<A: Copy> FromColor<A> for A {
+ fn from_color(&mut self, other: &A) {
+ *self = *other;
+ }
+}
+
+/// Copy-based conversions to target pixel types using `FromColor`.
+// FIXME: this trait should be removed and replaced with real color space models
+// rather than assuming sRGB.
+pub(crate) trait IntoColor<Other> {
+ /// Constructs a pixel of the target type and converts this pixel into it.
+ fn into_color(&self) -> Other;
+}
+
+impl<O, S> IntoColor<O> for S
+where
+ O: Pixel + FromColor<S> {
+ fn into_color(&self) -> O {
+ // Note we cannot use Pixel::CHANNELS_COUNT here to directly construct
+ // the pixel due to a current bug/limitation of consts.
+ let mut pix = O::from_channels(Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero());
+ pix.from_color(self);
+ pix
+ }
+}
+
+/// Coefficients to transform from sRGB to a CIE Y (luminance) value.
+const SRGB_LUMA: [f32; 3] = [0.2126, 0.7152, 0.0722];
+
+#[inline]
+fn rgb_to_luma<T: Primitive>(rgb: &[T]) -> T {
+ let l = SRGB_LUMA[0] * rgb[0].to_f32().unwrap()
+ + SRGB_LUMA[1] * rgb[1].to_f32().unwrap()
+ + SRGB_LUMA[2] * rgb[2].to_f32().unwrap();
+ NumCast::from(l).unwrap()
+}
+
+#[inline]
+fn bgr_to_luma<T: Primitive>(bgr: &[T]) -> T {
+ let l = SRGB_LUMA[0] * bgr[2].to_f32().unwrap()
+ + SRGB_LUMA[1] * bgr[1].to_f32().unwrap()
+ + SRGB_LUMA[2] * bgr[0].to_f32().unwrap();
+ NumCast::from(l).unwrap()
+}
+
+#[inline]
+fn downcast_channel(c16: u16) -> u8 {
+ NumCast::from(c16.to_u64().unwrap() >> 8).unwrap()
+}
+
+#[inline]
+fn upcast_channel(c8: u8) -> u16 {
+ NumCast::from(c8.to_u64().unwrap() << 8).unwrap()
+}
+
+
+// `FromColor` for Luma
+
+impl<T: Primitive + 'static> FromColor<Rgba<T>> for Luma<T> {
+ fn from_color(&mut self, other: &Rgba<T>) {
+ let gray = self.channels_mut();
+ let rgba = other.channels();
+ gray[0] = rgb_to_luma(rgba);
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgra<T>> for Luma<T> {
+ fn from_color(&mut self, other: &Bgra<T>) {
+ let gray = self.channels_mut();
+ let bgra = other.channels();
+ gray[0] = bgr_to_luma(bgra);
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Rgb<T>> for Luma<T> {
+ fn from_color(&mut self, other: &Rgb<T>) {
+ let gray = self.channels_mut();
+ let rgb = other.channels();
+ gray[0] = rgb_to_luma(rgb);
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgr<T>> for Luma<T> {
+ fn from_color(&mut self, other: &Bgr<T>) {
+ let gray = self.channels_mut();
+ let bgr = other.channels();
+ gray[0] = bgr_to_luma(bgr);
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<LumaA<T>> for Luma<T> {
+ fn from_color(&mut self, other: &LumaA<T>) {
+ self.channels_mut()[0] = other.channels()[0]
+ }
+}
+
+
+impl FromColor<Rgba<u16>> for Luma<u8> {
+ fn from_color(&mut self, other: &Rgba<u16>) {
+ let gray = self.channels_mut();
+ let rgb = other.channels();
+ let l = rgb_to_luma(rgb);
+ gray[0] = downcast_channel(l);
+ }
+}
+
+impl FromColor<Rgb<u16>> for Luma<u8> {
+ fn from_color(&mut self, other: &Rgb<u16>) {
+ let gray = self.channels_mut();
+ let rgb = other.channels();
+ let l = rgb_to_luma(rgb);
+ gray[0] = downcast_channel(l);
+ }
+}
+
+impl FromColor<Luma<u16>> for Luma<u8> {
+ fn from_color(&mut self, other: &Luma<u16>) {
+ let l = other.channels()[0];
+ self.channels_mut()[0] = downcast_channel(l);
+ }
+}
+
+impl FromColor<Luma<u8>> for Luma<u16> {
+ fn from_color(&mut self, other: &Luma<u8>) {
+ let l = other.channels()[0];
+ self.channels_mut()[0] = upcast_channel(l);
+ }
+}
+
+impl FromColor<LumaA<u16>> for Luma<u8> {
+ fn from_color(&mut self, other: &LumaA<u16>) {
+ let l = other.channels()[0];
+ self.channels_mut()[0] = downcast_channel(l);
+ }
+}
+
+
+// `FromColor` for LumaA
+
+impl<T: Primitive + 'static> FromColor<Rgba<T>> for LumaA<T> {
+ fn from_color(&mut self, other: &Rgba<T>) {
+ let gray_a = self.channels_mut();
+ let rgba = other.channels();
+ gray_a[0] = rgb_to_luma(rgba);
+ gray_a[1] = rgba[3];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgra<T>> for LumaA<T> {
+ fn from_color(&mut self, other: &Bgra<T>) {
+ let gray_a = self.channels_mut();
+ let bgra = other.channels();
+ gray_a[0] = bgr_to_luma(bgra);
+ gray_a[1] = bgra[3];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Rgb<T>> for LumaA<T> {
+ fn from_color(&mut self, other: &Rgb<T>) {
+ let gray_a = self.channels_mut();
+ let rgb = other.channels();
+ gray_a[0] = rgb_to_luma(rgb);
+ gray_a[1] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgr<T>> for LumaA<T> {
+ fn from_color(&mut self, other: &Bgr<T>) {
+ let gray_a = self.channels_mut();
+ let bgr = other.channels();
+ gray_a[0] = bgr_to_luma(bgr);
+ gray_a[1] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Luma<T>> for LumaA<T> {
+ fn from_color(&mut self, other: &Luma<T>) {
+ let gray_a = self.channels_mut();
+ gray_a[0] = other.channels()[0];
+ gray_a[1] = T::max_value();
+ }
+}
+
+impl FromColor<LumaA<u16>> for LumaA<u8> {
+ fn from_color(&mut self, other: &LumaA<u16>) {
+ let la8 = self.channels_mut();
+ let gray = other.channels()[0];
+ let alpha = other.channels()[1];
+ la8[0] = downcast_channel(gray);
+ la8[1] = downcast_channel(alpha);
+ }
+}
+
+impl FromColor<LumaA<u8>> for LumaA<u16> {
+ fn from_color(&mut self, other: &LumaA<u8>) {
+ let la8 = self.channels_mut();
+ let gray = other.channels()[0];
+ let alpha = other.channels()[1];
+ la8[0] = upcast_channel(gray);
+ la8[1] = upcast_channel(alpha);
+ }
+}
+
+
+// `FromColor` for RGBA
+
+impl<T: Primitive + 'static> FromColor<Rgb<T>> for Rgba<T> {
+ fn from_color(&mut self, other: &Rgb<T>) {
+ let rgba = self.channels_mut();
+ let rgb = other.channels();
+ rgba[0] = rgb[0];
+ rgba[1] = rgb[1];
+ rgba[2] = rgb[2];
+ rgba[3] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgr<T>> for Rgba<T> {
+ fn from_color(&mut self, other: &Bgr<T>) {
+ let rgba = self.channels_mut();
+ let bgr = other.channels();
+ rgba[0] = bgr[2];
+ rgba[1] = bgr[1];
+ rgba[2] = bgr[0];
+ rgba[3] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgra<T>> for Rgba<T> {
+ fn from_color(&mut self, other: &Bgra<T>) {
+ let rgba = self.channels_mut();
+ let bgra = other.channels();
+ rgba[0] = bgra[2];
+ rgba[1] = bgra[1];
+ rgba[2] = bgra[0];
+ rgba[3] = bgra[3];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<LumaA<T>> for Rgba<T> {
+ fn from_color(&mut self, other: &LumaA<T>) {
+ let rgba = self.channels_mut();
+ let gray = other.channels();
+ rgba[0] = gray[0];
+ rgba[1] = gray[0];
+ rgba[2] = gray[0];
+ rgba[3] = gray[1];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Luma<T>> for Rgba<T> {
+ fn from_color(&mut self, gray: &Luma<T>) {
+ let rgba = self.channels_mut();
+ let gray = gray.channels()[0];
+ rgba[0] = gray;
+ rgba[1] = gray;
+ rgba[2] = gray;
+ rgba[3] = T::max_value();
+ }
+}
+
+impl FromColor<Rgba<u16>> for Rgba<u8> {
+ fn from_color(&mut self, other: &Rgba<u16>) {
+ let rgba = self.channels_mut();
+ let rgba16 = other.channels();
+ rgba[0] = downcast_channel(rgba16[0]);
+ rgba[1] = downcast_channel(rgba16[1]);
+ rgba[2] = downcast_channel(rgba16[2]);
+ rgba[3] = downcast_channel(rgba16[3]);
+ }
+}
+
+impl FromColor<Rgba<u8>> for Rgba<u16> {
+ fn from_color(&mut self, other: &Rgba<u8>) {
+ let rgba = self.channels_mut();
+ let rgba8 = other.channels();
+ rgba[0] = upcast_channel(rgba8[0]);
+ rgba[1] = upcast_channel(rgba8[1]);
+ rgba[2] = upcast_channel(rgba8[2]);
+ rgba[3] = upcast_channel(rgba8[3]);
+ }
+}
+
+
+// `FromColor` for BGRA
+
+impl<T: Primitive + 'static> FromColor<Rgb<T>> for Bgra<T> {
+ fn from_color(&mut self, other: &Rgb<T>) {
+ let bgra = self.channels_mut();
+ let rgb = other.channels();
+ bgra[0] = rgb[2];
+ bgra[1] = rgb[1];
+ bgra[2] = rgb[0];
+ bgra[3] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgr<T>> for Bgra<T> {
+ fn from_color(&mut self, other: &Bgr<T>) {
+ let bgra = self.channels_mut();
+ let bgr = other.channels();
+ bgra[0] = bgr[0];
+ bgra[1] = bgr[1];
+ bgra[2] = bgr[2];
+ bgra[3] = T::max_value();
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Rgba<T>> for Bgra<T> {
+ fn from_color(&mut self, other: &Rgba<T>) {
+ let bgra = self.channels_mut();
+ let rgba = other.channels();
+ bgra[2] = rgba[0];
+ bgra[1] = rgba[1];
+ bgra[0] = rgba[2];
+ bgra[3] = rgba[3];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<LumaA<T>> for Bgra<T> {
+ fn from_color(&mut self, other: &LumaA<T>) {
+ let bgra = self.channels_mut();
+ let gray = other.channels();
+ bgra[0] = gray[0];
+ bgra[1] = gray[0];
+ bgra[2] = gray[0];
+ bgra[3] = gray[1];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Luma<T>> for Bgra<T> {
+ fn from_color(&mut self, gray: &Luma<T>) {
+ let bgra = self.channels_mut();
+ let gray = gray.channels()[0];
+ bgra[0] = gray;
+ bgra[1] = gray;
+ bgra[2] = gray;
+ bgra[3] = T::max_value();
+ }
+}
+
+
+// `FromColor` for RGB
+
+impl<T: Primitive + 'static> FromColor<Rgba<T>> for Rgb<T> {
+ fn from_color(&mut self, other: &Rgba<T>) {
+ let rgb = self.channels_mut();
+ let rgba = other.channels();
+ rgb[0] = rgba[0];
+ rgb[1] = rgba[1];
+ rgb[2] = rgba[2];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgra<T>> for Rgb<T> {
+ fn from_color(&mut self, other: &Bgra<T>) {
+ let rgb = self.channels_mut();
+ let bgra = other.channels();
+ rgb[0] = bgra[2];
+ rgb[1] = bgra[1];
+ rgb[2] = bgra[0];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Bgr<T>> for Rgb<T> {
+ fn from_color(&mut self, other: &Bgr<T>) {
+ let rgb = self.channels_mut();
+ let bgr = other.channels();
+ rgb[0] = bgr[2];
+ rgb[1] = bgr[1];
+ rgb[2] = bgr[0];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<LumaA<T>> for Rgb<T> {
+ fn from_color(&mut self, other: &LumaA<T>) {
+ let rgb = self.channels_mut();
+ let gray = other.channels()[0];
+ rgb[0] = gray;
+ rgb[1] = gray;
+ rgb[2] = gray;
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Luma<T>> for Rgb<T> {
+ fn from_color(&mut self, gray: &Luma<T>) {
+ let rgb = self.channels_mut();
+ let gray = gray.channels()[0];
+ rgb[0] = gray;
+ rgb[1] = gray;
+ rgb[2] = gray;
+ }
+}
+
+impl FromColor<Rgb<u16>> for Rgb<u8> {
+ fn from_color(&mut self, other: &Rgb<u16>) {
+ for (c8, &c16) in self.channels_mut().iter_mut().zip(other.channels()) {
+ *c8 = downcast_channel(c16);
+ }
+ }
+}
+
+impl FromColor<Rgb<u8>> for Rgb<u16> {
+ fn from_color(&mut self, other: &Rgb<u8>) {
+ for (c8, &c16) in self.channels_mut().iter_mut().zip(other.channels()) {
+ *c8 = upcast_channel(c16);
+ }
+ }
+}
+
+
+// `FromColor` for BGR
+
+impl<T: Primitive + 'static> FromColor<Rgba<T>> for Bgr<T> {
+ fn from_color(&mut self, other: &Rgba<T>) {
+ let bgr = self.channels_mut();
+ let rgba = other.channels();
+ bgr[0] = rgba[2];
+ bgr[1] = rgba[1];
+ bgr[2] = rgba[0];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Rgb<T>> for Bgr<T> {
+ fn from_color(&mut self, other: &Rgb<T>) {
+ let bgr = self.channels_mut();
+ let rgb = other.channels();
+ bgr[0] = rgb[2];
+ bgr[1] = rgb[1];
+ bgr[2] = rgb[0];
+ }
+}
+
+
+impl<T: Primitive + 'static> FromColor<Bgra<T>> for Bgr<T> {
+ fn from_color(&mut self, other: &Bgra<T>) {
+ let bgr = self.channels_mut();
+ let bgra = other.channels();
+ bgr[0] = bgra[0];
+ bgr[1] = bgra[1];
+ bgr[2] = bgra[2];
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<LumaA<T>> for Bgr<T> {
+ fn from_color(&mut self, other: &LumaA<T>) {
+ let bgr = self.channels_mut();
+ let gray = other.channels()[0];
+ bgr[0] = gray;
+ bgr[1] = gray;
+ bgr[2] = gray;
+ }
+}
+
+impl<T: Primitive + 'static> FromColor<Luma<T>> for Bgr<T> {
+ fn from_color(&mut self, gray: &Luma<T>) {
+ let bgr = self.channels_mut();
+ let gray = gray.channels()[0];
+ bgr[0] = gray;
+ bgr[1] = gray;
+ bgr[2] = gray;
+ }
+}
+
+macro_rules! downcast_bit_depth_early {
+ ($src:ident, $intermediate:ident, $dst:ident) => {
+ impl FromColor<$src<u16>> for $dst<u8> {
+ fn from_color(&mut self, other: &$src<u16>) {
+ let mut intermediate: $intermediate<u8> = $intermediate([Zero::zero(); <$intermediate<u8> as Pixel>::CHANNEL_COUNT as usize]);
+ intermediate.from_color(other);
+ self.from_color(&intermediate);
+ }
+ }
+ };
+}
+
+
+// Downcasts
+// LumaA
+downcast_bit_depth_early!(Luma, Luma, LumaA);
+downcast_bit_depth_early!(Rgb, Rgb, LumaA);
+downcast_bit_depth_early!(Rgba, Rgba, LumaA);
+// Rgb
+downcast_bit_depth_early!(Luma, Luma, Rgb);
+downcast_bit_depth_early!(LumaA, LumaA, Rgb);
+downcast_bit_depth_early!(Rgba, Rgba, Rgb);
+// Rgba
+downcast_bit_depth_early!(Luma, Luma, Rgba);
+downcast_bit_depth_early!(LumaA, LumaA, Rgba);
+downcast_bit_depth_early!(Rgb, Rgb, Rgba);
+// Bgr
+downcast_bit_depth_early!(Luma, Luma, Bgr);
+downcast_bit_depth_early!(LumaA, LumaA, Bgr);
+downcast_bit_depth_early!(Rgb, Rgb, Bgr);
+downcast_bit_depth_early!(Rgba, Rgba, Bgr);
+// Bgra
+downcast_bit_depth_early!(Luma, Luma, Bgra);
+downcast_bit_depth_early!(LumaA, LumaA, Bgra);
+downcast_bit_depth_early!(Rgb, Rgb, Bgra);
+downcast_bit_depth_early!(Rgba, Rgba, Bgra);
+
+
+/// Blends one color into another one
+pub(crate) trait Blend {
+ /// Blends a color in-place.
+ fn blend(&mut self, other: &Self);
+}
+
+impl<T: Primitive> Blend for LumaA<T> {
+ fn blend(&mut self, other: &LumaA<T>) {
+ let max_t = T::max_value();
+ let max_t = max_t.to_f32().unwrap();
+ let (bg_luma, bg_a) = (self.0[0], self.0[1]);
+ let (fg_luma, fg_a) = (other.0[0], other.0[1]);
+
+ let (bg_luma, bg_a) = (
+ bg_luma.to_f32().unwrap() / max_t,
+ bg_a.to_f32().unwrap() / max_t,
+ );
+ let (fg_luma, fg_a) = (
+ fg_luma.to_f32().unwrap() / max_t,
+ fg_a.to_f32().unwrap() / max_t,
+ );
+
+ let alpha_final = bg_a + fg_a - bg_a * fg_a;
+ if alpha_final == 0.0 {
+ return;
+ };
+ let bg_luma_a = bg_luma * bg_a;
+ let fg_luma_a = fg_luma * fg_a;
+
+ let out_luma_a = fg_luma_a + bg_luma_a * (1.0 - fg_a);
+ let out_luma = out_luma_a / alpha_final;
+
+ *self = LumaA([
+ NumCast::from(max_t * out_luma).unwrap(),
+ NumCast::from(max_t * alpha_final).unwrap(),
+ ])
+ }
+}
+
+impl<T: Primitive> Blend for Luma<T> {
+ fn blend(&mut self, other: &Luma<T>) {
+ *self = *other
+ }
+}
+
+impl<T: Primitive> Blend for Rgba<T> {
+ fn blend(&mut self, other: &Rgba<T>) {
+ // http://stackoverflow.com/questions/7438263/alpha-compositing-algorithm-blend-modes#answer-11163848
+
+ // First, as we don't know what type our pixel is, we have to convert to floats between 0.0 and 1.0
+ let max_t = T::max_value();
+ let max_t = max_t.to_f32().unwrap();
+ let (bg_r, bg_g, bg_b, bg_a) = (self.0[0], self.0[1], self.0[2], self.0[3]);
+ let (fg_r, fg_g, fg_b, fg_a) = (other.0[0], other.0[1], other.0[2], other.0[3]);
+ let (bg_r, bg_g, bg_b, bg_a) = (
+ bg_r.to_f32().unwrap() / max_t,
+ bg_g.to_f32().unwrap() / max_t,
+ bg_b.to_f32().unwrap() / max_t,
+ bg_a.to_f32().unwrap() / max_t,
+ );
+ let (fg_r, fg_g, fg_b, fg_a) = (
+ fg_r.to_f32().unwrap() / max_t,
+ fg_g.to_f32().unwrap() / max_t,
+ fg_b.to_f32().unwrap() / max_t,
+ fg_a.to_f32().unwrap() / max_t,
+ );
+
+ // Work out what the final alpha level will be
+ let alpha_final = bg_a + fg_a - bg_a * fg_a;
+ if alpha_final == 0.0 {
+ return;
+ };
+
+ // We premultiply our channels by their alpha, as this makes it easier to calculate
+ let (bg_r_a, bg_g_a, bg_b_a) = (bg_r * bg_a, bg_g * bg_a, bg_b * bg_a);
+ let (fg_r_a, fg_g_a, fg_b_a) = (fg_r * fg_a, fg_g * fg_a, fg_b * fg_a);
+
+ // Standard formula for src-over alpha compositing
+ let (out_r_a, out_g_a, out_b_a) = (
+ fg_r_a + bg_r_a * (1.0 - fg_a),
+ fg_g_a + bg_g_a * (1.0 - fg_a),
+ fg_b_a + bg_b_a * (1.0 - fg_a),
+ );
+
+ // Unmultiply the channels by our resultant alpha channel
+ let (out_r, out_g, out_b) = (
+ out_r_a / alpha_final,
+ out_g_a / alpha_final,
+ out_b_a / alpha_final,
+ );
+
+ // Cast back to our initial type on return
+ *self = Rgba([
+ NumCast::from(max_t * out_r).unwrap(),
+ NumCast::from(max_t * out_g).unwrap(),
+ NumCast::from(max_t * out_b).unwrap(),
+ NumCast::from(max_t * alpha_final).unwrap(),
+ ])
+ }
+}
+
+
+
+impl<T: Primitive> Blend for Bgra<T> {
+ fn blend(&mut self, other: &Bgra<T>) {
+ // http://stackoverflow.com/questions/7438263/alpha-compositing-algorithm-blend-modes#answer-11163848
+
+ // First, as we don't know what type our pixel is, we have to convert to floats between 0.0 and 1.0
+ let max_t = T::max_value();
+ let max_t = max_t.to_f32().unwrap();
+ let (bg_r, bg_g, bg_b, bg_a) = (self.0[2], self.0[1], self.0[0], self.0[3]);
+ let (fg_r, fg_g, fg_b, fg_a) = (other.0[2], other.0[1], other.0[0], other.0[3]);
+ let (bg_r, bg_g, bg_b, bg_a) = (
+ bg_r.to_f32().unwrap() / max_t,
+ bg_g.to_f32().unwrap() / max_t,
+ bg_b.to_f32().unwrap() / max_t,
+ bg_a.to_f32().unwrap() / max_t,
+ );
+ let (fg_r, fg_g, fg_b, fg_a) = (
+ fg_r.to_f32().unwrap() / max_t,
+ fg_g.to_f32().unwrap() / max_t,
+ fg_b.to_f32().unwrap() / max_t,
+ fg_a.to_f32().unwrap() / max_t,
+ );
+
+ // Work out what the final alpha level will be
+ let alpha_final = bg_a + fg_a - bg_a * fg_a;
+ if alpha_final == 0.0 {
+ return;
+ };
+
+ // We premultiply our channels by their alpha, as this makes it easier to calculate
+ let (bg_r_a, bg_g_a, bg_b_a) = (bg_r * bg_a, bg_g * bg_a, bg_b * bg_a);
+ let (fg_r_a, fg_g_a, fg_b_a) = (fg_r * fg_a, fg_g * fg_a, fg_b * fg_a);
+
+ // Standard formula for src-over alpha compositing
+ let (out_r_a, out_g_a, out_b_a) = (
+ fg_r_a + bg_r_a * (1.0 - fg_a),
+ fg_g_a + bg_g_a * (1.0 - fg_a),
+ fg_b_a + bg_b_a * (1.0 - fg_a),
+ );
+
+ // Unmultiply the channels by our resultant alpha channel
+ let (out_r, out_g, out_b) = (
+ out_r_a / alpha_final,
+ out_g_a / alpha_final,
+ out_b_a / alpha_final,
+ );
+
+ // Cast back to our initial type on return
+ *self = Bgra([
+ NumCast::from(max_t * out_b).unwrap(),
+ NumCast::from(max_t * out_g).unwrap(),
+ NumCast::from(max_t * out_r).unwrap(),
+ NumCast::from(max_t * alpha_final).unwrap(),
+ ])
+ }
+}
+
+impl<T: Primitive> Blend for Rgb<T> {
+ fn blend(&mut self, other: &Rgb<T>) {
+ *self = *other
+ }
+}
+
+impl<T: Primitive> Blend for Bgr<T> {
+ fn blend(&mut self, other: &Bgr<T>) {
+ *self = *other
+ }
+}
+
+
+/// Invert a color
+pub(crate) trait Invert {
+ /// Inverts a color in-place.
+ fn invert(&mut self);
+}
+
+impl<T: Primitive> Invert for LumaA<T> {
+ fn invert(&mut self) {
+ let l = self.0;
+ let max = T::max_value();
+
+ *self = LumaA([max - l[0], l[1]])
+ }
+}
+
+impl<T: Primitive> Invert for Luma<T> {
+ fn invert(&mut self) {
+ let l = self.0;
+
+ let max = T::max_value();
+ let l1 = max - l[0];
+
+ *self = Luma([l1])
+ }
+}
+
+impl<T: Primitive> Invert for Rgba<T> {
+ fn invert(&mut self) {
+ let rgba = self.0;
+
+ let max = T::max_value();
+
+ *self = Rgba([max - rgba[0], max - rgba[1], max - rgba[2], rgba[3]])
+ }
+}
+
+
+impl<T: Primitive> Invert for Bgra<T> {
+ fn invert(&mut self) {
+ let bgra = self.0;
+
+ let max = T::max_value();
+
+ *self = Bgra([max - bgra[2], max - bgra[1], max - bgra[0], bgra[3]])
+ }
+}
+
+
+impl<T: Primitive> Invert for Rgb<T> {
+ fn invert(&mut self) {
+ let rgb = self.0;
+
+ let max = T::max_value();
+
+ let r1 = max - rgb[0];
+ let g1 = max - rgb[1];
+ let b1 = max - rgb[2];
+
+ *self = Rgb([r1, g1, b1])
+ }
+}
+
+impl<T: Primitive> Invert for Bgr<T> {
+ fn invert(&mut self) {
+ let bgr = self.0;
+
+ let max = T::max_value();
+
+ let r1 = max - bgr[2];
+ let g1 = max - bgr[1];
+ let b1 = max - bgr[0];
+
+ *self = Bgr([b1, g1, r1])
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Luma, LumaA, Pixel, Rgb, Rgba, Bgr, Bgra};
+
+ #[test]
+ fn test_apply_with_alpha_rgba() {
+ let mut rgba = Rgba([0, 0, 0, 0]);
+ rgba.apply_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(rgba, Rgba([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_apply_with_alpha_bgra() {
+ let mut bgra = Bgra([0, 0, 0, 0]);
+ bgra.apply_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(bgra, Bgra([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_apply_with_alpha_rgb() {
+ let mut rgb = Rgb([0, 0, 0]);
+ rgb.apply_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(rgb, Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ fn test_apply_with_alpha_bgr() {
+ let mut bgr = Bgr([0, 0, 0]);
+ bgr.apply_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(bgr, Bgr([0, 0, 0]));
+ }
+
+
+ #[test]
+ fn test_map_with_alpha_rgba() {
+ let rgba = Rgba([0, 0, 0, 0]).map_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(rgba, Rgba([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_map_with_alpha_rgb() {
+ let rgb = Rgb([0, 0, 0]).map_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(rgb, Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ fn test_map_with_alpha_bgr() {
+ let bgr = Bgr([0, 0, 0]).map_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(bgr, Bgr([0, 0, 0]));
+ }
+
+
+ #[test]
+ fn test_map_with_alpha_bgra() {
+ let bgra = Bgra([0, 0, 0, 0]).map_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(bgra, Bgra([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_blend_luma_alpha() {
+ let ref mut a = LumaA([255 as u8, 255]);
+ let b = LumaA([255 as u8, 255]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 0]);
+ let b = LumaA([255 as u8, 255]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 255]);
+ let b = LumaA([255 as u8, 0]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 0]);
+ let b = LumaA([255 as u8, 0]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 0);
+ }
+
+ #[test]
+ fn test_blend_rgba() {
+ let ref mut a = Rgba([255 as u8, 255, 255, 255]);
+ let b = Rgba([255 as u8, 255, 255, 255]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 0]);
+ let b = Rgba([255 as u8, 255, 255, 255]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 255]);
+ let b = Rgba([255 as u8, 255, 255, 0]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 0]);
+ let b = Rgba([255 as u8, 255, 255, 0]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 0]);
+ }
+
+ #[test]
+ fn test_apply_without_alpha_rgba() {
+ let mut rgba = Rgba([0, 0, 0, 0]);
+ rgba.apply_without_alpha(|s| s + 1);
+ assert_eq!(rgba, Rgba([1, 1, 1, 0]));
+ }
+
+ #[test]
+ fn test_apply_without_alpha_bgra() {
+ let mut bgra = Bgra([0, 0, 0, 0]);
+ bgra.apply_without_alpha(|s| s + 1);
+ assert_eq!(bgra, Bgra([1, 1, 1, 0]));
+ }
+
+ #[test]
+ fn test_apply_without_alpha_rgb() {
+ let mut rgb = Rgb([0, 0, 0]);
+ rgb.apply_without_alpha(|s| s + 1);
+ assert_eq!(rgb, Rgb([1, 1, 1]));
+ }
+
+ #[test]
+ fn test_apply_without_alpha_bgr() {
+ let mut bgr = Bgr([0, 0, 0]);
+ bgr.apply_without_alpha(|s| s + 1);
+ assert_eq!(bgr, Bgr([1, 1, 1]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_rgba() {
+ let rgba = Rgba([0, 0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(rgba, Rgba([1, 1, 1, 0]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_rgb() {
+ let rgb = Rgb([0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(rgb, Rgb([1, 1, 1]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_bgr() {
+ let bgr = Bgr([0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(bgr, Bgr([1, 1, 1]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_bgra() {
+ let bgra = Bgra([0, 0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(bgra, Bgra([1, 1, 1, 0]));
+ }
+
+ macro_rules! test_lossless_conversion {
+ ($a:ty, $b:ty, $c:ty) => {
+ let a: $a = [<$a as Pixel>::Subpixel::max_value() >> 2; <$a as Pixel>::CHANNEL_COUNT as usize].into();
+ let b: $b = a.into_color();
+ let c: $c = b.into_color();
+ assert_eq!(a.channels(), c.channels());
+ };
+ }
+
+ #[test]
+ fn test_lossless_conversions() {
+ use super::IntoColor;
+
+ test_lossless_conversion!(Bgr<u8>, Rgba<u8>, Bgr<u8>);
+ test_lossless_conversion!(Bgra<u8>, Rgba<u8>, Bgra<u8>);
+ test_lossless_conversion!(Luma<u8>, Luma<u16>, Luma<u8>);
+ test_lossless_conversion!(LumaA<u8>, LumaA<u16>, LumaA<u8>);
+ test_lossless_conversion!(Rgb<u8>, Rgb<u16>, Rgb<u8>);
+ test_lossless_conversion!(Rgba<u8>, Rgba<u16>, Rgba<u8>);
+ }
+}
diff --git a/third_party/rust/image/src/dds.rs b/third_party/rust/image/src/dds.rs
new file mode 100644
index 0000000000..09071bd31f
--- /dev/null
+++ b/third_party/rust/image/src/dds.rs
@@ -0,0 +1,170 @@
+//! Decoding of DDS images
+//!
+//! DDS (DirectDraw Surface) is a container format for storing DXT (S3TC) compressed images.
+//!
+//! # Related Links
+//! * <https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dx-graphics-dds-pguide> - Description of the DDS format.
+
+use std::io::Read;
+
+use byteorder::{LittleEndian, ReadBytesExt};
+
+use crate::color::ColorType;
+use crate::dxt::{DxtDecoder, DXTReader, DXTVariant};
+use crate::error::{ImageError, ImageResult};
+use crate::image::ImageDecoder;
+
+
+/// Header used by DDS image files
+#[derive(Debug)]
+struct Header {
+ flags: u32,
+ height: u32,
+ width: u32,
+ pitch_or_linear_size: u32,
+ depth: u32,
+ mipmap_count: u32,
+ pixel_format: PixelFormat,
+ caps: u32,
+ caps2: u32,
+}
+
+/// DDS pixel format
+#[derive(Debug)]
+struct PixelFormat {
+ flags: u32,
+ fourcc: [u8; 4],
+ rgb_bit_count: u32,
+ r_bit_mask: u32,
+ g_bit_mask: u32,
+ b_bit_mask: u32,
+ a_bit_mask: u32,
+}
+
+impl PixelFormat {
+ fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ let size = r.read_u32::<LittleEndian>()?;
+ if size != 32 {
+ return Err(ImageError::FormatError("Invalid DDS PixelFormat size".to_string()))
+ }
+
+ Ok(Self {
+ flags: r.read_u32::<LittleEndian>()?,
+ fourcc: {
+ let mut v = [0; 4];
+ r.read_exact(&mut v)?;
+ v
+ },
+ rgb_bit_count: r.read_u32::<LittleEndian>()?,
+ r_bit_mask: r.read_u32::<LittleEndian>()?,
+ g_bit_mask: r.read_u32::<LittleEndian>()?,
+ b_bit_mask: r.read_u32::<LittleEndian>()?,
+ a_bit_mask: r.read_u32::<LittleEndian>()?,
+ })
+ }
+}
+
+impl Header {
+ fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ let size = r.read_u32::<LittleEndian>()?;
+ if size != 124 {
+ return Err(ImageError::FormatError("Invalid DDS header size".to_string()))
+ }
+
+ const REQUIRED_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x1000;
+ const VALID_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x8 | 0x1000 | 0x20000 | 0x80000 | 0x800000;
+ let flags = r.read_u32::<LittleEndian>()?;
+ if flags & (REQUIRED_FLAGS | !VALID_FLAGS) != REQUIRED_FLAGS {
+ return Err(ImageError::FormatError("Invalid DDS header flags".to_string()))
+ }
+
+ let height = r.read_u32::<LittleEndian>()?;
+ let width = r.read_u32::<LittleEndian>()?;
+ let pitch_or_linear_size = r.read_u32::<LittleEndian>()?;
+ let depth = r.read_u32::<LittleEndian>()?;
+ let mipmap_count = r.read_u32::<LittleEndian>()?;
+ // Skip `dwReserved1`
+ {
+ let mut skipped = [0; 4 * 11];
+ r.read_exact(&mut skipped)?;
+ }
+ let pixel_format = PixelFormat::from_reader(r)?;
+ let caps = r.read_u32::<LittleEndian>()?;
+ let caps2 = r.read_u32::<LittleEndian>()?;
+ // Skip `dwCaps3`, `dwCaps4`, `dwReserved2` (unused)
+ {
+ let mut skipped = [0; 4 + 4 + 4];
+ r.read_exact(&mut skipped)?;
+ }
+
+ Ok(Self {
+ flags,
+ height,
+ width,
+ pitch_or_linear_size,
+ depth,
+ mipmap_count,
+ pixel_format,
+ caps,
+ caps2,
+ })
+ }
+}
+
+
+/// The representation of a DDS decoder
+pub struct DdsDecoder<R: Read> {
+ inner: DxtDecoder<R>,
+}
+
+impl<R: Read> DdsDecoder<R> {
+ /// Create a new decoder that decodes from the stream `r`
+ pub fn new(mut r: R) -> ImageResult<Self> {
+ let mut magic = [0; 4];
+ r.read_exact(&mut magic)?;
+ if magic != b"DDS "[..] {
+ return Err(ImageError::FormatError("DDS signature not found".to_string()))
+ }
+
+ let header = Header::from_reader(&mut r)?;
+
+ if header.pixel_format.flags & 0x4 != 0 {
+ let variant = match &header.pixel_format.fourcc {
+ b"DXT1" => DXTVariant::DXT1,
+ b"DXT3" => DXTVariant::DXT3,
+ b"DXT5" => DXTVariant::DXT5,
+ _ => return Err(ImageError::FormatError("Unsupported DDS FourCC".to_string())),
+ };
+ let inner = DxtDecoder::new(r, header.width, header.height, variant)?;
+ Ok(Self { inner })
+ } else {
+ // For now, supports only DXT variants
+ Err(ImageError::FormatError("DDS format not supported".to_string()))
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for DdsDecoder<R> {
+ type Reader = DXTReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ self.inner.dimensions()
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.inner.color_type()
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ self.inner.scanline_bytes()
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ self.inner.into_reader()
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ self.inner.read_image(buf)
+ }
+}
+
diff --git a/third_party/rust/image/src/dxt.rs b/third_party/rust/image/src/dxt.rs
new file mode 100644
index 0000000000..a76a10f6db
--- /dev/null
+++ b/third_party/rust/image/src/dxt.rs
@@ -0,0 +1,806 @@
+//! Decoding of DXT (S3TC) compression
+//!
+//! DXT is an image format that supports lossy compression
+//!
+//! # Related Links
+//! * <https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_compression_s3tc.txt> - Description of the DXT compression OpenGL extensions.
+//!
+//! Note: this module only implements bare DXT encoding/decoding, it does not parse formats that can contain DXT files like .dds
+
+use std::convert::TryFrom;
+use std::io::{self, Read, Seek, SeekFrom, Write};
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, ImageDecoder, ImageDecoderExt, ImageReadBuffer, Progress};
+
+/// What version of DXT compression are we using?
+/// Note that DXT2 and DXT4 are left away as they're
+/// just DXT3 and DXT5 with premultiplied alpha
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DXTVariant {
+ /// The DXT1 format. 48 bytes of RGB data in a 4x4 pixel square is
+ /// compressed into an 8 byte block of DXT1 data
+ DXT1,
+ /// The DXT3 format. 64 bytes of RGBA data in a 4x4 pixel square is
+ /// compressed into a 16 byte block of DXT3 data
+ DXT3,
+ /// The DXT5 format. 64 bytes of RGBA data in a 4x4 pixel square is
+ /// compressed into a 16 byte block of DXT5 data
+ DXT5,
+}
+
+impl DXTVariant {
+ /// Returns the amount of bytes of raw image data
+ /// that is encoded in a single DXTn block
+ fn decoded_bytes_per_block(self) -> usize {
+ match self {
+ DXTVariant::DXT1 => 48,
+ DXTVariant::DXT3 | DXTVariant::DXT5 => 64,
+ }
+ }
+
+ /// Returns the amount of bytes per block of encoded DXTn data
+ fn encoded_bytes_per_block(self) -> usize {
+ match self {
+ DXTVariant::DXT1 => 8,
+ DXTVariant::DXT3 | DXTVariant::DXT5 => 16,
+ }
+ }
+
+ /// Returns the color type that is stored in this DXT variant
+ pub fn color_type(self) -> ColorType {
+ match self {
+ DXTVariant::DXT1 => ColorType::Rgb8,
+ DXTVariant::DXT3 | DXTVariant::DXT5 => ColorType::Rgba8,
+ }
+ }
+}
+
+/// DXT decoder
+pub struct DxtDecoder<R: Read> {
+ inner: R,
+ width_blocks: u32,
+ height_blocks: u32,
+ variant: DXTVariant,
+ row: u32,
+}
+
+impl<R: Read> DxtDecoder<R> {
+ /// Create a new DXT decoder that decodes from the stream ```r```.
+ /// As DXT is often stored as raw buffers with the width/height
+ /// somewhere else the width and height of the image need
+ /// to be passed in ```width``` and ```height```, as well as the
+ /// DXT variant in ```variant```.
+ /// width and height are required to be powers of 2 and at least 4.
+ /// otherwise an error will be returned
+ pub fn new(
+ r: R,
+ width: u32,
+ height: u32,
+ variant: DXTVariant,
+ ) -> Result<DxtDecoder<R>, ImageError> {
+ if width % 4 != 0 || height % 4 != 0 {
+ return Err(ImageError::DimensionError);
+ }
+ let width_blocks = width / 4;
+ let height_blocks = height / 4;
+ Ok(DxtDecoder {
+ inner: r,
+ width_blocks,
+ height_blocks,
+ variant,
+ row: 0,
+ })
+ }
+
+ fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.scanline_bytes()));
+
+ let mut src =
+ vec![0u8; self.variant.encoded_bytes_per_block() * self.width_blocks as usize];
+ self.inner.read_exact(&mut src)?;
+ match self.variant {
+ DXTVariant::DXT1 => decode_dxt1_row(&src, buf),
+ DXTVariant::DXT3 => decode_dxt3_row(&src, buf),
+ DXTVariant::DXT5 => decode_dxt5_row(&src, buf),
+ }
+ self.row += 1;
+ Ok(buf.len())
+ }
+}
+
+// Note that, due to the way that DXT compression works, a scanline is considered to consist out of
+// 4 lines of pixels.
+impl<'a, R: 'a + Read> ImageDecoder<'a> for DxtDecoder<R> {
+ type Reader = DXTReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.width_blocks * 4, self.height_blocks * 4)
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.variant.color_type()
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ self.variant.decoded_bytes_per_block() as u64 * u64::from(self.width_blocks)
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(DXTReader {
+ buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()),
+ decoder: self,
+ })
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ for chunk in buf.chunks_mut(self.scanline_bytes() as usize) {
+ self.read_scanline(chunk)?;
+ }
+ Ok(())
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoderExt<'a> for DxtDecoder<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ let encoded_scanline_bytes = self.variant.encoded_bytes_per_block() as u64
+ * u64::from(self.width_blocks);
+
+ let start = self.inner.seek(SeekFrom::Current(0))?;
+ image::load_rect(x, y, width, height, buf, progress_callback, self,
+ |s, scanline| {
+ s.inner.seek(SeekFrom::Start(start + scanline * encoded_scanline_bytes))?;
+ Ok(())
+ },
+ |s, buf| s.read_scanline(buf))?;
+ self.inner.seek(SeekFrom::Start(start))?;
+ Ok(())
+ }
+}
+
+/// DXT reader
+pub struct DXTReader<R: Read> {
+ buffer: ImageReadBuffer,
+ decoder: DxtDecoder<R>,
+}
+impl<R: Read> Read for DXTReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let decoder = &mut self.decoder;
+ self.buffer.read(buf, |buf| decoder.read_scanline(buf))
+ }
+}
+
+/// DXT encoder
+pub struct DXTEncoder<W: Write> {
+ w: W,
+}
+
+impl<W: Write> DXTEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```
+ pub fn new(w: W) -> DXTEncoder<W> {
+ DXTEncoder { w }
+ }
+
+ /// Encodes the image data ```data```
+ /// that has dimensions ```width``` and ```height```
+ /// in ```DXTVariant``` ```variant```
+ /// data is assumed to be in variant.color_type()
+ pub fn encode(
+ mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ variant: DXTVariant,
+ ) -> ImageResult<()> {
+ if width % 4 != 0 || height % 4 != 0 {
+ return Err(ImageError::DimensionError);
+ }
+ let width_blocks = width / 4;
+ let height_blocks = height / 4;
+
+ let stride = variant.decoded_bytes_per_block();
+
+ assert!(data.len() >= width_blocks as usize * height_blocks as usize * stride);
+
+ for chunk in data.chunks(width_blocks as usize * stride) {
+ let data = match variant {
+ DXTVariant::DXT1 => encode_dxt1_row(chunk),
+ DXTVariant::DXT3 => encode_dxt3_row(chunk),
+ DXTVariant::DXT5 => encode_dxt5_row(chunk),
+ };
+ self.w.write_all(&data)?;
+ }
+ Ok(())
+ }
+}
+
+/**
+ * Actual encoding/decoding logic below.
+ */
+use std::mem::swap;
+
+type Rgb = [u8; 3];
+
+/// decodes a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value into 8-bit RGB
+/// mapping is done so min/max range values are preserved. So for 5-bit
+/// values 0x00 -> 0x00 and 0x1F -> 0xFF
+fn enc565_decode(value: u16) -> Rgb {
+ let red = (value >> 11) & 0x1F;
+ let green = (value >> 5) & 0x3F;
+ let blue = (value) & 0x1F;
+ [
+ (red * 0xFF / 0x1F) as u8,
+ (green * 0xFF / 0x3F) as u8,
+ (blue * 0xFF / 0x1F) as u8,
+ ]
+}
+
+/// encodes an 8-bit RGB value into a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value
+/// mapping preserves min/max values. It is guaranteed that i == encode(decode(i)) for all i
+fn enc565_encode(rgb: Rgb) -> u16 {
+ let red = (u16::from(rgb[0]) * 0x1F + 0x7E) / 0xFF;
+ let green = (u16::from(rgb[1]) * 0x3F + 0x7E) / 0xFF;
+ let blue = (u16::from(rgb[2]) * 0x1F + 0x7E) / 0xFF;
+ (red << 11) | (green << 5) | blue
+}
+
+/// utility function: squares a value
+fn square(a: i32) -> i32 {
+ a * a
+}
+
+/// returns the squared error between two RGB values
+fn diff(a: Rgb, b: Rgb) -> i32 {
+ square(i32::from(a[0]) - i32::from(b[0])) + square(i32::from(a[1]) - i32::from(b[1]))
+ + square(i32::from(a[2]) - i32::from(b[2]))
+}
+
+/*
+ * Functions for decoding DXT compression
+ */
+
+/// Constructs the DXT5 alpha lookup table from the two alpha entries
+/// if alpha0 > alpha1, constructs a table of [a0, a1, 6 linearly interpolated values from a0 to a1]
+/// if alpha0 <= alpha1, constructs a table of [a0, a1, 4 linearly interpolated values from a0 to a1, 0, 0xFF]
+fn alpha_table_dxt5(alpha0: u8, alpha1: u8) -> [u8; 8] {
+ let mut table = [alpha0, alpha1, 0, 0, 0, 0, 0, 0xFF];
+ if alpha0 > alpha1 {
+ for i in 2..8u16 {
+ table[i as usize] =
+ (((8 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 7) as u8;
+ }
+ } else {
+ for i in 2..6u16 {
+ table[i as usize] =
+ (((6 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 5) as u8;
+ }
+ }
+ table
+}
+
+/// decodes an 8-byte dxt color block into the RGB channels of a 16xRGB or 16xRGBA block.
+/// source should have a length of 8, dest a length of 48 (RGB) or 64 (RGBA)
+fn decode_dxt_colors(source: &[u8], dest: &mut [u8]) {
+ // sanity checks, also enable the compiler to elide all following bound checks
+ assert!(source.len() == 8 && (dest.len() == 48 || dest.len() == 64));
+ // calculate pitch to store RGB values in dest (3 for RGB, 4 for RGBA)
+ let pitch = dest.len() / 16;
+
+ // extract color data
+ let color0 = u16::from(source[0]) | (u16::from(source[1]) << 8);
+ let color1 = u16::from(source[2]) | (u16::from(source[3]) << 8);
+ let color_table = u32::from(source[4]) | (u32::from(source[5]) << 8)
+ | (u32::from(source[6]) << 16) | (u32::from(source[7]) << 24);
+ // let color_table = source[4..8].iter().rev().fold(0, |t, &b| (t << 8) | b as u32);
+
+ // decode the colors to rgb format
+ let mut colors = [[0; 3]; 4];
+ colors[0] = enc565_decode(color0);
+ colors[1] = enc565_decode(color1);
+
+ // determine color interpolation method
+ if color0 > color1 {
+ // linearly interpolate the other two color table entries
+ for i in 0..3 {
+ colors[2][i] = ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8;
+ colors[3][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8;
+ }
+ } else {
+ // linearly interpolate one other entry, keep the other at 0
+ for i in 0..3 {
+ colors[2][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8;
+ }
+ }
+
+ // serialize the result. Every color is determined by looking up
+ // two bits in color_table which identify which color to actually pick from the 4 possible colors
+ for i in 0..16 {
+ dest[i * pitch..i * pitch + 3]
+ .copy_from_slice(&colors[(color_table >> (i * 2)) as usize & 3]);
+ }
+}
+
+/// Decodes a 16-byte bock of dxt5 data to a 16xRGBA block
+fn decode_dxt5_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 16 && dest.len() == 64);
+
+ // extract alpha index table (stored as little endian 64-bit value)
+ let alpha_table = source[2..8]
+ .iter()
+ .rev()
+ .fold(0, |t, &b| (t << 8) | u64::from(b));
+
+ // alhpa level decode
+ let alphas = alpha_table_dxt5(source[0], source[1]);
+
+ // serialize alpha
+ for i in 0..16 {
+ dest[i * 4 + 3] = alphas[(alpha_table >> (i * 3)) as usize & 7];
+ }
+
+ // handle colors
+ decode_dxt_colors(&source[8..16], dest);
+}
+
+/// Decodes a 16-byte bock of dxt3 data to a 16xRGBA block
+fn decode_dxt3_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 16 && dest.len() == 64);
+
+ // extract alpha index table (stored as little endian 64-bit value)
+ let alpha_table = source[0..8]
+ .iter()
+ .rev()
+ .fold(0, |t, &b| (t << 8) | u64::from(b));
+
+ // serialize alpha (stored as 4-bit values)
+ for i in 0..16 {
+ dest[i * 4 + 3] = ((alpha_table >> (i * 4)) as u8 & 0xF) * 0x11;
+ }
+
+ // handle colors
+ decode_dxt_colors(&source[8..16], dest);
+}
+
+/// Decodes a 8-byte bock of dxt5 data to a 16xRGB block
+fn decode_dxt1_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 8 && dest.len() == 48);
+ decode_dxt_colors(&source, dest);
+}
+
+/// Decode a row of DXT1 data to four rows of RGBA data.
+/// source.len() should be a multiple of 8, otherwise this panics.
+fn decode_dxt1_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 8 == 0);
+ let block_count = source.len() / 8;
+ assert!(dest.len() >= block_count * 48);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 48];
+
+ for (x, encoded_block) in source.chunks(8).enumerate() {
+ decode_dxt1_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 12;
+ dest[offset..offset + 12].copy_from_slice(&decoded_block[line * 12..(line + 1) * 12]);
+ }
+ }
+}
+
+/// Decode a row of DXT3 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn decode_dxt3_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 16 == 0);
+ let block_count = source.len() / 16;
+ assert!(dest.len() >= block_count * 64);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in source.chunks(16).enumerate() {
+ decode_dxt3_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
+ }
+ }
+}
+
+/// Decode a row of DXT5 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn decode_dxt5_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 16 == 0);
+ let block_count = source.len() / 16;
+ assert!(dest.len() >= block_count * 64);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in source.chunks(16).enumerate() {
+ decode_dxt5_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
+ }
+ }
+}
+
+/*
+ * Functions for encoding DXT compression
+ */
+
+/// Tries to perform the color encoding part of dxt compression
+/// the approach taken is simple, it picks unique combinations
+/// of the colors present in the block, and attempts to encode the
+/// block with each, picking the encoding that yields the least
+/// squared error out of all of them.
+///
+/// This could probably be faster but is already reasonably fast
+/// and a good reference impl to optimize others against.
+///
+/// Another way to perform this analysis would be to perform a
+/// singular value decomposition of the different colors, and
+/// then pick 2 points on this line as the base colors. But
+/// this is still rather unwieldly math and has issues
+/// with the 3-linear-colors-and-0 case, it's also worse
+/// at conserving the original colors.
+///
+/// source: should be RGBAx16 or RGBx16 bytes of data,
+/// dest 8 bytes of resulting encoded color data
+fn encode_dxt_colors(source: &[u8], dest: &mut [u8]) {
+ // sanity checks and determine stride when parsing the source data
+ assert!((source.len() == 64 || source.len() == 48) && dest.len() == 8);
+ let stride = source.len() / 16;
+
+ // reference colors array
+ let mut colors = [[0u8; 3]; 4];
+
+ // Put the colors we're going to be processing in an array with pure RGB layout
+ // note: we reverse the pixel order here. The reason for this is found in the inner quantization loop.
+ let mut targets = [[0u8; 3]; 16];
+ for (s, d) in source.chunks(stride).rev().zip(&mut targets) {
+ *d = [s[0], s[1], s[2]];
+ }
+
+ // and a set of colors to pick from.
+ let mut colorspace = targets.to_vec();
+
+ // roundtrip all colors through the r5g6b5 encoding
+ for rgb in &mut colorspace {
+ *rgb = enc565_decode(enc565_encode(*rgb));
+ }
+
+ // and deduplicate the set of colors to choose from as the algorithm is O(N^2) in this
+ colorspace.dedup();
+
+ // in case of slight gradients it can happen that there's only one entry left in the color table.
+ // as the resulting banding can be quite bad if we would just left the block at the closest
+ // encodable color, we have a special path here that tries to emulate the wanted color
+ // using the linear interpolation between gradients
+ if colorspace.len() == 1 {
+ // the base color we got from colorspace reduction
+ let ref_rgb = colorspace[0];
+ // the unreduced color in this block that's the furthest away from the actual block
+ let mut rgb = targets
+ .iter()
+ .cloned()
+ .max_by_key(|rgb| diff(*rgb, ref_rgb))
+ .unwrap();
+ // amplify differences by 2.5, which should push them to the next quantized value
+ // if possible without overshoot
+ for i in 0..3 {
+ rgb[i] =
+ ((i16::from(rgb[i]) - i16::from(ref_rgb[i])) * 5 / 2 + i16::from(ref_rgb[i])) as u8;
+ }
+
+ // roundtrip it through quantization
+ let encoded = enc565_encode(rgb);
+ let rgb = enc565_decode(encoded);
+
+ // in case this didn't land us a different color the best way to represent this field is
+ // as a single color block
+ if rgb == ref_rgb {
+ dest[0] = encoded as u8;
+ dest[1] = (encoded >> 8) as u8;
+
+ for d in dest.iter_mut().take(8).skip(2) {
+ *d = 0;
+ }
+ return;
+ }
+
+ // we did find a separate value: add it to the options so after one round of quantization
+ // we're done
+ colorspace.push(rgb);
+ }
+
+ // block quantization loop: we basically just try every possible combination, returning
+ // the combination with the least squared error
+ // stores the best candidate colors
+ let mut chosen_colors = [[0; 3]; 4];
+ // did this index table use the [0,0,0] variant
+ let mut chosen_use_0 = false;
+ // error calculated for the last entry
+ let mut chosen_error = 0xFFFF_FFFFu32;
+
+ // loop through unique permutations of the colorspace, where c1 != c2
+ 'search: for (i, &c1) in colorspace.iter().enumerate() {
+ colors[0] = c1;
+
+ for &c2 in &colorspace[0..i] {
+ colors[1] = c2;
+
+ // what's inside here is ran at most 120 times.
+ for use_0 in 0..2 {
+ // and 240 times here.
+
+ if use_0 != 0 {
+ // interpolate one color, set the other to 0
+ for i in 0..3 {
+ colors[2][i] =
+ ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8;
+ }
+ colors[3] = [0, 0, 0];
+ } else {
+ // interpolate to get 2 more colors
+ for i in 0..3 {
+ colors[2][i] =
+ ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8;
+ colors[3][i] =
+ ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8;
+ }
+ }
+
+ // calculate the total error if we were to quantize the block with these color combinations
+ // both these loops have statically known iteration counts and are well vectorizable
+ // note that the inside of this can be run about 15360 times worst case, i.e. 960 times per
+ // pixel.
+ let total_error = targets
+ .iter()
+ .map(|t| colors.iter().map(|c| diff(*c, *t) as u32).min().unwrap())
+ .sum();
+
+ // update the match if we found a better one
+ if total_error < chosen_error {
+ chosen_colors = colors;
+ chosen_use_0 = use_0 != 0;
+ chosen_error = total_error;
+
+ // if we've got a perfect or at most 1 LSB off match, we're done
+ if total_error < 4 {
+ break 'search;
+ }
+ }
+ }
+ }
+ }
+
+ // calculate the final indices
+ // note that targets is already in reverse pixel order, to make the index computation easy.
+ let mut chosen_indices = 0u32;
+ for t in &targets {
+ let (idx, _) = chosen_colors
+ .iter()
+ .enumerate()
+ .min_by_key(|&(_, c)| diff(*c, *t))
+ .unwrap();
+ chosen_indices = (chosen_indices << 2) | idx as u32;
+ }
+
+ // encode the colors
+ let mut color0 = enc565_encode(chosen_colors[0]);
+ let mut color1 = enc565_encode(chosen_colors[1]);
+
+ // determine encoding. Note that color0 == color1 is impossible at this point
+ if color0 > color1 {
+ if chosen_use_0 {
+ swap(&mut color0, &mut color1);
+ // Indexes are packed 2 bits wide, swap index 0/1 but preserve 2/3.
+ let filter = (chosen_indices & 0xAAAA_AAAA) >> 1;
+ chosen_indices ^= filter ^ 0x5555_5555;
+ }
+ } else if !chosen_use_0 {
+ swap(&mut color0, &mut color1);
+ // Indexes are packed 2 bits wide, swap index 0/1 and 2/3.
+ chosen_indices ^= 0x5555_5555;
+ }
+
+ // encode everything.
+ dest[0] = color0 as u8;
+ dest[1] = (color0 >> 8) as u8;
+ dest[2] = color1 as u8;
+ dest[3] = (color1 >> 8) as u8;
+ for i in 0..4 {
+ dest[i + 4] = (chosen_indices >> (i * 8)) as u8;
+ }
+}
+
+/// Encodes a buffer of 16 alpha bytes into a dxt5 alpha index table,
+/// where the alpha table they are indexed against is created by
+/// calling alpha_table_dxt5(alpha0, alpha1)
+/// returns the resulting error and alpha table
+fn encode_dxt5_alpha(alpha0: u8, alpha1: u8, alphas: &[u8; 16]) -> (i32, u64) {
+ // create a table for the given alpha ranges
+ let table = alpha_table_dxt5(alpha0, alpha1);
+ let mut indices = 0u64;
+ let mut total_error = 0i32;
+
+ // least error brute force search
+ for (i, &a) in alphas.iter().enumerate() {
+ let (index, error) = table
+ .iter()
+ .enumerate()
+ .map(|(i, &e)| (i, square(i32::from(e) - i32::from(a))))
+ .min_by_key(|&(_, e)| e)
+ .unwrap();
+ total_error += error;
+ indices |= (index as u64) << (i * 3);
+ }
+
+ (total_error, indices)
+}
+
+/// Encodes a RGBAx16 sequence of bytes to a 16 bytes DXT5 block
+fn encode_dxt5_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 64 && dest.len() == 16);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, &mut dest[8..16]);
+
+ // copy out the alpha bytes
+ let mut alphas = [0; 16];
+ for i in 0..16 {
+ alphas[i] = source[i * 4 + 3];
+ }
+
+ // try both alpha compression methods, see which has the least error.
+ let alpha07 = alphas.iter().cloned().min().unwrap();
+ let alpha17 = alphas.iter().cloned().max().unwrap();
+ let (error7, indices7) = encode_dxt5_alpha(alpha07, alpha17, &alphas);
+
+ // if all alphas are 0 or 255 it doesn't particularly matter what we do here.
+ let alpha05 = alphas
+ .iter()
+ .cloned()
+ .filter(|&i| i != 255)
+ .max()
+ .unwrap_or(255);
+ let alpha15 = alphas
+ .iter()
+ .cloned()
+ .filter(|&i| i != 0)
+ .min()
+ .unwrap_or(0);
+ let (error5, indices5) = encode_dxt5_alpha(alpha05, alpha15, &alphas);
+
+ // pick the best one, encode the min/max values
+ let mut alpha_table = if error5 < error7 {
+ dest[0] = alpha05;
+ dest[1] = alpha15;
+ indices5
+ } else {
+ dest[0] = alpha07;
+ dest[1] = alpha17;
+ indices7
+ };
+
+ // encode the alphas
+ for byte in dest[2..8].iter_mut() {
+ *byte = alpha_table as u8;
+ alpha_table >>= 8;
+ }
+}
+
+/// Encodes a RGBAx16 sequence of bytes into a 16 bytes DXT3 block
+fn encode_dxt3_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 64 && dest.len() == 16);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, &mut dest[8..16]);
+
+ // DXT3 alpha compression is very simple, just round towards the nearest value
+
+ // index the alpha values into the 64bit alpha table
+ let mut alpha_table = 0u64;
+ for i in 0..16 {
+ let alpha = u64::from(source[i * 4 + 3]);
+ let alpha = (alpha + 0x8) / 0x11;
+ alpha_table |= alpha << (i * 4);
+ }
+
+ // encode the alpha values
+ for byte in &mut dest[0..8] {
+ *byte = alpha_table as u8;
+ alpha_table >>= 8;
+ }
+}
+
+/// Encodes a RGBx16 sequence of bytes into a 8 bytes DXT1 block
+fn encode_dxt1_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 48 && dest.len() == 8);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, dest);
+}
+
+/// Decode a row of DXT1 data to four rows of RGBA data.
+/// source.len() should be a multiple of 8, otherwise this panics.
+fn encode_dxt1_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 48 == 0);
+ let block_count = source.len() / 48;
+
+ let mut dest = vec![0u8; block_count * 8];
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 48];
+
+ for (x, encoded_block) in dest.chunks_mut(8).enumerate() {
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 12;
+ decoded_block[line * 12..(line + 1) * 12].copy_from_slice(&source[offset..offset + 12]);
+ }
+
+ encode_dxt1_block(&decoded_block, encoded_block);
+ }
+ dest
+}
+
+/// Decode a row of DXT3 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn encode_dxt3_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 64 == 0);
+ let block_count = source.len() / 64;
+
+ let mut dest = vec![0u8; block_count * 16];
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in dest.chunks_mut(16).enumerate() {
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]);
+ }
+
+ encode_dxt3_block(&decoded_block, encoded_block);
+ }
+ dest
+}
+
+/// Decode a row of DXT5 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn encode_dxt5_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 64 == 0);
+ let block_count = source.len() / 64;
+
+ let mut dest = vec![0u8; block_count * 16];
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in dest.chunks_mut(16).enumerate() {
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]);
+ }
+
+ encode_dxt5_block(&decoded_block, encoded_block);
+ }
+ dest
+}
diff --git a/third_party/rust/image/src/dynimage.rs b/third_party/rust/image/src/dynimage.rs
new file mode 100644
index 0000000000..141de48563
--- /dev/null
+++ b/third_party/rust/image/src/dynimage.rs
@@ -0,0 +1,1157 @@
+use std::io;
+use std::io::Write;
+use std::path::Path;
+use std::u32;
+
+#[cfg(feature = "bmp")]
+use crate::bmp;
+#[cfg(feature = "gif")]
+use crate::gif;
+#[cfg(feature = "ico")]
+use crate::ico;
+#[cfg(feature = "jpeg")]
+use crate::jpeg;
+#[cfg(feature = "png")]
+use crate::png;
+#[cfg(feature = "pnm")]
+use crate::pnm;
+
+use crate::buffer::{
+ BgrImage, BgraImage, ConvertBuffer, GrayAlphaImage, GrayAlpha16Image,
+ GrayImage, Gray16Image, ImageBuffer, Pixel, RgbImage, Rgb16Image,
+ RgbaImage, Rgba16Image,
+};
+use crate::color::{self, IntoColor};
+use crate::error::{ImageError, ImageResult};
+use crate::flat::FlatSamples;
+use crate::image;
+use crate::image::{GenericImage, GenericImageView, ImageDecoder, ImageFormat, ImageOutputFormat};
+use crate::io::free_functions;
+use crate::imageops;
+
+/// A Dynamic Image
+///
+/// An enumeration over all the pixel formats this crate can hold in an
+/// `ImageBuffer`, allowing image data to be handled without knowing its
+/// color type at compile time.
+#[derive(Clone)]
+pub enum DynamicImage {
+    /// Each pixel in this image is 8-bit Luma
+    ImageLuma8(GrayImage),
+
+    /// Each pixel in this image is 8-bit Luma with alpha
+    ImageLumaA8(GrayAlphaImage),
+
+    /// Each pixel in this image is 8-bit Rgb
+    ImageRgb8(RgbImage),
+
+    /// Each pixel in this image is 8-bit Rgb with alpha
+    ImageRgba8(RgbaImage),
+
+    /// Each pixel in this image is 8-bit Bgr
+    ImageBgr8(BgrImage),
+
+    /// Each pixel in this image is 8-bit Bgr with alpha
+    ImageBgra8(BgraImage),
+
+    /// Each pixel in this image is 16-bit Luma
+    ImageLuma16(Gray16Image),
+
+    /// Each pixel in this image is 16-bit Luma with alpha
+    ImageLumaA16(GrayAlpha16Image),
+
+    /// Each pixel in this image is 16-bit Rgb
+    ImageRgb16(Rgb16Image),
+
+    /// Each pixel in this image is 16-bit Rgb with alpha
+    ImageRgba16(Rgba16Image),
+}
+
+// Applies `$action` to the `ImageBuffer` held inside a `DynamicImage`,
+// dispatching over every variant.
+//
+// Four arm shapes are provided:
+//   `ref $image => $action`     — borrow the buffer, re-wrap the result in the same variant
+//   `ref mut $image => $action` — as above, with a mutable borrow
+//   `ref $image -> $action`     — borrow the buffer, return the action's result unwrapped
+//   `ref mut $image -> $action` — as above, with a mutable borrow
+macro_rules! dynamic_map(
+    ($dynimage: expr, ref $image: ident => $action: expr) => (
+        match $dynimage {
+            DynamicImage::ImageLuma8(ref $image) => DynamicImage::ImageLuma8($action),
+            DynamicImage::ImageLumaA8(ref $image) => DynamicImage::ImageLumaA8($action),
+            DynamicImage::ImageRgb8(ref $image) => DynamicImage::ImageRgb8($action),
+            DynamicImage::ImageRgba8(ref $image) => DynamicImage::ImageRgba8($action),
+            DynamicImage::ImageBgr8(ref $image) => DynamicImage::ImageBgr8($action),
+            DynamicImage::ImageBgra8(ref $image) => DynamicImage::ImageBgra8($action),
+            DynamicImage::ImageLuma16(ref $image) => DynamicImage::ImageLuma16($action),
+            DynamicImage::ImageLumaA16(ref $image) => DynamicImage::ImageLumaA16($action),
+            DynamicImage::ImageRgb16(ref $image) => DynamicImage::ImageRgb16($action),
+            DynamicImage::ImageRgba16(ref $image) => DynamicImage::ImageRgba16($action),
+        }
+    );
+
+    ($dynimage: expr, ref mut $image: ident => $action: expr) => (
+        match $dynimage {
+            DynamicImage::ImageLuma8(ref mut $image) => DynamicImage::ImageLuma8($action),
+            DynamicImage::ImageLumaA8(ref mut $image) => DynamicImage::ImageLumaA8($action),
+            DynamicImage::ImageRgb8(ref mut $image) => DynamicImage::ImageRgb8($action),
+            DynamicImage::ImageRgba8(ref mut $image) => DynamicImage::ImageRgba8($action),
+            DynamicImage::ImageBgr8(ref mut $image) => DynamicImage::ImageBgr8($action),
+            DynamicImage::ImageBgra8(ref mut $image) => DynamicImage::ImageBgra8($action),
+            DynamicImage::ImageLuma16(ref mut $image) => DynamicImage::ImageLuma16($action),
+            DynamicImage::ImageLumaA16(ref mut $image) => DynamicImage::ImageLumaA16($action),
+            DynamicImage::ImageRgb16(ref mut $image) => DynamicImage::ImageRgb16($action),
+            DynamicImage::ImageRgba16(ref mut $image) => DynamicImage::ImageRgba16($action),
+        }
+    );
+
+    ($dynimage: expr, ref $image: ident -> $action: expr) => (
+        match $dynimage {
+            DynamicImage::ImageLuma8(ref $image) => $action,
+            DynamicImage::ImageLumaA8(ref $image) => $action,
+            DynamicImage::ImageRgb8(ref $image) => $action,
+            DynamicImage::ImageRgba8(ref $image) => $action,
+            DynamicImage::ImageBgr8(ref $image) => $action,
+            DynamicImage::ImageBgra8(ref $image) => $action,
+            DynamicImage::ImageLuma16(ref $image) => $action,
+            DynamicImage::ImageLumaA16(ref $image) => $action,
+            DynamicImage::ImageRgb16(ref $image) => $action,
+            DynamicImage::ImageRgba16(ref $image) => $action,
+        }
+    );
+
+    ($dynimage: expr, ref mut $image: ident -> $action: expr) => (
+        match $dynimage {
+            DynamicImage::ImageLuma8(ref mut $image) => $action,
+            DynamicImage::ImageLumaA8(ref mut $image) => $action,
+            DynamicImage::ImageRgb8(ref mut $image) => $action,
+            DynamicImage::ImageRgba8(ref mut $image) => $action,
+            DynamicImage::ImageBgr8(ref mut $image) => $action,
+            DynamicImage::ImageBgra8(ref mut $image) => $action,
+            DynamicImage::ImageLuma16(ref mut $image) => $action,
+            DynamicImage::ImageLumaA16(ref mut $image) => $action,
+            DynamicImage::ImageRgb16(ref mut $image) => $action,
+            DynamicImage::ImageRgba16(ref mut $image) => $action,
+        }
+    );
+);
+
+impl DynamicImage {
+    /// Creates a dynamic image backed by a buffer of grey pixels.
+    pub fn new_luma8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageLuma8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of grey
+    /// pixels with transparency.
+    pub fn new_luma_a8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageLumaA8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of RGB pixels.
+    pub fn new_rgb8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageRgb8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of RGBA pixels.
+    pub fn new_rgba8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageRgba8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of BGRA pixels.
+    pub fn new_bgra8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageBgra8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of BGR pixels.
+    pub fn new_bgr8(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageBgr8(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of grey pixels.
+    pub fn new_luma16(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageLuma16(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of grey
+    /// pixels with transparency.
+    pub fn new_luma_a16(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageLumaA16(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of RGB pixels.
+    pub fn new_rgb16(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageRgb16(ImageBuffer::new(w, h))
+    }
+
+    /// Creates a dynamic image backed by a buffer of RGBA pixels.
+    pub fn new_rgba16(w: u32, h: u32) -> DynamicImage {
+        DynamicImage::ImageRgba16(ImageBuffer::new(w, h))
+    }
+
+    /// Decodes an encoded image into a dynamic image.
+    pub fn from_decoder<'a>(decoder: impl ImageDecoder<'a>)
+        -> ImageResult<Self>
+    {
+        decoder_to_image(decoder)
+    }
+
+    /// Returns a copy of this image as an RGB image.
+    pub fn to_rgb(&self) -> RgbImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Returns a copy of this image as an RGBA image.
+    pub fn to_rgba(&self) -> RgbaImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Returns a copy of this image as a BGR image.
+    pub fn to_bgr(&self) -> BgrImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Returns a copy of this image as a BGRA image.
+    pub fn to_bgra(&self) -> BgraImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Returns a copy of this image as a Luma image.
+    pub fn to_luma(&self) -> GrayImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Returns a copy of this image as a LumaA image.
+    pub fn to_luma_alpha(&self) -> GrayAlphaImage {
+        dynamic_map!(*self, ref p -> {
+            p.convert()
+        })
+    }
+
+    /// Consume the image and returns an RGB image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_rgb(self) -> RgbImage {
+        match self {
+            DynamicImage::ImageRgb8(x) => x,
+            x => x.to_rgb(),
+        }
+    }
+
+    /// Consume the image and returns an RGBA image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_rgba(self) -> RgbaImage {
+        match self {
+            DynamicImage::ImageRgba8(x) => x,
+            x => x.to_rgba(),
+        }
+    }
+
+    /// Consume the image and returns a BGR image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_bgr(self) -> BgrImage {
+        match self {
+            DynamicImage::ImageBgr8(x) => x,
+            x => x.to_bgr(),
+        }
+    }
+
+    /// Consume the image and returns a BGRA image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_bgra(self) -> BgraImage {
+        match self {
+            DynamicImage::ImageBgra8(x) => x,
+            x => x.to_bgra(),
+        }
+    }
+
+    /// Consume the image and returns a Luma image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_luma(self) -> GrayImage {
+        match self {
+            DynamicImage::ImageLuma8(x) => x,
+            x => x.to_luma(),
+        }
+    }
+
+    /// Consume the image and returns a LumaA image.
+    ///
+    /// If the image was already the correct format, it is returned as is.
+    /// Otherwise, a copy is created.
+    pub fn into_luma_alpha(self) -> GrayAlphaImage {
+        match self {
+            DynamicImage::ImageLumaA8(x) => x,
+            x => x.to_luma_alpha(),
+        }
+    }
+
+    /// Return a cut out of this image delimited by the bounding rectangle.
+    pub fn crop(&mut self, x: u32, y: u32, width: u32, height: u32) -> DynamicImage {
+        dynamic_map!(*self, ref mut p => imageops::crop(p, x, y, width, height).to_image())
+    }
+
+    /// Return a reference to an 8bit RGB image
+    pub fn as_rgb8(&self) -> Option<&RgbImage> {
+        match *self {
+            DynamicImage::ImageRgb8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit RGB image
+    pub fn as_mut_rgb8(&mut self) -> Option<&mut RgbImage> {
+        match *self {
+            DynamicImage::ImageRgb8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to an 8bit BGR image
+    pub fn as_bgr8(&self) -> Option<&BgrImage> {
+        match *self {
+            DynamicImage::ImageBgr8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit BGR image
+    pub fn as_mut_bgr8(&mut self) -> Option<&mut BgrImage> {
+        match *self {
+            DynamicImage::ImageBgr8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to an 8bit RGBA image
+    pub fn as_rgba8(&self) -> Option<&RgbaImage> {
+        match *self {
+            DynamicImage::ImageRgba8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit RGBA image
+    pub fn as_mut_rgba8(&mut self) -> Option<&mut RgbaImage> {
+        match *self {
+            DynamicImage::ImageRgba8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to an 8bit BGRA image
+    pub fn as_bgra8(&self) -> Option<&BgraImage> {
+        match *self {
+            DynamicImage::ImageBgra8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit BGRA image
+    pub fn as_mut_bgra8(&mut self) -> Option<&mut BgraImage> {
+        match *self {
+            DynamicImage::ImageBgra8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to an 8bit Grayscale image
+    pub fn as_luma8(&self) -> Option<&GrayImage> {
+        match *self {
+            DynamicImage::ImageLuma8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit Grayscale image
+    pub fn as_mut_luma8(&mut self) -> Option<&mut GrayImage> {
+        match *self {
+            DynamicImage::ImageLuma8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to an 8bit Grayscale image with an alpha channel
+    pub fn as_luma_alpha8(&self) -> Option<&GrayAlphaImage> {
+        match *self {
+            DynamicImage::ImageLumaA8(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to an 8bit Grayscale image with an alpha channel
+    pub fn as_mut_luma_alpha8(&mut self) -> Option<&mut GrayAlphaImage> {
+        match *self {
+            DynamicImage::ImageLumaA8(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to a 16bit RGB image
+    pub fn as_rgb16(&self) -> Option<&Rgb16Image> {
+        match *self {
+            DynamicImage::ImageRgb16(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to a 16bit RGB image
+    pub fn as_mut_rgb16(&mut self) -> Option<&mut Rgb16Image> {
+        match *self {
+            DynamicImage::ImageRgb16(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to a 16bit RGBA image
+    pub fn as_rgba16(&self) -> Option<&Rgba16Image> {
+        match *self {
+            DynamicImage::ImageRgba16(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to a 16bit RGBA image
+    pub fn as_mut_rgba16(&mut self) -> Option<&mut Rgba16Image> {
+        match *self {
+            DynamicImage::ImageRgba16(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to a 16bit Grayscale image
+    pub fn as_luma16(&self) -> Option<&Gray16Image> {
+        match *self {
+            DynamicImage::ImageLuma16(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to a 16bit Grayscale image
+    pub fn as_mut_luma16(&mut self) -> Option<&mut Gray16Image> {
+        match *self {
+            DynamicImage::ImageLuma16(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to a 16bit Grayscale image with an alpha channel
+    pub fn as_luma_alpha16(&self) -> Option<&GrayAlpha16Image> {
+        match *self {
+            DynamicImage::ImageLumaA16(ref p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to a 16bit Grayscale image with an alpha channel
+    pub fn as_mut_luma_alpha16(&mut self) -> Option<&mut GrayAlpha16Image> {
+        match *self {
+            DynamicImage::ImageLumaA16(ref mut p) => Some(p),
+            _ => None,
+        }
+    }
+
+    /// Return a view on the raw sample buffer for 8 bit per channel images.
+    pub fn as_flat_samples_u8(&self) -> Option<FlatSamples<&[u8]>> {
+        match *self {
+            DynamicImage::ImageLuma8(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageLumaA8(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageRgb8(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageRgba8(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageBgr8(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageBgra8(ref p) => Some(p.as_flat_samples()),
+            _ => None,
+        }
+    }
+
+    /// Return a view on the raw sample buffer for 16 bit per channel images.
+    pub fn as_flat_samples_u16(&self) -> Option<FlatSamples<&[u16]>> {
+        match *self {
+            DynamicImage::ImageLuma16(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageLumaA16(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageRgb16(ref p) => Some(p.as_flat_samples()),
+            DynamicImage::ImageRgba16(ref p) => Some(p.as_flat_samples()),
+            _ => None,
+        }
+    }
+
+    /// Return this image's pixels as a byte vector.
+    pub fn to_bytes(&self) -> Vec<u8> {
+        image_to_bytes(self)
+    }
+
+    /// Return this image's color type.
+    pub fn color(&self) -> color::ColorType {
+        match *self {
+            DynamicImage::ImageLuma8(_) => color::ColorType::L8,
+            DynamicImage::ImageLumaA8(_) => color::ColorType::La8,
+            DynamicImage::ImageRgb8(_) => color::ColorType::Rgb8,
+            DynamicImage::ImageRgba8(_) => color::ColorType::Rgba8,
+            DynamicImage::ImageBgra8(_) => color::ColorType::Bgra8,
+            DynamicImage::ImageBgr8(_) => color::ColorType::Bgr8,
+            DynamicImage::ImageLuma16(_) => color::ColorType::L16,
+            DynamicImage::ImageLumaA16(_) => color::ColorType::La16,
+            DynamicImage::ImageRgb16(_) => color::ColorType::Rgb16,
+            DynamicImage::ImageRgba16(_) => color::ColorType::Rgba16,
+        }
+    }
+
+    /// Return a grayscale version of this image.
+    pub fn grayscale(&self) -> DynamicImage {
+        match *self {
+            // Luma variants are already grayscale: just clone the buffer.
+            DynamicImage::ImageLuma8(ref p) => DynamicImage::ImageLuma8(p.clone()),
+            DynamicImage::ImageLumaA8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+            DynamicImage::ImageRgb8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+            DynamicImage::ImageRgba8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+            DynamicImage::ImageBgr8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+            DynamicImage::ImageBgra8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+            DynamicImage::ImageLuma16(ref p) => DynamicImage::ImageLuma16(p.clone()),
+            DynamicImage::ImageLumaA16(ref p) => DynamicImage::ImageLuma16(imageops::grayscale(p)),
+            DynamicImage::ImageRgb16(ref p) => DynamicImage::ImageLuma16(imageops::grayscale(p)),
+            DynamicImage::ImageRgba16(ref p) => DynamicImage::ImageLuma16(imageops::grayscale(p)),
+        }
+    }
+
+    /// Invert the colors of this image.
+    /// This method operates inplace.
+    pub fn invert(&mut self) {
+        dynamic_map!(*self, ref mut p -> imageops::invert(p))
+    }
+
+    /// Resize this image using the specified filter algorithm.
+    /// Returns a new image. The image's aspect ratio is preserved.
+    /// The image is scaled to the maximum possible size that fits
+    /// within the bounds specified by ```nwidth``` and ```nheight```.
+    pub fn resize(&self, nwidth: u32, nheight: u32, filter: imageops::FilterType) -> DynamicImage {
+        let (width2, height2) =
+            resize_dimensions(self.width(), self.height(), nwidth, nheight, false);
+
+        self.resize_exact(width2, height2, filter)
+    }
+
+    /// Resize this image using the specified filter algorithm.
+    /// Returns a new image. Does not preserve aspect ratio.
+    /// ```nwidth``` and ```nheight``` are the new image's dimensions
+    pub fn resize_exact(
+        &self,
+        nwidth: u32,
+        nheight: u32,
+        filter: imageops::FilterType,
+    ) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::resize(p, nwidth, nheight, filter))
+    }
+
+    /// Scale this image down to fit within a specific size.
+    /// Returns a new image. The image's aspect ratio is preserved.
+    /// The image is scaled to the maximum possible size that fits
+    /// within the bounds specified by ```nwidth``` and ```nheight```.
+    ///
+    /// This method uses a fast integer algorithm where each source
+    /// pixel contributes to exactly one target pixel.
+    /// May give aliasing artifacts if new size is close to old size.
+    pub fn thumbnail(&self, nwidth: u32, nheight: u32) -> DynamicImage {
+        let (width2, height2) =
+            resize_dimensions(self.width(), self.height(), nwidth, nheight, false);
+        self.thumbnail_exact(width2, height2)
+    }
+
+    /// Scale this image down to a specific size.
+    /// Returns a new image. Does not preserve aspect ratio.
+    /// ```nwidth``` and ```nheight``` are the new image's dimensions.
+    /// This method uses a fast integer algorithm where each source
+    /// pixel contributes to exactly one target pixel.
+    /// May give aliasing artifacts if new size is close to old size.
+    pub fn thumbnail_exact(&self, nwidth: u32, nheight: u32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::thumbnail(p, nwidth, nheight))
+    }
+
+    /// Resize this image using the specified filter algorithm.
+    /// Returns a new image. The image's aspect ratio is preserved.
+    /// The image is scaled to the maximum possible size that fits
+    /// within the larger (relative to aspect ratio) of the bounds
+    /// specified by ```nwidth``` and ```nheight```, then cropped to
+    /// fit within the other bound.
+    pub fn resize_to_fill(
+        &self,
+        nwidth: u32,
+        nheight: u32,
+        filter: imageops::FilterType,
+    ) -> DynamicImage {
+        let (width2, height2) =
+            resize_dimensions(self.width(), self.height(), nwidth, nheight, true);
+
+        let mut intermediate = self.resize_exact(width2, height2, filter);
+        let (iwidth, iheight) = intermediate.dimensions();
+        // Compare aspect ratios via cross-multiplication (u64 to avoid overflow)
+        // to decide which axis overflows the target and must be center-cropped.
+        let ratio = u64::from(iwidth) * u64::from(nheight);
+        let nratio = u64::from(nwidth) * u64::from(iheight);
+
+        if nratio > ratio {
+            intermediate.crop(0, (iheight - nheight) / 2, nwidth, nheight)
+        } else {
+            intermediate.crop((iwidth - nwidth) / 2, 0, nwidth, nheight)
+        }
+    }
+
+    /// Performs a Gaussian blur on this image.
+    /// ```sigma``` is a measure of how much to blur by.
+    pub fn blur(&self, sigma: f32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::blur(p, sigma))
+    }
+
+    /// Performs an unsharpen mask on this image.
+    /// ```sigma``` is the amount to blur the image by.
+    /// ```threshold``` is a control of how much to sharpen.
+    ///
+    /// See <https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking>
+    pub fn unsharpen(&self, sigma: f32, threshold: i32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::unsharpen(p, sigma, threshold))
+    }
+
+    /// Filters this image with the specified 3x3 kernel.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the kernel does not contain exactly 9 elements.
+    pub fn filter3x3(&self, kernel: &[f32]) -> DynamicImage {
+        if kernel.len() != 9 {
+            panic!("filter must be 3 x 3")
+        }
+
+        dynamic_map!(*self, ref p => imageops::filter3x3(p, kernel))
+    }
+
+    /// Adjust the contrast of this image.
+    /// ```contrast``` is the amount to adjust the contrast by.
+    /// Negative values decrease the contrast and positive values increase the contrast.
+    pub fn adjust_contrast(&self, c: f32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::contrast(p, c))
+    }
+
+    /// Brighten the pixels of this image.
+    /// ```value``` is the amount to brighten each pixel by.
+    /// Negative values decrease the brightness and positive values increase it.
+    pub fn brighten(&self, value: i32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::brighten(p, value))
+    }
+
+    /// Hue rotate the supplied image.
+    /// `value` is the degrees to rotate each pixel by.
+    /// 0 and 360 do nothing, the rest rotates by the given degree value.
+    /// just like the css webkit filter hue-rotate(180)
+    pub fn huerotate(&self, value: i32) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::huerotate(p, value))
+    }
+
+    /// Flip this image vertically
+    pub fn flipv(&self) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::flip_vertical(p))
+    }
+
+    /// Flip this image horizontally
+    pub fn fliph(&self) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::flip_horizontal(p))
+    }
+
+    /// Rotate this image 90 degrees clockwise.
+    pub fn rotate90(&self) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::rotate90(p))
+    }
+
+    /// Rotate this image 180 degrees clockwise.
+    pub fn rotate180(&self) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::rotate180(p))
+    }
+
+    /// Rotate this image 270 degrees clockwise.
+    pub fn rotate270(&self) -> DynamicImage {
+        dynamic_map!(*self, ref p => imageops::rotate270(p))
+    }
+
+    /// Encode this image and write it to ```w```
+    pub fn write_to<W: Write, F: Into<ImageOutputFormat>>(
+        &self,
+        w: &mut W,
+        format: F,
+    ) -> ImageResult<()> {
+        let mut bytes = self.to_bytes();
+        let (width, height) = self.dimensions();
+        let mut color = self.color();
+        let format = format.into();
+
+        #[allow(deprecated)]
+        match format {
+            #[cfg(feature = "png")]
+            image::ImageOutputFormat::Png => {
+                let p = png::PNGEncoder::new(w);
+                // The PNG encoder has no BGR(A) support, so convert those
+                // layouts to RGB(A) before encoding.
+                match *self {
+                    DynamicImage::ImageBgra8(_) => {
+                        bytes = self.to_rgba().iter().cloned().collect();
+                        color = color::ColorType::Rgba8;
+                    }
+                    DynamicImage::ImageBgr8(_) => {
+                        bytes = self.to_rgb().iter().cloned().collect();
+                        color = color::ColorType::Rgb8;
+                    }
+                    _ => {}
+                }
+                p.encode(&bytes, width, height, color)?;
+                Ok(())
+            }
+            #[cfg(feature = "pnm")]
+            image::ImageOutputFormat::Pnm(subtype) => {
+                let mut p = pnm::PNMEncoder::new(w).with_subtype(subtype);
+                // Like PNG above, PNM has no BGR(A) support.
+                match *self {
+                    DynamicImage::ImageBgra8(_) => {
+                        bytes = self.to_rgba().iter().cloned().collect();
+                        color = color::ColorType::Rgba8;
+                    }
+                    DynamicImage::ImageBgr8(_) => {
+                        bytes = self.to_rgb().iter().cloned().collect();
+                        color = color::ColorType::Rgb8;
+                    }
+                    _ => {}
+                }
+                p.encode(&bytes[..], width, height, color)?;
+                Ok(())
+            }
+            #[cfg(feature = "jpeg")]
+            image::ImageOutputFormat::Jpeg(quality) => {
+                let mut j = jpeg::JPEGEncoder::new_with_quality(w, quality);
+
+                j.encode(&bytes, width, height, color)?;
+                Ok(())
+            }
+
+            #[cfg(feature = "gif")]
+            image::ImageOutputFormat::Gif => {
+                let mut g = gif::Encoder::new(w);
+                // GIF frames are encoded from RGBA data.
+                g.encode_frame(crate::animation::Frame::new(self.to_rgba()))?;
+                Ok(())
+            }
+
+            #[cfg(feature = "ico")]
+            image::ImageOutputFormat::Ico => {
+                let i = ico::ICOEncoder::new(w);
+
+                i.encode(&bytes, width, height, color)?;
+                Ok(())
+            }
+
+            #[cfg(feature = "bmp")]
+            image::ImageOutputFormat::Bmp => {
+                let mut b = bmp::BMPEncoder::new(w);
+                b.encode(&bytes, width, height, color)?;
+                Ok(())
+            }
+
+            image::ImageOutputFormat::Unsupported(msg) => {
+                Err(ImageError::UnsupportedError(msg))
+            },
+
+            // Uninhabited marker variant: this arm can never be reached.
+            image::ImageOutputFormat::__NonExhaustive(marker) => match marker._private {},
+        }
+    }
+
+    /// Saves the buffer to a file at the path specified.
+    ///
+    /// The image format is derived from the file extension.
+    pub fn save<Q>(&self, path: Q) -> ImageResult<()>
+    where
+        Q: AsRef<Path>,
+    {
+        dynamic_map!(*self, ref p -> {
+            p.save(path)
+        })
+    }
+
+    /// Saves the buffer to a file at the specified path in
+    /// the specified format.
+    ///
+    /// See [`save_buffer_with_format`](fn.save_buffer_with_format.html) for
+    /// supported types.
+    pub fn save_with_format<Q>(&self, path: Q, format: ImageFormat) -> ImageResult<()>
+    where
+        Q: AsRef<Path>,
+    {
+        dynamic_map!(*self, ref p -> {
+            p.save_with_format(path, format)
+        })
+    }
+}
+
+#[allow(deprecated)]
+impl GenericImageView for DynamicImage {
+    // Pixels read through this view are converted to 8-bit RGBA,
+    // whatever the underlying buffer's color type is.
+    type Pixel = color::Rgba<u8>;
+    type InnerImageView = Self;
+
+    fn dimensions(&self) -> (u32, u32) {
+        dynamic_map!(*self, ref p -> p.dimensions())
+    }
+
+    fn bounds(&self) -> (u32, u32, u32, u32) {
+        dynamic_map!(*self, ref p -> p.bounds())
+    }
+
+    fn get_pixel(&self, x: u32, y: u32) -> color::Rgba<u8> {
+        dynamic_map!(*self, ref p -> p.get_pixel(x, y).to_rgba().into_color())
+    }
+
+    fn inner(&self) -> &Self::InnerImageView {
+        self
+    }
+}
+
+#[allow(deprecated)]
+impl GenericImage for DynamicImage {
+    type InnerImage = DynamicImage;
+
+    // Writes an RGBA pixel, converting it to the variant's native color type.
+    fn put_pixel(&mut self, x: u32, y: u32, pixel: color::Rgba<u8>) {
+        match *self {
+            DynamicImage::ImageLuma8(ref mut p) => p.put_pixel(x, y, pixel.to_luma()),
+            DynamicImage::ImageLumaA8(ref mut p) => p.put_pixel(x, y, pixel.to_luma_alpha()),
+            DynamicImage::ImageRgb8(ref mut p) => p.put_pixel(x, y, pixel.to_rgb()),
+            DynamicImage::ImageRgba8(ref mut p) => p.put_pixel(x, y, pixel),
+            DynamicImage::ImageBgr8(ref mut p) => p.put_pixel(x, y, pixel.to_bgr()),
+            DynamicImage::ImageBgra8(ref mut p) => p.put_pixel(x, y, pixel.to_bgra()),
+            DynamicImage::ImageLuma16(ref mut p) => p.put_pixel(x, y, pixel.to_luma().into_color()),
+            DynamicImage::ImageLumaA16(ref mut p) => p.put_pixel(x, y, pixel.to_luma_alpha().into_color()),
+            DynamicImage::ImageRgb16(ref mut p) => p.put_pixel(x, y, pixel.to_rgb().into_color()),
+            DynamicImage::ImageRgba16(ref mut p) => p.put_pixel(x, y, pixel.into_color()),
+        }
+    }
+    /// DEPRECATED: Use iterator `pixels_mut` to blend the pixels directly.
+    fn blend_pixel(&mut self, x: u32, y: u32, pixel: color::Rgba<u8>) {
+        match *self {
+            DynamicImage::ImageLuma8(ref mut p) => p.blend_pixel(x, y, pixel.to_luma()),
+            DynamicImage::ImageLumaA8(ref mut p) => p.blend_pixel(x, y, pixel.to_luma_alpha()),
+            DynamicImage::ImageRgb8(ref mut p) => p.blend_pixel(x, y, pixel.to_rgb()),
+            DynamicImage::ImageRgba8(ref mut p) => p.blend_pixel(x, y, pixel),
+            DynamicImage::ImageBgr8(ref mut p) => p.blend_pixel(x, y, pixel.to_bgr()),
+            DynamicImage::ImageBgra8(ref mut p) => p.blend_pixel(x, y, pixel.to_bgra()),
+            DynamicImage::ImageLuma16(ref mut p) => p.blend_pixel(x, y, pixel.to_luma().into_color()),
+            DynamicImage::ImageLumaA16(ref mut p) => p.blend_pixel(x, y, pixel.to_luma_alpha().into_color()),
+            DynamicImage::ImageRgb16(ref mut p) => p.blend_pixel(x, y, pixel.to_rgb().into_color()),
+            DynamicImage::ImageRgba16(ref mut p) => p.blend_pixel(x, y, pixel.into_color()),
+        }
+    }
+
+    /// DEPRECATED: Do not use this function: it is unimplemented and always panics!
+    fn get_pixel_mut(&mut self, _: u32, _: u32) -> &mut color::Rgba<u8> {
+        unimplemented!()
+    }
+
+    fn inner_mut(&mut self) -> &mut Self::InnerImage {
+        self
+    }
+}
+
+/// Decodes an image and stores it into a dynamic image.
+///
+/// Picks the `DynamicImage` variant matching the decoder's reported color
+/// type; returns `UnsupportedColor` for color types without a variant, and
+/// `DimensionError` when the decoded buffer does not match the reported
+/// dimensions.
+fn decoder_to_image<'a, I: ImageDecoder<'a>>(decoder: I) -> ImageResult<DynamicImage> {
+    let (w, h) = decoder.dimensions();
+    let color_type = decoder.color_type();
+
+    // Each arm differs only in the target variant; `decoder_to_vec` infers
+    // the subpixel type (u8 or u16) from the variant's buffer type.
+    let image = match color_type {
+        color::ColorType::Rgb8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgb8)
+        }
+
+        color::ColorType::Rgba8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgba8)
+        }
+
+        color::ColorType::Bgr8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageBgr8)
+        }
+
+        color::ColorType::Bgra8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageBgra8)
+        }
+
+        color::ColorType::L8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLuma8)
+        }
+
+        color::ColorType::La8 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLumaA8)
+        }
+
+        color::ColorType::Rgb16 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgb16)
+        }
+
+        color::ColorType::Rgba16 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgba16)
+        }
+
+        color::ColorType::L16 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLuma16)
+        }
+        color::ColorType::La16 => {
+            let buf = image::decoder_to_vec(decoder)?;
+            ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLumaA16)
+        }
+        _ => return Err(ImageError::UnsupportedColor(color_type.into())),
+    };
+    match image {
+        Some(image) => Ok(image),
+        None => Err(ImageError::DimensionError),
+    }
+}
+
+#[allow(deprecated)]
+/// Returns the image's raw sample data as a byte vector.
+/// 16-bit buffers are reinterpreted as native-endian bytes via `as_bytes`.
+fn image_to_bytes(image: &DynamicImage) -> Vec<u8> {
+    use crate::traits::EncodableLayout;
+
+    match *image {
+        // TODO: consider transmuting
+        DynamicImage::ImageLuma8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageLumaA8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageRgb8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageRgba8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageBgr8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageBgra8(ref a) => a.iter().cloned().collect(),
+
+        DynamicImage::ImageLuma16(ref a) => a.as_bytes().to_vec(),
+
+        DynamicImage::ImageLumaA16(ref a) => a.as_bytes().to_vec(),
+
+        DynamicImage::ImageRgb16(ref a) => a.as_bytes().to_vec(),
+
+        DynamicImage::ImageRgba16(ref a) => a.as_bytes().to_vec(),
+    }
+}
+
+/// Open the image located at the path specified.
+/// The image's format is determined from the path's file extension.
+///
+/// Try [`io::Reader`] for more advanced uses, including guessing the format based on the file's
+/// content before its path.
+///
+/// [`io::Reader`]: io/struct.Reader.html
+pub fn open<P>(path: P) -> ImageResult<DynamicImage>
+where
+    P: AsRef<Path>,
+{
+    // thin wrapper function to strip generics before calling open_impl
+    free_functions::open_impl(path.as_ref())
+}
+
+/// Read the dimensions of the image located at the specified path.
+/// This is faster than fully loading the image and then getting its dimensions.
+///
+/// Try [`io::Reader`] for more advanced uses, including guessing the format based on the file's
+/// content before its path or manually supplying the format.
+///
+/// [`io::Reader`]: io/struct.Reader.html
+pub fn image_dimensions<P>(path: P) -> ImageResult<(u32, u32)>
+where
+    P: AsRef<Path>,
+{
+    // thin wrapper function to strip generics before calling image_dimensions_impl
+    free_functions::image_dimensions_impl(path.as_ref())
+}
+
+/// Saves the supplied buffer to a file at the path specified.
+///
+/// The image format is derived from the file extension. The buffer is assumed to have
+/// the correct format according to the specified color type.
+///
+/// This will lead to corrupted files if the buffer contains malformed data. Currently only
+/// jpeg, png, ico, pnm, bmp and tiff files are supported.
+pub fn save_buffer<P>(
+    path: P,
+    buf: &[u8],
+    width: u32,
+    height: u32,
+    color: color::ColorType,
+) -> ImageResult<()>
+where
+    P: AsRef<Path>,
+{
+    // thin wrapper function to strip generics before calling save_buffer_impl
+    free_functions::save_buffer_impl(path.as_ref(), buf, width, height, color)
+}
+
+/// Saves the supplied buffer to a file at the path specified
+/// in the specified format.
+///
+/// The buffer is assumed to have the correct format according
+/// to the specified color type.
+/// This will lead to corrupted files if the buffer contains
+/// malformed data. Currently only jpeg, png, ico, bmp and
+/// tiff files are supported.
+pub fn save_buffer_with_format<P>(
+ path: P,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+ format: ImageFormat,
+) -> ImageResult<()>
+where
+ P: AsRef<Path>,
+{
+ // thin wrapper function to strip generics
+ free_functions::save_buffer_with_format_impl(path.as_ref(), buf, width, height, color, format)
+}
+
+/// Create a new image from a byte slice
+///
+/// Makes an educated guess about the image format.
+/// TGA is not supported by this function.
+///
+/// Try [`io::Reader`] for more advanced uses.
+///
+/// [`io::Reader`]: io/struct.Reader.html
+pub fn load_from_memory(buffer: &[u8]) -> ImageResult<DynamicImage> {
+ let format = free_functions::guess_format(buffer)?;
+ load_from_memory_with_format(buffer, format)
+}
+
+/// Create a new image from a byte slice
+///
+/// This is just a simple wrapper that constructs an `std::io::Cursor` around the buffer and then
+/// calls `load` with that reader.
+///
+/// Try [`io::Reader`] for more advanced uses.
+///
+/// [`load`]: fn.load.html
+/// [`io::Reader`]: io/struct.Reader.html
+#[inline(always)]
+pub fn load_from_memory_with_format(buf: &[u8], format: ImageFormat) -> ImageResult<DynamicImage> {
+ let b = io::Cursor::new(buf);
+ free_functions::load(b, format)
+}
+
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained with in the given `width` and `height`,
/// with empty space on one axis.
fn resize_dimensions(width: u32, height: u32, nwidth: u32, nheight: u32, fill: bool) -> (u32, u32) {
    let max = u64::from(::std::u32::MAX);
    // Compare the aspect ratios width/height and nwidth/nheight by
    // cross-multiplication, avoiding both division and floating point.
    let ratio = u64::from(width) * u64::from(nheight);
    let nratio = u64::from(nwidth) * u64::from(height);

    // Decide which target dimension is taken exactly; the other is derived
    // from the original aspect ratio.
    let use_width = if fill { nratio > ratio } else { nratio <= ratio };

    if use_width {
        let derived = u64::from(height) * u64::from(nwidth) / u64::from(width);
        if derived <= max {
            (nwidth, derived as u32)
        } else {
            // Derived height overflows u32: clamp it and scale the width down
            // proportionally so the aspect ratio is still preserved.
            ((u64::from(nwidth) * max / derived) as u32, ::std::u32::MAX)
        }
    } else {
        let derived = u64::from(width) * u64::from(nheight) / u64::from(height);
        if derived <= max {
            (derived as u32, nheight)
        } else {
            // Symmetric overflow case for the derived width.
            (::std::u32::MAX, (u64::from(nheight) * max / derived) as u32)
        }
    }
}
+
// Benchmarks for `DynamicImage` conversions. They only compile when the
// unstable `test` crate is usable and the `benchmarks` cargo feature is on.
#[cfg(test)]
mod bench {
    #[cfg(feature = "benchmarks")]
    use test;

    // Measures a full RGB8 -> Luma conversion of a 1000x1000 image.
    #[bench]
    #[cfg(feature = "benchmarks")]
    fn bench_conversion(b: &mut test::Bencher) {
        let a = super::DynamicImage::ImageRgb8(crate::ImageBuffer::new(1000, 1000));
        b.iter(|| a.to_luma());
        // Throughput in bytes: width * height * 3 channels of u8.
        b.bytes = 1000 * 1000 * 3
    }
}
+
#[cfg(test)]
mod test {
    // Loading from an empty buffer must fail cleanly: no format can be guessed.
    #[test]
    fn test_empty_file() {
        assert!(super::load_from_memory(b"").is_err());
    }

    // Property: when only the width constrains (height bound effectively
    // unbounded), the result keeps `new_w` exactly and scales the height by
    // the same ratio.
    quickcheck! {
        fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
            if old_w == 0 || new_w == 0 { return true; }
            let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
            result.0 == new_w && result.1 == (400 as f64 * new_w as f64 / old_w as f64) as u32
        }
    }

    // Property: symmetric to the above, with the height constraining.
    quickcheck! {
        fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
            if old_h == 0 || new_h == 0 { return true; }
            let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
            result.1 == new_h && result.0 == (400 as f64 * new_h as f64 / old_h as f64) as u32
        }
    }

    // `fill = true` overflows the tighter constraint instead of fitting inside it.
    #[test]
    fn resize_handles_fill() {
        let result = super::resize_dimensions(100, 200, 200, 500, true);
        assert!(result.0 == 250);
        assert!(result.1 == 500);

        let result = super::resize_dimensions(200, 100, 500, 200, true);
        assert!(result.0 == 500);
        assert!(result.1 == 250);
    }

    // Derived dimensions exceeding u32::MAX are clamped while the other axis
    // is scaled down to preserve aspect ratio.
    #[test]
    fn resize_handles_overflow() {
        let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
        assert!(result.0 == 100);
        assert!(result.1 == ::std::u32::MAX);

        let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
        assert!(result.0 == ::std::u32::MAX);
        assert!(result.1 == 100);
    }

    // Fixture-based tests are gated on the codec features they need.
    #[cfg(feature = "jpeg")]
    #[test]
    fn image_dimensions() {
        let im_path = "./tests/images/jpg/progressive/cat.jpg";
        let dims = super::image_dimensions(im_path).unwrap();
        assert_eq!(dims, (320, 240));
    }

    #[cfg(feature = "png")]
    #[test]
    fn open_16bpc_png() {
        let im_path = "./tests/images/png/16bpc/basn6a16.png";
        let image = super::open(im_path).unwrap();
        assert_eq!(image.color(), super::color::ColorType::Rgba16);
    }
}
diff --git a/third_party/rust/image/src/error.rs b/third_party/rust/image/src/error.rs
new file mode 100644
index 0000000000..5785db8e74
--- /dev/null
+++ b/third_party/rust/image/src/error.rs
@@ -0,0 +1,621 @@
+//! Contains detailed error representation.
+//!
+//! See the main [`ImageError`] which contains a variant for each specialized error type. The
+//! subtypes used in each variant are opaque by design. They can be roughly inspected through their
+//! respective `kind` methods which work similar to `std::io::Error::kind`.
+//!
+//! The error interface makes it possible to inspect the error of an underlying decoder or encoder,
+//! through the `Error::source` method. Note that this is not part of the stable interface and you
+//! may not rely on a particular error value for a particular operation. This means mainly that
+//! `image` does not promise to remain on a particular version of its underlying decoders but if
+//! you ensure to use the same version of the dependency (or at least of the error type) through
+//! external means then you could inspect the error type in slightly more detail.
+//!
+//! [`ImageError`]: enum.ImageError.html
+
+use std::{fmt, io};
+use std::error::Error;
+
+use crate::color::ExtendedColorType;
+use crate::image::ImageFormat;
+use crate::utils::NonExhaustiveMarker;
+
/// The generic error type for image operations.
///
/// This high level enum allows, by variant matching, a rough separation of concerns between
/// underlying IO, the caller, format specifications, and the `image` implementation.
///
/// NOTE(review): unlike the kind enums below, this enum has no hidden variant, so adding a
/// variant later is a breaking change for downstream exhaustive matches — confirm intended.
#[derive(Debug)]
pub enum ImageError {
    /// An error was encountered while decoding.
    ///
    /// This means that the input data did not conform to the specification of some image format,
    /// or that no format could be determined, or that it did not match format specific
    /// requirements set by the caller.
    Decoding(DecodingError),

    /// An error was encountered while encoding.
    ///
    /// The input image can not be encoded with the chosen format, for example because the
    /// specification has no representation for its color space or because a necessary conversion
    /// is ambiguous. In some cases it might also happen that the dimensions can not be used with
    /// the format.
    Encoding(EncodingError),

    /// An error was encountered in input arguments.
    ///
    /// This is a catch-all case for strictly internal operations such as scaling, conversions,
    /// etc. that involve no external format specifications.
    Parameter(ParameterError),

    /// Completing the operation would have required more resources than allowed.
    ///
    /// Errors of this type are limits set by the user or environment, *not* inherent in a specific
    /// format or operation that was executed.
    Limits(LimitError),

    /// An operation can not be completed by the chosen abstraction.
    ///
    /// This means that it might be possible for the operation to succeed in general but
    /// * it requires a disabled feature,
    /// * the implementation does not yet exist, or
    /// * no abstraction for a lower level could be found.
    Unsupported(UnsupportedError),

    /// An error occurred while interacting with the environment.
    IoError(io::Error),
}
+
/// The implementation for an operation was not provided.
///
/// See the variant [`Unsupported`] for more documentation.
///
/// [`Unsupported`]: enum.ImageError.html#variant.Unsupported
#[derive(Debug)]
pub struct UnsupportedError {
    // Best available hint of the format the unsupported operation concerned.
    format: ImageFormatHint,
    // Which particular capability was missing; see `UnsupportedErrorKind`.
    kind: UnsupportedErrorKind,
}
+
/// Details what feature is not supported.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum UnsupportedErrorKind {
    /// The required color type can not be handled.
    Color(ExtendedColorType),
    /// An image format is not supported.
    Format(ImageFormatHint),
    /// Some feature specified by string.
    /// This is discouraged and is likely to get deprecated (but not removed).
    GenericFeature(String),
    // Uninhabited marker reserving the right to add variants without a semver
    // break; downstream code can never construct or exhaustively match it.
    #[doc(hidden)]
    __NonExhaustive(NonExhaustiveMarker),
}
+
/// An error was encountered while encoding an image.
///
/// This is used as an opaque representation for the [`ImageError::Encoding`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Encoding`]: enum.ImageError.html#variant.Encoding
#[derive(Debug)]
pub struct EncodingError {
    // The format whose encoder produced (or is associated with) the failure.
    format: ImageFormatHint,
    // Optional error of the underlying encoder, exposed via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
+
+
/// An error was encountered in input arguments.
///
/// This is used as an opaque representation for the [`ImageError::Parameter`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Parameter`]: enum.ImageError.html#variant.Parameter
#[derive(Debug)]
pub struct ParameterError {
    // The rough category of the bad parameter; see `ParameterErrorKind`.
    kind: ParameterErrorKind,
    // Optional wrapped cause, exposed via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
+
/// Details how a parameter is malformed.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum ParameterErrorKind {
    /// An operation was repeated after an error, that could not be cloned, had
    /// already been emitted for it.
    FailedAlready,
    /// The dimensions passed are wrong.
    DimensionMismatch,
    /// A string describing the parameter.
    /// This is discouraged and is likely to get deprecated (but not removed).
    Generic(String),
    #[doc(hidden)]
    /// Do not use this, not part of stability guarantees.
    __NonExhaustive(NonExhaustiveMarker),
}
+
/// An error was encountered while decoding an image.
///
/// This is used as an opaque representation for the [`ImageError::Decoding`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Decoding`]: enum.ImageError.html#variant.Decoding
#[derive(Debug)]
pub struct DecodingError {
    // The format whose decoder produced (or is associated with) the failure.
    format: ImageFormatHint,
    // Optional free-form message; boxed str to keep the struct small.
    message: Option<Box<str>>,
    // Optional error of the underlying decoder, exposed via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
+
/// Completing the operation would have required more resources than allowed.
///
/// This is used as an opaque representation for the [`ImageError::Limits`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Limits`]: enum.ImageError.html#variant.Limits
#[derive(Debug)]
pub struct LimitError {
    // Which limit was hit; see `LimitErrorKind`.
    kind: LimitErrorKind,
    // do we need an underlying error?
}
+
/// Indicates the limit that prevented an operation from completing.
///
/// Note that this enumeration is not exhaustive and may in the future be extended to provide more
/// detailed information or to incorporate other resources types.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[allow(missing_copy_implementations)] // Might be non-Copy in the future.
pub enum LimitErrorKind {
    /// The resulting image exceed dimension limits in either direction.
    DimensionError,
    /// The operation would have performed an allocation larger than allowed.
    InsufficientMemory,
    #[doc(hidden)]
    /// Do not use this, not part of stability guarantees.
    __NonExhaustive(NonExhaustiveMarker),
}
+
/// A best effort representation for image formats.
///
/// Ordered roughly from most to least precise knowledge about the format.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum ImageFormatHint {
    /// The format is known exactly.
    Exact(ImageFormat),

    /// The format can be identified by a name.
    Name(String),

    /// A common path extension for the format is known.
    PathExtension(std::path::PathBuf),

    /// The format is not known or could not be determined.
    Unknown,

    #[doc(hidden)]
    __NonExhaustive(NonExhaustiveMarker),
}
+
// Internal implementation block for ImageError.
//
// The UpperCamelCase constants and constructor functions below mimic variant
// names of an older version of this error enum so internal call sites could
// stay unchanged; hence the two lint allowances.
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
impl ImageError {
    // Shorthand: an allocation limit would be exceeded.
    pub(crate) const InsufficientMemory: Self =
        ImageError::Limits(LimitError {
            kind: LimitErrorKind::InsufficientMemory,
        });

    // Shorthand: the supplied dimensions are invalid.
    pub(crate) const DimensionError: Self =
        ImageError::Parameter(ParameterError {
            kind: ParameterErrorKind::DimensionMismatch,
            underlying: None,
        });

    // Shorthand: the image stream already failed previously.
    pub(crate) const ImageEnd: Self =
        ImageError::Parameter(ParameterError {
            kind: ParameterErrorKind::FailedAlready,
            underlying: None,
        });

    // Legacy constructor: free-form "unsupported" message, no format hint.
    pub(crate) fn UnsupportedError(message: String) -> Self {
        ImageError::Unsupported(UnsupportedError::legacy_from_string(message))
    }

    // Legacy constructor: color type not supported, format unknown.
    pub(crate) fn UnsupportedColor(color: ExtendedColorType) -> Self {
        ImageError::Unsupported(UnsupportedError::from_format_and_kind(
            ImageFormatHint::Unknown,
            UnsupportedErrorKind::Color(color),
        ))
    }

    // Legacy constructor: free-form decoding error, no format hint.
    pub(crate) fn FormatError(message: String) -> Self {
        ImageError::Decoding(DecodingError::legacy_from_string(message))
    }
}
+
+impl UnsupportedError {
+ /// Create an `UnsupportedError` for an image with details on the unsupported feature.
+ ///
+ /// If the operation was not connected to a particular image format then the hint may be
+ /// `Unknown`.
+ pub fn from_format_and_kind(format: ImageFormatHint, kind: UnsupportedErrorKind) -> Self {
+ UnsupportedError {
+ format,
+ kind,
+ }
+ }
+
+ /// A shorthand for a generic feature without an image format.
+ pub(crate) fn legacy_from_string(message: String) -> Self {
+ UnsupportedError {
+ format: ImageFormatHint::Unknown,
+ kind: UnsupportedErrorKind::GenericFeature(message),
+ }
+ }
+
+ /// Returns the corresponding `UnsupportedErrorKind` of the error.
+ pub fn kind(&self) -> UnsupportedErrorKind {
+ self.kind.clone()
+ }
+
+ /// Returns the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+}
+
+impl DecodingError {
+ /// Create a `DecodingError` that stems from an arbitrary error of an underlying decoder.
+ pub fn new(
+ format: ImageFormatHint,
+ err: impl Into<Box<dyn Error + Send + Sync>>,
+ ) -> Self {
+ DecodingError {
+ format,
+ message: None,
+ underlying: Some(err.into()),
+ }
+ }
+
+ /// Create a `DecodingError` for an image format.
+ ///
+ /// The error will not contain any further information but is very easy to create.
+ pub fn from_format_hint(format: ImageFormatHint) -> Self {
+ DecodingError {
+ format,
+ message: None,
+ underlying: None,
+ }
+ }
+
+ /// Returns the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+
+ /// A shorthand for a string error without an image format.
+ pub(crate) fn legacy_from_string(message: String) -> Self {
+ DecodingError {
+ format: ImageFormatHint::Unknown,
+ message: Some(message.into_boxed_str()),
+ underlying: None,
+ }
+ }
+
+ /// Not quite legacy but also highly discouraged.
+ /// This is just since the string typing is prevalent in the `image` decoders...
+ // TODO: maybe a Cow? A constructor from `&'static str` wouldn't be too bad.
+ pub(crate) fn with_message(
+ format: ImageFormatHint,
+ message: String,
+ ) -> Self {
+ DecodingError {
+ format,
+ message: Some(message.into_boxed_str()),
+ underlying: None,
+ }
+ }
+
+ fn get_message_or_default(&self) -> &str {
+ match &self.message {
+ Some(st) => st,
+ None => "",
+ }
+ }
+}
+
+impl EncodingError {
+ /// Create an `EncodingError` that stems from an arbitrary error of an underlying encoder.
+ pub fn new(
+ format: ImageFormatHint,
+ err: impl Into<Box<dyn Error + Send + Sync>>,
+ ) -> Self {
+ EncodingError {
+ format,
+ underlying: Some(err.into()),
+ }
+ }
+
+ /// Create a `DecodingError` for an image format.
+ ///
+ /// The error will not contain any further information but is very easy to create.
+ pub fn from_format_hint(format: ImageFormatHint) -> Self {
+ EncodingError {
+ format,
+ underlying: None,
+ }
+ }
+
+ /// Return the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+}
+
+impl ParameterError {
+ /// Construct a `ParameterError` directly from a corresponding kind.
+ pub fn from_kind(kind: ParameterErrorKind) -> Self {
+ ParameterError {
+ kind,
+ underlying: None,
+ }
+ }
+
+ /// Returns the corresponding `ParameterErrorKind` of the error.
+ pub fn kind(&self) -> ParameterErrorKind {
+ self.kind.clone()
+ }
+}
+
+impl LimitError {
+ /// Construct a generic `LimitError` directly from a corresponding kind.
+ pub fn from_kind(kind: LimitErrorKind) -> Self {
+ LimitError {
+ kind,
+ }
+ }
+
+ /// Returns the corresponding `LimitErrorKind` of the error.
+ pub fn kind(&self) -> LimitErrorKind {
+ self.kind.clone()
+ }
+}
+
// Allows `?` to convert I/O failures into `ImageError` transparently.
impl From<io::Error> for ImageError {
    fn from(err: io::Error) -> ImageError {
        ImageError::IoError(err)
    }
}
+
// An exactly-known format is the most precise hint.
impl From<ImageFormat> for ImageFormatHint {
    fn from(format: ImageFormat) -> Self {
        ImageFormatHint::Exact(format)
    }
}
+
+impl From<&'_ std::path::Path> for ImageFormatHint {
+ fn from(path: &'_ std::path::Path) -> Self {
+ match path.extension() {
+ Some(ext) => ImageFormatHint::PathExtension(ext.into()),
+ None => ImageFormatHint::Unknown,
+ }
+ }
+}
+
// A bare format hint converts to "this format is unsupported"; the hint is
// stored both as the error's format and inside its kind.
impl From<ImageFormatHint> for UnsupportedError {
    fn from(hint: ImageFormatHint) -> Self {
        UnsupportedError {
            format: hint.clone(),
            kind: UnsupportedErrorKind::Format(hint),
        }
    }
}
+
/// Result of an image decoding/encoding process
///
/// Convenience alias used throughout the crate's public API.
pub type ImageResult<T> = Result<T, ImageError>;
+
impl fmt::Display for ImageError {
    // Every variant delegates to the wrapped error's own Display.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self {
            ImageError::IoError(err) => err.fmt(fmt),
            ImageError::Decoding(err) => err.fmt(fmt),
            ImageError::Encoding(err) => err.fmt(fmt),
            ImageError::Parameter(err) => err.fmt(fmt),
            ImageError::Limits(err) => err.fmt(fmt),
            ImageError::Unsupported(err) => err.fmt(fmt),
        }
    }
}

impl Error for ImageError {
    // NOTE(review): this forwards to the wrapped error's *source* rather than
    // returning the wrapped error itself. Since Display above already prints
    // the wrapped error's message, this avoids repeating that message when
    // walking the source chain — presumably intentional; confirm upstream.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            ImageError::IoError(err) => err.source(),
            ImageError::Decoding(err) => err.source(),
            ImageError::Encoding(err) => err.source(),
            ImageError::Parameter(err) => err.source(),
            ImageError::Limits(err) => err.source(),
            ImageError::Unsupported(err) => err.source(),
        }
    }
}
+
impl fmt::Display for UnsupportedError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Arm order matters: the `Unknown` and `PathExtension` special cases
        // must precede the catch-all `Format(format)` arm.
        match &self.kind {
            UnsupportedErrorKind::Format(ImageFormatHint::Unknown) => write!(
                fmt,
                "The image format could not be determined",
            ),
            UnsupportedErrorKind::Format(format @ ImageFormatHint::PathExtension(_)) => write!(
                fmt,
                "The file extension {} was not recognized as an image format",
                format,
            ),
            UnsupportedErrorKind::Format(format) => write!(
                fmt,
                "The image format {} is not supported",
                format,
            ),
            UnsupportedErrorKind::Color(color) => write!(
                fmt,
                "The decoder for {} does not support the color type `{:?}`",
                self.format,
                color,
            ),
            UnsupportedErrorKind::GenericFeature(message) => {
                match &self.format {
                    ImageFormatHint::Unknown => write!(
                        fmt,
                        "The decoder does not support the format feature {}",
                        message,
                    ),
                    other => write!(
                        fmt,
                        "The decoder for {} does not support the format features {}",
                        other,
                        message,
                    ),
                }
            },
            // The marker's field is uninhabited; the empty match proves this
            // arm can never execute.
            UnsupportedErrorKind::__NonExhaustive(marker) => match marker._private {},
        }
    }
}

// No underlying cause is stored, so the default `source` (None) suffices.
impl Error for UnsupportedError { }
+
+impl fmt::Display for ParameterError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match &self.kind {
+ ParameterErrorKind::DimensionMismatch => write!(
+ fmt,
+ "The Image's dimensions are either too \
+ small or too large"
+ ),
+ ParameterErrorKind::FailedAlready => write!(
+ fmt,
+ "The end the image stream has been reached due to a previous error"
+ ),
+ ParameterErrorKind::Generic(message) => write!(
+ fmt,
+ "The parameter is malformed: {}",
+ message,
+ ),
+ ParameterErrorKind::__NonExhaustive(marker) => match marker._private {},
+ }?;
+
+ if let Some(underlying) = &self.underlying {
+ write!(fmt, "\n{}", underlying)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Error for ParameterError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match &self.underlying {
+ None => None,
+ Some(source) => Some(&**source),
+ }
+ }
+}
+
impl fmt::Display for EncodingError {
    // Prints the format hint, and the underlying encoder error (when present)
    // on a second line.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match &self.underlying {
            Some(underlying) => write!(
                fmt,
                "Format error encoding {}:\n{}",
                self.format,
                underlying,
            ),
            None => write!(
                fmt,
                "Format error encoding {}",
                self.format,
            ),
        }
    }
}

impl Error for EncodingError {
    // Expose the wrapped encoder error (if any) for source-chain walking.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match &self.underlying {
            None => None,
            Some(source) => Some(&**source),
        }
    }
}
+
impl fmt::Display for DecodingError {
    // Layers of detail: format hint (unless Unknown), the stored message
    // (possibly empty), and the underlying decoder error on a second line.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match &self.underlying {
            None => match self.format {
                ImageFormatHint::Unknown => write!(
                    fmt,
                    "Format error: {}",
                    self.get_message_or_default(),
                ),
                _ => write!(
                    fmt,
                    "Format error decoding {}: {}",
                    self.format,
                    self.get_message_or_default(),
                ),
            },
            Some(underlying) => write!(
                fmt,
                "Format error decoding {}: {}\n{}",
                self.format,
                self.get_message_or_default(),
                underlying,
            ),
        }
    }
}

impl Error for DecodingError {
    // Expose the wrapped decoder error (if any) for source-chain walking.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match &self.underlying {
            None => None,
            Some(source) => Some(&**source),
        }
    }
}
+
impl fmt::Display for LimitError {
    // Short, fixed messages per limit kind; no underlying error is stored.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self.kind {
            LimitErrorKind::InsufficientMemory => write!(fmt, "Insufficient memory"),
            LimitErrorKind::DimensionError => write!(fmt, "Image is too large"),
            // Uninhabited marker; this arm can never execute.
            LimitErrorKind::__NonExhaustive(marker) => match marker._private {},
        }
    }
}

impl Error for LimitError { }
+
impl fmt::Display for ImageFormatHint {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self {
            ImageFormatHint::Exact(format) => write!(fmt, "{:?}", format),
            ImageFormatHint::Name(name) => write!(fmt, "`{}`", name),
            // NOTE(review): `{:?}` on the `PathBuf` adds its own quotes, so
            // this renders e.g. `."jpg"` — looks intentional, but confirm
            // before changing this user-visible text.
            ImageFormatHint::PathExtension(ext) => write!(fmt, "`.{:?}`", ext),
            ImageFormatHint::Unknown => write!(fmt, "`Unknown`"),
            // Uninhabited marker; this arm can never execute.
            ImageFormatHint::__NonExhaustive(marker) => match marker._private {},
        }
    }
}
+
#[cfg(test)]
mod tests {
    use std::mem;
    use super::*;

    #[allow(dead_code)]
    // This will fail to compile if the size of this type is large.
    // (Indexing a one-element array with index 1 — the predicate cast to
    // usize — is a const-eval error, i.e. a compile-time size assertion.)
    const ASSERT_SMALLISH: usize = [0][(mem::size_of::<ImageError>() >= 200) as usize];

    #[test]
    fn test_send_sync_stability() {
        // Compile-time check that ImageError stays Send + Sync; losing either
        // bound would be a breaking change for multithreaded users.
        fn assert_send_sync<T: Send + Sync>() { }

        assert_send_sync::<ImageError>();
    }
}
diff --git a/third_party/rust/image/src/flat.rs b/third_party/rust/image/src/flat.rs
new file mode 100644
index 0000000000..6df78890cb
--- /dev/null
+++ b/third_party/rust/image/src/flat.rs
@@ -0,0 +1,1551 @@
+//! Image representations for ffi.
+//!
+//! # Usage
+//!
+//! Imagine you want to offer a very simple ffi interface: The caller provides an image buffer and
+//! your program creates a thumbnail from it and dumps that image as `png`. This module is designed
+//! to help you transition from raw memory data to Rust representation.
+//!
+//! ```no_run
+//! use std::ptr;
+//! use std::slice;
+//! use image::Rgb;
+//! use image::flat::{FlatSamples, SampleLayout};
+//! use image::imageops::thumbnail;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn store_rgb8_compressed(
+//! data: *const u8, len: usize,
+//! layout: *const SampleLayout
+//! )
+//! -> bool
+//! {
+//! let samples = unsafe { slice::from_raw_parts(data, len) };
+//! let layout = unsafe { ptr::read(layout) };
+//!
+//! let buffer = FlatSamples {
+//! samples,
+//! layout,
+//! color_hint: None,
+//! };
+//!
+//! let view = match buffer.as_view::<Rgb<u8>>() {
+//! Err(_) => return false, // Invalid layout.
+//! Ok(view) => view,
+//! };
+//!
+//! thumbnail(&view, 64, 64)
+//! .save("output.png")
+//! .map(|_| true)
+//! .unwrap_or_else(|_| false)
+//! }
+//! ```
+//!
+use std::cmp;
+use std::ops::{Deref, Index, IndexMut};
+use std::marker::PhantomData;
+
+use num_traits::Zero;
+
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::color::ColorType;
+use crate::error::ImageError;
+use crate::image::{GenericImage, GenericImageView};
+
/// A flat buffer over a (multi channel) image.
///
/// In contrast to `ImageBuffer`, this representation of a sample collection is much more lenient
/// in the layout thereof. In particular, it also allows grouping by color planes instead of by
/// pixel, at least for the purpose of a `GenericImageView`.
///
/// Note that the strides need not conform to the assumption that constructed indices actually
/// refer inside the underlying buffer but return values of library functions will always guarantee
/// this. To manually make this check use `check_index_validities` and maybe put that inside an
/// assert.
#[derive(Clone, Debug)]
pub struct FlatSamples<Buffer> {
    /// Underlying linear container holding sample values.
    // Generic so both borrowed (`&[u8]`) and owned containers can back it, as
    // in the module-level ffi example above.
    pub samples: Buffer,

    /// A `repr(C)` description of the layout of buffer samples.
    pub layout: SampleLayout,

    /// Supplementary color information.
    ///
    /// You may keep this as `None` in most cases. This is NOT checked in `View` or other
    /// converters. It is intended mainly as a way for types that convert to this buffer type to
    /// attach their otherwise static color information. A dynamic image representation could
    /// however use this to resolve representational ambiguities such as the order of RGB channels.
    pub color_hint: Option<ColorType>,
}
+
/// A ffi compatible description of a sample buffer.
///
/// NOTE(review): strides appear to count buffer elements (samples), not bytes —
/// `row_major_packed` sets `channel_stride` to 1 — confirm for wider sample types.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct SampleLayout {
    /// The number of channels in the color representation of the image.
    pub channels: u8,

    /// Add this to an index to get to the sample in the next channel.
    pub channel_stride: usize,

    /// The width of the represented image.
    pub width: u32,

    /// Add this to an index to get to the next sample in x-direction.
    pub width_stride: usize,

    /// The height of the represented image.
    pub height: u32,

    /// Add this to an index to get to the next sample in y-direction.
    pub height_stride: usize,
}
+
/// Helper struct for an unnamed (stride, length) pair.
///
/// Field 0 is the stride and field 1 the extent of one dimension; the derived
/// `Ord` therefore compares by stride first, which is what
/// `increasing_stride_dims` relies on when sorting.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Dim(usize, usize);
+
+impl SampleLayout {
    /// Describe a row-major image packed in all directions.
    ///
    /// The resulting layout will surely be `NormalForm::RowMajorPacked`. It can therefore be
    /// converted safely to an `ImageBuffer` with a large enough underlying buffer.
    ///
    /// ```
    /// # use image::flat::{NormalForm, SampleLayout};
    /// let layout = SampleLayout::row_major_packed(3, 640, 480);
    /// assert!(layout.is_normal(NormalForm::RowMajorPacked));
    /// ```
    ///
    /// # Panics
    ///
    /// On platforms where `usize` has the same size as `u32` this panics when the resulting stride
    /// in the `height` direction would be larger than `usize::max_value()`. On other platforms
    /// where it can surely accommodate `u8::max_value() * u32::max_value()`, this can never happen.
    pub fn row_major_packed(channels: u8, width: u32, height: u32) -> Self {
        // One row occupies channels * width samples; `checked_mul` turns an
        // overflow of that product into the documented panic.
        let height_stride = (channels as usize).checked_mul(width as usize)
            .expect("Row major packed image can not be described because it does not fit into memory");
        SampleLayout {
            channels,
            channel_stride: 1,
            width,
            width_stride: channels as usize,
            height,
            height_stride,
        }
    }
+
    /// Describe a column-major image packed in all directions.
    ///
    /// The resulting layout will surely be `NormalForm::ColumnMajorPacked`. This is not
    /// particularly useful for conversion but can be used to describe such a buffer without
    /// pitfalls.
    ///
    /// ```
    /// # use image::flat::{NormalForm, SampleLayout};
    /// let layout = SampleLayout::column_major_packed(3, 640, 480);
    /// assert!(layout.is_normal(NormalForm::ColumnMajorPacked));
    /// ```
    ///
    /// # Panics
    ///
    /// On platforms where `usize` has the same size as `u32` this panics when the resulting stride
    /// in the `width` direction would be larger than `usize::max_value()`. On other platforms
    /// where it can surely accommodate `u8::max_value() * u32::max_value()`, this can never happen.
    pub fn column_major_packed(channels: u8, width: u32, height: u32) -> Self {
        // One column occupies channels * height samples; `checked_mul` turns
        // an overflow of that product into the documented panic.
        let width_stride = (channels as usize).checked_mul(height as usize)
            .expect("Column major packed image can not be described because it does not fit into memory");
        SampleLayout {
            channels,
            channel_stride: 1,
            height,
            height_stride: channels as usize,
            width,
            width_stride,
        }
    }
+
    /// Get the strides for indexing matrix-like `[(c, w, h)]`.
    ///
    /// For a row-major layout with grouped samples, this tuple is strictly
    /// increasing.
    // Tuple order matches `extents`: (channel, width, height).
    pub fn strides_cwh(&self) -> (usize, usize, usize) {
        (self.channel_stride, self.width_stride, self.height_stride)
    }
+
    /// Get the dimensions `(channels, width, height)`.
    ///
    /// The interface is optimized for use with `strides_cwh` instead. The channel extent will be
    /// before width and height.
    // Widened to `usize` so the values pair directly with the strides.
    pub fn extents(&self) -> (usize, usize, usize) {
        (self.channels as usize, self.width as usize, self.height as usize)
    }
+
    /// Tuple of bounds in the order of coordinate inputs.
    ///
    /// This function should be used whenever working with image coordinates opposed to buffer
    /// coordinates. The only difference compared to `extents` is the output type.
    pub fn bounds(&self) -> (u8, u32, u32) {
        (self.channels, self.width, self.height)
    }
+
    /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
    ///
    /// This method will allow zero strides, allowing compact representations of monochrome images.
    /// To check that no aliasing occurs, try `check_alias_invariants`. For compact images (no
    /// aliasing and no unindexed samples) this is `width*height*channels`. But for both of the
    /// other cases, the reasoning is slightly more involved.
    ///
    /// # Explanation
    ///
    /// Note that there is a difference between `min_length` and the index of the sample
    /// 'one-past-the-end`. This is due to strides that may be larger than the dimension below.
    ///
    /// ## Example with holes
    ///
    /// Let's look at an example of a grayscale image with
    /// * `width_stride = 1`
    /// * `width = 2`
    /// * `height_stride = 3`
    /// * `height = 2`
    ///
    /// ```text
    /// | x x   | x x m | $
    ///  min_length m ^
    ///  ^ one-past-the-end $
    /// ```
    ///
    /// The difference is also extreme for empty images with large strides. The one-past-the-end
    /// sample index is still as large as the largest of these strides while `min_length = 0`.
    ///
    /// ## Example with aliasing
    ///
    /// The concept gets even more important when you allow samples to alias each other. Here we
    /// have the buffer of a small grayscale image where this is the case, this time we will first
    /// show the buffer and then the individual rows below.
    ///
    /// * `width_stride = 1`
    /// * `width = 3`
    /// * `height_stride = 2`
    /// * `height = 2`
    ///
    /// ```text
    ///  1 2 3 4 5 m
    /// |1 2 3| row one
    ///     |3 4 5| row two
    ///            ^ m min_length
    ///          ^ ??? one-past-the-end
    /// ```
    ///
    /// This time 'one-past-the-end' is not even simply the largest stride times the extent of its
    /// dimension. That still points inside the image because `height*height_stride = 4` but also
    /// `index_of(1, 2) = 4`.
    pub fn min_length(&self) -> Option<usize> {
        // Any empty dimension means no sample is addressable at all.
        if self.width == 0 || self.height == 0 || self.channels == 0 {
            return Some(0)
        }

        // Index of the last in-bounds sample, plus one. `index` is defined
        // outside this excerpt — presumably it yields None on overflow (TODO
        // confirm); `checked_add` guards the final +1 either way.
        self.index(self.channels - 1, self.width - 1, self.height - 1)
            .and_then(|idx| idx.checked_add(1))
    }
+
+ /// Does a buffer of length `len` suffice for all in-bounds samples?
+ pub fn fits(&self, len: usize) -> bool {
+ match self.min_length() {
+ Some(min) => len >= min,
+ None => false,
+ }
+ }
+
+ /// The dimensions of this layout, sorted by ascending stride.
+ fn increasing_stride_dims(&self) -> [Dim; 3] {
+ let mut dims = [
+ Dim(self.channel_stride, self.channels as usize),
+ Dim(self.width_stride, self.width as usize),
+ Dim(self.height_stride, self.height as usize),
+ ];
+
+ // `Dim` orders by stride first, so sorting yields ascending strides.
+ dims.sort();
+
+ // Sanity check of the invariant callers rely on.
+ assert!(dims.windows(2).all(|pair| pair[0].stride() <= pair[1].stride()));
+
+ dims
+ }
+
+ /// If there are any samples aliasing each other.
+ ///
+ /// When this returns `false` it would always be safe to hand out mutable access to two
+ /// different samples at the same time; otherwise that operation needs additional checks. A
+ /// dimension whose stride overflows `usize` is also treated as aliasing.
+ pub fn has_aliased_samples(&self) -> bool {
+ let grouped = self.increasing_stride_dims();
+ let (min_dim, mid_dim, max_dim) = (grouped[0], grouped[1], grouped[2]);
+
+ match (min_dim.checked_len(), mid_dim.checked_len(), max_dim.checked_len()) {
+ // All dimension lengths representable: each higher dimension must walk over all of
+ // one lower dimension, i.e. a lower dimension must not spill past the next stride.
+ (Some(min_size), Some(mid_size), Some(_)) => {
+ min_size > mid_dim.stride() || mid_size > max_dim.stride()
+ }
+ // Any overflow counts as aliasing.
+ _ => true,
+ }
+ }
+
+ /// Check if a buffer fulfills the requirements of a normal form.
+ ///
+ /// Certain conversions have preconditions on the structure of the sample buffer that are not
+ /// captured (by design) by the type system. These are then checked before the conversion. Such
+ /// checks can all be done in constant time and will not inspect the buffer content. You can
+ /// perform these checks yourself when the conversion is not required at this moment but maybe
+ /// still performed later.
+ pub fn is_normal(&self, form: NormalForm) -> bool {
+ // Every normal form requires an unaliased (and overflow-free) layout.
+ if self.has_aliased_samples() {
+ return false;
+ }
+
+ // NOTE(review): `>=` relies on an ordering of `NormalForm` defined elsewhere in this
+ // file (the derive list has no `PartialOrd`); presumably row-major and column-major are
+ // incomparable in it — confirm before reordering any of these guards.
+ if form >= NormalForm::PixelPacked && self.channel_stride != 1 {
+ return false;
+ }
+
+ if form >= NormalForm::ImagePacked {
+ // has aliased already checked for overflows.
+ let grouped = self.increasing_stride_dims();
+ let (min_dim, mid_dim, max_dim) = (grouped[0], grouped[1], grouped[2]);
+
+ // Packed images: the smallest stride is 1 and each dimension exactly covers the
+ // one below it, leaving no holes.
+ if 1 != min_dim.stride() {
+ return false;
+ }
+
+ if min_dim.len() != mid_dim.stride() {
+ return false;
+ }
+
+ if mid_dim.len() != max_dim.stride() {
+ return false;
+ }
+ }
+
+ if form >= NormalForm::RowMajorPacked {
+ // Row-major: channels are the fastest-varying index, rows follow contiguously.
+ if self.width_stride != self.channels as usize {
+ return false;
+ }
+
+ if self.width as usize*self.width_stride != self.height_stride {
+ return false;
+ }
+ }
+
+ if form >= NormalForm::ColumnMajorPacked {
+ // Column-major: channels are fastest-varying, columns follow contiguously.
+ if self.height_stride != self.channels as usize {
+ return false;
+ }
+
+ if self.height as usize*self.height_stride != self.width_stride {
+ return false;
+ }
+ }
+
+ true
+ }
+
+ /// Check that the channel index and the pixel coordinate are in bounds.
+ ///
+ /// An in-bounds coordinate does not yet guarantee that the corresponding calculation of a
+ /// buffer index does not overflow. However, if such a buffer large enough to hold all samples
+ /// actually exists in memory, this property of course follows.
+ pub fn in_bounds(&self, channel: u8, x: u32, y: u32) -> bool {
+ let (channels, width, height) = self.bounds();
+ channel < channels && x < width && y < height
+ }
+
+ /// Resolve the buffer index of a particular sample.
+ ///
+ /// Yields `None` for out-of-bounds coordinates as well as for indices that do not fit into a
+ /// `usize`.
+ pub fn index(&self, channel: u8, x: u32, y: u32) -> Option<usize> {
+ if self.in_bounds(channel, x, y) {
+ self.index_ignoring_bounds(channel as usize, x as usize, y as usize)
+ } else {
+ None
+ }
+ }
+
+ /// Get the theoretical position of sample (channel, x, y).
+ ///
+ /// The 'check' is for overflow during index calculation, not that it is contained in the
+ /// image. Two samples may return the same index, even when one of them is out of bounds. This
+ /// happens when all strides are `0`, i.e. the image is an arbitrarily large monochrome image.
+ pub fn index_ignoring_bounds(&self, channel: usize, x: usize, y: usize) -> Option<usize> {
+ // The parameters already are `usize`, no casts required. Every multiplication and the
+ // final additions are checked so an unrepresentable index yields `None`.
+ let idx_c = channel.checked_mul(self.channel_stride)?;
+ let idx_x = x.checked_mul(self.width_stride)?;
+ let idx_y = y.checked_mul(self.height_stride)?;
+
+ idx_c.checked_add(idx_x)?.checked_add(idx_y)
+ }
+
+ /// Get an index provided it is in-bounds.
+ ///
+ /// Assumes that the image is backed by some sufficiently large buffer. Then computation can
+ /// not overflow as we could represent the maximum coordinate. Since overflow is defined either
+ /// way, this method can not be unsafe.
+ pub fn in_bounds_index(&self, c: u8, x: u32, y: u32) -> usize {
+ let (c_stride, x_stride, y_stride) = self.strides_cwh();
+ (c as usize) * c_stride + (x as usize) * x_stride + (y as usize) * y_stride
+ }
+
+
+ /// Shrink the image to the minimum of current and given extents.
+ ///
+ /// Strides are left untouched, so the resulting sample buffer may have holes created by the
+ /// shrinking operation. Shrinking could also lead to a non-aliasing image when samples had
+ /// aliased each other before.
+ pub fn shrink_to(&mut self, channels: u8, width: u32, height: u32) {
+ if channels < self.channels {
+ self.channels = channels;
+ }
+ if width < self.width {
+ self.width = width;
+ }
+ if height < self.height {
+ self.height = height;
+ }
+ }
+}
+
+impl Dim {
+ /// The stride of this dimension.
+ fn stride(self) -> usize {
+ let Dim(stride, _) = self;
+ stride
+ }
+
+ /// Length of this dimension in memory; `None` when the product overflows.
+ fn checked_len(self) -> Option<usize> {
+ let Dim(stride, extent) = self;
+ stride.checked_mul(extent)
+ }
+
+ /// Length of this dimension in memory, assuming no overflow.
+ fn len(self) -> usize {
+ let Dim(stride, extent) = self;
+ stride * extent
+ }
+}
+
+impl<Buffer> FlatSamples<Buffer> {
+ /// Get the strides for indexing matrix-like `[(c, w, h)]`.
+ ///
+ /// For a row-major layout with grouped samples, this tuple is strictly
+ /// increasing.
+ pub fn strides_cwh(&self) -> (usize, usize, usize) {
+ self.layout.strides_cwh()
+ }
+
+ /// Get the dimensions `(channels, width, height)`.
+ ///
+ /// The interface is optimized for use with `strides_cwh` instead. The channel extent will be
+ /// before width and height.
+ pub fn extents(&self) -> (usize, usize, usize) {
+ self.layout.extents()
+ }
+
+ /// Tuple of bounds in the order of coordinate inputs.
+ ///
+ /// This function should be used whenever working with image coordinates opposed to buffer
+ /// coordinates. The only difference compared to `extents` is the output type.
+ pub fn bounds(&self) -> (u8, u32, u32) {
+ self.layout.bounds()
+ }
+
+ /// Get a reference based version.
+ pub fn as_ref<T>(&self) -> FlatSamples<&[T]> where Buffer: AsRef<[T]> {
+ FlatSamples {
+ samples: self.samples.as_ref(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Get a mutable reference based version.
+ pub fn as_mut<T>(&mut self) -> FlatSamples<&mut [T]> where Buffer: AsMut<[T]> {
+ FlatSamples {
+ samples: self.samples.as_mut(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Copy the data into an owned vector.
+ pub fn to_vec<T>(&self) -> FlatSamples<Vec<T>>
+ where T: Clone, Buffer: AsRef<[T]>
+ {
+ FlatSamples {
+ samples: self.samples.as_ref().to_vec(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Get a reference to a single sample.
+ ///
+ /// This is more restrictive than the method based on `std::ops::Index` but guarantees to
+ /// properly check all bounds and not panic as long as `Buffer::as_ref` does not do so.
+ ///
+ /// ```
+ /// # use image::{RgbImage};
+ /// let flat = RgbImage::new(480, 640).into_flat_samples();
+ ///
+ /// // Get the blue channel at (10, 10).
+ /// assert!(flat.get_sample(1, 10, 10).is_some());
+ ///
+ /// // There is no alpha channel.
+ /// assert!(flat.get_sample(3, 10, 10).is_none());
+ /// ```
+ ///
+ /// For cases where a special buffer does not provide `AsRef<[T]>`, consider encapsulating
+ /// bounds checks with `min_length` in a type similar to `View`. Then you may use
+ /// `in_bounds_index` as a small speedup over the index calculation of this method which relies
+ /// on `index_ignoring_bounds` since it can not have a-priori knowledge that the sample
+ /// coordinate is in fact backed by any memory buffer.
+ pub fn get_sample<T>(&self, channel: u8, x: u32, y: u32) -> Option<&T>
+ where Buffer: AsRef<[T]>,
+ {
+ self.index(channel, x, y).and_then(|idx| self.samples.as_ref().get(idx))
+ }
+
+
+ /// Get a mutable reference to a single sample.
+ ///
+ /// This is more restrictive than the method based on `std::ops::IndexMut` but guarantees to
+ /// properly check all bounds and not panic as long as `Buffer::as_ref` does not do so.
+ /// Contrary to conversion to `ViewMut`, this does not require that samples are packed since it
+ /// does not need to convert samples to a color representation.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// here can in fact modify more than the coordinate in the argument.
+ ///
+ /// ```
+ /// # use image::{RgbImage};
+ /// let mut flat = RgbImage::new(480, 640).into_flat_samples();
+ ///
+ /// // Assign some new color to the blue channel at (10, 10).
+ /// *flat.get_mut_sample(1, 10, 10).unwrap() = 255;
+ ///
+ /// // There is no alpha channel.
+ /// assert!(flat.get_mut_sample(3, 10, 10).is_none());
+ /// ```
+ ///
+ /// For cases where a special buffer does not provide `AsRef<[T]>`, consider encapsulating
+ /// bounds checks with `min_length` in a type similar to `View`. Then you may use
+ /// `in_bounds_index` as a small speedup over the index calculation of this method which relies
+ /// on `index_ignoring_bounds` since it can not have a-priori knowledge that the sample
+ /// coordinate is in fact backed by any memory buffer.
+ pub fn get_mut_sample<T>(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut T>
+ where Buffer: AsMut<[T]>,
+ {
+ match self.index(channel, x, y) {
+ None => None,
+ Some(idx) => self.samples.as_mut().get_mut(idx),
+ }
+ }
+
+ /// View this buffer as an image over some type of pixel.
+ ///
+ /// This first ensures that all in-bounds coordinates refer to valid indices in the sample
+ /// buffer. It also checks that the specified pixel format expects the same number of channels
+ /// that are present in this buffer. Neither are larger nor a smaller number will be accepted.
+ /// There is no automatic conversion.
+ pub fn as_view<P>(&self) -> Result<View<&[P::Subpixel], P>, Error>
+ where P: Pixel, Buffer: AsRef<[P::Subpixel]>,
+ {
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::WrongColor(P::COLOR_TYPE))
+ }
+
+ let as_ref = self.samples.as_ref();
+ if !self.layout.fits(as_ref.len()) {
+ return Err(Error::TooLarge)
+ }
+
+ Ok(View {
+ inner: FlatSamples {
+ samples: as_ref,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// View this buffer but keep mutability at a sample level.
+ ///
+ /// This is similar to `as_view` but subtly different from `as_view_mut`. The resulting type
+ /// can be used as a `GenericImage` with the same prior invariants needed as for `as_view`.
+ /// It can not be used as a mutable `GenericImage` but does not need channels to be packed in
+ /// their pixel representation.
+ ///
+ /// This first ensures that all in-bounds coordinates refer to valid indices in the sample
+ /// buffer. It also checks that the specified pixel format expects the same number of channels
+ /// that are present in this buffer. Neither are larger nor a smaller number will be accepted.
+ /// There is no automatic conversion.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// for one sample can in fact modify other samples as well. Sometimes exactly this is
+ /// intended.
+ pub fn as_view_with_mut_samples<P>(&mut self) -> Result<View<&mut [P::Subpixel], P>, Error>
+ where P: Pixel, Buffer: AsMut<[P::Subpixel]>,
+ {
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::WrongColor(P::COLOR_TYPE))
+ }
+
+ let as_mut = self.samples.as_mut();
+ if !self.layout.fits(as_mut.len()) {
+ return Err(Error::TooLarge)
+ }
+
+ Ok(View {
+ inner: FlatSamples {
+ samples: as_mut,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// Interpret this buffer as a mutable image.
+ ///
+ /// To succeed, the pixels in this buffer may not alias each other and the samples of each
+ /// pixel must be packed (i.e. `channel_stride` is `1`). The number of channels must be
+ /// consistent with the channel count expected by the pixel format.
+ ///
+ /// This is similar to an `ImageBuffer` except it is a temporary view that is not normalized as
+ /// strongly. To get an owning version, consider copying the data into an `ImageBuffer`. This
+ /// provides many more operations, is possibly faster (if not you may want to open an issue)
+ /// and is generally polished. You can also try to convert this buffer inline, see
+ /// `ImageBuffer::from_raw`.
+ pub fn as_view_mut<P>(&mut self) -> Result<ViewMut<&mut [P::Subpixel], P>, Error>
+ where P: Pixel, Buffer: AsMut<[P::Subpixel]>,
+ {
+ if !self.layout.is_normal(NormalForm::PixelPacked) {
+ return Err(Error::NormalFormRequired(NormalForm::PixelPacked))
+ }
+
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::WrongColor(P::COLOR_TYPE))
+ }
+
+ let as_mut = self.samples.as_mut();
+ if !self.layout.fits(as_mut.len()) {
+ return Err(Error::TooLarge)
+ }
+
+ Ok(ViewMut {
+ inner: FlatSamples {
+ samples: as_mut,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// View the samples as a slice.
+ ///
+ /// The slice is not limited to the region of the image and not all sample indices are valid
+ /// indices into this buffer. See `image_mut_slice` as an alternative.
+ pub fn as_slice<T>(&self) -> &[T] where Buffer: AsRef<[T]> {
+ self.samples.as_ref()
+ }
+
+ /// View the samples as a mutable slice.
+ ///
+ /// The slice is not limited to the region of the image and not all sample indices are valid
+ /// indices into this buffer. See `image_mut_slice` as an alternative.
+ pub fn as_mut_slice<T>(&mut self) -> &mut [T] where Buffer: AsMut<[T]> {
+ self.samples.as_mut()
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// This may fail when the coordinates in this image are either out-of-bounds of the underlying
+ /// buffer or can not be represented. Note that the slice may have holes that do not correspond
+ /// to any sample in the image represented by it.
+ pub fn image_slice<T>(&self) -> Option<&[T]> where Buffer: AsRef<[T]> {
+ let min_length = match self.min_length() {
+ None => return None,
+ Some(index) => index,
+ };
+
+ let slice = self.samples.as_ref();
+ if slice.len() < min_length {
+ return None
+ }
+
+ Some(&slice[..min_length])
+ }
+
+ /// Mutable portion of the buffer that holds sample values.
+ pub fn image_mut_slice<T>(&mut self) -> Option<&mut [T]> where Buffer: AsMut<[T]> {
+ let min_length = match self.min_length() {
+ None => return None,
+ Some(index) => index,
+ };
+
+ let slice = self.samples.as_mut();
+ if slice.len() < min_length {
+ return None
+ }
+
+ Some(&mut slice[..min_length])
+ }
+
+ /// Move the data into an image buffer.
+ ///
+ /// This does **not** convert the sample layout. The buffer needs to be in packed row-major form
+ /// before calling this function. In case of an error, returns the buffer again so that it does
+ /// not release any allocation.
+ pub fn try_into_buffer<P>(self) -> Result<ImageBuffer<P, Buffer>, (Error, Self)>
+ where
+ P: Pixel + 'static,
+ P::Subpixel: 'static,
+ Buffer: Deref<Target=[P::Subpixel]>,
+ {
+ if !self.is_normal(NormalForm::RowMajorPacked) {
+ return Err((Error::NormalFormRequired(NormalForm::RowMajorPacked), self))
+ }
+
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err((Error::WrongColor(P::COLOR_TYPE), self))
+ }
+
+ if !self.fits(self.samples.deref().len()) {
+ return Err((Error::TooLarge, self))
+ }
+
+
+ Ok(ImageBuffer::from_raw(self.layout.width, self.layout.height, self.samples).unwrap_or_else(
+ || panic!("Preconditions should have been ensured before conversion")))
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// This method will allow zero strides, allowing compact representations of monochrome images.
+ /// To check that no aliasing occurs, try `has_aliased_samples`. For compact images (no
+ /// aliasing and no unindexed samples) this is `width*height*channels`. But for both of the
+ /// other cases, the reasoning is slightly more involved.
+ ///
+ /// # Explanation
+ ///
+ /// Note that there is a difference between `min_length` and the index of the sample
+ /// 'one-past-the-end`. This is due to strides that may be larger than the dimension below.
+ ///
+ /// ## Example with holes
+ ///
+ /// Let's look at an example of a grayscale image with
+ /// * `width_stride = 1`
+ /// * `width = 2`
+ /// * `height_stride = 3`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// | x x | x x m | $
+ /// min_length m ^
+ /// ^ one-past-the-end $
+ /// ```
+ ///
+ /// The difference is also extreme for empty images with large strides. The one-past-the-end
+ /// sample index is still as large as the largest of these strides while `min_length = 0`.
+ ///
+ /// ## Example with aliasing
+ ///
+ /// The concept gets even more important when you allow samples to alias each other. Here we
+ /// have the buffer of a small grayscale image where this is the case, this time we will first
+ /// show the buffer and then the individual rows below.
+ ///
+ /// * `width_stride = 1`
+ /// * `width = 3`
+ /// * `height_stride = 2`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// 1 2 3 4 5 m
+ /// |1 2 3| row one
+ /// |3 4 5| row two
+ /// ^ m min_length
+ /// ^ ??? one-past-the-end
+ /// ```
+ ///
+ /// This time 'one-past-the-end' is not even simply the largest stride times the extent of its
+ /// dimension. That still points inside the image because `height*height_stride = 4` but also
+ /// `index_of(1, 2) = 4`.
+ pub fn min_length(&self) -> Option<usize> {
+ self.layout.min_length()
+ }
+
+ /// Check if a buffer of length `len` is large enough.
+ pub fn fits(&self, len: usize) -> bool {
+ self.layout.fits(len)
+ }
+
+ /// If there are any samples aliasing each other.
+ ///
+ /// If this is not the case, it would always be safe to allow mutable access to two different
+ /// samples at the same time. Otherwise, this operation would need additional checks. When one
+ /// dimension overflows `usize` with its stride we also consider this aliasing.
+ pub fn has_aliased_samples(&self) -> bool {
+ self.layout.has_aliased_samples()
+ }
+
+ /// Check if a buffer fulfills the requirements of a normal form.
+ ///
+ /// Certain conversions have preconditions on the structure of the sample buffer that are not
+ /// captured (by design) by the type system. These are then checked before the conversion. Such
+ /// checks can all be done in constant time and will not inspect the buffer content. You can
+ /// perform these checks yourself when the conversion is not required at this moment but maybe
+ /// still performed later.
+ pub fn is_normal(&self, form: NormalForm) -> bool {
+ self.layout.is_normal(form)
+ }
+
+ /// Check that the pixel and the channel index are in bounds.
+ ///
+ /// An in-bound coordinate does not yet guarantee that the corresponding calculation of a
+ /// buffer index does not overflow. However, if such a buffer large enough to hold all samples
+ /// actually exists in memory, this property of course follows.
+ pub fn in_bounds(&self, channel: u8, x: u32, y: u32) -> bool {
+ self.layout.in_bounds(channel, x, y)
+ }
+
+ /// Resolve the index of a particular sample.
+ ///
+ /// `None` if the index is outside the bounds or does not fit into a `usize`.
+ pub fn index(&self, channel: u8, x: u32, y: u32) -> Option<usize> {
+ self.layout.index(channel, x, y)
+ }
+
+ /// Get the theoretical position of sample (channel, x, y).
+ ///
+ /// The 'check' is for overflow during index calculation, not that it is contained in the
+ /// image. Two samples may return the same index, even when one of them is out of bounds. This
+ /// happens when all strides are `0`, i.e. the image is an arbitrarily large monochrome image.
+ pub fn index_ignoring_bounds(&self, channel: usize, x: usize, y: usize) -> Option<usize> {
+ self.layout.index_ignoring_bounds(channel, x, y)
+ }
+
+ /// Get an index provided it is in-bounds.
+ ///
+ /// Assumes that the image is backed by some sufficiently large buffer. Then computation can
+ /// not overflow as we could represent the maximum coordinate. Since overflow is defined either
+ /// way, this method can not be unsafe.
+ pub fn in_bounds_index(&self, channel: u8, x: u32, y: u32) -> usize {
+ self.layout.in_bounds_index(channel, x, y)
+ }
+
+ /// Shrink the image to the minimum of current and given extents.
+ ///
+ /// This does not modify the strides, so that the resulting sample buffer may have holes
+ /// created by the shrinking operation. Shrinking could also lead to an non-aliasing image when
+ /// samples had aliased each other before.
+ pub fn shrink_to(&mut self, channels: u8, width: u32, height: u32) {
+ self.layout.shrink_to(channels, width, height)
+ }
+}
+
+/// A flat buffer that can be used as an image view.
+///
+/// This is a nearly trivial wrapper around a buffer but at least sanitizes by checking the buffer
+/// length first and constraining the pixel type.
+///
+/// Note that this does not eliminate panics as the `AsRef<[T]>` implementation of `Buffer` may be
+/// unreliable, i.e. return different buffers at different times. This of course is a non-issue for
+/// all common collections where checking the bounds once must be enough.
+///
+/// # Inner invariants
+///
+/// * For all indices inside bounds, the corresponding index is valid in the buffer
+/// * `P::channel_count()` agrees with `self.inner.layout.channels`
+///
+#[derive(Clone, Debug)]
+pub struct View<Buffer, P: Pixel>
+where
+ Buffer: AsRef<[P::Subpixel]>
+{
+ inner: FlatSamples<Buffer>,
+ phantom: PhantomData<P>,
+}
+
+/// A mutable owning version of a flat buffer.
+///
+/// While this wraps a buffer similar to `ImageBuffer`, this is mostly intended as a utility. The
+/// library endorsed normalized representation is still `ImageBuffer`. Also, the implementation of
+/// `AsMut<[P::Subpixel]>` must always yield the same buffer. Therefore there is no public way to
+/// construct this with an owning buffer.
+///
+/// # Inner invariants
+///
+/// * For all indices inside bounds, the corresponding index is valid in the buffer
+/// * There is no aliasing of samples
+/// * The samples are packed, i.e. `self.inner.layout.channel_stride == 1`
+/// * `P::channel_count()` agrees with `self.inner.layout.channels`
+///
+#[derive(Clone, Debug)]
+pub struct ViewMut<Buffer, P: Pixel>
+where
+ Buffer: AsMut<[P::Subpixel]>
+{
+ inner: FlatSamples<Buffer>,
+ phantom: PhantomData<P>,
+}
+
+/// Denotes invalid flat sample buffers when trying to convert to stricter types.
+///
+/// The biggest use case being `ImageBuffer` which expects closely packed
+/// samples in a row major matrix representation. But this error type may be
+/// reused for other import functions. A more versatile user may also try to
+/// correct the underlying representation depending on the error variant.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum Error {
+ /// The represented image was too large.
+ TooLarge,
+
+ /// The represented image can not use this representation.
+ ///
+ /// Has an additional value of the normalized form that would be accepted.
+ NormalFormRequired(NormalForm),
+
+ /// The color format did not match the channel count.
+ ///
+ /// In some cases you might be able to fix this by lowering the reported pixel count of the
+ /// buffer without touching the strides.
+ ///
+ /// In very special circumstances you *may* do the opposite. This is **VERY** dangerous but not
+ /// directly memory unsafe although that will likely alias pixels. One scenario is when you
+ /// want to construct an `Rgba` image but have only 3 bytes per pixel and for some reason don't
+ /// care about the value of the alpha channel even though you need `Rgba`.
+ WrongColor(ColorType),
+}
+
+/// Different normal forms of buffers.
+///
+/// A normal form is an unaliased buffer with some additional constraints. The `ImageBuffer` uses
+/// row major form with packed samples.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum NormalForm {
+ /// No pixel aliases another.
+ ///
+ /// Unaliased also guarantees that all index calculations in the image bounds using
+ /// `dim_index*dim_stride` (such as `x*width_stride + y*height_stride`) do not overflow.
+ Unaliased,
+
+ /// At least pixels are packed.
+ ///
+ /// Images of these types can wrap `[T]`-slices into the standard color types. This is a
+ /// precondition for `GenericImage` which requires by-reference access to pixels.
+ PixelPacked,
+
+ /// All samples are packed.
+ ///
+ /// This is orthogonal to `PixelPacked`. It requires that there are no holes in the image but
+ /// it is not necessary that the pixel samples themselves are adjacent. An example of this
+ /// behaviour is a planar image layout.
+ ImagePacked,
+
+ /// The samples are in row-major form and all samples are packed.
+ ///
+ /// In addition to `PixelPacked` and `ImagePacked` this also asserts that the pixel matrix is
+ /// in row-major form.
+ RowMajorPacked,
+
+ /// The samples are in column-major form and all samples are packed.
+ ///
+ /// In addition to `PixelPacked` and `ImagePacked` this also asserts that the pixel matrix is
+ /// in column-major form.
+ ColumnMajorPacked,
+}
+
+impl<Buffer, P: Pixel> View<Buffer, P>
+where
+ Buffer: AsRef<[P::Subpixel]>
+{
+ /// Take out the sample buffer.
+ ///
+ /// Gives up the normalization invariants on the buffer format.
+ pub fn into_inner(self) -> FlatSamples<Buffer> {
+ self.inner
+ }
+
+ /// Get a reference on the inner sample descriptor.
+ ///
+ /// There is no mutable counterpart as modifying the buffer format, including strides and
+ /// lengths, could invalidate the accessibility invariants of the `View`. It is not specified
+ /// if the inner buffer is the same as the buffer of the image from which this view was
+ /// created. It might have been truncated as an optimization.
+ pub fn flat(&self) -> &FlatSamples<Buffer> {
+ &self.inner
+ }
+
+ /// Get a reference on the inner buffer.
+ ///
+ /// There is no mutable counterpart since it is not intended to allow you to reassign the
+ /// buffer or otherwise change its size or properties.
+ pub fn samples(&self) -> &Buffer {
+ &self.inner.samples
+ }
+
+ /// Get a reference to a selected subpixel if it is in-bounds.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while constructing the `View`.
+ pub fn get_sample(&self, channel: u8, x: u32, y: u32) -> Option<&P::Subpixel> {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.samples().as_ref().get(index)
+ }
+
+ /// Get a mutable reference to a selected subpixel if it is in-bounds.
+ ///
+ /// This is relevant only when constructed with `FlatSamples::as_view_with_mut_samples`. This
+ /// method will return `None` when the sample is out-of-bounds. All errors that could occur due
+ /// to overflow have been eliminated while constructing the `View`.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// here can in fact modify more than the coordinate in the argument.
+ pub fn get_mut_sample(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut P::Subpixel>
+ where Buffer: AsMut<[P::Subpixel]>
+ {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.inner.samples.as_mut().get_mut(index)
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// See `FlatSamples::min_length`. This method will always succeed.
+ pub fn min_length(&self) -> usize {
+ self.inner.min_length().unwrap()
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// While this can not fail – the validity of all coordinates has been validated during the
+ /// conversion from `FlatSamples` – the resulting slice may still contain holes.
+ pub fn image_slice(&self) -> &[P::Subpixel] {
+ &self.samples().as_ref()[..self.min_length()]
+ }
+
+ /// Return the mutable portion of the buffer that holds sample values.
+ ///
+ /// This is relevant only when constructed with `FlatSamples::as_view_with_mut_samples`. While
+ /// this can not fail – the validity of all coordinates has been validated during the conversion
+ /// from `FlatSamples` – the resulting slice may still contain holes.
+ pub fn image_mut_slice(&mut self) -> &mut [P::Subpixel]
+ where Buffer: AsMut<[P::Subpixel]>
+ {
+ let min_length = self.min_length();
+ &mut self.inner.samples.as_mut()[..min_length]
+ }
+
+ /// Shrink the inner image.
+ ///
+ /// The new dimensions will be the minimum of the previous dimensions. Since the set of
+ /// in-bounds pixels afterwards is a subset of the current ones, this is allowed on a `View`.
+ /// Note that you can not change the number of channels as an intrinsic property of `P`.
+ pub fn shrink_to(&mut self, width: u32, height: u32) {
+ let channels = self.inner.layout.channels;
+ self.inner.shrink_to(channels, width, height)
+ }
+
+ /// Try to convert this into an image with mutable pixels.
+ ///
+ /// The resulting image implements `GenericImage` in addition to `GenericImageView`. While this
+ /// has mutable samples, it does not enforce that pixel can not alias and that samples are
+ /// packed enough for a mutable pixel reference. This is slightly cheaper than the chain
+ /// `self.into_inner().as_view_mut()` and keeps the `View` alive on failure.
+ ///
+ /// ```
+ /// # use image::RgbImage;
+ /// # use image::Rgb;
+ /// let mut buffer = RgbImage::new(480, 640).into_flat_samples();
+ /// let view = buffer.as_view_with_mut_samples::<Rgb<u8>>().unwrap();
+ ///
+ /// // Inspect some pixels, …
+ ///
+ /// // Doesn't fail because it was originally an `RgbImage`.
+ /// let view_mut = view.try_upgrade().unwrap();
+ /// ```
+ pub fn try_upgrade(self) -> Result<ViewMut<Buffer, P>, (Error, Self)>
+ where Buffer: AsMut<[P::Subpixel]>
+ {
+ if !self.inner.is_normal(NormalForm::PixelPacked) {
+ return Err((Error::NormalFormRequired(NormalForm::PixelPacked), self))
+ }
+
+ // No length check or channel count check required, all the same.
+ Ok(ViewMut {
+ inner: self.inner,
+ phantom: PhantomData,
+ })
+ }
+}
+
+impl<Buffer, P: Pixel> ViewMut<Buffer, P>
+where
+ Buffer: AsMut<[P::Subpixel]>
+{
+ /// Take out the sample buffer.
+ ///
+ /// Gives up the normalization invariants on the buffer format.
+ pub fn into_inner(self) -> FlatSamples<Buffer> {
+ self.inner
+ }
+
+ /// Get a reference on the sample buffer descriptor.
+ ///
+ /// There is no mutable counterpart as modifying the buffer format, including strides and
+ /// lengths, could invalidate the accessibility invariants of the `View`. It is not specified
+ /// if the inner buffer is the same as the buffer of the image from which this view was
+ /// created. It might have been truncated as an optimization.
+ pub fn flat(&self) -> &FlatSamples<Buffer> {
+ &self.inner
+ }
+
+ /// Get a reference on the inner buffer.
+ ///
+ /// There is no mutable counter part since it is not intended to allow you to reassign the
+ /// buffer or otherwise change its size or properties. However, its contents can be accessed
+ /// mutable through a slice with `image_mut_slice`.
+ pub fn samples(&self) -> &Buffer {
+ &self.inner.samples
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// See `FlatSamples::min_length`. This method will always succeed.
+ pub fn min_length(&self) -> usize {
+ self.inner.min_length().unwrap()
+ }
+
+ /// Get a reference to a selected subpixel.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while construction the `View`.
+ pub fn get_sample(&self, channel: u8, x: u32, y: u32) -> Option<&P::Subpixel>
+ where Buffer: AsRef<[P::Subpixel]>
+ {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.samples().as_ref().get(index)
+ }
+
+ /// Get a mutable reference to a selected sample.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while construction the `View`.
+ pub fn get_mut_sample(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut P::Subpixel> {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.inner.samples.as_mut().get_mut(index)
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// While this can not failā€“the validity of all coordinates has been validated during the
+ /// conversion from `FlatSamples`ā€“the resulting slice may still contain holes.
+ pub fn image_slice(&self) -> &[P::Subpixel] where Buffer: AsRef<[P::Subpixel]> {
+ &self.inner.samples.as_ref()[..self.min_length()]
+ }
+
+ /// Return the mutable buffer that holds sample values.
+ pub fn image_mut_slice(&mut self) -> &mut [P::Subpixel] {
+ let length = self.min_length();
+ &mut self.inner.samples.as_mut()[..length]
+ }
+
+ /// Shrink the inner image.
+ ///
+ /// The new dimensions will be the minimum of the previous dimensions. Since the set of
+ /// in-bounds pixels afterwards is a subset of the current ones, this is allowed on a `View`.
+ /// Note that you can not change the number of channels as an intrinsic property of `P`.
+ pub fn shrink_to(&mut self, width: u32, height: u32) {
+ let channels = self.inner.layout.channels;
+ self.inner.shrink_to(channels, width, height)
+ }
+}
+
+
+// The out-of-bounds panic for single sample access similar to `slice::index`.
+#[inline(never)]
+#[cold]
+fn panic_cwh_out_of_bounds(
+ (c, x, y): (u8, u32, u32),
+ bounds: (u8, u32, u32),
+ strides: (usize, usize, usize)) -> !
+{
+ panic!("Sample coordinates {:?} out of sample matrix bounds {:?} with strides {:?}", (c, x, y), bounds, strides)
+}
+
+// The out-of-bounds panic for pixel access similar to `slice::index`.
+#[inline(never)]
+#[cold]
+fn panic_pixel_out_of_bounds(
+ (x, y): (u32, u32),
+ bounds: (u32, u32)) -> !
+{
+ panic!("Image index {:?} out of bounds {:?}", (x, y), bounds)
+}
+
+impl<Buffer> Index<(u8, u32, u32)> for FlatSamples<Buffer>
+ where Buffer: Index<usize>
+{
+ type Output = Buffer::Output;
+
+ /// Return a reference to a single sample at specified coordinates.
+ ///
+ /// # Panics
+ ///
+ /// When the coordinates are out of bounds or the index calculation fails.
+ fn index(&self, (c, x, y): (u8, u32, u32)) -> &Self::Output {
+ let bounds = self.bounds();
+ let strides = self.strides_cwh();
+ let index = self.index(c, x, y).unwrap_or_else(||
+ panic_cwh_out_of_bounds((c, x, y), bounds, strides));
+ &self.samples[index]
+ }
+}
+
+impl<Buffer> IndexMut<(u8, u32, u32)> for FlatSamples<Buffer>
+ where Buffer: IndexMut<usize>
+{
+
+ /// Return a mutable reference to a single sample at specified coordinates.
+ ///
+ /// # Panics
+ ///
+ /// When the coordinates are out of bounds or the index calculation fails.
+ fn index_mut(&mut self, (c, x, y): (u8, u32, u32)) -> &mut Self::Output {
+ let bounds = self.bounds();
+ let strides = self.strides_cwh();
+ let index = self.index(c, x, y).unwrap_or_else(||
+ panic_cwh_out_of_bounds((c, x, y), bounds, strides));
+ &mut self.samples[index]
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImageView for View<Buffer, P>
+ where Buffer: AsRef<[P::Subpixel]>
+{
+ type Pixel = P;
+
+ // We don't proxy an inner image.
+ type InnerImageView = Self;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.inner.layout.width, self.inner.layout.height)
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ let (w, h) = self.dimensions();
+ (0, w, 0, h)
+ }
+
+ fn in_bounds(&self, x: u32, y: u32) -> bool {
+ let (w, h) = self.dimensions();
+ x < w && y < h
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let image = self.inner.samples.as_ref();
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channels = P::CHANNEL_COUNT as usize;
+
+ let mut buffer = [Zero::zero(); 256];
+ buffer.iter_mut().enumerate().take(channels).for_each(|(c, to)| {
+ let index = base_index + c*self.inner.layout.channel_stride;
+ *to = image[index];
+ });
+
+ *P::from_slice(&buffer[..channels])
+ }
+
+ fn inner(&self) -> &Self {
+ self // There is no other inner image.
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImageView for ViewMut<Buffer, P>
+ where Buffer: AsMut<[P::Subpixel]> + AsRef<[P::Subpixel]>,
+{
+ type Pixel = P;
+
+ // We don't proxy an inner image.
+ type InnerImageView = Self;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.inner.layout.width, self.inner.layout.height)
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ let (w, h) = self.dimensions();
+ (0, w, 0, h)
+ }
+
+ fn in_bounds(&self, x: u32, y: u32) -> bool {
+ let (w, h) = self.dimensions();
+ x < w && y < h
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let image = self.inner.samples.as_ref();
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channels = P::CHANNEL_COUNT as usize;
+
+ let mut buffer = [Zero::zero(); 256];
+ buffer.iter_mut().enumerate().take(channels).for_each(|(c, to)| {
+ let index = base_index + c*self.inner.layout.channel_stride;
+ *to = image[index];
+ });
+
+ *P::from_slice(&buffer[..channels])
+ }
+
+ fn inner(&self) -> &Self {
+ self // There is no other inner image.
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImage for ViewMut<Buffer, P>
+ where Buffer: AsMut<[P::Subpixel]> + AsRef<[P::Subpixel]>,
+{
+ type InnerImage = Self;
+
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channel_count = <P as Pixel>::CHANNEL_COUNT as usize;
+ let pixel_range = base_index..base_index + channel_count;
+ P::from_slice_mut(&mut self.inner.samples.as_mut()[pixel_range])
+ }
+
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ *self.get_pixel_mut(x, y) = pixel;
+ }
+
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.get_pixel_mut(x, y).blend(&pixel);
+ }
+
+ fn inner_mut(&mut self) -> &mut Self {
+ self
+ }
+}
+
+impl From<Error> for ImageError {
+ fn from(error: Error) -> ImageError {
+ match error {
+ Error::TooLarge => ImageError::DimensionError,
+ Error::WrongColor(color) => ImageError::UnsupportedColor(color.into()),
+ Error::NormalFormRequired(form) => ImageError::FormatError(
+ format!("Required sample buffer in normal form {:?}", form)),
+ }
+ }
+}
+
+impl PartialOrd for NormalForm {
+ /// Compares the logical preconditions.
+ ///
+ /// `a < b` if the normal form `a` has less preconditions than `b`.
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ match (*self, *other) {
+ (NormalForm::Unaliased, NormalForm::Unaliased) => Some(cmp::Ordering::Equal),
+ (NormalForm::PixelPacked, NormalForm::PixelPacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::ImagePacked, NormalForm::ImagePacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::RowMajorPacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::ColumnMajorPacked, NormalForm::ColumnMajorPacked) => Some(cmp::Ordering::Equal),
+
+ (NormalForm::Unaliased, _) => Some(cmp::Ordering::Less),
+ (_, NormalForm::Unaliased) => Some(cmp::Ordering::Greater),
+
+ (NormalForm::PixelPacked, NormalForm::ColumnMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::PixelPacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::RowMajorPacked, NormalForm::PixelPacked) => Some(cmp::Ordering::Greater),
+ (NormalForm::ColumnMajorPacked, NormalForm::PixelPacked) => Some(cmp::Ordering::Greater),
+
+ (NormalForm::ImagePacked, NormalForm::ColumnMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::ImagePacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::RowMajorPacked, NormalForm::ImagePacked) => Some(cmp::Ordering::Greater),
+ (NormalForm::ColumnMajorPacked, NormalForm::ImagePacked) => Some(cmp::Ordering::Greater),
+
+ (NormalForm::ImagePacked, NormalForm::PixelPacked) => None,
+ (NormalForm::PixelPacked, NormalForm::ImagePacked) => None,
+ (NormalForm::RowMajorPacked, NormalForm::ColumnMajorPacked) => None,
+ (NormalForm::ColumnMajorPacked, NormalForm::RowMajorPacked) => None,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::buffer::GrayAlphaImage;
+ use crate::color::{LumaA, Rgb};
+
+ #[test]
+ fn aliasing_view() {
+ let buffer = FlatSamples {
+ samples: &[42],
+ layout: SampleLayout {
+ channels: 3,
+ channel_stride: 0,
+ width: 100,
+ width_stride: 0,
+ height: 100,
+ height_stride: 0,
+ },
+ color_hint: None,
+ };
+
+ let view = buffer.as_view::<Rgb<usize>>()
+ .expect("This is a valid view");
+ let pixel_count = view.pixels()
+ .inspect(|pixel| assert!(pixel.2 == Rgb([42, 42, 42])))
+ .count();
+ assert_eq!(pixel_count, 100*100);
+ }
+
+ #[test]
+ fn mutable_view() {
+ let mut buffer = FlatSamples {
+ samples: [0; 18],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 3,
+ width_stride: 2,
+ height: 3,
+ height_stride: 6,
+ },
+ color_hint: None,
+ };
+
+ {
+ let mut view = buffer.as_view_mut::<LumaA<usize>>()
+ .expect("This should be a valid mutable buffer");
+ assert_eq!(view.dimensions(), (3, 3));
+ for i in 0..9 {
+ *view.get_pixel_mut(i % 3, i / 3) = LumaA([2 * i as usize, 2 * i as usize + 1]);
+ }
+ }
+
+ buffer.samples.iter()
+ .enumerate()
+ .for_each(|(idx, sample)| assert_eq!(idx, *sample));
+ }
+
+ #[test]
+ fn normal_forms() {
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 3,
+ width_stride: 9,
+ height: 3,
+ height_stride: 28,
+ },
+ color_hint: None,
+ }.is_normal(NormalForm::PixelPacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 8,
+ width: 4,
+ width_stride: 1,
+ height: 2,
+ height_stride: 4,
+ },
+ color_hint: None,
+ }.is_normal(NormalForm::ImagePacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 2,
+ height: 2,
+ height_stride: 8,
+ },
+ color_hint: None,
+ }.is_normal(NormalForm::RowMajorPacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 4,
+ height: 2,
+ height_stride: 2,
+ },
+ color_hint: None,
+ }.is_normal(NormalForm::ColumnMajorPacked));
+ }
+
+ #[test]
+ fn image_buffer_conversion() {
+ let expected_layout = SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 2,
+ height: 2,
+ height_stride: 8,
+ };
+
+ let initial = GrayAlphaImage::new(expected_layout.width, expected_layout.height);
+ let buffer = initial.into_flat_samples();
+
+ assert_eq!(buffer.layout, expected_layout);
+
+ let _: GrayAlphaImage = buffer.try_into_buffer().unwrap_or_else(|(error, _)|
+ panic!("Expected buffer to be convertible but {:?}", error));
+ }
+}
diff --git a/third_party/rust/image/src/gif.rs b/third_party/rust/image/src/gif.rs
new file mode 100644
index 0000000000..d191d41977
--- /dev/null
+++ b/third_party/rust/image/src/gif.rs
@@ -0,0 +1,425 @@
+//! Decoding of GIF Images
+//!
+//! GIF (Graphics Interchange Format) is an image format that supports lossless compression.
+//!
+//! # Related Links
+//! * <http://www.w3.org/Graphics/GIF/spec-gif89a.txt> - The GIF Specification
+//!
+//! # Examples
+//! ```rust,no_run
+//! use image::gif::{GifDecoder, Encoder};
+//! use image::{ImageDecoder, AnimationDecoder};
+//! use std::fs::File;
+//! # fn main() -> std::io::Result<()> {
+//! // Decode a gif into frames
+//! let file_in = File::open("foo.gif")?;
+//! let mut decoder = GifDecoder::new(file_in).unwrap();
+//! let frames = decoder.into_frames();
+//! let frames = frames.collect_frames().expect("error decoding gif");
+//!
+//! // Encode frames into a gif and save to a file
+//! let mut file_out = File::open("out.gif")?;
+//! let mut encoder = Encoder::new(file_out);
+//! encoder.encode_frames(frames.into_iter());
+//! # Ok(())
+//! # }
+//! ```
+#![allow(clippy::while_let_loop)]
+
+use std::clone::Clone;
+use std::convert::TryInto;
+use std::cmp::min;
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Write};
+use std::marker::PhantomData;
+use std::mem;
+
+use gif::{ColorOutput, SetParameter};
+use gif::{DisposalMethod, Frame};
+use num_rational::Ratio;
+
+use crate::animation;
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::color::{ColorType, Rgba};
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, AnimationDecoder, ImageDecoder};
+
+/// GIF decoder
+pub struct GifDecoder<R: Read> {
+ reader: gif::Reader<R>,
+}
+
+impl<R: Read> GifDecoder<R> {
+ /// Creates a new decoder that decodes the input steam ```r```
+ pub fn new(r: R) -> ImageResult<GifDecoder<R>> {
+ let mut decoder = gif::Decoder::new(r);
+ decoder.set(ColorOutput::RGBA);
+
+ Ok(GifDecoder {
+ reader: decoder.read_info().map_err(ImageError::from_gif)?,
+ })
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct GifReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for GifReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for GifDecoder<R> {
+ type Reader = GifReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (u32::from(self.reader.width()), u32::from(self.reader.height()))
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgba8
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(GifReader(Cursor::new(image::decoder_to_vec(self)?), PhantomData))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ let (f_width, f_height, left, top);
+
+ if let Some(frame) = self.reader.next_frame_info().map_err(ImageError::from_gif)? {
+ left = u32::from(frame.left);
+ top = u32::from(frame.top);
+ f_width = u32::from(frame.width);
+ f_height = u32::from(frame.height);
+ } else {
+ return Err(ImageError::ImageEnd);
+ }
+
+ self.reader.read_into_buffer(buf).map_err(ImageError::from_gif)?;
+
+ let (width, height) = (u32::from(self.reader.width()), u32::from(self.reader.height()));
+ if (left, top) != (0, 0) || (width, height) != (f_width, f_height) {
+ // This is somewhat of an annoying case. The image we read into `buf` doesn't take up
+ // the whole buffer and now we need to properly insert borders. For simplicity this code
+ // currently takes advantage of the `ImageBuffer::from_fn` function to make a second
+ // ImageBuffer that is properly positioned, and then copies it back into `buf`.
+ //
+ // TODO: Implement this without any allocation.
+
+ // Recover the full image
+ let image_buffer = {
+ // See the comments inside `<GifFrameIterator as Iterator>::next` about
+ // the error handling of `from_raw`.
+ let image = ImageBuffer::from_raw(f_width, f_height, &mut *buf).ok_or_else(
+ || ImageError::UnsupportedError("Image dimensions are too large".into())
+ )?;
+
+ ImageBuffer::from_fn(width, height, |x, y| {
+ let x = x.wrapping_sub(left);
+ let y = y.wrapping_sub(top);
+ if x < image.width() && y < image.height() {
+ *image.get_pixel(x, y)
+ } else {
+ Rgba([0, 0, 0, 0])
+ }
+ })
+ };
+ buf.copy_from_slice(&mut image_buffer.into_raw());
+ }
+ Ok(())
+ }
+}
+
+struct GifFrameIterator<R: Read> {
+ reader: gif::Reader<R>,
+
+ width: u32,
+ height: u32,
+
+ non_disposed_frame: ImageBuffer<Rgba<u8>, Vec<u8>>,
+}
+
+
+impl<R: Read> GifFrameIterator<R> {
+ fn new(decoder: GifDecoder<R>) -> GifFrameIterator<R> {
+ let (width, height) = decoder.dimensions();
+
+ // TODO: Avoid this cast
+ let (width, height) = (width as u32, height as u32);
+
+ // intentionally ignore the background color for web compatibility
+
+ // create the first non disposed frame
+ let non_disposed_frame = ImageBuffer::from_pixel(width, height, Rgba([0, 0, 0, 0]));
+
+ GifFrameIterator {
+ reader: decoder.reader,
+ width,
+ height,
+ non_disposed_frame,
+ }
+ }
+}
+
+
+impl<R: Read> Iterator for GifFrameIterator<R> {
+ type Item = ImageResult<animation::Frame>;
+
+ fn next(&mut self) -> Option<ImageResult<animation::Frame>> {
+ // begin looping over each frame
+ let (left, top, delay, dispose, f_width, f_height);
+
+ match self.reader.next_frame_info() {
+ Ok(frame_info) => {
+ if let Some(frame) = frame_info {
+ left = u32::from(frame.left);
+ top = u32::from(frame.top);
+ f_width = u32::from(frame.width);
+ f_height = u32::from(frame.height);
+
+ // frame.delay is in units of 10ms so frame.delay*10 is in ms
+ delay = Ratio::new(u32::from(frame.delay) * 10, 1);
+ dispose = frame.dispose;
+ } else {
+ // no more frames
+ return None;
+ }
+ },
+ Err(err) => return Some(Err(ImageError::from_gif(err))),
+ }
+
+ let mut vec = vec![0; self.reader.buffer_size()];
+ if let Err(err) = self.reader.read_into_buffer(&mut vec) {
+ return Some(Err(ImageError::from_gif(err)));
+ }
+
+ // create the image buffer from the raw frame.
+ // `buffer_size` uses wrapping arithmetics, thus might not report the
+ // correct storage requirement if the result does not fit in `usize`.
+ // on the other hand, `ImageBuffer::from_raw` detects overflow and
+ // reports by returning `None`.
+ let image_buffer_raw = match ImageBuffer::from_raw(f_width, f_height, vec) {
+ Some(image_buffer_raw) => image_buffer_raw,
+ None => {
+ return Some(Err(ImageError::UnsupportedError(
+ "Image dimensions are too large".into(),
+ )))
+ }
+ };
+
+ // if `image_buffer_raw`'s frame exactly matches the entire image, then
+ // use it directly.
+ //
+ // otherwise, `image_buffer_raw` represents a smaller image.
+ // create a new image of the target size and place
+ // `image_buffer_raw` within it. the outside region is filled with
+ // transparent pixels.
+ let mut image_buffer =
+ full_image_from_frame(self.width, self.height, image_buffer_raw, left, top);
+
+ // loop over all pixels, checking if any pixels from the non disposed
+ // frame need to be used
+ for (x, y, pixel) in image_buffer.enumerate_pixels_mut() {
+ let previous_img_buffer = &self.non_disposed_frame;
+ let adjusted_pixel: &mut Rgba<u8> = pixel;
+ let previous_pixel: &Rgba<u8> = previous_img_buffer.get_pixel(x, y);
+
+ let pixel_alpha = adjusted_pixel.channels()[3];
+
+ // If a pixel is not visible then we show the non disposed frame pixel instead
+ if pixel_alpha == 0 {
+ adjusted_pixel.blend(previous_pixel);
+ }
+ }
+
+ let frame = animation::Frame::from_parts(
+ image_buffer.clone(), 0, 0, animation::Delay::from_ratio(delay),
+ );
+
+ match dispose {
+ DisposalMethod::Any => {
+ // do nothing
+ // (completely replace this frame with the next)
+ }
+ DisposalMethod::Keep => {
+ // do not dispose
+ // (keep pixels from this frame)
+ self.non_disposed_frame = image_buffer;
+ }
+ DisposalMethod::Background => {
+ // restore to background color
+ // (background shows through transparent pixels in the next frame)
+ for y in top..min(top + f_height, self.height) {
+ for x in left..min(left + f_width, self.width) {
+ self.non_disposed_frame.put_pixel(x, y, Rgba([0, 0, 0, 0]));
+ }
+ }
+ }
+ DisposalMethod::Previous => {
+ // restore to previous
+ // (dispose frames leaving the last none disposal frame)
+ }
+ };
+
+ Some(Ok(frame))
+ }
+}
+
+/// Given a frame subimage, construct a full image of size
+/// `(screen_width, screen_height)` by placing it at the top-left coordinates
+/// `(left, top)`. The remaining portion is filled with transparent pixels.
+fn full_image_from_frame(
+ screen_width: u32,
+ screen_height: u32,
+ image: crate::RgbaImage,
+ left: u32,
+ top: u32,
+) -> crate::RgbaImage {
+ if (left, top) == (0, 0) && (screen_width, screen_height) == (image.width(), image.height()) {
+ image
+ } else {
+ ImageBuffer::from_fn(screen_width, screen_height, |x, y| {
+ let x = x.wrapping_sub(left);
+ let y = y.wrapping_sub(top);
+ if x < image.width() && y < image.height() {
+ *image.get_pixel(x, y)
+ } else {
+ Rgba([0, 0, 0, 0])
+ }
+ })
+ }
+}
+
+impl<'a, R: Read + 'a> AnimationDecoder<'a> for GifDecoder<R> {
+ fn into_frames(self) -> animation::Frames<'a> {
+ animation::Frames::new(Box::new(GifFrameIterator::new(self)))
+ }
+}
+
+/// GIF encoder.
+pub struct Encoder<W: Write> {
+ w: Option<W>,
+ gif_encoder: Option<gif::Encoder<W>>,
+}
+
+impl<W: Write> Encoder<W> {
+ /// Creates a new GIF encoder.
+ pub fn new(w: W) -> Encoder<W> {
+ Encoder {
+ w: Some(w),
+ gif_encoder: None,
+ }
+ }
+
+ /// Encode a single image.
+ pub fn encode(
+ &mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ let (width, height) = self.gif_dimensions(width, height)?;
+ match color {
+ ColorType::Rgb8 => self.encode_gif(Frame::from_rgb(width, height, data)),
+ ColorType::Rgba8 => {
+ self.encode_gif(Frame::from_rgb(width, height, &mut data.to_owned()))
+ },
+ _ => Err(ImageError::UnsupportedColor(color.into())),
+ }
+ }
+
+ /// Encode one frame of animation.
+ pub fn encode_frame(&mut self, img_frame: animation::Frame) -> ImageResult<()> {
+ let frame = self.convert_frame(img_frame)?;
+ self.encode_gif(frame)
+ }
+
+ /// Encodes Frames.
+ /// Consider using `try_encode_frames` instead to encode an `animation::Frames` like iterator.
+ pub fn encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
+ where
+ F: IntoIterator<Item = animation::Frame>
+ {
+ for img_frame in frames {
+ self.encode_frame(img_frame)?;
+ }
+ Ok(())
+ }
+
+ /// Try to encode a collection of `ImageResult<animation::Frame>` objects.
+ /// Use this function to encode an `animation::Frames` like iterator.
+ /// Whenever an `Err` item is encountered, that value is returned without further actions.
+ pub fn try_encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
+ where
+ F: IntoIterator<Item = ImageResult<animation::Frame>>
+ {
+ for img_frame in frames {
+ self.encode_frame(img_frame?)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn convert_frame(&mut self, img_frame: animation::Frame)
+ -> ImageResult<Frame<'static>>
+ {
+ // get the delay before converting img_frame
+ let frame_delay = img_frame.delay().into_ratio().to_integer();
+ // convert img_frame into RgbaImage
+ let mut rbga_frame = img_frame.into_buffer();
+ let (width, height) = self.gif_dimensions(
+ rbga_frame.width(),
+ rbga_frame.height())?;
+
+ // Create the gif::Frame from the animation::Frame
+ let mut frame = Frame::from_rgba(width, height, &mut *rbga_frame);
+ frame.delay = (frame_delay / 10).try_into().map_err(|_|ImageError::DimensionError)?;
+
+ Ok(frame)
+ }
+
+ fn gif_dimensions(&self, width: u32, height: u32) -> ImageResult<(u16, u16)> {
+ fn inner_dimensions(width: u32, height: u32) -> Option<(u16, u16)> {
+ let width = u16::try_from(width).ok()?;
+ let height = u16::try_from(height).ok()?;
+ Some((width, height))
+ }
+
+ inner_dimensions(width, height).ok_or(ImageError::DimensionError)
+ }
+
+ pub(crate) fn encode_gif(&mut self, frame: Frame) -> ImageResult<()> {
+ let gif_encoder;
+ if let Some(ref mut encoder) = self.gif_encoder {
+ gif_encoder = encoder;
+ } else {
+ let writer = self.w.take().unwrap();
+ let encoder = gif::Encoder::new(writer, frame.width, frame.height, &[])?;
+ self.gif_encoder = Some(encoder);
+ gif_encoder = self.gif_encoder.as_mut().unwrap()
+ }
+
+ gif_encoder.write_frame(&frame).map_err(|err| err.into())
+ }
+}
+
+impl ImageError {
+ fn from_gif(err: gif::DecodingError) -> ImageError {
+ use gif::DecodingError::*;
+ match err {
+ Format(desc) | Internal(desc) => ImageError::FormatError(desc.into()),
+ Io(io_err) => ImageError::IoError(io_err),
+ }
+ }
+}
diff --git a/third_party/rust/image/src/hdr/decoder.rs b/third_party/rust/image/src/hdr/decoder.rs
new file mode 100644
index 0000000000..1409e4683f
--- /dev/null
+++ b/third_party/rust/image/src/hdr/decoder.rs
@@ -0,0 +1,915 @@
+use num_traits::identities::Zero;
+use scoped_threadpool::Pool;
+#[cfg(test)]
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::io::{self, BufRead, Cursor, Read, Seek};
+use std::iter::Iterator;
+use std::marker::PhantomData;
+use std::mem;
+use std::path::Path;
+use crate::Primitive;
+
+use crate::color::{ColorType, Rgb};
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, ImageDecoder, ImageDecoderExt, Progress};
+
/// Adapter to conform to ```ImageDecoder``` trait
#[derive(Debug)]
pub struct HDRAdapter<R: BufRead> {
    // The decoder is consumed on the first pixel read, hence the Option.
    inner: Option<HdrDecoder<R>>,
    // data: Option<Vec<u8>>,
    // Metadata is captured up front so dimensions stay available after
    // `inner` has been taken.
    meta: HDRMetadata,
}
+
impl<R: BufRead> HDRAdapter<R> {
    /// Creates adapter
    pub fn new(r: R) -> ImageResult<HDRAdapter<R>> {
        let decoder = HdrDecoder::new(r)?;
        let meta = decoder.metadata();
        Ok(HDRAdapter {
            inner: Some(decoder),
            meta,
        })
    }

    /// Allows reading old Radiance HDR images
    pub fn new_nonstrict(r: R) -> ImageResult<HDRAdapter<R>> {
        let decoder = HdrDecoder::with_strictness(r, false)?;
        let meta = decoder.metadata();
        Ok(HDRAdapter {
            inner: Some(decoder),
            meta,
        })
    }

    /// Read the actual data of the image, and store it in Self::data.
    ///
    /// Consumes the inner decoder; a second call yields `ImageError::ImageEnd`.
    /// `buf` must be exactly `total_bytes()` long (asserted).
    fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        match self.inner.take() {
            Some(decoder) => {
                // Decode to tone-mapped 8-bit RGB (scale 1, gamma 2.2).
                let img: Vec<Rgb<u8>> = decoder.read_image_ldr()?;
                for (i, Rgb(data)) in img.into_iter().enumerate() {
                    // 3 bytes per pixel, written contiguously.
                    buf[(i*3)..][..3].copy_from_slice(&data);
                }

                Ok(())
            }
            None => Err(ImageError::ImageEnd),
        }
    }
}
+
/// Wrapper struct around a `Cursor<Vec<u8>>`
// PhantomData<R> keeps the reader type parameter of the decoder in the
// associated `Reader` type without storing an actual reader.
pub struct HdrReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
impl<R> Read for HdrReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // Fast path: if nothing has been read yet and the caller's buffer is
        // empty, hand over the whole backing Vec by swapping instead of
        // copying byte-by-byte.
        if self.0.position() == 0 && buf.is_empty() {
            mem::swap(buf, self.0.get_mut());
            Ok(buf.len())
        } else {
            self.0.read_to_end(buf)
        }
    }
}
+
impl<'a, R: 'a + BufRead> ImageDecoder<'a> for HDRAdapter<R> {
    type Reader = HdrReader<R>;

    fn dimensions(&self) -> (u32, u32) {
        (self.meta.width, self.meta.height)
    }

    fn color_type(&self) -> ColorType {
        // HDR data is tone-mapped to 8-bit RGB by this adapter.
        ColorType::Rgb8
    }

    fn into_reader(self) -> ImageResult<Self::Reader> {
        // Decode the whole image eagerly and serve the bytes from memory.
        Ok(HdrReader(Cursor::new(image::decoder_to_vec(self)?), PhantomData))
    }

    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        self.read_image_data(buf)
    }
}
+
impl<'a, R: 'a + BufRead + Seek> ImageDecoderExt<'a> for HDRAdapter<R> {
    fn read_rect_with_progress<F: Fn(Progress)>(
        &mut self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
        buf: &mut [u8],
        progress_callback: F,
    ) -> ImageResult<()> {
        // HDR has no random access; load_rect always decodes the full image
        // (so the seek callback is unreachable) and crops the requested rect.
        image::load_rect(x, y, width, height, buf, progress_callback, self, |_, _| unreachable!(),
                         |s, buf| s.read_image_data(buf).map(|_| buf.len()))
    }
}
+
/// Radiance HDR file signature
pub const SIGNATURE: &[u8] = b"#?RADIANCE";
// Byte length of SIGNATURE; used to size the header read buffer.
const SIGNATURE_LENGTH: usize = 10;

/// An Radiance HDR decoder
#[derive(Debug)]
pub struct HdrDecoder<R> {
    // Source positioned just past the header when construction succeeds.
    r: R,
    width: u32,
    height: u32,
    // Full parsed header; width/height are duplicated into it.
    meta: HDRMetadata,
}
+
/// Refer to [wikipedia](https://en.wikipedia.org/wiki/RGBE_image_format)
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct RGBE8Pixel {
    /// Color components
    pub c: [u8; 3],
    /// Exponent
    pub e: u8,
}

/// Creates ```RGBE8Pixel``` from components
pub fn rgbe8(r: u8, g: u8, b: u8, e: u8) -> RGBE8Pixel {
    RGBE8Pixel { c: [r, g, b], e }
}
+
impl RGBE8Pixel {
    /// Converts ```RGBE8Pixel``` into ```Rgb<f32>``` linearly
    #[inline]
    pub fn to_hdr(self) -> Rgb<f32> {
        if self.e == 0 {
            // e == 0 is the canonical encoding of black.
            Rgb([0.0, 0.0, 0.0])
        } else {
            // let exp = f32::ldexp(1., self.e as isize - (128 + 8)); // unstable
            // equivalent to ldexp: 2^(e - 136); bias 128, extra 8 for the
            // mantissa bytes being in 0..256.
            let exp = f32::exp2(<f32 as From<_>>::from(self.e) - (128.0 + 8.0));
            Rgb([
                exp * <f32 as From<_>>::from(self.c[0]),
                exp * <f32 as From<_>>::from(self.c[1]),
                exp * <f32 as From<_>>::from(self.c[2]),
            ])
        }
    }

    /// Converts ```RGBE8Pixel``` into ```Rgb<T>``` with scale=1 and gamma=2.2
    ///
    /// color_ldr = (color_hdr*scale)<sup>gamma</sup>
    ///
    /// # Panic
    ///
    /// Panics when ```T::max_value()``` cannot be represented as f32.
    #[inline]
    pub fn to_ldr<T: Primitive + Zero>(self) -> Rgb<T> {
        self.to_ldr_scale_gamma(1.0, 2.2)
    }

    /// Converts RGBE8Pixel into Rgb<T> using provided scale and gamma
    ///
    /// color_ldr = (color_hdr*scale)<sup>gamma</sup>
    ///
    /// # Panic
    ///
    /// Panics when T::max_value() cannot be represented as f32.
    /// Panics when scale or gamma is NaN
    #[inline]
    pub fn to_ldr_scale_gamma<T: Primitive + Zero>(self, scale: f32, gamma: f32) -> Rgb<T> {
        let Rgb(data) = self.to_hdr();
        let (r, g, b) = (data[0], data[1], data[2]);
        // Maps one linear channel value to T, clamping to [0, T::max_value()].
        #[inline]
        fn sg<T: Primitive + Zero>(v: f32, scale: f32, gamma: f32) -> T {
            let t_max = T::max_value();
            // Disassembly shows that t_max_f32 is compiled into constant
            let t_max_f32: f32 = num_traits::NumCast::from(t_max)
                .expect("to_ldr_scale_gamma: maximum value of type is not representable as f32");
            // +0.5 rounds to nearest on the later truncating cast.
            let fv = f32::powf(v * scale, gamma) * t_max_f32 + 0.5;
            if fv < 0.0 {
                T::zero()
            } else if fv > t_max_f32 {
                t_max
            } else {
                num_traits::NumCast::from(fv)
                    .expect("to_ldr_scale_gamma: cannot convert f32 to target type. NaN?")
            }
        }
        Rgb([
            sg(r, scale, gamma),
            sg(g, scale, gamma),
            sg(b, scale, gamma),
        ])
    }
}
+
impl<R: BufRead> HdrDecoder<R> {
    /// Reads Radiance HDR image header from stream ```r```
    /// if the header is valid, creates HdrDecoder
    /// strict mode is enabled
    pub fn new(reader: R) -> ImageResult<HdrDecoder<R>> {
        HdrDecoder::with_strictness(reader, true)
    }

    /// Reads Radiance HDR image header from stream ```reader```,
    /// if the header is valid, creates ```HdrDecoder```.
    ///
    /// strict enables strict mode
    ///
    /// Warning! Reading wrong file in non-strict mode
    /// could consume file size worth of memory in the process.
    pub fn with_strictness(mut reader: R, strict: bool) -> ImageResult<HdrDecoder<R>> {
        let mut attributes = HDRMetadata::new();

        {
            // scope to make borrowck happy
            let r = &mut reader;
            if strict {
                // Strict mode requires the "#?RADIANCE" magic.
                let mut signature = [0; SIGNATURE_LENGTH];
                r.read_exact(&mut signature)?;
                if signature != SIGNATURE {
                    return Err(ImageError::FormatError(
                        "Radiance HDR signature not found".to_string(),
                    ));
                } // no else
                // skip signature line ending
                read_line_u8(r)?;
            } else {
                // Old Radiance HDR files (*.pic) don't use signature
                // Let them be parsed in non-strict mode
            }
            // read header data until empty line
            loop {
                match read_line_u8(r)? {
                    None => {
                        // EOF before end of header
                        return Err(ImageError::FormatError("EOF in header".into()));
                    }
                    Some(line) => {
                        if line.is_empty() {
                            // end of header
                            break;
                        } else if line[0] == b'#' {
                            // line[0] will not panic, line.len() == 0 is false here
                            // skip comments
                            continue;
                        } // no else
                        // process attribute line
                        let line = String::from_utf8_lossy(&line[..]);
                        attributes.update_header_info(&line, strict)?;
                    } // <= Some(line)
                } // match read_line_u8()
            } // loop
        } // scope to end borrow of reader
        // parse dimensions
        let (width, height) = match read_line_u8(&mut reader)? {
            None => {
                // EOF instead of image dimensions
                return Err(ImageError::FormatError("EOF in dimensions line".into()));
            }
            Some(dimensions) => {
                let dimensions = String::from_utf8_lossy(&dimensions[..]);
                parse_dimensions_line(&dimensions, strict)?
            }
        };

        Ok(HdrDecoder {
            r: reader,

            width,
            height,
            // width/height from the dimensions line override whatever the
            // attribute parsing left in `attributes`.
            meta: HDRMetadata {
                width,
                height,
                ..attributes
            },
        })
    } // end with_strictness

    /// Returns file metadata. Refer to ```HDRMetadata``` for details.
    pub fn metadata(&self) -> HDRMetadata {
        self.meta.clone()
    }

    /// Consumes decoder and returns a vector of RGBE8 pixels
    pub fn read_image_native(mut self) -> ImageResult<Vec<RGBE8Pixel>> {
        // Don't read anything if image is empty
        if self.width == 0 || self.height == 0 {
            return Ok(vec![]);
        }
        // expression self.width > 0 && self.height > 0 is true from now to the end of this method
        let pixel_count = self.width as usize * self.height as usize;
        let mut ret = vec![Default::default(); pixel_count];
        // One scanline per image row; read_scanline requires non-empty chunks,
        // guaranteed by the emptiness check above.
        for chunk in ret.chunks_mut(self.width as usize) {
            read_scanline(&mut self.r, chunk)?;
        }
        Ok(ret)
    }

    /// Consumes decoder and returns a vector of transformed pixels
    ///
    /// Scanlines are read sequentially from the stream; the per-pixel
    /// transform `f` runs on a thread pool.
    pub fn read_image_transform<T: Send, F: Send + Sync + Fn(RGBE8Pixel) -> T>(
        mut self,
        f: F,
        output_slice: &mut [T],
    ) -> ImageResult<()> {
        assert_eq!(
            output_slice.len(),
            self.width as usize * self.height as usize
        );

        // Don't read anything if image is empty
        if self.width == 0 || self.height == 0 {
            return Ok(());
        }

        let chunks_iter = output_slice.chunks_mut(self.width as usize);
        let mut pool = Pool::new(8); // fixed pool of 8 worker threads

        (pool.scoped(|scope| {
            for chunk in chunks_iter {
                // Decode this scanline on the current thread (the stream is
                // sequential), then hand the transform off to a worker.
                let mut buf = vec![Default::default(); self.width as usize];
                read_scanline(&mut self.r, &mut buf[..])?;
                let f = &f;
                scope.execute(move || {
                    for (dst, &pix) in chunk.iter_mut().zip(buf.iter()) {
                        *dst = f(pix);
                    }
                });
            }
            Ok(())
        }) as Result<(), ImageError>)?;
        Ok(())
    }

    /// Consumes decoder and returns a vector of Rgb<u8> pixels.
    /// scale = 1, gamma = 2.2
    pub fn read_image_ldr(self) -> ImageResult<Vec<Rgb<u8>>> {
        let mut ret = vec![Rgb([0, 0, 0]); self.width as usize * self.height as usize];
        self.read_image_transform(|pix| pix.to_ldr(), &mut ret[..])?;
        Ok(ret)
    }

    /// Consumes decoder and returns a vector of Rgb<f32> pixels.
    ///
    pub fn read_image_hdr(self) -> ImageResult<Vec<Rgb<f32>>> {
        let mut ret = vec![Rgb([0.0, 0.0, 0.0]); self.width as usize * self.height as usize];
        self.read_image_transform(|pix| pix.to_hdr(), &mut ret[..])?;
        Ok(ret)
    }
}
+
impl<R: BufRead> IntoIterator for HdrDecoder<R> {
    type Item = ImageResult<RGBE8Pixel>;
    type IntoIter = HDRImageDecoderIterator<R>;

    fn into_iter(self) -> Self::IntoIter {
        HDRImageDecoderIterator {
            r: self.r,
            scanline_cnt: self.height as usize,
            // buffer holds exactly one scanline (width pixels)
            buf: vec![Default::default(); self.width as usize],
            col: 0,
            scanline: 0,
            trouble: true, // make first call to `next()` read scanline
            error_encountered: false,
        }
    }
}
+
/// Scanline buffered pixel by pixel iterator
pub struct HDRImageDecoderIterator<R: BufRead> {
    r: R,
    scanline_cnt: usize,
    buf: Vec<RGBE8Pixel>, // scanline buffer
    col: usize,           // current position in scanline
    scanline: usize,      // current scanline
    trouble: bool,        // optimization, true indicates that we need to check something
    error_encountered: bool,
}

impl<R: BufRead> HDRImageDecoderIterator<R> {
    // Advances counter to the next pixel
    // Wraps `col` at the end of a scanline and sets `trouble` so the next
    // call to `next()` refills the buffer (or detects the end of the image).
    #[inline]
    fn advance(&mut self) {
        self.col += 1;
        if self.col == self.buf.len() {
            self.col = 0;
            self.scanline += 1;
            self.trouble = true;
        }
    }
}
+
impl<R: BufRead> Iterator for HDRImageDecoderIterator<R> {
    type Item = ImageResult<RGBE8Pixel>;

    fn next(&mut self) -> Option<Self::Item> {
        if !self.trouble {
            // Fast path: just serve the next pixel from the scanline buffer.
            let ret = self.buf[self.col];
            self.advance();
            Some(Ok(ret))
        } else {
            // some condition is pending
            if self.buf.is_empty() || self.scanline == self.scanline_cnt {
                // No more pixels
                return None;
            } // no else
            if self.error_encountered {
                self.advance();
                // Error was encountered. Keep producing errors.
                // ImageError can't implement Clone, so just dump some error
                return Some(Err(ImageError::ImageEnd));
            } // no else
            if self.col == 0 {
                // fill scanline buffer
                match read_scanline(&mut self.r, &mut self.buf[..]) {
                    Ok(_) => {
                        // no action required
                    }
                    Err(err) => {
                        self.advance();
                        self.error_encountered = true;
                        self.trouble = true;
                        return Some(Err(err));
                    }
                }
            } // no else
            self.trouble = false;
            let ret = self.buf[0];
            self.advance();
            Some(Ok(ret))
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact remaining count: total pixels minus pixels already produced.
        let total_cnt = self.buf.len() * self.scanline_cnt;
        let cur_cnt = self.buf.len() * self.scanline + self.col;
        let remaining = total_cnt - cur_cnt;
        (remaining, Some(remaining))
    }
}
+
// size_hint above returns exact bounds, so ExactSizeIterator is sound.
impl<R: BufRead> ExactSizeIterator for HDRImageDecoderIterator<R> {}
+
// Reads and decodes one scanline of `buf.len()` pixels into `buf`,
// auto-detecting the compression scheme from the first 4 bytes.
// Precondition: buf.len() > 0
fn read_scanline<R: BufRead>(r: &mut R, buf: &mut [RGBE8Pixel]) -> ImageResult<()> {
    assert!(!buf.is_empty());
    let width = buf.len();
    // first 4 bytes in scanline allow to determine compression method
    let fb = read_rgbe(r)?;
    if fb.c[0] == 2 && fb.c[1] == 2 && fb.c[2] < 128 {
        // denormalized pixel value (2,2,<128,_) indicates new per component RLE method
        // decode_component guarantees that offset is within 0 .. width
        // therefore we can skip bounds checking here, but we will not
        // Components are stored plane-by-plane: all R, then G, B, E.
        decode_component(r, width, |offset, value| buf[offset].c[0] = value)?;
        decode_component(r, width, |offset, value| buf[offset].c[1] = value)?;
        decode_component(r, width, |offset, value| buf[offset].c[2] = value)?;
        decode_component(r, width, |offset, value| buf[offset].e = value)?;
    } else {
        // old RLE method (it was considered old around 1991, should it be here?)
        // `fb` is the already-consumed first pixel of the scanline.
        decode_old_rle(r, fb, buf)?;
    }
    Ok(())
}
+
// Reads exactly one byte from `r`, failing with the underlying I/O error
// (e.g. UnexpectedEof) when the stream is exhausted.
#[inline(always)]
fn read_byte<R: BufRead>(r: &mut R) -> io::Result<u8> {
    let mut byte = [0u8; 1];
    r.read_exact(&mut byte)?;
    Ok(byte[0])
}
+
// Decodes one RLE-compressed component plane of `width` values, delivering
// each decoded byte through `set_component(offset, value)`.
// Packets: a length byte <= 128 is followed by that many literal bytes;
// a length byte > 128 encodes a run of (length - 128) copies of the next byte.
// Guarantees that first parameter of set_component will be within pos .. pos+width
#[inline]
fn decode_component<R: BufRead, S: FnMut(usize, u8)>(
    r: &mut R,
    width: usize,
    mut set_component: S,
) -> ImageResult<()> {
    // 128 is the maximum literal-packet payload, so one stack buffer suffices.
    let mut buf = [0; 128];
    let mut pos = 0;
    while pos < width {
        // increment position by a number of decompressed values
        pos += {
            let rl = read_byte(r)?;
            if rl <= 128 {
                // sanity check
                if pos + rl as usize > width {
                    return Err(ImageError::FormatError(
                        "Wrong length of decoded scanline".into(),
                    ));
                }
                // read values
                r.read_exact(&mut buf[0..rl as usize])?;
                for (offset, &value) in buf[0..rl as usize].iter().enumerate() {
                    set_component(pos + offset, value);
                }
                rl as usize
            } else {
                // run
                let rl = rl - 128;
                // sanity check
                if pos + rl as usize > width {
                    return Err(ImageError::FormatError(
                        "Wrong length of decoded scanline".into(),
                    ));
                }
                // fill with same value
                let value = read_byte(r)?;
                for offset in 0..rl as usize {
                    set_component(pos + offset, value);
                }
                rl as usize
            }
        };
    }
    // pos can only land exactly on width; anything else is a malformed stream
    // (a zero-length literal packet keeps pos unchanged but loops again).
    if pos != width {
        return Err(ImageError::FormatError(
            "Wrong length of decoded scanline".into(),
        ));
    }
    Ok(())
}
+
// Decodes scanline, places it into buf
// Precondition: buf.len() > 0
// fb - first 4 bytes of scanline
// Old-style RLE: a pixel of (1,1,1,n) repeats the previous pixel n times;
// consecutive markers scale n by successive powers of 256.
fn decode_old_rle<R: BufRead>(
    r: &mut R,
    fb: RGBE8Pixel,
    buf: &mut [RGBE8Pixel],
) -> ImageResult<()> {
    assert!(!buf.is_empty());
    let width = buf.len();
    // convenience function.
    // returns run length if pixel is a run length marker
    #[inline]
    fn rl_marker(pix: RGBE8Pixel) -> Option<usize> {
        if pix.c == [1, 1, 1] {
            Some(pix.e as usize)
        } else {
            None
        }
    }
    // first pixel in scanline should not be run length marker
    // it is error if it is
    if rl_marker(fb).is_some() {
        return Err(ImageError::FormatError(
            "First pixel of a scanline shouldn't be run length marker".into(),
        ));
    }
    buf[0] = fb; // set first pixel of scanline

    let mut x_off = 1; // current offset from beginning of a scanline
    let mut rl_mult = 1; // current run length multiplier
    let mut prev_pixel = fb;
    while x_off < width {
        let pix = read_rgbe(r)?;
        // it's harder to forget to increase x_off if I write this this way.
        x_off += {
            if let Some(rl) = rl_marker(pix) {
                // rl_mult takes care of consecutive RL markers
                let rl = rl * rl_mult;
                rl_mult *= 256;
                if x_off + rl <= width {
                    // do run
                    for b in &mut buf[x_off..x_off + rl] {
                        *b = prev_pixel;
                    }
                } else {
                    return Err(ImageError::FormatError(
                        "Wrong length of decoded scanline".into(),
                    ));
                };
                rl // value to increase x_off by
            } else {
                rl_mult = 1; // chain of consecutive RL markers is broken
                prev_pixel = pix;
                buf[x_off] = pix;
                1 // value to increase x_off by
            }
        };
    }
    // Defensive: the loop can only exit with x_off == width, but keep the
    // explicit check in case the invariant is ever broken.
    if x_off != width {
        return Err(ImageError::FormatError(
            "Wrong length of decoded scanline".into(),
        ));
    }
    Ok(())
}
+
// Reads one raw 4-byte RGBE pixel (R, G, B, exponent) from the stream.
fn read_rgbe<R: BufRead>(r: &mut R) -> io::Result<RGBE8Pixel> {
    let mut buf = [0u8; 4];
    r.read_exact(&mut buf[..])?;
    Ok(RGBE8Pixel {
        c: [buf[0], buf[1], buf[2]],
        e: buf[3],
    })
}
+
/// Metadata for Radiance HDR image
// Clone is required by `HdrDecoder::metadata()`, which hands out a copy.
#[derive(Debug, Clone)]
pub struct HDRMetadata {
    /// Width of decoded image. It could be either scanline length,
    /// or scanline count, depending on image orientation.
    pub width: u32,
    /// Height of decoded image. It depends on orientation too.
    pub height: u32,
    /// Orientation matrix. For standard orientation it is ((1,0),(0,1)) - left to right, top to bottom.
    /// First pair tells how resulting pixel coordinates change along a scanline.
    /// Second pair tells how they change from one scanline to the next.
    pub orientation: ((i8, i8), (i8, i8)),
    /// Divide color values by exposure to get to get physical radiance in
    /// watts/steradian/m<sup>2</sup>
    ///
    /// Image may not contain physical data, even if this field is set.
    pub exposure: Option<f32>,
    /// Divide color values by corresponding tuple member (r, g, b) to get to get physical radiance
    /// in watts/steradian/m<sup>2</sup>
    ///
    /// Image may not contain physical data, even if this field is set.
    pub color_correction: Option<(f32, f32, f32)>,
    /// Pixel height divided by pixel width
    pub pixel_aspect_ratio: Option<f32>,
    /// All lines contained in image header are put here. Ordering of lines is preserved.
    /// Lines in the form "key=value" are represented as ("key", "value").
    /// All other lines are ("", "line")
    pub custom_attributes: Vec<(String, String)>,
}
+
impl HDRMetadata {
    // Default metadata: zero dimensions, standard orientation, no physical
    // calibration data.
    fn new() -> HDRMetadata {
        HDRMetadata {
            width: 0,
            height: 0,
            orientation: ((1, 0), (0, 1)),
            exposure: None,
            color_correction: None,
            pixel_aspect_ratio: None,
            custom_attributes: vec![],
        }
    }

    // Updates header info, in strict mode returns error for malformed lines (no '=' separator)
    // unknown attributes are skipped
    fn update_header_info(&mut self, line: &str, strict: bool) -> ImageResult<()> {
        // split line at first '='
        // old Radiance HDR files (*.pic) feature tabs in key, so vvv trim
        let maybe_key_value = split_at_first(line, "=").map(|(key, value)| (key.trim(), value));
        // save all header lines in custom_attributes
        match maybe_key_value {
            Some((key, val)) => self
                .custom_attributes
                .push((key.to_owned(), val.to_owned())),
            None => self.custom_attributes.push(("".into(), line.to_owned())),
        }
        // parse known attributes
        match maybe_key_value {
            Some(("FORMAT", val)) => {
                if val.trim() != "32-bit_rle_rgbe" {
                    // XYZE isn't supported yet
                    return Err(ImageError::UnsupportedError(limit_string_len(val, 20)));
                }
            }
            Some(("EXPOSURE", val)) => {
                match val.trim().parse::<f32>() {
                    Ok(v) => {
                        self.exposure = Some(self.exposure.unwrap_or(1.0) * v); // all encountered exposure values should be multiplied
                    }
                    Err(parse_error) => {
                        if strict {
                            return Err(ImageError::FormatError(format!(
                                "Cannot parse EXPOSURE value: {}",
                                parse_error
                            )));
                        } // no else, skip this line in non-strict mode
                    }
                };
            }
            Some(("PIXASPECT", val)) => {
                match val.trim().parse::<f32>() {
                    Ok(v) => {
                        // repeated PIXASPECT values multiply, like EXPOSURE
                        self.pixel_aspect_ratio = Some(self.pixel_aspect_ratio.unwrap_or(1.0) * v);
                    }
                    Err(parse_error) => {
                        if strict {
                            return Err(ImageError::FormatError(format!(
                                "Cannot parse PIXASPECT value: {}",
                                parse_error
                            )));
                        } // no else, skip this line in non-strict mode
                    }
                };
            }
            Some(("COLORCORR", val)) => {
                let mut rgbcorr = [1.0, 1.0, 1.0];
                match parse_space_separated_f32(val, &mut rgbcorr, "COLORCORR") {
                    Ok(extra_numbers) => {
                        if strict && extra_numbers {
                            return Err(ImageError::FormatError(
                                "Extra numbers in COLORCORR".into(),
                            ));
                        } // no else, just ignore extra numbers
                        // repeated COLORCORR values multiply component-wise
                        let (rc, gc, bc) = self.color_correction.unwrap_or((1.0, 1.0, 1.0));
                        self.color_correction =
                            Some((rc * rgbcorr[0], gc * rgbcorr[1], bc * rgbcorr[2]));
                    }
                    Err(err) => {
                        if strict {
                            return Err(err);
                        } // no else, skip malformed line in non-strict mode
                    }
                }
            }
            None => {
                // old Radiance HDR files (*.pic) contain commands in a header
                // just skip them
            }
            _ => {
                // skip unknown attribute
            }
        } // match attributes
        Ok(())
    }
}
+
+fn parse_space_separated_f32(line: &str, vals: &mut [f32], name: &str) -> ImageResult<bool> {
+ let mut nums = line.split_whitespace();
+ for val in vals.iter_mut() {
+ if let Some(num) = nums.next() {
+ match num.parse::<f32>() {
+ Ok(v) => *val = v,
+ Err(err) => {
+ return Err(ImageError::FormatError(format!(
+ "f32 parse error in {}: {}",
+ name,
+ err
+ )));
+ }
+ }
+ } else {
+ // not enough numbers in line
+ return Err(ImageError::FormatError(format!(
+ "Not enough numbers in {}",
+ name
+ )));
+ }
+ }
+ Ok(nums.next().is_some())
+}
+
// Parses dimension line "-Y height +X width"
// returns (width, height) or error
// Only the standard "-Y h +X w" orientation is supported; the seven other
// legal orientations are rejected with an error.
fn parse_dimensions_line(line: &str, strict: bool) -> ImageResult<(u32, u32)> {
    let mut dim_parts = line.split_whitespace();
    let err = "Malformed dimensions line";
    let c1_tag = dim_parts
        .next()
        .ok_or_else(|| ImageError::FormatError(err.into()))?;
    let c1_str = dim_parts
        .next()
        .ok_or_else(|| ImageError::FormatError(err.into()))?;
    let c2_tag = dim_parts
        .next()
        .ok_or_else(|| ImageError::FormatError(err.into()))?;
    let c2_str = dim_parts
        .next()
        .ok_or_else(|| ImageError::FormatError(err.into()))?;
    if strict && dim_parts.next().is_some() {
        // extra data in dimensions line
        return Err(ImageError::FormatError(err.into()));
    } // no else
    // dimensions line is in the form "-Y 10 +X 20"
    // There are 8 possible orientations: +Y +X, +X -Y and so on
    match (c1_tag, c2_tag) {
        ("-Y", "+X") => {
            // Common orientation (left-right, top-down)
            // c1_str is height, c2_str is width
            let height = c1_str.parse::<u32>().into_image_error(err)?;
            let width = c2_str.parse::<u32>().into_image_error(err)?;
            Ok((width, height))
        }
        _ => Err(ImageError::FormatError(format!(
            "Unsupported orientation {} {}",
            limit_string_len(c1_tag, 4),
            limit_string_len(c2_tag, 4)
        ))),
    } // final expression. Returns value
}
+
+trait IntoImageError<T> {
+ fn into_image_error(self, description: &str) -> ImageResult<T>;
+}
+
+impl<T> IntoImageError<T> for ::std::result::Result<T, ::std::num::ParseFloatError> {
+ fn into_image_error(self, description: &str) -> ImageResult<T> {
+ self.map_err(|err| {
+ ImageError::FormatError(format!("{} {}", description, err))
+ })
+ }
+}
+
+impl<T> IntoImageError<T> for ::std::result::Result<T, ::std::num::ParseIntError> {
+ fn into_image_error(self, description: &str) -> ImageResult<T> {
+ self.map_err(|err| {
+ ImageError::FormatError(format!("{} {}", description, err))
+ })
+ }
+}
+
// Returns `s` unchanged when it has at most `len` characters; otherwise
// the first `len` characters followed by "..." (so at most len+3 chars).
fn limit_string_len(s: &str, len: usize) -> String {
    if s.chars().count() <= len {
        return s.into();
    }
    let mut truncated: String = s.chars().take(len).collect();
    truncated.push_str("...");
    truncated
}
+
// Splits string into (before separator, after separator) tuple
// or None if separator isn't found, is at the very start,
// or nothing follows it.
fn split_at_first<'a>(s: &'a str, separator: &str) -> Option<(&'a str, &'a str)> {
    let p = s.find(separator)?;
    let tail_start = p + separator.len();
    if p == 0 || tail_start >= s.len() {
        None
    } else {
        Some((&s[..p], &s[tail_start..]))
    }
}
+
#[test]
// Covers the None cases (empty input, separator at start, separator with
// empty tail, empty separator) and multi-character separators.
// Cow is used so the test also exercises calls through a deref'd &str.
fn split_at_first_test() {
    assert_eq!(split_at_first(&Cow::Owned("".into()), "="), None);
    assert_eq!(split_at_first(&Cow::Owned("=".into()), "="), None);
    assert_eq!(split_at_first(&Cow::Owned("= ".into()), "="), None);
    assert_eq!(
        split_at_first(&Cow::Owned(" = ".into()), "="),
        Some((" ", " "))
    );
    assert_eq!(
        split_at_first(&Cow::Owned("EXPOSURE= ".into()), "="),
        Some(("EXPOSURE", " "))
    );
    assert_eq!(
        split_at_first(&Cow::Owned("EXPOSURE= =".into()), "="),
        Some(("EXPOSURE", " ="))
    );
    assert_eq!(
        split_at_first(&Cow::Owned("EXPOSURE== =".into()), "=="),
        Some(("EXPOSURE", " ="))
    );
    assert_eq!(split_at_first(&Cow::Owned("EXPOSURE".into()), ""), None);
}
+
// Reads input until b"\n" or EOF
// Returns vector of read bytes NOT including end of line characters
// or return None to indicate end of file
fn read_line_u8<R: BufRead>(r: &mut R) -> ::std::io::Result<Option<Vec<u8>>> {
    let mut line = Vec::with_capacity(16);
    // read_until returns 0 only at EOF with nothing read
    if r.read_until(b'\n', &mut line)? == 0 {
        return Ok(None);
    }
    if line.last() == Some(&b'\n') {
        line.pop();
    }
    Ok(Some(line))
}
+
#[test]
// Checks line splitting, empty lines, and the None at EOF.
fn read_line_u8_test() {
    let buf: Vec<_> = (&b"One\nTwo\nThree\nFour\n\n\n"[..]).into();
    let input = &mut ::std::io::Cursor::new(buf);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"One"[..]);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Two"[..]);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Three"[..]);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Four"[..]);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
    assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
    assert_eq!(read_line_u8(input).unwrap(), None);
}
+
/// Helper function for reading raw 3-channel f32 images
///
/// File layout: three little-endian u32 values (width, height, channel
/// count, which must be 3 — asserted), followed by width*height pixels of
/// three little-endian f32 values each.
pub fn read_raw_file<P: AsRef<Path>>(path: P) -> ::std::io::Result<Vec<Rgb<f32>>> {
    use byteorder::{LittleEndian as LE, ReadBytesExt};
    use std::fs::File;
    use std::io::BufReader;

    let mut r = BufReader::new(File::open(path)?);
    let w = r.read_u32::<LE>()? as usize;
    let h = r.read_u32::<LE>()? as usize;
    let c = r.read_u32::<LE>()? as usize;
    assert_eq!(c, 3);
    let cnt = w * h;
    let mut ret = Vec::with_capacity(cnt);
    for _ in 0..cnt {
        let cr = r.read_f32::<LE>()?;
        let cg = r.read_f32::<LE>()?;
        let cb = r.read_f32::<LE>()?;
        ret.push(Rgb([cr, cg, cb]));
    }
    Ok(ret)
}
diff --git a/third_party/rust/image/src/hdr/encoder.rs b/third_party/rust/image/src/hdr/encoder.rs
new file mode 100644
index 0000000000..285a44526a
--- /dev/null
+++ b/third_party/rust/image/src/hdr/encoder.rs
@@ -0,0 +1,431 @@
+use crate::color::Rgb;
+use crate::error::ImageResult;
+use crate::hdr::{rgbe8, RGBE8Pixel, SIGNATURE};
+use std::io::{Result, Write};
+use std::cmp::Ordering;
+
/// Radiance HDR encoder
pub struct HDREncoder<W: Write> {
    // Destination for the encoded stream.
    w: W,
}

impl<W: Write> HDREncoder<W> {
    /// Creates encoder
    pub fn new(w: W) -> HDREncoder<W> {
        HDREncoder { w }
    }

    /// Encodes the image ```data```
    /// that has dimensions ```width``` and ```height```
    ///
    /// Writes the Radiance header, then either uncompressed RGBE pixels
    /// (for widths outside the new-RLE range 8..=32768) or per-component
    /// RLE-compressed scanlines.
    pub fn encode(mut self, data: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> {
        assert!(data.len() >= width * height);
        let w = &mut self.w;
        w.write_all(SIGNATURE)?;
        w.write_all(b"\n")?;
        w.write_all(b"# Rust HDR encoder\n")?;
        w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?;
        w.write_all(format!("-Y {} +X {}\n", height, width).as_bytes())?;

        if width < 8 || width > 32_768 {
            // New-style RLE can't represent these widths; emit raw pixels.
            for &pix in data {
                write_rgbe8(w, to_rgbe8(pix))?;
            }
        } else {
            // new RLE marker contains scanline width
            let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8);
            // buffers for encoded pixels
            // (one plane per component: R, G, B, E)
            let mut bufr = vec![0; width];
            let mut bufg = vec![0; width];
            let mut bufb = vec![0; width];
            let mut bufe = vec![0; width];
            let mut rle_buf = vec![0; width];
            for scanline in data.chunks(width) {
                // Split each pixel of the scanline into the four planes.
                for ((((r, g), b), e), &pix) in bufr.iter_mut()
                    .zip(bufg.iter_mut())
                    .zip(bufb.iter_mut())
                    .zip(bufe.iter_mut())
                    .zip(scanline.iter())
                {
                    let cp = to_rgbe8(pix);
                    *r = cp.c[0];
                    *g = cp.c[1];
                    *b = cp.c[2];
                    *e = cp.e;
                }
                write_rgbe8(w, marker)?; // New RLE encoding marker
                rle_buf.clear();
                rle_compress(&bufr[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufg[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufb[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufe[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
            }
        }
        Ok(())
    }
}
+
// Classification of a chunk of input bytes for RLE encoding:
// Run(value, length) — `length` repetitions of `value`;
// Norun(start_index, length) — literal bytes at data[start_index..][..length].
#[derive(Debug, PartialEq, Eq)]
enum RunOrNot {
    Run(u8, usize),
    Norun(usize, usize),
}
use self::RunOrNot::{Norun, Run};

// Format limits: a run packet encodes at most 127 repeats,
// a literal packet at most 128 bytes.
const RUN_MAX_LEN: usize = 127;
const NORUN_MAX_LEN: usize = 128;

// Scans `data` from `curidx`, yielding raw Run/Norun chunks.
struct RunIterator<'a> {
    data: &'a [u8],
    curidx: usize,
}

impl<'a> RunIterator<'a> {
    fn new(data: &'a [u8]) -> RunIterator<'a> {
        RunIterator { data, curidx: 0 }
    }
}
+
+impl<'a> Iterator for RunIterator<'a> {
+ type Item = RunOrNot;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.curidx == self.data.len() {
+ None
+ } else {
+ let cv = self.data[self.curidx];
+ let crun = self.data[self.curidx..]
+ .iter()
+ .take_while(|&&v| v == cv)
+ .take(RUN_MAX_LEN)
+ .count();
+ let ret = if crun > 2 {
+ Run(cv, crun)
+ } else {
+ Norun(self.curidx, crun)
+ };
+ self.curidx += crun;
+ Some(ret)
+ }
+ }
+}
+
// Adaptor over RunIterator that merges adjacent Norun chunks
// (RunIterator emits Noruns of at most 2 bytes each) up to NORUN_MAX_LEN.
struct NorunCombineIterator<'a> {
    runiter: RunIterator<'a>,
    // One-item lookahead: a chunk produced but not yet emitted.
    prev: Option<RunOrNot>,
}

impl<'a> NorunCombineIterator<'a> {
    fn new(data: &'a [u8]) -> NorunCombineIterator<'a> {
        NorunCombineIterator {
            runiter: RunIterator::new(data),
            prev: None,
        }
    }
}
+
+// Combines sequential noruns produced by RunIterator
+impl<'a> Iterator for NorunCombineIterator<'a> {
+ type Item = RunOrNot;
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ match self.prev.take() {
+ Some(Run(c, len)) => {
+ // Just return stored run
+ return Some(Run(c, len));
+ }
+ Some(Norun(idx, len)) => {
+ // Let's see if we need to continue norun
+ match self.runiter.next() {
+ Some(Norun(_, len1)) => {
+ // norun continues
+ let clen = len + len1; // combined length
+ match clen.cmp(&NORUN_MAX_LEN) {
+ Ordering::Equal => return Some(Norun(idx, clen)),
+ Ordering::Greater => {
+ // combined norun exceeds maximum length. store extra part of norun
+ self.prev = Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN));
+ // then return maximal norun
+ return Some(Norun(idx, NORUN_MAX_LEN));
+ }
+ Ordering::Less => {
+ // len + len1 < NORUN_MAX_LEN
+ self.prev = Some(Norun(idx, len + len1));
+ // combine and continue loop
+ }
+ }
+ }
+ Some(Run(c, len1)) => {
+ // Run encountered. Store it
+ self.prev = Some(Run(c, len1));
+ return Some(Norun(idx, len)); // and return combined norun
+ }
+ None => {
+ // End of sequence
+ return Some(Norun(idx, len)); // return combined norun
+ }
+ }
+ } // End match self.prev.take() == Some(NoRun())
+ None => {
+ // No norun to combine
+ match self.runiter.next() {
+ Some(Norun(idx, len)) => {
+ self.prev = Some(Norun(idx, len));
+ // store for combine and continue the loop
+ }
+ Some(Run(c, len)) => {
+ // Some run. Just return it
+ return Some(Run(c, len));
+ }
+ None => {
+ // That's all, folks
+ return None;
+ }
+ }
+ } // End match self.prev.take() == None
+ } // End match
+ } // End loop
+ }
+}
+
// Appends RLE compressed ```data``` to ```rle```
// (rle is cleared first, so in fact it is replaced, not appended to)
fn rle_compress(data: &[u8], rle: &mut Vec<u8>) {
    rle.clear();
    if data.is_empty() {
        rle.push(0); // Technically correct. It means read next 0 bytes.
        return;
    }
    // Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128)
    // Prepend non-repeating chunk with its length
    // Replace repeating byte with (run length + 128) and the byte
    for rnr in NorunCombineIterator::new(data) {
        match rnr {
            Run(c, len) => {
                // NorunCombineIterator guarantees len <= RUN_MAX_LEN (127)
                assert!(len <= 127);
                rle.push(128u8 + len as u8);
                rle.push(c);
            }
            Norun(idx, len) => {
                // NorunCombineIterator guarantees len <= NORUN_MAX_LEN (128)
                assert!(len <= 128);
                rle.push(len as u8);
                rle.extend_from_slice(&data[idx..idx + len]);
            }
        }
    }
}
+
// Writes one pixel as the raw 4-byte RGBE wire format (R, G, B, exponent).
fn write_rgbe8<W: Write>(w: &mut W, v: RGBE8Pixel) -> Result<()> {
    w.write_all(&[v.c[0], v.c[1], v.c[2], v.e])
}
+
+/// Converts ```Rgb<f32>``` into ```RGBE8Pixel```
+///
+/// The shared exponent is derived from the largest channel; each channel is
+/// then stored as an 8-bit mantissa relative to that exponent.
+pub fn to_rgbe8(pix: Rgb<f32>) -> RGBE8Pixel {
+    let pix = pix.0;
+    let mx = f32::max(pix[0], f32::max(pix[1], pix[2]));
+    if mx <= 0.0 {
+        // All channels zero (or negative): encode as black with exponent byte 0.
+        RGBE8Pixel { c: [0, 0, 0], e: 0 }
+    } else {
+        // let (frac, exp) = mx.frexp(); // unstable yet
+        let exp = mx.log2().floor() as i32 + 1;
+        let mul = f32::powi(2.0, exp);
+        let mut conv = [0u8; 3];
+        for (cv, &sv) in conv.iter_mut().zip(pix.iter()) {
+            // Scale each channel into [0, 256) relative to the shared exponent.
+            *cv = f32::trunc(sv / mul * 256.0) as u8;
+        }
+        RGBE8Pixel {
+            c: conv,
+            // The exponent is stored with a bias of 128.
+            e: (exp + 128) as u8,
+        }
+    }
+}
+
+// Round-trip and precision tests for `to_rgbe8`.
+#[test]
+fn to_rgbe8_test() {
+    use crate::hdr::rgbe8;
+    // Exact round-trips for hand-picked pixels.
+    let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)];
+    for &pix in &test_cases {
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+    }
+    for mc in 128..255 {
+        // TODO: use inclusive range when stable
+        let pix = rgbe8(mc, mc, mc, 100);
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        let pix = rgbe8(mc, 0, mc, 130);
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        let pix = rgbe8(0, 0, mc, 140);
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        let pix = rgbe8(1, 0, mc, 150);
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        let pix = rgbe8(1, mc, 10, 128);
+        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        for c in 0..255 {
+            // Radiance HDR seems to be pre IEEE 754.
+            // exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32
+            // Let's exclude exponent value of -128 (0u8) from testing
+            let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c });
+            assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+        }
+    }
+    // Helper: maximal channel difference divided by maximal channel value.
+    fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 {
+        // maximal difference divided by maximal value
+        let max_diff = a.0
+            .iter()
+            .zip(b.0.iter())
+            .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs()));
+        let max_val = a.0
+            .iter()
+            .chain(b.0.iter())
+            .fold(0.0, |maxv, &a| f32::max(maxv, a));
+        if max_val == 0.0 {
+            0.0
+        } else {
+            max_diff / max_val
+        }
+    }
+    let test_values = vec![
+        0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0,
+        300_000.0,
+    ];
+    for &r in &test_values {
+        for &g in &test_values {
+            for &b in &test_values {
+                let c1 = Rgb([r, g, b]);
+                let c2 = to_rgbe8(c1).to_hdr();
+                let rel_dist = relative_dist(c1, c2);
+                // Maximal value is normalized to the range 128..256, thus we have 1/128 precision
+                assert!(
+                    rel_dist <= 1.0 / 128.0,
+                    "Relative distance ({}) exceeds 1/128 for {:?} and {:?}",
+                    rel_dist,
+                    c1,
+                    c2
+                );
+            }
+        }
+    }
+}
+
+// Small-input cases for RunIterator's Run/Norun chunking.
+#[test]
+fn runiterator_test() {
+    let data = [];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), None);
+    let data = [5];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
+    assert_eq!(run_iter.next(), None);
+    let data = [1, 1];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+    assert_eq!(run_iter.next(), None);
+    let data = [0, 0, 0];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
+    assert_eq!(run_iter.next(), None);
+    let data = [0, 0, 1, 1];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+    assert_eq!(run_iter.next(), Some(Norun(2, 2)));
+    assert_eq!(run_iter.next(), None);
+    let data = [0, 0, 0, 1, 1];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
+    assert_eq!(run_iter.next(), Some(Norun(3, 2)));
+    assert_eq!(run_iter.next(), None);
+    let data = [1, 2, 2, 2];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
+    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+    assert_eq!(run_iter.next(), None);
+    let data = [1, 1, 2, 2, 2];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+    assert_eq!(run_iter.next(), None);
+    // Runs are capped at 127 bytes; per these expectations a remainder shorter
+    // than 3 bytes is emitted as a Norun rather than a new Run.
+    let data = [2; 128];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+    assert_eq!(run_iter.next(), Some(Norun(127, 1)));
+    assert_eq!(run_iter.next(), None);
+    let data = [2; 129];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+    assert_eq!(run_iter.next(), Some(Norun(127, 2)));
+    assert_eq!(run_iter.next(), None);
+    let data = [2; 130];
+    let mut run_iter = RunIterator::new(&data[..]);
+    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+    assert_eq!(run_iter.next(), None);
+}
+
+// NorunCombineIterator merges adjacent Noruns (capped at 128 bytes) and
+// passes Runs through (capped at 127 bytes), per the expectations below.
+#[test]
+fn noruncombine_test() {
+    fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> {
+        v.append(&mut other);
+        v
+    }
+
+    let v = vec![];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), None);
+
+    let v = vec![1];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Norun(0, 1)));
+    assert_eq!(rsi.next(), None);
+
+    let v = vec![2, 2];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Norun(0, 2)));
+    assert_eq!(rsi.next(), None);
+
+    let v = vec![3, 3, 3];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Run(3, 3)));
+    assert_eq!(rsi.next(), None);
+
+    let v = vec![4, 4, 3, 3, 3];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Norun(0, 2)));
+    assert_eq!(rsi.next(), Some(Run(3, 3)));
+    assert_eq!(rsi.next(), None);
+
+    // A long run is split into maximal 127-byte pieces.
+    let v = vec![40; 400];
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Run(40, 127)));
+    assert_eq!(rsi.next(), Some(Run(40, 127)));
+    assert_eq!(rsi.next(), Some(Run(40, 127)));
+    assert_eq!(rsi.next(), Some(Run(40, 19)));
+    assert_eq!(rsi.next(), None);
+
+    let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]);
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Run(5, 3)));
+    assert_eq!(rsi.next(), Some(Run(6, 127)));
+    assert_eq!(rsi.next(), Some(Norun(130, 7)));
+    assert_eq!(rsi.next(), None);
+
+    let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]);
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Norun(0, 2)));
+    assert_eq!(rsi.next(), Some(Run(6, 127)));
+    assert_eq!(rsi.next(), Some(Norun(129, 7)));
+    assert_eq!(rsi.next(), None);
+
+    // A long non-run is split into maximal 128-byte pieces.
+    let v: Vec<_> = ::std::iter::repeat(())
+        .flat_map(|_| (0..2))
+        .take(257)
+        .collect();
+    let mut rsi = NorunCombineIterator::new(&v[..]);
+    assert_eq!(rsi.next(), Some(Norun(0, 128)));
+    assert_eq!(rsi.next(), Some(Norun(128, 128)));
+    assert_eq!(rsi.next(), Some(Norun(256, 1)));
+    assert_eq!(rsi.next(), None);
+}
diff --git a/third_party/rust/image/src/hdr/mod.rs b/third_party/rust/image/src/hdr/mod.rs
new file mode 100644
index 0000000000..b3325bc648
--- /dev/null
+++ b/third_party/rust/image/src/hdr/mod.rs
@@ -0,0 +1,15 @@
+//! Decoding of Radiance HDR Images
+//!
+//! A decoder for Radiance HDR images
+//!
+//! # Related Links
+//!
+//! * <http://radsite.lbl.gov/radiance/refer/filefmts.pdf>
+//! * <http://www.graphics.cornell.edu/~bjw/rgbe/rgbe.c>
+//!
+
+mod decoder;
+mod encoder;
+
+pub use self::decoder::*;
+pub use self::encoder::*;
diff --git a/third_party/rust/image/src/ico/decoder.rs b/third_party/rust/image/src/ico/decoder.rs
new file mode 100644
index 0000000000..0dfc44f112
--- /dev/null
+++ b/third_party/rust/image/src/ico/decoder.rs
@@ -0,0 +1,284 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Seek, SeekFrom};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, ImageDecoder};
+
+use self::InnerDecoder::*;
+use crate::bmp::BmpDecoder;
+use crate::png::PngDecoder;
+
+// http://www.w3.org/TR/PNG-Structure.html
+// The first eight bytes of a PNG file always contain the following (decimal) values:
+const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];
+
+/// An ico decoder
+pub struct IcoDecoder<R: Read> {
+    // Directory entry describing the image that was selected for decoding.
+    selected_entry: DirEntry,
+    // Decoder for the underlying payload (BMP or PNG).
+    inner_decoder: InnerDecoder<R>,
+}
+
+// An ICO file stores each image either as a BMP payload (without file header)
+// or as a complete PNG stream; this selects the matching decoder.
+enum InnerDecoder<R: Read> {
+    BMP(BmpDecoder<R>),
+    PNG(PngDecoder<R>),
+}
+
+// One on-disk ICONDIRENTRY record. A stored width/height of 0 encodes 256
+// (see `real_width`/`real_height`).
+#[derive(Clone, Copy, Default)]
+struct DirEntry {
+    width: u8,
+    height: u8,
+    color_count: u8,
+    reserved: u8,
+
+    // Color planes for ICO, or the hotspot x coordinate for CUR files.
+    num_color_planes: u16,
+    // Bit depth for ICO, or the hotspot y coordinate for CUR files.
+    bits_per_pixel: u16,
+
+    // Byte length and absolute file offset of the image payload.
+    image_length: u32,
+    image_offset: u32,
+}
+
+impl<R: Read + Seek> IcoDecoder<R> {
+    /// Create a new decoder that decodes from the stream ```r```
+    pub fn new(mut r: R) -> ImageResult<IcoDecoder<R>> {
+        // Parse the ICONDIR, pick the best-looking directory entry, then hand
+        // the stream to the matching BMP/PNG payload decoder.
+        let entries = read_entries(&mut r)?;
+        let entry = best_entry(entries)?;
+        let decoder = entry.decoder(r)?;
+
+        Ok(IcoDecoder {
+            selected_entry: entry,
+            inner_decoder: decoder,
+        })
+    }
+}
+
+/// Reads the 6-byte ICONDIR header and all directory entries that follow it.
+fn read_entries<R: Read>(r: &mut R) -> ImageResult<Vec<DirEntry>> {
+    // Header layout: reserved word, resource type, image count.
+    let _reserved = r.read_u16::<LittleEndian>()?;
+    let _type = r.read_u16::<LittleEndian>()?;
+    let count = r.read_u16::<LittleEndian>()?;
+    let mut entries = Vec::with_capacity(usize::from(count));
+    for _ in 0..count {
+        entries.push(read_entry(r)?);
+    }
+    Ok(entries)
+}
+
+/// Reads one 16-byte ICONDIRENTRY record from the directory.
+fn read_entry<R: Read>(r: &mut R) -> ImageResult<DirEntry> {
+    let mut entry = DirEntry::default();
+
+    entry.width = r.read_u8()?;
+    entry.height = r.read_u8()?;
+    entry.color_count = r.read_u8()?;
+    // Reserved value (not used)
+    entry.reserved = r.read_u8()?;
+
+    // This may be either the number of color planes (0 or 1), or the horizontal coordinate
+    // of the hotspot for CUR files.
+    entry.num_color_planes = r.read_u16::<LittleEndian>()?;
+    if entry.num_color_planes > 256 {
+        return Err(ImageError::FormatError(
+            "ICO image entry has a too large color planes/hotspot value".to_string(),
+        ));
+    }
+
+    // This may be either the bit depth (may be 0 meaning unspecified),
+    // or the vertical coordinate of the hotspot for CUR files.
+    entry.bits_per_pixel = r.read_u16::<LittleEndian>()?;
+    if entry.bits_per_pixel > 256 {
+        return Err(ImageError::FormatError(
+            "ICO image entry has a too large bits per pixel/hotspot value".to_string(),
+        ));
+    }
+
+    entry.image_length = r.read_u32::<LittleEndian>()?;
+    entry.image_offset = r.read_u32::<LittleEndian>()?;
+
+    Ok(entry)
+}
+
+/// Find the entry with the highest (color depth, pixel area), compared
+/// lexicographically; errors with `ImageError::ImageEnd` on an empty directory.
+fn best_entry(mut entries: Vec<DirEntry>) -> ImageResult<DirEntry> {
+    // Scoring key: bit depth first, then total pixel count.
+    fn score(entry: &DirEntry) -> (u16, u32) {
+        (
+            entry.bits_per_pixel,
+            u32::from(entry.real_width()) * u32::from(entry.real_height()),
+        )
+    }
+
+    let mut best = entries.pop().ok_or(ImageError::ImageEnd)?;
+    let mut best_score = score(&best);
+
+    for entry in entries {
+        let entry_score = score(&entry);
+        if entry_score > best_score {
+            best_score = entry_score;
+            best = entry;
+        }
+    }
+    Ok(best)
+}
+
+impl DirEntry {
+    /// Width in pixels; the stored byte value 0 encodes 256.
+    fn real_width(&self) -> u16 {
+        match self.width {
+            0 => 256,
+            w => u16::from(w),
+        }
+    }
+
+    /// Height in pixels; the stored byte value 0 encodes 256.
+    fn real_height(&self) -> u16 {
+        match self.height {
+            0 => 256,
+            h => u16::from(h),
+        }
+    }
+
+    /// Whether this entry claims the given decoded dimensions.
+    fn matches_dimensions(&self, width: u32, height: u32) -> bool {
+        u32::from(self.real_width()) == width && u32::from(self.real_height()) == height
+    }
+
+    /// Seeks to the first byte of this entry's image payload.
+    fn seek_to_start<R: Read + Seek>(&self, r: &mut R) -> ImageResult<()> {
+        r.seek(SeekFrom::Start(u64::from(self.image_offset)))?;
+        Ok(())
+    }
+
+    /// Sniffs the payload: true if it starts with the 8-byte PNG signature.
+    fn is_png<R: Read + Seek>(&self, r: &mut R) -> ImageResult<bool> {
+        self.seek_to_start(r)?;
+
+        // Read the first 8 bytes to sniff the image.
+        let mut signature = [0u8; 8];
+        r.read_exact(&mut signature)?;
+
+        Ok(signature == PNG_SIGNATURE)
+    }
+
+    /// Builds the PNG or BMP decoder appropriate for this entry's payload,
+    /// leaving the stream positioned at the payload start.
+    fn decoder<R: Read + Seek>(&self, mut r: R) -> ImageResult<InnerDecoder<R>> {
+        let is_png = self.is_png(&mut r)?;
+        self.seek_to_start(&mut r)?;
+
+        if is_png {
+            Ok(PNG(PngDecoder::new(r)?))
+        } else {
+            Ok(BMP(BmpDecoder::new_with_ico_format(r)?))
+        }
+    }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct IcoReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for IcoReader<R> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        // Fast path: nothing has been consumed yet and the caller's buffer is
+        // empty, so hand over the backing vector instead of copying it.
+        if self.0.position() == 0 && buf.is_empty() {
+            mem::swap(buf, self.0.get_mut());
+            Ok(buf.len())
+        } else {
+            self.0.read_to_end(buf)
+        }
+    }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for IcoDecoder<R> {
+    type Reader = IcoReader<R>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        // Delegate to whichever payload decoder was selected.
+        match self.inner_decoder {
+            BMP(ref decoder) => decoder.dimensions(),
+            PNG(ref decoder) => decoder.dimensions(),
+        }
+    }
+
+    fn color_type(&self) -> ColorType {
+        match self.inner_decoder {
+            BMP(ref decoder) => decoder.color_type(),
+            PNG(ref decoder) => decoder.color_type(),
+        }
+    }
+
+    fn into_reader(self) -> ImageResult<Self::Reader> {
+        Ok(IcoReader(Cursor::new(image::decoder_to_vec(self)?), PhantomData))
+    }
+
+    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+        match self.inner_decoder {
+            PNG(decoder) => {
+                if self.selected_entry.image_length < PNG_SIGNATURE.len() as u32 {
+                    return Err(ImageError::FormatError(
+                        "Entry specified a length that is shorter than PNG header!".to_string(),
+                    ));
+                }
+
+                // Check if the image dimensions match the ones in the image data.
+                let (width, height) = decoder.dimensions();
+                if !self.selected_entry.matches_dimensions(width, height) {
+                    return Err(ImageError::FormatError(
+                        "Entry and PNG dimensions do not match!".to_string(),
+                    ));
+                }
+
+                // Embedded PNG images can only be of the 32BPP RGBA format.
+                // https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/
+                let color_type = decoder.color_type();
+                if let ColorType::Rgba8 = color_type {
+                } else {
+                    return Err(ImageError::FormatError(
+                        "The PNG is not in RGBA format!".to_string(),
+                    ));
+                }
+
+                decoder.read_image(buf)
+            }
+            BMP(mut decoder) => {
+                let (width, height) = decoder.dimensions();
+                if !self.selected_entry.matches_dimensions(width, height) {
+                    // BUGFIX: the original passed the literal "{:?}" format
+                    // string to `.to_string()`, so the placeholders were never
+                    // filled in; use `format!` with the actual dimensions.
+                    return Err(ImageError::FormatError(format!(
+                        "Entry({:?}) and BMP({:?}) dimensions do not match!",
+                        (
+                            self.selected_entry.real_width(),
+                            self.selected_entry.real_height()
+                        ),
+                        (width, height)
+                    )));
+                }
+
+                // The ICO decoder needs an alpha channel to apply the AND mask.
+                if decoder.color_type() != ColorType::Rgba8 {
+                    return Err(ImageError::UnsupportedColor(decoder.color_type().into()));
+                }
+
+                decoder.read_image_data(buf)?;
+
+                // If there's an AND mask following the image, read and apply it.
+                let r = decoder.reader();
+                let mask_start = r.seek(SeekFrom::Current(0))?;
+                // Widen before adding so a malformed entry cannot overflow u32,
+                // and saturate the subtraction so an entry that claims to end
+                // before the decoded data yields "no mask" instead of a panic.
+                let mask_end = u64::from(self.selected_entry.image_offset)
+                    + u64::from(self.selected_entry.image_length);
+                let mask_length = mask_end.saturating_sub(mask_start);
+
+                if mask_length > 0 {
+                    // A mask row contains 1 bit per pixel, padded to 4 bytes.
+                    let mask_row_bytes = ((width + 31) / 32) * 4;
+                    let expected_length = u64::from(mask_row_bytes) * u64::from(height);
+                    if mask_length < expected_length {
+                        return Err(ImageError::ImageEnd);
+                    }
+
+                    // BMP rows are stored bottom-up, hence the `height - y - 1`
+                    // row index when writing into `buf`.
+                    for y in 0..height {
+                        let mut x = 0;
+                        for _ in 0..mask_row_bytes {
+                            // Apply the bits of each byte until we reach the end of the row.
+                            let mask_byte = r.read_u8()?;
+                            for bit in (0..8).rev() {
+                                if x >= width {
+                                    break;
+                                }
+                                if mask_byte & (1 << bit) != 0 {
+                                    // Set alpha channel to transparent.
+                                    buf[((height - y - 1) * width + x) as usize * 4 + 3] = 0;
+                                }
+                                x += 1;
+                            }
+                        }
+                    }
+                }
+                Ok(())
+            }
+        }
+    }
+}
diff --git a/third_party/rust/image/src/ico/encoder.rs b/third_party/rust/image/src/ico/encoder.rs
new file mode 100644
index 0000000000..57a14a1ce8
--- /dev/null
+++ b/third_party/rust/image/src/ico/encoder.rs
@@ -0,0 +1,113 @@
+use byteorder::{LittleEndian, WriteBytesExt};
+use std::io::{self, Write};
+
+use crate::color::ColorType;
+use crate::error::ImageResult;
+use crate::image::ImageEncoder;
+
+use crate::png::PNGEncoder;
+
+// Enum value indicating an ICO image (as opposed to a CUR image):
+const ICO_IMAGE_TYPE: u16 = 1;
+// The length of an ICO file ICONDIR structure, in bytes:
+const ICO_ICONDIR_SIZE: u32 = 6;
+// The length of an ICO file DIRENTRY structure, in bytes:
+const ICO_DIRENTRY_SIZE: u32 = 16;
+
+/// ICO encoder
+pub struct ICOEncoder<W: Write> {
+    // Destination for the encoded ICO container.
+    w: W,
+}
+
+impl<W: Write> ICOEncoder<W> {
+    /// Create a new encoder that writes its output to ```w```.
+    pub fn new(w: W) -> ICOEncoder<W> {
+        ICOEncoder { w }
+    }
+
+    /// Encodes the image ```image``` that has dimensions ```width``` and
+    /// ```height``` and ```ColorType``` ```c```. The dimensions of the image
+    /// must be between 1 and 256 (inclusive) or an error will be returned.
+    ///
+    /// The pixel data is stored PNG-compressed and the resulting file contains
+    /// exactly one directory entry.
+    pub fn encode(
+        mut self,
+        data: &[u8],
+        width: u32,
+        height: u32,
+        color: ColorType,
+    ) -> ImageResult<()> {
+        // Encode the payload first so its size is known for the directory entry.
+        let mut image_data: Vec<u8> = Vec::new();
+        PNGEncoder::new(&mut image_data).encode(data, width, height, color)?;
+
+        write_icondir(&mut self.w, 1)?;
+        write_direntry(
+            &mut self.w,
+            width,
+            height,
+            color,
+            // The single image starts right after the two fixed-size headers.
+            ICO_ICONDIR_SIZE + ICO_DIRENTRY_SIZE,
+            image_data.len() as u32,
+        )?;
+        self.w.write_all(&image_data)?;
+        Ok(())
+    }
+}
+
+impl<W: Write> ImageEncoder for ICOEncoder<W> {
+    // Trait adapter: forwards directly to the inherent `encode` method.
+    fn write_image(
+        self,
+        buf: &[u8],
+        width: u32,
+        height: u32,
+        color_type: ColorType,
+    ) -> ImageResult<()> {
+        self.encode(buf, width, height, color_type)
+    }
+}
+
+/// Writes the 6-byte ICONDIR file header.
+fn write_icondir<W: Write>(w: &mut W, num_images: u16) -> io::Result<()> {
+    // Header layout: reserved word (must be zero), image type (ICO, not CUR),
+    // then the number of images contained in the file.
+    for &word in &[0, ICO_IMAGE_TYPE, num_images] {
+        w.write_u16::<LittleEndian>(word)?;
+    }
+    Ok(())
+}
+
+/// Writes one 16-byte ICONDIRENTRY describing `data_size` bytes of image data
+/// located at absolute file offset `data_start`.
+fn write_direntry<W: Write>(
+    w: &mut W,
+    width: u32,
+    height: u32,
+    color: ColorType,
+    data_start: u32,
+    data_size: u32,
+) -> io::Result<()> {
+    // Image dimensions:
+    write_width_or_height(w, width)?;
+    write_width_or_height(w, height)?;
+    // Number of colors in palette (or zero for no palette):
+    w.write_u8(0)?;
+    // Reserved field (must be zero):
+    w.write_u8(0)?;
+    // Color planes:
+    w.write_u16::<LittleEndian>(0)?;
+    // Bits per pixel:
+    w.write_u16::<LittleEndian>(color.bits_per_pixel())?;
+    // Image data size, in bytes:
+    w.write_u32::<LittleEndian>(data_size)?;
+    // Image data offset, in bytes:
+    w.write_u32::<LittleEndian>(data_start)?;
+    Ok(())
+}
+
+/// Encode a width/height value as a single byte, where 0 means 256.
+/// Values outside 1..=256 are rejected with `InvalidData`.
+fn write_width_or_height<W: Write>(w: &mut W, value: u32) -> io::Result<()> {
+    match value {
+        1..=255 => w.write_u8(value as u8),
+        // 256 cannot be represented in a byte; the format encodes it as 0.
+        256 => w.write_u8(0),
+        _ => Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "Invalid ICO dimensions (width and \
+             height must be between 1 and 256)",
+        )),
+    }
+}
diff --git a/third_party/rust/image/src/ico/mod.rs b/third_party/rust/image/src/ico/mod.rs
new file mode 100644
index 0000000000..fd65df1f6e
--- /dev/null
+++ b/third_party/rust/image/src/ico/mod.rs
@@ -0,0 +1,13 @@
+//! Decoding and Encoding of ICO files
+//!
+//! A decoder and encoder for ICO (Windows Icon) image container files.
+//!
+//! # Related Links
+//! * <https://msdn.microsoft.com/en-us/library/ms997538.aspx>
+//! * <https://en.wikipedia.org/wiki/ICO_%28file_format%29>
+
+pub use self::decoder::IcoDecoder;
+pub use self::encoder::ICOEncoder;
+
+mod decoder;
+mod encoder;
diff --git a/third_party/rust/image/src/image.rs b/third_party/rust/image/src/image.rs
new file mode 100644
index 0000000000..f761902b2a
--- /dev/null
+++ b/third_party/rust/image/src/image.rs
@@ -0,0 +1,1088 @@
+#![allow(clippy::too_many_arguments)]
+use std::convert::TryFrom;
+use std::io;
+use std::io::Read;
+use std::ops::{Deref, DerefMut};
+use std::path::Path;
+
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{ImageError, ImageResult};
+use crate::math::Rect;
+
+use crate::animation::Frames;
+
+#[cfg(feature = "pnm")]
+use crate::pnm::PNMSubtype;
+
+/// An enumeration of supported image formats.
+/// Not all formats support both encoding and decoding.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
+pub enum ImageFormat {
+    /// An Image in PNG Format
+    Png,
+
+    /// An Image in JPEG Format
+    Jpeg,
+
+    /// An Image in GIF Format
+    Gif,
+
+    /// An Image in WEBP Format
+    WebP,
+
+    /// An Image in general PNM Format
+    Pnm,
+
+    /// An Image in TIFF Format
+    Tiff,
+
+    /// An Image in TGA Format
+    Tga,
+
+    /// An Image in DDS Format
+    Dds,
+
+    /// An Image in BMP Format
+    Bmp,
+
+    /// An Image in ICO Format
+    Ico,
+
+    /// An Image in Radiance HDR Format
+    Hdr,
+
+    // Hidden variant reserving the right to add formats without a breaking change.
+    #[doc(hidden)]
+    __NonExhaustive(crate::utils::NonExhaustiveMarker),
+}
+
+impl ImageFormat {
+    /// Return the image format specified by the path's file extension.
+    ///
+    /// The actual extension lookup lives in `crate::io::free_functions`.
+    pub fn from_path<P>(path: P) -> ImageResult<Self> where P : AsRef<Path> {
+        // thin wrapper function to strip generics before calling from_path_impl
+        crate::io::free_functions::guess_format_from_path_impl(path.as_ref())
+            .map_err(Into::into)
+    }
+}
+
+/// An enumeration of supported image formats for encoding.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum ImageOutputFormat {
+    #[cfg(feature = "png")]
+    /// An Image in PNG Format
+    Png,
+
+    #[cfg(feature = "jpeg")]
+    /// An Image in JPEG Format with specified quality
+    /// (the `From<ImageFormat>` conversion uses 75 as the default)
+    Jpeg(u8),
+
+    #[cfg(feature = "pnm")]
+    /// An Image in one of the PNM Formats
+    Pnm(PNMSubtype),
+
+    #[cfg(feature = "gif")]
+    /// An Image in GIF Format
+    Gif,
+
+    #[cfg(feature = "ico")]
+    /// An Image in ICO Format
+    Ico,
+
+    #[cfg(feature = "bmp")]
+    /// An Image in BMP Format
+    Bmp,
+
+    /// A value for signalling an error: An unsupported format was requested
+    // Note: When TryFrom is stabilized, this value should not be needed, and
+    // a TryInto<ImageOutputFormat> should be used instead of an Into<ImageOutputFormat>.
+    Unsupported(String),
+
+    // Hidden variant reserving the right to add formats without a breaking change.
+    #[doc(hidden)]
+    __NonExhaustive(crate::utils::NonExhaustiveMarker),
+}
+
+impl From<ImageFormat> for ImageOutputFormat {
+    fn from(fmt: ImageFormat) -> Self {
+        match fmt {
+            #[cfg(feature = "png")]
+            ImageFormat::Png => ImageOutputFormat::Png,
+            // JPEG gets a default quality factor of 75.
+            #[cfg(feature = "jpeg")]
+            ImageFormat::Jpeg => ImageOutputFormat::Jpeg(75),
+            #[cfg(feature = "pnm")]
+            ImageFormat::Pnm => ImageOutputFormat::Pnm(PNMSubtype::ArbitraryMap),
+            #[cfg(feature = "gif")]
+            ImageFormat::Gif => ImageOutputFormat::Gif,
+            #[cfg(feature = "ico")]
+            ImageFormat::Ico => ImageOutputFormat::Ico,
+            #[cfg(feature = "bmp")]
+            ImageFormat::Bmp => ImageOutputFormat::Bmp,
+
+            // Catch-all: decode-only formats and formats whose encoder
+            // feature is disabled map to the `Unsupported` error value.
+            f => ImageOutputFormat::Unsupported(format!(
+                "Image format {:?} not supported for encoding.",
+                f
+            )),
+        }
+    }
+}
+
+// This struct manages buffering associated with implementing `Read` and `Seek` on decoders that
+// must decode ranges of bytes at a time.
+pub(crate) struct ImageReadBuffer {
+    // Size of a full scanline in bytes (the decoder's natural read unit).
+    scanline_bytes: usize,
+    // Holds one decoded scanline when callers read less than a scanline.
+    buffer: Vec<u8>,
+    // Number of bytes of `buffer` already handed out to callers.
+    consumed: usize,
+
+    // Total decoded size of the image in bytes.
+    total_bytes: u64,
+    // Number of bytes decoded from the underlying decoder so far.
+    offset: u64,
+}
+impl ImageReadBuffer {
+    /// Create a new ImageReadBuffer.
+    ///
+    /// Panics if scanline_bytes doesn't fit into a usize, because that would mean reading anything
+    /// from the image would take more RAM than the entire virtual address space. In other words,
+    /// actually using this struct would instantly OOM so just get it out of the way now.
+    pub(crate) fn new(scanline_bytes: u64, total_bytes: u64) -> Self {
+        Self {
+            scanline_bytes: usize::try_from(scanline_bytes).unwrap(),
+            buffer: Vec::new(),
+            consumed: 0,
+            total_bytes,
+            offset: 0,
+        }
+    }
+
+    /// `Read`-style entry point. `read_scanline` decodes the next scanline
+    /// into the supplied slice and returns the number of bytes produced.
+    pub(crate) fn read<F>(&mut self, buf: &mut [u8], mut read_scanline: F) -> io::Result<usize>
+    where
+        F: FnMut(&mut [u8]) -> io::Result<usize>,
+    {
+        if self.buffer.len() == self.consumed {
+            // Internal buffer exhausted: report EOF, or decode more data.
+            if self.offset == self.total_bytes {
+                return Ok(0);
+            } else if buf.len() >= self.scanline_bytes {
+                // If there is nothing buffered and the user requested a full scanline worth of
+                // data, skip buffering.
+                let bytes_read = read_scanline(&mut buf[..self.scanline_bytes])?;
+                self.offset += u64::try_from(bytes_read).unwrap();
+                return Ok(bytes_read);
+            } else {
+                // Lazily allocate buffer the first time that read is called with a buffer smaller
+                // than the scanline size.
+                if self.buffer.is_empty() {
+                    self.buffer.resize(self.scanline_bytes, 0);
+                }
+
+                self.consumed = 0;
+                let bytes_read = read_scanline(&mut self.buffer[..])?;
+                self.buffer.resize(bytes_read, 0);
+                self.offset += u64::try_from(bytes_read).unwrap();
+
+                // Only the final scanline may come up short.
+                assert!(bytes_read == self.scanline_bytes || self.offset == self.total_bytes);
+            }
+        }
+
+        // Finally, copy bytes into output buffer.
+        let bytes_buffered = self.buffer.len() - self.consumed;
+        if bytes_buffered > buf.len() {
+            crate::copy_memory(&self.buffer[self.consumed..][..buf.len()], &mut buf[..]);
+            self.consumed += buf.len();
+            Ok(buf.len())
+        } else {
+            crate::copy_memory(&self.buffer[self.consumed..], &mut buf[..bytes_buffered]);
+            self.consumed = self.buffer.len();
+            Ok(bytes_buffered)
+        }
+    }
+}
+
+/// Decodes a specific region of the image, represented by the rectangle
+/// starting from ```x``` and ```y``` and having ```length``` and ```width```
+///
+/// `seek_scanline` positions the decoder at a scanline index, `read_scanline`
+/// decodes the next scanline into a buffer, and decode progress is reported
+/// through `progress_callback`.
+pub(crate) fn load_rect<'a, D, F, F1, F2, E>(x: u32, y: u32, width: u32, height: u32, buf: &mut [u8],
+                                             progress_callback: F,
+                                             decoder: &mut D,
+                                             mut seek_scanline: F1,
+                                             mut read_scanline: F2) -> ImageResult<()>
+    where D: ImageDecoder<'a>,
+          F: Fn(Progress),
+          F1: FnMut(&mut D, u64) -> io::Result<()>,
+          F2: FnMut(&mut D, &mut [u8]) -> Result<usize, E>,
+          ImageError: From<E>,
+{
+    let (x, y, width, height) = (u64::from(x), u64::from(y), u64::from(width), u64::from(height));
+    let dimensions = decoder.dimensions();
+    let bytes_per_pixel = u64::from(decoder.color_type().bytes_per_pixel());
+    let row_bytes = bytes_per_pixel * u64::from(dimensions.0);
+    let scanline_bytes = decoder.scanline_bytes();
+    let total_bytes = width * height * bytes_per_pixel;
+
+    let mut bytes_read = 0u64;
+    let mut current_scanline = 0;
+    let mut tmp = Vec::new();
+
+    {
+        // Read a range of the image starting from byte number `start` and continuing until byte
+        // number `end`. Updates `current_scanline` and `bytes_read` appropriately.
+        let mut read_image_range = |start: u64, end: u64| -> ImageResult<()> {
+            let target_scanline = start / scanline_bytes;
+            if target_scanline != current_scanline {
+                seek_scanline(decoder, target_scanline)?;
+                current_scanline = target_scanline;
+            }
+
+            let mut position = current_scanline * scanline_bytes;
+            while position < end {
+                if position >= start && end - position >= scanline_bytes {
+                    // Fast path: this scanline lies entirely inside the
+                    // requested range, so decode straight into `buf`.
+                    read_scanline(decoder, &mut buf[(bytes_read as usize)..]
+                                                   [..(scanline_bytes as usize)])?;
+                    bytes_read += scanline_bytes;
+                } else {
+                    // Partial scanline: decode into `tmp`, then copy the overlap.
+                    tmp.resize(scanline_bytes as usize, 0u8);
+                    read_scanline(decoder, &mut tmp)?;
+
+                    let offset = start.saturating_sub(position);
+                    let len = (end - start)
+                        .min(scanline_bytes - offset)
+                        .min(end - position);
+
+                    buf[(bytes_read as usize)..][..len as usize]
+                        .copy_from_slice(&tmp[offset as usize..][..len as usize]);
+                    bytes_read += len;
+                }
+
+                current_scanline += 1;
+                position += scanline_bytes;
+                progress_callback(Progress {current: bytes_read, total: total_bytes});
+            }
+            Ok(())
+        };
+
+        // BUGFIX: the vertical extent must be checked against the image height
+        // (dimensions.1); the original compared it against the width.
+        if x + width > u64::from(dimensions.0) || y + height > u64::from(dimensions.1)
+            || width == 0 || height == 0 {
+            return Err(ImageError::DimensionError);
+        }
+        if scanline_bytes > usize::max_value() as u64 {
+            return Err(ImageError::InsufficientMemory);
+        }
+
+        progress_callback(Progress {current: 0, total: total_bytes});
+        if x == 0 && width == u64::from(dimensions.0) {
+            // Full-width rectangle: the region is contiguous in memory.
+            let start = x * bytes_per_pixel + y * row_bytes;
+            let end = (x + width) * bytes_per_pixel + (y + height - 1) * row_bytes;
+            read_image_range(start, end)?;
+        } else {
+            // Otherwise read the rectangle row by row.
+            for row in y..(y+height) {
+                let start = x * bytes_per_pixel + row * row_bytes;
+                let end = (x + width) * bytes_per_pixel + row * row_bytes;
+                read_image_range(start, end)?;
+            }
+        }
+    }
+
+    // Seek back to the start
+    Ok(seek_scanline(decoder, 0)?)
+}
+
+/// Reads all of the bytes of a decoder into a Vec<T>. No particular alignment
+/// of the output buffer is guaranteed.
+///
+/// Panics if there isn't enough memory to decode the image.
+pub(crate) fn decoder_to_vec<'a, T>(decoder: impl ImageDecoder<'a>) -> ImageResult<Vec<T>>
+where
+    T: crate::traits::Primitive + bytemuck::Pod,
+{
+    // Allocate a zeroed buffer of `total_bytes / size_of::<T>()` elements and
+    // let the decoder fill it through a safe byte-slice view (bytemuck::Pod
+    // guarantees the cast is sound).
+    let mut buf = vec![num_traits::Zero::zero(); usize::try_from(decoder.total_bytes()).unwrap() / std::mem::size_of::<T>()];
+    decoder.read_image(bytemuck::cast_slice_mut(buf.as_mut_slice()))?;
+    Ok(buf)
+}
+
+/// Represents the progress of an image operation.
+///
+/// Note that this is not necessarily accurate and no change to the values passed to the progress
+/// function during decoding will be considered breaking. A decoder could in theory report the
+/// progress `(0, 0)` if progress is unknown, without violating the interface contract of the type.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct Progress {
+    // Amount of work completed so far.
+    current: u64,
+    // Total amount of work.
+    total: u64,
+}
+
+impl Progress {
+    /// A measure of completed decoding.
+    pub fn current(self) -> u64 {
+        self.current
+    }
+
+    /// A measure of all necessary decoding work.
+    ///
+    /// This is in general greater or equal than `current`.
+    pub fn total(self) -> u64 {
+        self.total
+    }
+
+    /// Calculate a measure for remaining decoding work.
+    pub fn remaining(self) -> u64 {
+        // `max` guards against a decoder that reports current > total.
+        self.total.max(self.current) - self.current
+    }
+}
+
+/// The trait that all decoders implement
+pub trait ImageDecoder<'a>: Sized {
+    /// The type of reader produced by `into_reader`.
+    type Reader: Read + 'a;
+
+    /// Returns a tuple containing the width and height of the image
+    fn dimensions(&self) -> (u32, u32);
+
+    /// Returns the color type of the image data produced by this decoder
+    fn color_type(&self) -> ColorType;
+
+    /// Returns the color type of the image file before decoding
+    fn original_color_type(&self) -> ExtendedColorType {
+        self.color_type().into()
+    }
+
+    /// Returns a reader that can be used to obtain the bytes of the image. For the best
+    /// performance, always try to read at least `scanline_bytes` from the reader at a time. Reading
+    /// fewer bytes will cause the reader to perform internal buffering.
+    fn into_reader(self) -> ImageResult<Self::Reader>;
+
+    /// Returns the total number of bytes in the decoded image.
+    ///
+    /// This is the size of the buffer that must be passed to `read_image` or
+    /// `read_image_with_progress`. The returned value may exceed usize::MAX, in
+    /// which case it isn't actually possible to construct a buffer to decode all the image data
+    /// into.
+    fn total_bytes(&self) -> u64 {
+        let dimensions = self.dimensions();
+        u64::from(dimensions.0) * u64::from(dimensions.1) * u64::from(self.color_type().bytes_per_pixel())
+    }
+
+    /// Returns the minimum number of bytes that can be efficiently read from this decoder. This may
+    /// be as few as 1 or as many as `total_bytes()`.
+    fn scanline_bytes(&self) -> u64 {
+        self.total_bytes()
+    }
+
+    /// Returns all the bytes in the image.
+    ///
+    /// This function takes a slice of bytes and writes the pixel data of the image into it.
+    /// Although not required, for certain color types callers may want to pass buffers which are
+    /// aligned to 2 or 4 byte boundaries so the slice can be cast to a [u16] or [u32]. To accommodate
+    /// such casts, the returned contents will always be in native endian.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if buf.len() != self.total_bytes().
+    ///
+    /// # Examples
+    ///
+    /// ```no_build
+    /// use zerocopy::{AsBytes, FromBytes};
+    /// fn read_16bit_image(decoder: impl ImageDecoder) -> Vec<u16> {
+    ///     let mut buf: Vec<u16> = vec![0; decoder.total_bytes()/2];
+    ///     decoder.read_image(buf.as_bytes());
+    ///     buf
+    /// }
+    /// ```
+    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+        self.read_image_with_progress(buf, |_| {})
+    }
+
+    /// Same as `read_image` but periodically calls the provided callback to give updates on loading
+    /// progress.
+    fn read_image_with_progress<F: Fn(Progress)>(
+        self,
+        buf: &mut [u8],
+        progress_callback: F,
+    ) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+        let total_bytes = self.total_bytes() as usize;
+        let scanline_bytes = self.scanline_bytes() as usize;
+        // Batch small scanlines into roughly 4 KiB reads to limit call overhead.
+        let target_read_size = if scanline_bytes < 4096 {
+            (4096 / scanline_bytes) * scanline_bytes
+        } else {
+            scanline_bytes
+        };
+
+        let mut reader = self.into_reader()?;
+
+        let mut bytes_read = 0;
+        while bytes_read < total_bytes {
+            let read_size = target_read_size.min(total_bytes - bytes_read);
+            reader.read_exact(&mut buf[bytes_read..][..read_size])?;
+            bytes_read += read_size;
+
+            progress_callback(Progress {
+                current: bytes_read as u64,
+                total: total_bytes as u64,
+            });
+        }
+
+        Ok(())
+    }
+}
+
/// Extension trait for decoders that can read arbitrary rectangular sections of an image.
pub trait ImageDecoderExt<'a>: ImageDecoder<'a> + Sized {
    /// Read a rectangular section of the image.
    ///
    /// The rectangle has its top-left corner at `(x, y)` and spans `width` x `height`
    /// pixels; the decoded pixel data is written into `buf`.
    fn read_rect(
        &mut self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
        buf: &mut [u8],
    ) -> ImageResult<()> {
        // Delegate to the progress-reporting variant with a no-op callback.
        self.read_rect_with_progress(x, y, width, height, buf, |_|{})
    }

    /// Read a rectangular section of the image, periodically reporting progress.
    fn read_rect_with_progress<F: Fn(Progress)>(
        &mut self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
        buf: &mut [u8],
        progress_callback: F,
    ) -> ImageResult<()>;
}
+
/// Trait implemented by decoders of animated formats, yielding the animation frame by frame.
pub trait AnimationDecoder<'a> {
    /// Consume the decoder producing a series of frames.
    fn into_frames(self) -> Frames<'a>;
}
+
/// The trait all encoders implement
pub trait ImageEncoder {
    /// Writes all the bytes in an image to the encoder.
    ///
    /// This function takes a slice of bytes of the pixel data of the image
    /// and encodes them. Unlike particular format encoders inherent impl encode
    /// methods where endianness is not specified, here image data bytes should
    /// always be in native endian. The implementor will reorder the endianness
    /// as necessary for the target encoding format.
    ///
    /// See also `ImageDecoder::read_image` which reads byte buffers into
    /// native endian.
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ColorType,
    ) -> ImageResult<()>;
}
+
/// Immutable pixel iterator
///
/// Yields `(x, y, pixel)` tuples in row-major order over a `width` x `height`
/// region of `image`.
pub struct Pixels<'a, I: ?Sized + 'a> {
    image: &'a I,
    // Position of the next pixel to yield.
    x: u32,
    y: u32,
    // Extent of the region being iterated.
    width: u32,
    height: u32,
}
+
+impl<'a, I: GenericImageView> Iterator for Pixels<'a, I> {
+ type Item = (u32, u32, I::Pixel);
+
+ fn next(&mut self) -> Option<(u32, u32, I::Pixel)> {
+ if self.x >= self.width {
+ self.x = 0;
+ self.y += 1;
+ }
+
+ if self.y >= self.height {
+ None
+ } else {
+ let pixel = self.image.get_pixel(self.x, self.y);
+ let p = (self.x, self.y, pixel);
+
+ self.x += 1;
+
+ Some(p)
+ }
+ }
+}
+
/// Trait to inspect an image.
pub trait GenericImageView {
    /// The type of pixel.
    type Pixel: Pixel;

    /// Underlying image type. This is mainly used by SubImages in order to
    /// always have a reference to the original image. This allows for less
    /// indirections and it eases the use of nested SubImages.
    type InnerImageView: GenericImageView<Pixel = Self::Pixel>;

    /// The width and height of this image.
    fn dimensions(&self) -> (u32, u32);

    /// The width of this image.
    fn width(&self) -> u32 {
        let (w, _) = self.dimensions();
        w
    }

    /// The height of this image.
    fn height(&self) -> u32 {
        let (_, h) = self.dimensions();
        h
    }

    /// The bounding rectangle of this image.
    fn bounds(&self) -> (u32, u32, u32, u32);

    /// Returns true if this x, y coordinate is contained inside the image.
    fn in_bounds(&self, x: u32, y: u32) -> bool {
        let (ix, iy, iw, ih) = self.bounds();
        // NOTE(review): `ix + iw` / `iy + ih` can overflow u32 when a view's
        // offset plus extent exceeds u32::MAX — confirm callers keep bounds small.
        x >= ix && x < ix + iw && y >= iy && y < iy + ih
    }

    /// Returns the pixel located at (x, y)
    ///
    /// # Panics
    ///
    /// Panics if `(x, y)` is out of bounds.
    ///
    /// TODO: change this signature to &P
    fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel;

    /// Returns the pixel located at (x, y)
    ///
    /// This function can be implemented in a way that ignores bounds checking.
    unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
        // Default simply falls back to the bounds-checked accessor.
        self.get_pixel(x, y)
    }

    /// Returns an Iterator over the pixels of this image.
    /// The iterator yields the coordinates of each pixel
    /// along with their value
    fn pixels(&self) -> Pixels<Self> {
        let (width, height) = self.dimensions();

        Pixels {
            image: self,
            x: 0,
            y: 0,
            width,
            height,
        }
    }

    /// Returns a reference to the underlying image.
    fn inner(&self) -> &Self::InnerImageView;

    /// Returns an subimage that is an immutable view into this image.
    /// You can use [`GenericImage::sub_image`] if you need a mutable view instead.
    fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
        SubImage::new(self.inner(), x, y, width, height)
    }
}
+
+/// A trait for manipulating images.
+pub trait GenericImage: GenericImageView {
+ /// Underlying image type. This is mainly used by SubImages in order to
+ /// always have a reference to the original image. This allows for less
+ /// indirections and it eases the use of nested SubImages.
+ type InnerImage: GenericImage<Pixel = Self::Pixel>;
+
+ /// Gets a reference to the mutable pixel at location `(x, y)`
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of bounds.
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel;
+
+ /// Put a pixel at location (x, y)
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of bounds.
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
+
+ /// Puts a pixel at location (x, y)
+ ///
+ /// This function can be implemented in a way that ignores bounds checking.
+ unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.put_pixel(x, y, pixel);
+ }
+
+ /// Put a pixel at location (x, y), taking into account alpha channels
+ ///
+ /// DEPRECATED: This method will be removed. Blend the pixel directly instead.
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
+
+ /// Copies all of the pixels from another image into this image.
+ ///
+ /// The other image is copied with the top-left corner of the
+ /// other image placed at (x, y).
+ ///
+ /// In order to copy only a piece of the other image, use [`GenericImageView::view`].
+ ///
+ /// # Returns
+ /// Returns an error if the image is too large to be copied at the given position
+ fn copy_from<O>(&mut self, other: &O, x: u32, y: u32) -> ImageResult<()>
+ where
+ O: GenericImageView<Pixel = Self::Pixel>,
+ {
+ // Do bounds checking here so we can use the non-bounds-checking
+ // functions to copy pixels.
+ if self.width() < other.width() + x || self.height() < other.height() + y {
+ return Err(ImageError::DimensionError);
+ }
+
+ for i in 0..other.width() {
+ for k in 0..other.height() {
+ let p = other.get_pixel(i, k);
+ self.put_pixel(i + x, k + y, p);
+ }
+ }
+ Ok(())
+ }
+
+ /// Copies all of the pixels from one part of this image to another part of this image.
+ ///
+ /// The destination rectangle of the copy is specified with the top-left corner placed at (x, y).
+ ///
+ /// # Returns
+ /// `true` if the copy was successful, `false` if the image could not
+ /// be copied due to size constraints.
+ fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
+ let Rect { x: sx, y: sy, width, height } = source;
+ let dx = x;
+ let dy = y;
+ assert!(sx < self.width() && dx < self.width());
+ assert!(sy < self.height() && dy < self.height());
+ if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
+ return false;
+ }
+ // since `.rev()` creates a new dype we would either have to go with dynamic dispatch for the ranges
+ // or have quite a lot of code bloat. A macro gives us static dispatch with less visible bloat.
+ macro_rules! copy_within_impl_ {
+ ($xiter:expr, $yiter:expr) => {
+ for y in $yiter {
+ let sy = sy + y;
+ let dy = dy + y;
+ for x in $xiter {
+ let sx = sx + x;
+ let dx = dx + x;
+ let pixel = self.get_pixel(sx, sy);
+ self.put_pixel(dx, dy, pixel);
+ }
+ }
+ };
+ }
+ // check how target and source rectangles relate to each other so we dont overwrite data before we copied it.
+ match (sx < dx, sy < dy) {
+ (true, true) => copy_within_impl_!((0..width).rev(), (0..height).rev()),
+ (true, false) => copy_within_impl_!((0..width).rev(), 0..height),
+ (false, true) => copy_within_impl_!(0..width, (0..height).rev()),
+ (false, false) => copy_within_impl_!(0..width, 0..height),
+ }
+ true
+ }
+
+ /// Returns a mutable reference to the underlying image.
+ fn inner_mut(&mut self) -> &mut Self::InnerImage;
+
+ /// Returns a mutable subimage that is a view into this image.
+ /// If you want an immutable subimage instead, use [`GenericImageView::view`]
+ fn sub_image(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ ) -> SubImage<&mut Self::InnerImage> {
+ SubImage::new(self.inner_mut(), x, y, width, height)
+ }
+}
+
/// A View into another image
///
/// Instances of this struct can be created using:
/// - [`GenericImage::sub_image`] to create a mutable view,
/// - [`GenericImageView::view`] to create an immutable view,
/// - [`SubImage::new`] to instantiate the struct directly.
pub struct SubImage<I> {
    image: I,
    // Top-left corner of the view within `image`.
    xoffset: u32,
    yoffset: u32,
    // Extent of the view; despite the names these hold width and height, not strides.
    xstride: u32,
    ystride: u32,
}
+
/// Alias to access Pixel behind a reference
type DerefPixel<I> = <<I as Deref>::Target as GenericImageView>::Pixel;

/// Alias to access Subpixel behind a reference
type DerefSubpixel<I> = <DerefPixel<I> as Pixel>::Subpixel;
+
+impl<I> SubImage<I> {
+ /// Construct a new subimage
+ pub fn new(image: I, x: u32, y: u32, width: u32, height: u32) -> SubImage<I> {
+ SubImage {
+ image,
+ xoffset: x,
+ yoffset: y,
+ xstride: width,
+ ystride: height,
+ }
+ }
+
+ /// Change the coordinates of this subimage.
+ pub fn change_bounds(&mut self, x: u32, y: u32, width: u32, height: u32) {
+ self.xoffset = x;
+ self.yoffset = y;
+ self.xstride = width;
+ self.ystride = height;
+ }
+
+ /// Convert this subimage to an ImageBuffer
+ pub fn to_image(&self) -> ImageBuffer<DerefPixel<I>, Vec<DerefSubpixel<I>>>
+ where
+ I: Deref,
+ I::Target: GenericImage + 'static,
+ {
+ let mut out = ImageBuffer::new(self.xstride, self.ystride);
+ let borrowed = self.image.deref();
+
+ for y in 0..self.ystride {
+ for x in 0..self.xstride {
+ let p = borrowed.get_pixel(x + self.xoffset, y + self.yoffset);
+ out.put_pixel(x, y, p);
+ }
+ }
+
+ out
+ }
+}
+
#[allow(deprecated)]
impl<I> GenericImageView for SubImage<I>
where
    I: Deref,
    I::Target: GenericImageView + Sized,
{
    type Pixel = DerefPixel<I>;
    type InnerImageView = I::Target;

    fn dimensions(&self) -> (u32, u32) {
        // A view's dimensions are its extent, not the inner image's.
        (self.xstride, self.ystride)
    }

    fn bounds(&self) -> (u32, u32, u32, u32) {
        (self.xoffset, self.yoffset, self.xstride, self.ystride)
    }

    fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
        // Translate view coordinates into inner-image coordinates.
        self.image.get_pixel(x + self.xoffset, y + self.yoffset)
    }

    fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
        // Nested views are flattened: the new view references the inner image
        // directly with combined offsets, avoiding chains of indirection.
        let x = self.xoffset + x;
        let y = self.yoffset + y;
        SubImage::new(self.inner(), x, y, width, height)
    }

    fn inner(&self) -> &Self::InnerImageView {
        &self.image
    }
}
+
#[allow(deprecated)]
impl<I> GenericImage for SubImage<I>
where
    I: DerefMut,
    I::Target: GenericImage + Sized,
{
    type InnerImage = I::Target;

    fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
        // Translate view coordinates into inner-image coordinates.
        self.image.get_pixel_mut(x + self.xoffset, y + self.yoffset)
    }

    fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
        self.image
            .put_pixel(x + self.xoffset, y + self.yoffset, pixel)
    }

    /// DEPRECATED: This method will be removed. Blend the pixel directly instead.
    fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
        self.image
            .blend_pixel(x + self.xoffset, y + self.yoffset, pixel)
    }

    fn sub_image(
        &mut self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> SubImage<&mut Self::InnerImage> {
        // Nested sub-images are flattened onto the inner image with combined offsets.
        let x = self.xoffset + x;
        let y = self.yoffset + y;
        SubImage::new(self.inner_mut(), x, y, width, height)
    }

    fn inner_mut(&mut self) -> &mut Self::InnerImage {
        &mut self.image
    }
}
+
// Unit tests for the generic image traits: alpha blending, bounds checks,
// sub-image views, rectangular loading, format detection, and copy_within.
#[cfg(test)]
mod tests {
    use std::io;
    use std::path::Path;

    use super::{ColorType, ImageDecoder, ImageResult, GenericImage, GenericImageView, load_rect, ImageFormat};
    use crate::buffer::{GrayImage, ImageBuffer};
    use crate::color::Rgba;
    use crate::math::Rect;

    #[test]
    /// Test that alpha blending works as expected
    fn test_image_alpha_blending() {
        let mut target = ImageBuffer::new(1, 1);
        target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
        assert!(*target.get_pixel(0, 0) == Rgba([255, 0, 0, 255]));
        target.blend_pixel(0, 0, Rgba([0, 255, 0, 255]));
        assert!(*target.get_pixel(0, 0) == Rgba([0, 255, 0, 255]));

        // Blending an alpha channel onto a solid background
        target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
        assert!(*target.get_pixel(0, 0) == Rgba([127, 127, 0, 255]));

        // Blending two alpha channels
        target.put_pixel(0, 0, Rgba([0, 255, 0, 127]));
        target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
        assert!(*target.get_pixel(0, 0) == Rgba([169, 85, 0, 190]));
    }

    #[test]
    fn test_in_bounds() {
        let mut target = ImageBuffer::new(2, 2);
        target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));

        assert!(target.in_bounds(0, 0));
        assert!(target.in_bounds(1, 0));
        assert!(target.in_bounds(0, 1));
        assert!(target.in_bounds(1, 1));

        assert!(!target.in_bounds(2, 0));
        assert!(!target.in_bounds(0, 2));
        assert!(!target.in_bounds(2, 2));
    }

    #[test]
    fn test_can_subimage_clone_nonmut() {
        let mut source = ImageBuffer::new(3, 3);
        source.put_pixel(1, 1, Rgba([255u8, 0, 0, 255]));

        // A non-mutable copy of the source image
        let source = source.clone();

        // Clone a view into non-mutable to a separate buffer
        let cloned = source.view(1, 1, 1, 1).to_image();

        assert!(cloned.get_pixel(0, 0) == source.get_pixel(1, 1));
    }

    #[test]
    fn test_can_nest_views() {
        let mut source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));

        {
            let mut sub1 = source.sub_image(0, 0, 2, 2);
            let mut sub2 = sub1.sub_image(1, 1, 1, 1);
            sub2.put_pixel(0, 0, Rgba([0, 0, 0, 0]));
        }

        assert_eq!(*source.get_pixel(1, 1), Rgba([0, 0, 0, 0]));

        let view1 = source.view(0, 0, 2, 2);
        assert_eq!(*source.get_pixel(1, 1), view1.get_pixel(1, 1));

        let view2 = view1.view(1, 1, 1, 1);
        assert_eq!(*source.get_pixel(1, 1), view2.get_pixel(0, 0));
    }

    #[test]
    fn test_load_rect() {
        // Fake 5x5 grayscale decoder whose "file" is the DATA array below;
        // `scanline_bytes` is configurable to exercise all chunking paths.
        struct MockDecoder {scanline_number: u64, scanline_bytes: u64}
        impl<'a> ImageDecoder<'a> for MockDecoder {
            type Reader = Box<dyn io::Read>;
            fn dimensions(&self) -> (u32, u32) {(5, 5)}
            fn color_type(&self) -> ColorType { ColorType::L8 }
            fn into_reader(self) -> ImageResult<Self::Reader> {unimplemented!()}
            fn scanline_bytes(&self) -> u64 { self.scanline_bytes }
        }

        const DATA: [u8; 25] = [0, 1, 2, 3, 4,
                                5, 6, 7, 8, 9,
                                10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19,
                                20, 21, 22, 23, 24];

        fn seek_scanline(m: &mut MockDecoder, n: u64) -> io::Result<()> {
            m.scanline_number = n;
            Ok(())
        }
        fn read_scanline(m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<usize> {
            let bytes_read = m.scanline_number * m.scanline_bytes;
            if bytes_read >= 25 {
                return Ok(0);
            }

            let len = m.scanline_bytes.min(25 - bytes_read);
            buf[..(len as usize)].copy_from_slice(&DATA[(bytes_read as usize)..][..(len as usize)]);
            m.scanline_number += 1;
            Ok(len as usize)
        }

        // Exercise scanline sizes both smaller and larger than the image.
        for scanline_bytes in 1..30 {
            let mut output = [0u8; 26];

            load_rect(0, 0, 5, 5, &mut output, |_|{},
                      &mut MockDecoder{scanline_number:0, scanline_bytes},
                      seek_scanline, read_scanline).unwrap();
            assert_eq!(output[0..25], DATA);
            assert_eq!(output[25], 0);

            output = [0u8; 26];
            load_rect(3, 2, 1, 1, &mut output, |_|{},
                      &mut MockDecoder{scanline_number:0, scanline_bytes},
                      seek_scanline, read_scanline).unwrap();
            assert_eq!(output[0..2], [13, 0]);

            output = [0u8; 26];
            load_rect(3, 2, 2, 2, &mut output, |_|{},
                      &mut MockDecoder{scanline_number:0, scanline_bytes},
                      seek_scanline, read_scanline).unwrap();
            assert_eq!(output[0..5], [13, 14, 18, 19, 0]);


            output = [0u8; 26];
            load_rect(1, 1, 2, 4, &mut output, |_|{},
                      &mut MockDecoder{scanline_number:0, scanline_bytes},
                      seek_scanline, read_scanline).unwrap();
            assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);

        }
    }

    #[test]
    fn test_image_format_from_path() {
        fn from_path(s: &str) -> ImageResult<ImageFormat> {
            ImageFormat::from_path(Path::new(s))
        }
        // Extension matching is case-insensitive.
        assert_eq!(from_path("./a.jpg").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.jpeg").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.JPEG").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.pNg").unwrap(), ImageFormat::Png);
        assert_eq!(from_path("./a.gif").unwrap(), ImageFormat::Gif);
        assert_eq!(from_path("./a.webp").unwrap(), ImageFormat::WebP);
        assert_eq!(from_path("./a.tiFF").unwrap(), ImageFormat::Tiff);
        assert_eq!(from_path("./a.tif").unwrap(), ImageFormat::Tiff);
        assert_eq!(from_path("./a.tga").unwrap(), ImageFormat::Tga);
        assert_eq!(from_path("./a.dds").unwrap(), ImageFormat::Dds);
        assert_eq!(from_path("./a.bmp").unwrap(), ImageFormat::Bmp);
        assert_eq!(from_path("./a.Ico").unwrap(), ImageFormat::Ico);
        assert_eq!(from_path("./a.hdr").unwrap(), ImageFormat::Hdr);
        assert_eq!(from_path("./a.pbm").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.pAM").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.Ppm").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.pgm").unwrap(), ImageFormat::Pnm);
        assert!(from_path("./a.txt").is_err());
        assert!(from_path("./a").is_err());
    }

    #[test]
    fn test_generic_image_copy_within_oob() {
        let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
        // All of these rectangles extend past the 4x4 image, so copy_within must refuse.
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 5, height: 4 }, 0, 0));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 5 }, 0, 0));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 0, width: 4, height: 4 }, 0, 0));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 1, 0));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 1, width: 4, height: 4 }, 0, 0));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 0, 1));
        assert!(!image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 1, width: 4, height: 4 }, 0, 0));
    }

    #[test]
    fn test_generic_image_copy_within_tl() {
        let data = &[
            00, 01, 02, 03,
            04, 05, 06, 07,
            08, 09, 10, 11,
            12, 13, 14, 15
        ];
        let expected = [
            00, 01, 02, 03,
            04, 00, 01, 02,
            08, 04, 05, 06,
            12, 08, 09, 10,
        ];
        let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
        assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 0, width: 3, height: 3 }, 1, 1));
        assert_eq!(&image.into_raw(), &expected);
    }

    #[test]
    fn test_generic_image_copy_within_tr() {
        let data = &[
            00, 01, 02, 03,
            04, 05, 06, 07,
            08, 09, 10, 11,
            12, 13, 14, 15
        ];
        let expected = [
            00, 01, 02, 03,
            01, 02, 03, 07,
            05, 06, 07, 11,
            09, 10, 11, 15
        ];
        let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
        assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 0, width: 3, height: 3 }, 0, 1));
        assert_eq!(&image.into_raw(), &expected);
    }

    #[test]
    fn test_generic_image_copy_within_bl() {
        let data = &[
            00, 01, 02, 03,
            04, 05, 06, 07,
            08, 09, 10, 11,
            12, 13, 14, 15
        ];
        let expected = [
            00, 04, 05, 06,
            04, 08, 09, 10,
            08, 12, 13, 14,
            12, 13, 14, 15
        ];
        let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
        assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 0, y: 1, width: 3, height: 3 }, 1, 0));
        assert_eq!(&image.into_raw(), &expected);
    }

    #[test]
    fn test_generic_image_copy_within_br() {
        let data = &[
            00, 01, 02, 03,
            04, 05, 06, 07,
            08, 09, 10, 11,
            12, 13, 14, 15
        ];
        let expected = [
            05, 06, 07, 03,
            09, 10, 11, 07,
            13, 14, 15, 11,
            12, 13, 14, 15
        ];
        let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
        assert!(image.sub_image(0, 0, 4, 4).copy_within(Rect { x: 1, y: 1, width: 3, height: 3 }, 0, 0));
        assert_eq!(&image.into_raw(), &expected);
    }
}
diff --git a/third_party/rust/image/src/imageops/affine.rs b/third_party/rust/image/src/imageops/affine.rs
new file mode 100644
index 0000000000..adaa8b995b
--- /dev/null
+++ b/third_party/rust/image/src/imageops/affine.rs
@@ -0,0 +1,387 @@
+//! Functions for performing affine transformations.
+
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::image::{GenericImage, GenericImageView};
+
+/// Rotate an image 90 degrees clockwise.
+pub fn rotate90<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+ where I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(height, width);
+ let _ = rotate90_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 180 degrees clockwise.
+pub fn rotate180<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+ where I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = rotate180_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 270 degrees clockwise.
+pub fn rotate270<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+ where I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(height, width);
+ let _ = rotate270_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 90 degrees clockwise and put the result into the destination [`ImageBuffer`].
+pub fn rotate90_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>
+) -> crate::ImageResult<()> where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != h1 || h0 != w1 {
+ return Err(crate::ImageError::DimensionError);
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(h0 - y - 1, x, p);
+ }
+ }
+ Ok(())
+}
+
+/// Rotate an image 180 degrees clockwise and put the result into the destination [`ImageBuffer`].
+pub fn rotate180_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>
+) -> crate::ImageResult<()> where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(crate::ImageError::DimensionError);
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(w0 - x - 1, h0 - y - 1, p);
+ }
+ }
+ Ok(())
+}
+
+/// Rotate an image 270 degrees clockwise and put the result into the destination [`ImageBuffer`].
+pub fn rotate270_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>
+) -> crate::ImageResult<()> where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != h1 || h0 != w1 {
+ return Err(crate::ImageError::DimensionError);
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(y, w0 - x - 1, p);
+ }
+ }
+ Ok(())
+}
+
+/// Flip an image horizontally
+pub fn flip_horizontal<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+ where I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = flip_horizontal_in(image, &mut out);
+ out
+}
+
+/// Flip an image vertically
+pub fn flip_vertical<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+ where I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = flip_vertical_in(image, &mut out);
+ out
+}
+
+/// Flip an image horizontally and put the result into the destination [`ImageBuffer`].
+pub fn flip_horizontal_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>
+) -> crate::ImageResult<()> where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(crate::ImageError::DimensionError);
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(w0 - x - 1, y, p);
+ }
+ }
+ Ok(())
+}
+
+/// Flip an image vertically and put the result into the destination [`ImageBuffer`].
+pub fn flip_vertical_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>
+) -> crate::ImageResult<()> where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(crate::ImageError::DimensionError);
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(x, h0 - 1 - y, p);
+ }
+ }
+ Ok(())
+}
+
/// Rotate an image 180 degrees clockwise in place.
///
/// Works by swapping each pixel in the top half with its point reflection in
/// the bottom half; for odd heights the middle row is then mirrored on its own.
pub fn rotate180_in_place<I: GenericImage>(image: &mut I) {
    let (width, height) = image.dimensions();

    for y in 0..height / 2 {
        for x in 0..width {
            let p = image.get_pixel(x, y);

            // The 180-degree image of (x, y) is (width - x - 1, height - y - 1).
            let x2 = width - x - 1;
            let y2 = height - y - 1;

            let p2 = image.get_pixel(x2, y2);
            image.put_pixel(x, y, p2);
            image.put_pixel(x2, y2, p);
        }
    }

    if height % 2 != 0 {
        // Odd height: the middle row maps onto itself, so reverse it in place.
        let middle = height / 2;

        for x in 0..width / 2 {
            let p = image.get_pixel(x, middle);
            let x2 = width - x - 1;

            let p2 = image.get_pixel(x2, middle);
            image.put_pixel(x, middle, p2);
            image.put_pixel(x2, middle, p);
        }
    }
}
+
+/// Flip an image horizontally in place.
+pub fn flip_horizontal_in_place<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height {
+ for x in 0..width / 2 {
+ let x2 = width - x - 1;
+ let p2 = image.get_pixel(x2, y);
+ let p = image.get_pixel(x, y);
+ image.put_pixel(x2, y, p);
+ image.put_pixel(x, y, p2);
+ }
+ }
+}
+
+/// Flip an image vertically in place.
+pub fn flip_vertical_in_place<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height / 2 {
+ for x in 0..width {
+ let y2 = height - y - 1;
+ let p2 = image.get_pixel(x, y2);
+ let p = image.get_pixel(x, y);
+ image.put_pixel(x, y2, p);
+ image.put_pixel(x, y, p2);
+ }
+ }
+}
+
// Unit tests for the affine transformations: rotations (out-of-place and
// in-place) and horizontal/vertical flips, using small grayscale fixtures.
#[cfg(test)]
mod test {
    use super::{
        flip_horizontal, flip_horizontal_in_place, flip_vertical, flip_vertical_in_place,
        rotate180, rotate180_in_place, rotate270, rotate90,
    };
    use crate::buffer::{GrayImage, ImageBuffer, Pixel};
    use crate::image::GenericImage;

    // Compares two images pixel-by-pixel and panics with the first few
    // mismatching coordinates/values when they differ.
    macro_rules! assert_pixels_eq {
        ($actual:expr, $expected:expr) => {{
            let actual_dim = $actual.dimensions();
            let expected_dim = $expected.dimensions();

            if actual_dim != expected_dim {
                panic!(
                    "dimensions do not match. \
                     actual: {:?}, expected: {:?}",
                    actual_dim, expected_dim
                )
            }

            let diffs = pixel_diffs($actual, $expected);

            if !diffs.is_empty() {
                let mut err = "pixels do not match. ".to_string();

                let diff_messages = diffs
                    .iter()
                    .take(5)
                    .map(|d| format!("\nactual: {:?}, expected {:?} ", d.0, d.1))
                    .collect::<Vec<_>>()
                    .join("");

                err.push_str(&diff_messages);
                // NOTE(review): `panic!(err)` with a String argument is an error
                // as of the 2021 edition — fine on this crate's edition.
                panic!(err)
            }
        }};
    }

    #[test]
    fn test_rotate90() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(2, 3, vec![10u8, 00u8, 11u8, 01u8, 12u8, 02u8]).unwrap();

        assert_pixels_eq!(&rotate90(&image), &expected);
    }

    #[test]
    fn test_rotate180() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 02u8, 01u8, 00u8]).unwrap();

        assert_pixels_eq!(&rotate180(&image), &expected);
    }

    #[test]
    fn test_rotate270() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(2, 3, vec![02u8, 12u8, 01u8, 11u8, 00u8, 10u8]).unwrap();

        assert_pixels_eq!(&rotate270(&image), &expected);
    }

    #[test]
    fn test_rotate180_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 02u8, 01u8, 00u8]).unwrap();

        rotate180_in_place(&mut image);

        assert_pixels_eq!(&image, &expected);
    }

    #[test]
    fn test_flip_horizontal() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![02u8, 01u8, 00u8, 12u8, 11u8, 10u8]).unwrap();

        assert_pixels_eq!(&flip_horizontal(&image), &expected);
    }

    #[test]
    fn test_flip_vertical() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 00u8, 01u8, 02u8]).unwrap();

        assert_pixels_eq!(&flip_vertical(&image), &expected);
    }

    #[test]
    fn test_flip_horizontal_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![02u8, 01u8, 00u8, 12u8, 11u8, 10u8]).unwrap();

        flip_horizontal_in_place(&mut image);

        assert_pixels_eq!(&image, &expected);
    }

    #[test]
    fn test_flip_vertical_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();

        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 00u8, 01u8, 02u8]).unwrap();

        flip_vertical_in_place(&mut image);

        assert_pixels_eq!(&image, &expected);
    }

    // Collects all coordinate/pixel pairs where the two images disagree.
    fn pixel_diffs<I, J, P>(left: &I, right: &J) -> Vec<((u32, u32, P), (u32, u32, P))>
    where
        I: GenericImage<Pixel = P>,
        J: GenericImage<Pixel = P>,
        P: Pixel + Eq,
    {
        left.pixels()
            .zip(right.pixels())
            .filter(|&(p, q)| p != q)
            .collect::<Vec<_>>()
    }
}
diff --git a/third_party/rust/image/src/imageops/colorops.rs b/third_party/rust/image/src/imageops/colorops.rs
new file mode 100644
index 0000000000..2de5194957
--- /dev/null
+++ b/third_party/rust/image/src/imageops/colorops.rs
@@ -0,0 +1,325 @@
+//! Functions for altering and converting the color of pixelbufs
+
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::color::{Luma, Rgba};
+use crate::image::{GenericImage, GenericImageView};
+use crate::math::nq;
+use crate::math::utils::clamp;
+use num_traits::{Num, NumCast};
+use std::f64::consts::PI;
+use crate::traits::Primitive;
+
+type Subpixel<I> = <<I as GenericImageView>::Pixel as Pixel>::Subpixel;
+
+/// Convert the supplied image to grayscale.
+///
+/// Returns a new single-channel (`Luma`) buffer of the same dimensions,
+/// converting each pixel via `Pixel::to_luma`. The input is not modified.
+pub fn grayscale<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<Luma<Subpixel<I>>, Vec<Subpixel<I>>>
+where
+ Subpixel<I>: 'static,
+ // NOTE(review): this `FromStrRadixErr: 'static` bound looks incidental — it is
+ // never used in the body; presumably it exists to satisfy a trait-resolution
+ // quirk of the `Num` supertrait. Confirm against upstream before removing.
+ <Subpixel<I> as Num>::FromStrRadixErr: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ for y in 0..height {
+ for x in 0..width {
+ let p = image.get_pixel(x, y).to_luma();
+ out.put_pixel(x, y, p);
+ }
+ }
+
+ out
+}
+
+/// Invert each pixel within the supplied image.
+/// This function operates in place.
+///
+/// Delegates per-pixel inversion to `Pixel::invert`; each pixel is read,
+/// inverted, and written back at the same coordinates.
+pub fn invert<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height {
+ for x in 0..width {
+ let mut p = image.get_pixel(x, y);
+ p.invert();
+
+ image.put_pixel(x, y, p);
+ }
+ }
+}
+
+/// Adjust the contrast of the supplied image.
+/// `contrast` is the amount to adjust the contrast by.
+/// Negative values decrease the contrast and positive values increase the contrast.
+///
+/// Returns a new buffer; the input is unmodified. Every channel (including
+/// alpha, since `Pixel::map` visits all channels) is scaled about the midpoint
+/// 0.5 in normalized [0, 1] space and clamped back to the sample range.
+pub fn contrast<I, P, S>(image: &I, contrast: f32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::max_value();
+ let max: f32 = NumCast::from(max).unwrap();
+
+ // Squared gain factor: contrast == 0 gives percent == 1 (identity),
+ // contrast == -100 gives 0 (flat mid-gray output).
+ let percent = ((100.0 + contrast) / 100.0).powi(2);
+
+ for y in 0..height {
+ for x in 0..width {
+ let f = image.get_pixel(x, y).map(|b| {
+ let c: f32 = NumCast::from(b).unwrap();
+
+ // Normalize, scale the deviation from mid-gray, denormalize.
+ let d = ((c / max - 0.5) * percent + 0.5) * max;
+ let e = clamp(d, 0.0, max);
+
+ NumCast::from(e).unwrap()
+ });
+
+ out.put_pixel(x, y, f);
+ }
+ }
+
+ out
+}
+
+/// Brighten the supplied image.
+/// `value` is the amount to brighten each pixel by.
+/// Negative values decrease the brightness and positive values increase it.
+///
+/// Returns a new buffer; the input is unmodified. Unlike `contrast`, the alpha
+/// channel is passed through untouched (via `map_with_alpha`'s identity closure).
+pub fn brighten<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::max_value();
+ let max: i32 = NumCast::from(max).unwrap();
+
+ for y in 0..height {
+ for x in 0..width {
+ let e = image.get_pixel(x, y).map_with_alpha(
+ |b| {
+ // Saturating add/subtract in i32 space, clamped to the
+ // sample type's valid range before casting back.
+ let c: i32 = NumCast::from(b).unwrap();
+ let d = clamp(c + value, 0, max);
+
+ NumCast::from(d).unwrap()
+ },
+ |alpha| alpha,
+ );
+
+ out.put_pixel(x, y, e);
+ }
+ }
+
+ out
+}
+
+/// Hue rotate the supplied image.
+/// `value` is the degrees to rotate each pixel by.
+/// 0 and 360 do nothing, the rest rotates by the given degree value.
+/// just like the css webkit filter hue-rotate(180)
+///
+/// Returns a new buffer; the input is unmodified. The rotation is applied as a
+/// 3x3 linear matrix over the RGB channels (the CSS/SVG feColorMatrix
+/// hue-rotate coefficients); the fourth channel is clamped and copied through.
+pub fn huerotate<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let angle: f64 = NumCast::from(value).unwrap();
+
+ let cosv = (angle * PI / 180.0).cos();
+ let sinv = (angle * PI / 180.0).sin();
+ let matrix: [f64; 9] = [
+ // Reds
+ 0.213 + cosv * 0.787 - sinv * 0.213,
+ 0.715 - cosv * 0.715 - sinv * 0.715,
+ 0.072 - cosv * 0.072 + sinv * 0.928,
+ // Greens
+ 0.213 - cosv * 0.213 + sinv * 0.143,
+ 0.715 + cosv * 0.285 + sinv * 0.140,
+ 0.072 - cosv * 0.072 - sinv * 0.283,
+ // Blues
+ 0.213 - cosv * 0.213 - sinv * 0.787,
+ 0.715 - cosv * 0.715 + sinv * 0.715,
+ 0.072 + cosv * 0.928 + sinv * 0.072,
+ ];
+ for (x, y, pixel) in out.enumerate_pixels_mut() {
+ let p = image.get_pixel(x, y);
+ let (k1, k2, k3, k4) = p.channels4();
+ let vec: (f64, f64, f64, f64) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ let r = vec.0;
+ let g = vec.1;
+ let b = vec.2;
+
+ let new_r = matrix[0] * r + matrix[1] * g + matrix[2] * b;
+ let new_g = matrix[3] * r + matrix[4] * g + matrix[5] * b;
+ let new_b = matrix[6] * r + matrix[7] * g + matrix[8] * b;
+ // NOTE(review): `max` is hard-coded to 255 even though S is generic over
+ // `Primitive`. For wider sample types (e.g. u16) this clamps every channel
+ // to 255, silently destroying the image; `S::max_value()` (as used by
+ // `contrast`/`brighten` above) would be correct. Vendored diff — do not
+ // fix here; confirm whether upstream has since corrected this.
+ let max = 255f64;
+ let outpixel = Pixel::from_channels(
+ NumCast::from(clamp(new_r, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_g, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_b, 0.0, max)).unwrap(),
+ NumCast::from(clamp(vec.3, 0.0, max)).unwrap(),
+ );
+ *pixel = outpixel;
+ }
+ out
+}
+
+/// A color map: a fixed palette that pixels can be matched against.
+/// Implementors provide both nearest-index lookup and in-place quantization;
+/// `dither` and `index_colors` below drive the dithering/indexing through it.
+pub trait ColorMap {
+ /// The color type on which the map operates on
+ type Color;
+ /// Returns the index of the closest match of `color`
+ /// in the color map.
+ fn index_of(&self, color: &Self::Color) -> usize;
+ /// Maps `color` to the closest color in the color map.
+ fn map_color(&self, color: &mut Self::Color);
+}
+
+/// A bi-level (black/white) color map over 8-bit luma: index 0 is black
+/// (luma <= 127), index 1 is white (luma > 127).
+#[derive(Clone, Copy)]
+pub struct BiLevel;
+
+impl ColorMap for BiLevel {
+ type Color = Luma<u8>;
+
+ #[inline(always)]
+ fn index_of(&self, color: &Luma<u8>) -> usize {
+ // Threshold at the midpoint of the u8 range.
+ let luma = color.0;
+ if luma[0] > 127 {
+ 1
+ } else {
+ 0
+ }
+ }
+
+ #[inline(always)]
+ fn map_color(&self, color: &mut Luma<u8>) {
+ // index 0 -> 0x00, index 1 -> 0xFF.
+ let new_color = 0xFF * self.index_of(color) as u8;
+ let luma = &mut color.0;
+ luma[0] = new_color;
+ }
+}
+
+// Adapts the NeuQuant neural-net quantizer (crate::math::nq) to the ColorMap
+// interface, forwarding to its slice-based index_of/map_pixel methods.
+impl ColorMap for nq::NeuQuant {
+ type Color = Rgba<u8>;
+
+ #[inline(always)]
+ fn index_of(&self, color: &Rgba<u8>) -> usize {
+ self.index_of(color.channels())
+ }
+
+ #[inline(always)]
+ fn map_color(&self, color: &mut Rgba<u8>) {
+ self.map_pixel(color.channels_mut())
+ }
+}
+
+/// Floyd-Steinberg error diffusion: adds `error * factor / 16` to each of the
+/// first three channels of `pixel`, saturating to the u8 range [0, 0xFF].
+/// `factor` is one of the classic Floyd-Steinberg weights (7, 5, 3, 1).
+fn diffuse_err<P: Pixel<Subpixel = u8>>(pixel: &mut P, error: [i16; 3], factor: i16) {
+ for (e, c) in error.iter().zip(pixel.channels_mut().iter_mut()) {
+ // Work in i16 so intermediate over/underflow is representable, then clamp.
+ *c = match <i16 as From<_>>::from(*c) + e * factor / 16 {
+ val if val < 0 => 0,
+ val if val > 0xFF => 0xFF,
+ val => val as u8,
+ }
+ }
+}
+
+// Quantizes the pixel at ($x, $y) through $map in place and writes the
+// per-channel quantization error (old - new, as i16) into $err for the caller
+// to diffuse onto neighbouring pixels. A macro rather than a fn so it can
+// borrow $image both immutably (old value) and mutably (new value) in turn.
+macro_rules! do_dithering(
+ ($map:expr, $image:expr, $err:expr, $x:expr, $y:expr) => (
+ {
+ let old_pixel = $image[($x, $y)];
+ let new_pixel = $image.get_pixel_mut($x, $y);
+ $map.map_color(new_pixel);
+ for ((e, &old), &new) in $err.iter_mut()
+ .zip(old_pixel.channels().iter())
+ .zip(new_pixel.channels().iter())
+ {
+ *e = <i16 as From<_>>::from(old) - <i16 as From<_>>::from(new)
+ }
+ }
+ )
+);
+
+/// Reduces the colors of the image using the supplied `color_map` while applying
+/// Floyd-Steinberg dithering to improve the visual conception
+///
+/// Operates in place. The image interior gets the full 7/16, 3/16, 5/16, 1/16
+/// error distribution; the first/last columns and the last row get the subset
+/// of those weights whose targets stay in bounds.
+///
+/// NOTE(review): the edge handling assumes width >= 2 and height >= 2. For a
+/// width-1 image the final per-row step still diffuses to `x + 1`, which is out
+/// of bounds and will presumably panic in `get_pixel_mut`; for height < 1 the
+/// `0..height - 1` range underflows. Confirm callers never pass degenerate
+/// images, or check upstream for a later fix — vendored diff, not fixed here.
+pub fn dither<Pix, Map>(image: &mut ImageBuffer<Pix, Vec<u8>>, color_map: &Map)
+where
+ Map: ColorMap<Color = Pix>,
+ Pix: Pixel<Subpixel = u8> + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut err: [i16; 3] = [0; 3];
+ // All rows except the last: diffuse downward as well as rightward.
+ for y in 0..height - 1 {
+ // First column: no down-left neighbour.
+ let x = 0;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
+ for x in 1..width - 1 {
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
+ }
+ // Last column: no rightward neighbours.
+ let x = width - 1;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ }
+ // Last row: only rightward diffusion remains.
+ let y = height - 1;
+ let x = 0;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ for x in 1..width - 1 {
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ }
+ // Bottom-right corner: nowhere left to diffuse.
+ let x = width - 1;
+ do_dithering!(color_map, image, err, x, y);
+}
+
+/// Reduces the colors using the supplied `color_map` and returns an image of the indices
+///
+/// Each output pixel is the palette index (`ColorMap::index_of`) of the
+/// corresponding input pixel, stored as `Luma<u8>`. The input is unmodified;
+/// no dithering is applied. Indices above 255 would be truncated by the
+/// `as u8` cast, so the map is presumably expected to hold <= 256 colors.
+pub fn index_colors<Pix, Map>(
+ image: &ImageBuffer<Pix, Vec<u8>>,
+ color_map: &Map,
+) -> ImageBuffer<Luma<u8>, Vec<u8>>
+where
+ Map: ColorMap<Color = Pix>,
+ Pix: Pixel<Subpixel = u8> + 'static,
+{
+ let mut indices = ImageBuffer::new(image.width(), image.height());
+ for (pixel, idx) in image.pixels().zip(indices.pixels_mut()) {
+ *idx = Luma([color_map.index_of(pixel) as u8])
+ }
+ indices
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+ use crate::ImageBuffer;
+
+ #[test]
+ fn test_dither() {
+ // 2x2 all-mid-gray (127) image: BiLevel maps 127 to black, and the
+ // diffused error pushes alternating neighbours over the threshold,
+ // producing the checkerboard asserted below.
+ let mut image = ImageBuffer::from_raw(2, 2, vec![127, 127, 127, 127]).unwrap();
+ let cmap = BiLevel;
+ dither(&mut image, &cmap);
+ assert_eq!(&*image, &[0, 0xFF, 0xFF, 0]);
+ assert_eq!(index_colors(&image, &cmap).into_raw(), vec![0, 1, 1, 0])
+ }
+}
diff --git a/third_party/rust/image/src/imageops/mod.rs b/third_party/rust/image/src/imageops/mod.rs
new file mode 100644
index 0000000000..f0c7fe68c8
--- /dev/null
+++ b/third_party/rust/image/src/imageops/mod.rs
@@ -0,0 +1,219 @@
+//! Image Processing Functions
+use std::cmp;
+
+use crate::image::{GenericImage, GenericImageView, SubImage};
+
+use crate::buffer::Pixel;
+
+pub use self::sample::FilterType;
+
+pub use self::sample::FilterType::{CatmullRom, Gaussian, Lanczos3, Nearest, Triangle};
+
+/// Affine transformations
+pub use self::affine::{
+ flip_horizontal, flip_horizontal_in_place, flip_vertical, flip_vertical_in_place, rotate180,
+ rotate180_in_place, rotate270, rotate90, rotate180_in, rotate90_in, rotate270_in, flip_horizontal_in, flip_vertical_in
+};
+
+/// Image sampling
+pub use self::sample::{blur, filter3x3, resize, thumbnail, unsharpen};
+
+/// Color operations
+pub use self::colorops::{brighten, contrast, dither, grayscale, huerotate, index_colors, invert,
+ BiLevel, ColorMap};
+
+mod affine;
+// Public only because of Rust bug:
+// https://github.com/rust-lang/rust/issues/18241
+pub mod colorops;
+mod sample;
+
+/// Return a mutable view into an image
+///
+/// The requested rectangle is clipped to the image bounds rather than
+/// panicking: `x`/`y` are clamped to the dimensions first, then `width`/
+/// `height` are shrunk so the view cannot extend past the right/bottom edge.
+/// A fully out-of-bounds request therefore yields an empty view.
+pub fn crop<I: GenericImageView>(
+ image: &mut I,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+) -> SubImage<&mut I> {
+ let (iwidth, iheight) = image.dimensions();
+
+ let x = cmp::min(x, iwidth);
+ let y = cmp::min(y, iheight);
+
+ // Safe subtraction: x <= iwidth and y <= iheight after the clamps above.
+ let height = cmp::min(height, iheight - y);
+ let width = cmp::min(width, iwidth - x);
+
+ SubImage::new(image, x, y, width, height)
+}
+
+/// Calculate the region that can be copied from top to bottom.
+///
+/// Given image size of bottom and top image, and a point at which we want to place the top image
+/// onto the bottom image, how large can we be? Have to wary of the following issues:
+/// * Top might be larger than bottom
+/// * Overflows in the computation
+/// * Coordinates could be completely out of bounds
+///
+/// The main idea is to make use of inequalities provided by the nature of `saturating_add` and
+/// `saturating_sub`. These intrinsically validate that all resulting coordinates will be in bounds
+/// for both images.
+///
+/// We want that all these coordinate accesses are safe:
+/// 1. `bottom.get_pixel(x + [0..x_range), y + [0..y_range))`
+/// 2. `top.get_pixel([0..x_range), [0..y_range))`
+///
+/// Proof that the function provides the necessary bounds for width. Note that all unaugmented math
+/// operations are to be read in standard arithmetic, not integer arithmetic. Since no direct
+/// integer arithmetic occurs in the implementation, this is unambiguous.
+///
+/// ```text
+/// Three short notes/lemmata:
+/// - Iff `(a - b) <= 0` then `a.saturating_sub(b) = 0`
+/// - Iff `(a - b) >= 0` then `a.saturating_sub(b) = a - b`
+/// - If `a <= c` then `a.saturating_sub(b) <= c.saturating_sub(b)`
+///
+/// 1.1 We show that if `bottom_width <= x`, then `x_range = 0` therefore `x + [0..x_range)` is empty.
+///
+/// x_range
+/// = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
+/// <= bottom_width.saturating_sub(x)
+///
+/// bottom_width <= x
+/// <==> bottom_width - x <= 0
+/// <==> bottom_width.saturating_sub(x) = 0
+/// ==> x_range <= 0
+/// ==> x_range = 0
+///
+/// 1.2 If `x < bottom_width` then `x + x_range < bottom_width`
+///
+/// x + x_range
+/// <= x + bottom_width.saturating_sub(x)
+/// = x + (bottom_width - x)
+/// = bottom_width
+///
+/// 2. We show that `x_range <= top_width`
+///
+/// x_range
+/// = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
+/// <= top_width.saturating_add(x).saturating_sub(x)
+/// <= (top_width + x).saturating_sub(x)
+/// = top_width (due to `top_width >= 0` and `x >= 0`)
+/// ```
+///
+/// Proof is the same for height.
+pub fn overlay_bounds(
+ (bottom_width, bottom_height): (u32, u32),
+ (top_width, top_height): (u32, u32),
+ x: u32,
+ y: u32
+)
+ -> (u32, u32)
+{
+ let x_range = top_width.saturating_add(x) // Calculate max coordinate
+ .min(bottom_width) // Restrict to lower width
+ .saturating_sub(x); // Determinate length from start `x`
+ let y_range = top_height.saturating_add(y)
+ .min(bottom_height)
+ .saturating_sub(y);
+ (x_range, y_range)
+}
+
+/// Overlay an image at a given coordinate (x, y)
+///
+/// Alpha-blends `top` onto `bottom` (via `Pixel::blend`), so transparent top
+/// pixels leave the bottom visible. The copied region is clipped by
+/// `overlay_bounds`, so out-of-range coordinates are safe and simply copy
+/// nothing. Compare `replace`, which overwrites without blending.
+pub fn overlay<I, J>(bottom: &mut I, top: &J, x: u32, y: u32)
+where
+ I: GenericImage,
+ J: GenericImageView<Pixel = I::Pixel>,
+{
+ let bottom_dims = bottom.dimensions();
+ let top_dims = top.dimensions();
+
+ // Crop our top image if we're going out of bounds
+ let (range_width, range_height) = overlay_bounds(bottom_dims, top_dims, x, y);
+
+ for top_y in 0..range_height {
+ for top_x in 0..range_width {
+ let p = top.get_pixel(top_x, top_y);
+ let mut bottom_pixel = bottom.get_pixel(x + top_x, y + top_y);
+ bottom_pixel.blend(&p);
+
+ bottom.put_pixel(x + top_x, y + top_y, bottom_pixel);
+ }
+ }
+}
+
+/// Replace the contents of an image at a given coordinate (x, y)
+///
+/// Like `overlay`, but copies `top`'s pixels verbatim instead of alpha-
+/// blending them. The region is clipped by `overlay_bounds`, so out-of-range
+/// coordinates are safe.
+pub fn replace<I, J>(bottom: &mut I, top: &J, x: u32, y: u32)
+where
+ I: GenericImage,
+ J: GenericImageView<Pixel = I::Pixel>,
+{
+ let bottom_dims = bottom.dimensions();
+ let top_dims = top.dimensions();
+
+ // Crop our top image if we're going out of bounds
+ let (range_width, range_height) = overlay_bounds(bottom_dims, top_dims, x, y);
+
+ for top_y in 0..range_height {
+ for top_x in 0..range_width {
+ let p = top.get_pixel(top_x, top_y);
+ bottom.put_pixel(x + top_x, y + top_y, p);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::overlay;
+ use crate::buffer::ImageBuffer;
+ use crate::color::Rgb;
+
+ #[test]
+ /// Test that images written into other images works
+ fn test_image_in_image() {
+ // 16x16 red source onto a 32x32 black target at the origin: pixels
+ // inside [0,16) are red, the first pixels outside remain black.
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(16, 16, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 0, 0);
+ assert!(*target.get_pixel(0, 0) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(15, 0) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(16, 0) == Rgb([0u8, 0, 0]));
+ assert!(*target.get_pixel(0, 15) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(0, 16) == Rgb([0u8, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written outside of a frame doesn't blow up
+ fn test_image_in_image_outside_of_bounds() {
+ // Source is as large as the target but offset by (1, 1); the bottom/
+ // right row of the source must be clipped rather than panic.
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 1, 1);
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(31, 31) == Rgb([255u8, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written to coordinates out of the frame doesn't blow up
+ /// (issue came up in #848)
+ fn test_image_outside_image_no_wrap_around() {
+ // Placement entirely past the right/bottom edge: nothing is copied.
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 33, 33);
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(31, 31) == Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written to coordinates with overflow works
+ fn test_image_coordinate_overflow() {
+ let mut target = ImageBuffer::new(16, 16);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ // Overflows to 'sane' coordinates but top is larger than bot.
+ overlay(&mut target, &source, u32::max_value() - 31, u32::max_value() - 31);
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(15, 15) == Rgb([0, 0, 0]));
+ }
+}
diff --git a/third_party/rust/image/src/imageops/sample.rs b/third_party/rust/image/src/imageops/sample.rs
new file mode 100644
index 0000000000..6f8a76da17
--- /dev/null
+++ b/third_party/rust/image/src/imageops/sample.rs
@@ -0,0 +1,873 @@
+//! Functions and filters for the sampling of pixels.
+
+// See http://cs.brown.edu/courses/cs123/lectures/08_Image_Processing_IV.pdf
+// for some of the theory behind image scaling and convolution
+
+use std::f32;
+
+use num_traits::{NumCast, ToPrimitive, Zero};
+
+use crate::buffer::{ImageBuffer, Pixel};
+use crate::image::GenericImageView;
+use crate::math::utils::clamp;
+use crate::traits::{Enlargeable, Primitive};
+
+/// Available Sampling Filters.
+///
+/// ## Examples
+///
+/// To test the different sampling filters on a real example, you can find two
+/// examples called
+/// [`scaledown`](https://github.com/image-rs/image/tree/master/examples/scaledown)
+/// and
+/// [`scaleup`](https://github.com/image-rs/image/tree/master/examples/scaleup)
+/// in the `examples` directory of the crate source code.
+///
+/// Here is a 3.58 MiB
+/// [test image](https://github.com/image-rs/image/blob/master/examples/scaledown/test.jpg)
+/// that has been scaled down to 300x225 px:
+///
+/// <!-- NOTE: To test new test images locally, replace the GitHub path with `../../../docs/` -->
+/// <div style="display: flex; flex-wrap: wrap; align-items: flex-start;">
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-near.png" title="Nearest"><br>
+/// Nearest Neighbor
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-tri.png" title="Triangle"><br>
+/// Linear: Triangle
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-cmr.png" title="CatmullRom"><br>
+/// Cubic: Catmull-Rom
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-gauss.png" title="Gaussian"><br>
+/// Gaussian
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-lcz2.png" title="Lanczos3"><br>
+/// Lanczos with window 3
+/// </div>
+/// </div>
+///
+/// ## Speed
+///
+/// Time required to create each of the examples above, tested on an Intel
+/// i7-4770 CPU with Rust 1.37 in release mode:
+///
+/// <table style="width: auto;">
+/// <tr>
+/// <th>Nearest</th>
+/// <td>31 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Triangle</th>
+/// <td>414 ms</td>
+/// </tr>
+/// <tr>
+/// <th>CatmullRom</th>
+/// <td>817 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Gaussian</th>
+/// <td>1180 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Lanczos3</th>
+/// <td>1170 ms</td>
+/// </tr>
+/// </table>
+// Variants are ordered roughly from cheapest to most expensive (see the
+// timing table in the rustdoc above).
+#[derive(Clone, Copy, Debug)]
+pub enum FilterType {
+ /// Nearest Neighbor
+ Nearest,
+
+ /// Linear Filter
+ Triangle,
+
+ /// Cubic Filter
+ CatmullRom,
+
+ /// Gaussian Filter
+ Gaussian,
+
+ /// Lanczos with window 3
+ Lanczos3,
+}
+
+/// A Representation of a separable filter.
+pub(crate) struct Filter<'a> {
+ /// The filter's filter function.
+ pub(crate) kernel: Box<dyn Fn(f32) -> f32 + 'a>,
+
+ /// The window on which this filter operates.
+ /// Kernel radius in destination pixels; the sampling loops scale it by the
+ /// resize ratio (`filter.support * sratio`) to get the source-side window.
+ pub(crate) support: f32,
+}
+
+// sinc function: the ideal sampling filter.
+// Normalized form sin(pi*t)/(pi*t), with the removable singularity at t == 0
+// handled explicitly (sinc(0) == 1).
+fn sinc(t: f32) -> f32 {
+ let a = t * f32::consts::PI;
+
+ if t == 0.0 {
+ 1.0
+ } else {
+ a.sin() / a
+ }
+}
+
+// lanczos kernel function. A windowed sinc function.
+// `t` is the window half-width; outside |x| < t the kernel is zero.
+fn lanczos(x: f32, t: f32) -> f32 {
+ if x.abs() < t {
+ sinc(x) * sinc(x / t)
+ } else {
+ 0.0
+ }
+}
+
+// Calculate a splice based on the b and c parameters.
+// from authors Mitchell and Netravali.
+// Piecewise cubic over |x| < 1 and 1 <= |x| < 2; zero beyond 2. The (b, c)
+// pair selects the family member — catmullrom_kernel below uses (0, 0.5).
+fn bc_cubic_spline(x: f32, b: f32, c: f32) -> f32 {
+ let a = x.abs();
+
+ let k = if a < 1.0 {
+ (12.0 - 9.0 * b - 6.0 * c) * a.powi(3) + (-18.0 + 12.0 * b + 6.0 * c) * a.powi(2)
+ + (6.0 - 2.0 * b)
+ } else if a < 2.0 {
+ (-b - 6.0 * c) * a.powi(3) + (6.0 * b + 30.0 * c) * a.powi(2) + (-12.0 * b - 48.0 * c) * a
+ + (8.0 * b + 24.0 * c)
+ } else {
+ 0.0
+ };
+
+ // The published coefficients carry a common 1/6 factor, applied once here.
+ k / 6.0
+}
+
+/// The Gaussian Function.
+/// `r` is the standard deviation.
+/// Standard normal density: exp(-x^2 / (2 r^2)) / (sqrt(2 pi) r).
+pub(crate) fn gaussian(x: f32, r: f32) -> f32 {
+ ((2.0 * f32::consts::PI).sqrt() * r).recip() * (-x.powi(2) / (2.0 * r.powi(2))).exp()
+}
+
+/// Calculate the lanczos kernel with a window of 3
+/// (the kernel behind `FilterType::Lanczos3`).
+pub(crate) fn lanczos3_kernel(x: f32) -> f32 {
+ lanczos(x, 3.0)
+}
+
+/// Calculate the gaussian function with a
+/// standard deviation of 0.5
+/// (the kernel behind `FilterType::Gaussian`).
+pub(crate) fn gaussian_kernel(x: f32) -> f32 {
+ gaussian(x, 0.5)
+}
+
+/// Calculate the Catmull-Rom cubic spline.
+/// Also known as a form of `BiCubic` sampling in two dimensions.
+/// Mitchell-Netravali spline with (b, c) = (0, 0.5); backs `FilterType::CatmullRom`.
+pub(crate) fn catmullrom_kernel(x: f32) -> f32 {
+ bc_cubic_spline(x, 0.0, 0.5)
+}
+
+/// Calculate the triangle function.
+/// Also known as `BiLinear` sampling in two dimensions.
+/// Tent function: 1 - |x| on (-1, 1), zero elsewhere; backs `FilterType::Triangle`.
+pub(crate) fn triangle_kernel(x: f32) -> f32 {
+ if x.abs() < 1.0 {
+ 1.0 - x.abs()
+ } else {
+ 0.0
+ }
+}
+
+/// Calculate the box kernel.
+/// Only pixels inside the box should be considered, and those
+/// contribute equally. So this method simply returns 1.
+pub(crate) fn box_kernel(_x: f32) -> f32 {
+ 1.0
+}
+
+// Sample the rows of the supplied image using the provided filter.
+// The height of the image remains unchanged.
+// ```new_width``` is the desired width of the new image
+// ```filter``` is the filter to use for sampling.
+//
+// For each output column, the kernel weights over the relevant input columns
+// are computed once and then reused for every row of that column. Results are
+// normalized by the weight sum, clamped to the sample range, and written out.
+fn horizontal_sample<I, P, S>(
+ image: &I,
+ new_width: u32,
+ filter: &mut Filter,
+) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(new_width, height);
+ // Weight scratch buffer, reused (cleared) per output column.
+ let mut ws = Vec::new();
+
+ let max: f32 = NumCast::from(S::max_value()).unwrap();
+ let ratio = width as f32 / new_width as f32;
+ // When upscaling (ratio < 1) the kernel is evaluated at unit scale.
+ let sratio = if ratio < 1.0 { 1.0 } else { ratio };
+ let src_support = filter.support * sratio;
+
+ for outx in 0..new_width {
+ // Find the point in the input image corresponding to the centre
+ // of the current pixel in the output image.
+ let inputx = (outx as f32 + 0.5) * ratio;
+
+ // Left and right are slice bounds for the input pixels relevant
+ // to the output pixel we are calculating. Pixel x is relevant
+ // if and only if (x >= left) && (x < right).
+
+ // Invariant: 0 <= left < right <= width
+
+ let left = (inputx - src_support).floor() as i64;
+ let left = clamp(left, 0, <i64 as From<_>>::from(width) - 1) as u32;
+
+ let right = (inputx + src_support).ceil() as i64;
+ let right = clamp(
+ right,
+ <i64 as From<_>>::from(left) + 1,
+ <i64 as From<_>>::from(width),
+ ) as u32;
+
+ // Go back to left boundary of pixel, to properly compare with i
+ // below, as the kernel treats the centre of a pixel as 0.
+ let inputx = inputx - 0.5;
+
+ ws.clear();
+ let mut sum = 0.0;
+ for i in left..right {
+ let w = (filter.kernel)((i as f32 - inputx) / sratio);
+ ws.push(w);
+ sum += w;
+ }
+
+ for y in 0..height {
+ // Accumulate up to four channels as f32 (unused channels just carry zeros).
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ for (i, w) in ws.iter().enumerate() {
+ let p = image.get_pixel(left + i as u32, y);
+
+ let (k1, k2, k3, k4) = p.channels4();
+ let vec: (f32, f32, f32, f32) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ t.0 += vec.0 * w;
+ t.1 += vec.1 * w;
+ t.2 += vec.2 * w;
+ t.3 += vec.3 * w;
+ }
+
+ // Normalize by the weight sum so kernels need not integrate to 1.
+ let (t1, t2, t3, t4) = (t.0 / sum, t.1 / sum, t.2 / sum, t.3 / sum);
+ let t = Pixel::from_channels(
+ NumCast::from(clamp(t1, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t2, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t3, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t4, 0.0, max)).unwrap(),
+ );
+
+ out.put_pixel(outx, y, t);
+ }
+ }
+
+ out
+}
+
+// Sample the columns of the supplied image using the provided filter.
+// The width of the image remains unchanged.
+// ```new_height``` is the desired height of the new image
+// ```filter``` is the filter to use for sampling.
+//
+// Mirror image of horizontal_sample with the roles of x and y swapped;
+// see the comments there for the algorithm.
+fn vertical_sample<I, P, S>(
+ image: &I,
+ new_height: u32,
+ filter: &mut Filter,
+) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, new_height);
+ let mut ws = Vec::new();
+
+ let max: f32 = NumCast::from(S::max_value()).unwrap();
+ let ratio = height as f32 / new_height as f32;
+ let sratio = if ratio < 1.0 { 1.0 } else { ratio };
+ let src_support = filter.support * sratio;
+
+ for outy in 0..new_height {
+ // For an explanation of this algorithm, see the comments
+ // in horizontal_sample.
+ let inputy = (outy as f32 + 0.5) * ratio;
+
+ let left = (inputy - src_support).floor() as i64;
+ let left = clamp(left, 0, <i64 as From<_>>::from(height) - 1) as u32;
+
+ let right = (inputy + src_support).ceil() as i64;
+ let right = clamp(
+ right,
+ <i64 as From<_>>::from(left) + 1,
+ <i64 as From<_>>::from(height),
+ ) as u32;
+
+ let inputy = inputy - 0.5;
+
+ ws.clear();
+ let mut sum = 0.0;
+ for i in left..right {
+ let w = (filter.kernel)((i as f32 - inputy) / sratio);
+ ws.push(w);
+ sum += w;
+ }
+
+ for x in 0..width {
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ for (i, w) in ws.iter().enumerate() {
+ let p = image.get_pixel(x, left + i as u32);
+
+ let (k1, k2, k3, k4) = p.channels4();
+ let vec: (f32, f32, f32, f32) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ t.0 += vec.0 * w;
+ t.1 += vec.1 * w;
+ t.2 += vec.2 * w;
+ t.3 += vec.3 * w;
+ }
+
+ let (t1, t2, t3, t4) = (t.0 / sum, t.1 / sum, t.2 / sum, t.3 / sum);
+ let t = Pixel::from_channels(
+ NumCast::from(clamp(t1, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t2, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t3, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t4, 0.0, max)).unwrap(),
+ );
+
+ out.put_pixel(x, outy, t);
+ }
+ }
+
+ out
+}
+
+/// Local struct for keeping track of pixel sums for fast thumbnail averaging
+/// Accumulates up to four channels in the sample type's widened counterpart
+/// (`S::Larger`) so summing many u8/u16 samples cannot overflow.
+struct ThumbnailSum<S: Primitive + Enlargeable>(S::Larger, S::Larger, S::Larger, S::Larger);
+
+impl<S: Primitive + Enlargeable> ThumbnailSum<S> {
+ /// All four channel accumulators start at zero.
+ fn zeroed() -> Self {
+ ThumbnailSum(S::Larger::zero(), S::Larger::zero(), S::Larger::zero(), S::Larger::zero())
+ }
+
+ /// Widen a single sample to the accumulator type.
+ fn sample_val(val: S) -> S::Larger {
+ <S::Larger as NumCast>::from(val).unwrap()
+ }
+
+ /// Add all four channels of `pixel` to the running sums.
+ fn add_pixel<P: Pixel<Subpixel=S>>(&mut self, pixel: P) {
+ let pixel = pixel.channels4();
+ self.0 += Self::sample_val(pixel.0);
+ self.1 += Self::sample_val(pixel.1);
+ self.2 += Self::sample_val(pixel.2);
+ self.3 += Self::sample_val(pixel.3);
+ }
+}
+
+/// Resize the supplied image to the specific dimensions.
+///
+/// For downscaling, this method uses a fast integer algorithm where each source pixel contributes
+/// to exactly one target pixel. May give aliasing artifacts if new size is close to old size.
+///
+/// In case the current width is smaller than the new width or similar for the height, another
+/// strategy is used instead. For each pixel in the output, a rectangular region of the input is
+/// determined, just as previously. But when no input pixel is part of this region, the nearest
+/// pixels are interpolated instead.
+///
+/// For speed reasons, all interpolation is performed linearly over the colour values. It will not
+/// take the pixel colour spaces into account.
+pub fn thumbnail<I, P, S>(image: &I, new_width: u32, new_height: u32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + Enlargeable + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(new_width, new_height);
+
+ let x_ratio = width as f32 / new_width as f32;
+ let y_ratio = height as f32 / new_height as f32;
+
+ for outy in 0..new_height {
+ // Fractional source-row window [bottomf, topf) covered by output row outy.
+ let bottomf = outy as f32 * y_ratio;
+ let topf = bottomf + y_ratio;
+
+ // Integer rows fully inside the window; bottom == top means the window
+ // spans no whole row and we must interpolate instead.
+ let bottom = clamp(
+ bottomf.ceil() as u32,
+ 0,
+ height - 1,
+ );
+ let top = clamp(
+ topf.ceil() as u32,
+ bottom,
+ height,
+ );
+
+ for outx in 0..new_width {
+ // Same construction for the source-column window.
+ let leftf = outx as f32 * x_ratio;
+ let rightf = leftf + x_ratio;
+
+ let left = clamp(
+ leftf.ceil() as u32,
+ 0,
+ width - 1,
+ );
+ let right = clamp(
+ rightf.ceil() as u32,
+ left,
+ width,
+ );
+
+ // Four cases: full block average, or interpolation along one or
+ // both axes when the window encloses no whole pixel in that axis.
+ let avg = if bottom != top && left != right {
+ thumbnail_sample_block(image, left, right, bottom, top)
+ } else if bottom != top { // && left == right
+ // In the first column we have left == 0 and right > ceil(y_scale) > 0 so this
+ // assertion can never trigger.
+ debug_assert!(left > 0 && right > 0,
+ "First output column must have corresponding pixels");
+
+ let fraction_horizontal = (leftf.fract() + rightf.fract())/2.;
+ thumbnail_sample_fraction_horizontal(image, right - 1, fraction_horizontal, bottom, top)
+ } else if left != right { // && bottom == top
+ // In the first line we have bottom == 0 and top > ceil(x_scale) > 0 so this
+ // assertion can never trigger.
+ debug_assert!(bottom > 0 && top > 0,
+ "First output row must have corresponding pixels");
+
+ let fraction_vertical = (topf.fract() + bottomf.fract())/2.;
+ thumbnail_sample_fraction_vertical(image, left, right, top - 1, fraction_vertical)
+ } else { // bottom == top && left == right
+ // NOTE(review): here `fraction_horizontal` is computed from the
+ // vertical bounds (topf/bottomf) and `fraction_vertical` from the
+ // horizontal ones (leftf/rightf) — the names look swapped relative
+ // to the two branches above. Confirm against the helper's parameter
+ // order and upstream history before relying on these names.
+ let fraction_horizontal = (topf.fract() + bottomf.fract())/2.;
+ let fraction_vertical= (leftf.fract() + rightf.fract())/2.;
+
+ thumbnail_sample_fraction_both(image, right - 1, fraction_horizontal, top - 1, fraction_vertical)
+ };
+
+ let pixel = Pixel::from_channels(avg.0, avg.1, avg.2, avg.3);
+ out.put_pixel(outx, outy, pixel);
+ }
+ }
+
+ out
+}
+
+/// Get a pixel for a thumbnail where the input window encloses at least a full pixel.
+///
+/// Computes the per-channel rounded integer mean of the pixel block
+/// [left, right) x [bottom, top). Requires left < right and bottom < top
+/// (otherwise `n` is zero and the division would fail).
+fn thumbnail_sample_block<I, P, S>(
+ image: &I,
+ left: u32,
+ right: u32,
+ bottom: u32,
+ top: u32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let mut sum = ThumbnailSum::zeroed();
+
+ for y in bottom..top {
+ for x in left..right {
+ let k = image.get_pixel(x, y);
+ sum.add_pixel(k);
+ }
+ }
+
+ let n = <S::Larger as NumCast>::from(
+ (right - left) * (top - bottom)).unwrap();
+ // Adding n/2 before dividing rounds to nearest instead of truncating.
+ let round = <S::Larger as NumCast>::from(
+ n / NumCast::from(2).unwrap()).unwrap();
+ (
+ S::clamp_from((sum.0 + round)/n),
+ S::clamp_from((sum.1 + round)/n),
+ S::clamp_from((sum.2 + round)/n),
+ S::clamp_from((sum.3 + round)/n),
+ )
+}
+
+/// Get a thumbnail pixel where the input window encloses at least a vertical pixel.
+///
+/// The horizontal window spans no whole column, so the columns `left` and
+/// `left + 1` are averaged over rows [bottom, top) and blended with weight
+/// `fraction_horizontal` toward the right column.
+fn thumbnail_sample_fraction_horizontal<I, P, S>(
+ image: &I,
+ left: u32,
+ fraction_horizontal: f32,
+ bottom: u32,
+ top: u32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let fract = fraction_horizontal;
+
+ let mut sum_left = ThumbnailSum::zeroed();
+ let mut sum_right = ThumbnailSum::zeroed();
+ // NOTE(review): the loop variable `x` is actually the y coordinate here —
+ // it iterates the row range and is passed as get_pixel's second argument.
+ for x in bottom..top {
+ let k_left = image.get_pixel(left, x);
+ sum_left.add_pixel(k_left);
+
+ let k_right = image.get_pixel(left + 1, x);
+ sum_right.add_pixel(k_right);
+ }
+
+ // Now we approximate: left/n*(1-fract) + right/n*fract
+ let fact_right = fract /((top - bottom) as f32);
+ let fact_left = (1. - fract)/((top - bottom) as f32);
+
+ let mix_left_and_right = |leftv: S::Larger, rightv: S::Larger|
+ <S as NumCast>::from(
+ fact_left * leftv.to_f32().unwrap() +
+ fact_right * rightv.to_f32().unwrap()
+ ).expect("Average sample value should fit into sample type");
+
+ (
+ mix_left_and_right(sum_left.0, sum_right.0),
+ mix_left_and_right(sum_left.1, sum_right.1),
+ mix_left_and_right(sum_left.2, sum_right.2),
+ mix_left_and_right(sum_left.3, sum_right.3),
+ )
+}
+
+/// Get a thumbnail pixel where the input window encloses at least a horizontal pixel.
+fn thumbnail_sample_fraction_vertical<I, P, S>(
+ image: &I,
+ left: u32,
+ right: u32,
+ bottom: u32,
+ fraction_vertical: f32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let fract = fraction_vertical;
+
+ let mut sum_bot = ThumbnailSum::zeroed();
+ let mut sum_top = ThumbnailSum::zeroed();
+ for x in left..right {
+ let k_bot = image.get_pixel(x, bottom);
+ sum_bot.add_pixel(k_bot);
+
+ let k_top = image.get_pixel(x, bottom + 1);
+ sum_top.add_pixel(k_top);
+ }
+
+ // Now we approximate: bot/n*fract + top/n*(1-fract)
+ let fact_top = fract /((right - left) as f32);
+ let fact_bot = (1. - fract)/((right - left) as f32);
+
+ let mix_bot_and_top = |botv: S::Larger, topv: S::Larger|
+ <S as NumCast>::from(
+ fact_bot * botv.to_f32().unwrap() +
+ fact_top * topv.to_f32().unwrap()
+ ).expect("Average sample value should fit into sample type");
+
+ (
+ mix_bot_and_top(sum_bot.0, sum_top.0),
+ mix_bot_and_top(sum_bot.1, sum_top.1),
+ mix_bot_and_top(sum_bot.2, sum_top.2),
+ mix_bot_and_top(sum_bot.3, sum_top.3),
+ )
+}
+
/// Get a single pixel for a thumbnail where the input window does not enclose any full pixel.
fn thumbnail_sample_fraction_both<I, P, S>(
    image: &I,
    left: u32,
    fraction_vertical: f32,
    bottom: u32,
    fraction_horizontal: f32,
) -> (S, S, S, S)
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S>,
    S: Primitive + Enlargeable,
{
    // The four pixels surrounding the fractional window:
    // bottom-left, top-left, bottom-right, top-right.
    let k_bl = image.get_pixel(left, bottom).channels4();
    let k_tl = image.get_pixel(left, bottom + 1).channels4();
    let k_br = image.get_pixel(left + 1, bottom).channels4();
    let k_tr = image.get_pixel(left + 1, bottom + 1).channels4();

    let frac_v = fraction_vertical;
    let frac_h = fraction_horizontal;

    // Bilinear interpolation weights; the four factors sum to 1.
    let fact_tr = frac_v * frac_h;
    let fact_tl = frac_v * (1. - frac_h);
    let fact_br = (1. - frac_v) * frac_h;
    let fact_bl = (1. - frac_v) * (1. - frac_h);

    // Weighted blend of one channel of all four corner pixels.
    let mix = |br: S, tr: S, bl: S, tl: S|
        <S as NumCast>::from(
            fact_br * br.to_f32().unwrap() +
            fact_tr * tr.to_f32().unwrap() +
            fact_bl * bl.to_f32().unwrap() +
            fact_tl * tl.to_f32().unwrap()
        ).expect("Average sample value should fit into sample type");

    (
        mix(k_br.0, k_tr.0, k_bl.0, k_tl.0),
        mix(k_br.1, k_tr.1, k_bl.1, k_tl.1),
        mix(k_br.2, k_tr.2, k_bl.2, k_tl.2),
        mix(k_br.3, k_tr.3, k_bl.3, k_tl.3),
    )
}
+
+/// Perform a 3x3 box filter on the supplied image.
+/// ```kernel``` is an array of the filter weights of length 9.
+pub fn filter3x3<I, P, S>(image: &I, kernel: &[f32]) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ // The kernel's input positions relative to the current pixel.
+ let taps: &[(isize, isize)] = &[
+ (-1, -1),
+ (0, -1),
+ (1, -1),
+ (-1, 0),
+ (0, 0),
+ (1, 0),
+ (-1, 1),
+ (0, 1),
+ (1, 1),
+ ];
+
+ let (width, height) = image.dimensions();
+
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::max_value();
+ let max: f32 = NumCast::from(max).unwrap();
+
+ let sum = match kernel.iter().fold(0.0, |s, &item| s + item) {
+ x if x == 0.0 => 1.0,
+ sum => sum,
+ };
+ let sum = (sum, sum, sum, sum);
+
+ for y in 1..height - 1 {
+ for x in 1..width - 1 {
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ // TODO: There is no need to recalculate the kernel for each pixel.
+ // Only a subtract and addition is needed for pixels after the first
+ // in each row.
+ for (&k, &(a, b)) in kernel.iter().zip(taps.iter()) {
+ let k = (k, k, k, k);
+ let x0 = x as isize + a;
+ let y0 = y as isize + b;
+
+ let p = image.get_pixel(x0 as u32, y0 as u32);
+
+ let (k1, k2, k3, k4) = p.channels4();
+
+ let vec: (f32, f32, f32, f32) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ t.0 += vec.0 * k.0;
+ t.1 += vec.1 * k.1;
+ t.2 += vec.2 * k.2;
+ t.3 += vec.3 * k.3;
+ }
+
+ let (t1, t2, t3, t4) = (t.0 / sum.0, t.1 / sum.1, t.2 / sum.2, t.3 / sum.3);
+
+ let t = Pixel::from_channels(
+ NumCast::from(clamp(t1, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t2, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t3, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t4, 0.0, max)).unwrap(),
+ );
+
+ out.put_pixel(x, y, t);
+ }
+ }
+
+ out
+}
+
+/// Resize the supplied image to the specified dimensions.
+/// ```nwidth``` and ```nheight``` are the new dimensions.
+/// ```filter``` is the sampling filter to use.
+pub fn resize<I: GenericImageView>(
+ image: &I,
+ nwidth: u32,
+ nheight: u32,
+ filter: FilterType,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+ <I::Pixel as Pixel>::Subpixel: 'static,
+{
+ let mut method = match filter {
+ FilterType::Nearest => Filter {
+ kernel: Box::new(box_kernel),
+ support: 0.0,
+ },
+ FilterType::Triangle => Filter {
+ kernel: Box::new(triangle_kernel),
+ support: 1.0,
+ },
+ FilterType::CatmullRom => Filter {
+ kernel: Box::new(catmullrom_kernel),
+ support: 2.0,
+ },
+ FilterType::Gaussian => Filter {
+ kernel: Box::new(gaussian_kernel),
+ support: 3.0,
+ },
+ FilterType::Lanczos3 => Filter {
+ kernel: Box::new(lanczos3_kernel),
+ support: 3.0,
+ },
+ };
+
+ let tmp = vertical_sample(image, nheight, &mut method);
+ horizontal_sample(&tmp, nwidth, &mut method)
+}
+
+/// Performs a Gaussian blur on the supplied image.
+/// ```sigma``` is a measure of how much to blur by.
+pub fn blur<I: GenericImageView>(
+ image: &I,
+ sigma: f32,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let sigma = if sigma < 0.0 { 1.0 } else { sigma };
+
+ let mut method = Filter {
+ kernel: Box::new(|x| gaussian(x, sigma)),
+ support: 2.0 * sigma,
+ };
+
+ let (width, height) = image.dimensions();
+
+ // Keep width and height the same for horizontal and
+ // vertical sampling.
+ let tmp = vertical_sample(image, height, &mut method);
+ horizontal_sample(&tmp, width, &mut method)
+}
+
/// Performs an unsharpen mask on the supplied image.
/// ```sigma``` is the amount to blur the image by.
/// ```threshold``` is the threshold for the difference between the original
/// and the blurred channel value; differences at or below it leave the
/// channel unchanged.
///
/// See <https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking>
pub fn unsharpen<I, P, S>(image: &I, sigma: f32, threshold: i32) -> ImageBuffer<P, Vec<S>>
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + 'static,
{
    // Start from a blurred copy; it is overwritten in place with the result.
    let mut tmp = blur(image, sigma);

    let max = S::max_value();
    let max: i32 = NumCast::from(max).unwrap();
    let (width, height) = image.dimensions();

    for y in 0..height {
        for x in 0..width {
            let a = image.get_pixel(x, y);
            let b = tmp.get_pixel_mut(x, y);

            let p = a.map2(b, |c, d| {
                let ic: i32 = NumCast::from(c).unwrap();
                let id: i32 = NumCast::from(d).unwrap();

                // Absolute difference between original and blurred channel.
                // NOTE(review): because of abs(), `ic + diff` can only
                // brighten a sharpened channel, never darken it — confirm
                // this is intended before changing to a signed difference.
                let diff = (ic - id).abs();

                if diff > threshold {
                    // Push the channel away from the blurred value, clamped
                    // to the valid sample range.
                    let e = clamp(ic + diff, 0, max);

                    NumCast::from(e).unwrap()
                } else {
                    c
                }
            });

            *b = p;
        }
    }

    tmp
}
+
#[cfg(test)]
mod tests {
    use super::{resize, FilterType};
    use crate::buffer::{ImageBuffer, RgbImage};
    // Benchmarks need the unstable `test` crate (nightly only).
    #[cfg(feature = "benchmarks")]
    use test;

    #[bench]
    #[cfg(all(feature = "benchmarks", feature = "png"))]
    fn bench_resize(b: &mut test::Bencher) {
        use std::path::Path;
        let img = crate::open(&Path::new("./examples/fractal.png")).unwrap();
        b.iter(|| {
            test::black_box(resize(&img, 200, 200, FilterType::Nearest));
        });
        b.bytes = 800 * 800 * 3 + 200 * 200 * 3;
    }

    // Regression test for https://github.com/image-rs/image/issues/186:
    // this resize must complete without panicking.
    #[test]
    fn test_issue_186() {
        let img: RgbImage = ImageBuffer::new(100, 100);
        let _ = resize(&img, 50, 50, FilterType::Lanczos3);
    }

    #[bench]
    #[cfg(all(feature = "benchmarks", feature = "tiff"))]
    fn bench_thumbnail(b: &mut test::Bencher) {
        let path = concat!(env!("CARGO_MANIFEST_DIR"), "/tests/images/tiff/testsuite/mandrill.tiff");
        let image = crate::open(path).unwrap();
        b.iter(|| {
            test::black_box(image.thumbnail(256, 256));
        });
        b.bytes = 512 * 512 * 4 + 256 * 256 * 4;
    }

    #[bench]
    #[cfg(all(feature = "benchmarks", feature = "tiff"))]
    fn bench_thumbnail_upsize(b: &mut test::Bencher) {
        let path = concat!(env!("CARGO_MANIFEST_DIR"), "/tests/images/tiff/testsuite/mandrill.tiff");
        let image = crate::open(path).unwrap().thumbnail(256, 256);
        b.iter(|| {
            test::black_box(image.thumbnail(512, 512));
        });
        b.bytes = 512 * 512 * 4 + 256 * 256 * 4;
    }

    // Upsizing from a size that is not a clean divisor of the target.
    #[bench]
    #[cfg(all(feature = "benchmarks", feature = "tiff"))]
    fn bench_thumbnail_upsize_irregular(b: &mut test::Bencher) {
        let path = concat!(env!("CARGO_MANIFEST_DIR"), "/tests/images/tiff/testsuite/mandrill.tiff");
        let image = crate::open(path).unwrap().thumbnail(193, 193);
        b.iter(|| {
            test::black_box(image.thumbnail(256, 256));
        });
        b.bytes = 193 * 193 * 4 + 256 * 256 * 4;
    }
}
diff --git a/third_party/rust/image/src/io/free_functions.rs b/third_party/rust/image/src/io/free_functions.rs
new file mode 100644
index 0000000000..227864d77e
--- /dev/null
+++ b/third_party/rust/image/src/io/free_functions.rs
@@ -0,0 +1,289 @@
+use std::ffi::OsString;
+use std::fs::File;
+use std::io::{BufRead, BufReader, BufWriter, Seek};
+use std::path::Path;
+use std::u32;
+
+#[cfg(feature = "bmp")]
+use crate::bmp;
+#[cfg(feature = "gif")]
+use crate::gif;
+#[cfg(feature = "hdr")]
+use crate::hdr;
+#[cfg(feature = "ico")]
+use crate::ico;
+#[cfg(feature = "jpeg")]
+use crate::jpeg;
+#[cfg(feature = "png")]
+use crate::png;
+#[cfg(feature = "pnm")]
+use crate::pnm;
+#[cfg(feature = "tga")]
+use crate::tga;
+#[cfg(feature = "dds")]
+use crate::dds;
+#[cfg(feature = "tiff")]
+use crate::tiff;
+#[cfg(feature = "webp")]
+use crate::webp;
+
+use crate::color;
+use crate::image;
+use crate::dynimage::DynamicImage;
+use crate::error::{ImageError, ImageFormatHint, ImageResult};
+use crate::image::{ImageDecoder, ImageEncoder, ImageFormat};
+
/// Internal error type for guessing format from path.
pub(crate) enum PathError {
    /// The extension did not fit a supported format. Also returned when the
    /// extension is not valid UTF-8 (see `guess_format_from_path_impl`).
    UnknownExtension(OsString),
    /// The path had no extension at all.
    NoExtension,
}
+
+pub(crate) fn open_impl(path: &Path) -> ImageResult<DynamicImage> {
+ let fin = match File::open(path) {
+ Ok(f) => f,
+ Err(err) => return Err(ImageError::IoError(err)),
+ };
+ let fin = BufReader::new(fin);
+
+ load(fin, ImageFormat::from_path(path)?)
+}
+
/// Create a new image from a Reader
///
/// Try [`io::Reader`] for more advanced uses.
///
/// [`io::Reader`]: io/struct.Reader.html
pub fn load<R: BufRead + Seek>(r: R, format: ImageFormat) -> ImageResult<DynamicImage> {
    #[allow(deprecated, unreachable_patterns)]
    // Default is unreachable if all features are supported.
    match format {
        #[cfg(feature = "png")]
        image::ImageFormat::Png => DynamicImage::from_decoder(png::PngDecoder::new(r)?),
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => DynamicImage::from_decoder(gif::GifDecoder::new(r)?),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => DynamicImage::from_decoder(jpeg::JpegDecoder::new(r)?),
        #[cfg(feature = "webp")]
        image::ImageFormat::WebP => DynamicImage::from_decoder(webp::WebPDecoder::new(r)?),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => DynamicImage::from_decoder(tiff::TiffDecoder::new(r)?),
        #[cfg(feature = "tga")]
        image::ImageFormat::Tga => DynamicImage::from_decoder(tga::TgaDecoder::new(r)?),
        #[cfg(feature = "dds")]
        image::ImageFormat::Dds => DynamicImage::from_decoder(dds::DdsDecoder::new(r)?),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => DynamicImage::from_decoder(bmp::BmpDecoder::new(r)?),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => DynamicImage::from_decoder(ico::IcoDecoder::new(r)?),
        // The HDR and PNM decoders are handed an extra `BufReader` layer;
        // presumably their constructors require their own buffering — see
        // the respective `new` signatures.
        #[cfg(feature = "hdr")]
        image::ImageFormat::Hdr => DynamicImage::from_decoder(hdr::HDRAdapter::new(BufReader::new(r))?),
        #[cfg(feature = "pnm")]
        image::ImageFormat::Pnm => DynamicImage::from_decoder(pnm::PnmDecoder::new(BufReader::new(r))?),
        _ => Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    }
}
+
+pub(crate) fn image_dimensions_impl(path: &Path) -> ImageResult<(u32, u32)> {
+ let format = image::ImageFormat::from_path(path)?;
+
+ let fin = File::open(path)?;
+ let fin = BufReader::new(fin);
+
+ image_dimensions_with_format_impl(fin, format)
+}
+
/// Read only enough of `fin` in the given format to report the image's
/// (width, height), without decoding pixel data.
pub(crate) fn image_dimensions_with_format_impl<R: BufRead + Seek>(fin: R, format: ImageFormat)
    -> ImageResult<(u32, u32)>
{
    #[allow(unreachable_patterns)]
    // Default is unreachable if all features are supported.
    Ok(match format {
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => jpeg::JpegDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "png")]
        image::ImageFormat::Png => png::PngDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => gif::GifDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "webp")]
        image::ImageFormat::WebP => webp::WebPDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => tiff::TiffDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tga")]
        image::ImageFormat::Tga => tga::TgaDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "dds")]
        image::ImageFormat::Dds => dds::DdsDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => bmp::BmpDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => ico::IcoDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "hdr")]
        image::ImageFormat::Hdr => hdr::HDRAdapter::new(fin)?.dimensions(),
        #[cfg(feature = "pnm")]
        image::ImageFormat::Pnm => {
            pnm::PnmDecoder::new(fin)?.dimensions()
        }
        // Formats compiled out (or genuinely unknown) end up here.
        format => return Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    })
}
+
/// Save the raw pixel `buf` as an image at `path`, choosing the encoder from
/// the file extension (compared case-insensitively).
pub(crate) fn save_buffer_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
) -> ImageResult<()> {
    let fout = &mut BufWriter::new(File::create(path)?);
    // Missing or non-UTF-8 extensions become "" and fall through to the
    // unsupported-format error below.
    let ext = path.extension()
        .and_then(|s| s.to_str())
        .map_or("".to_string(), |s| s.to_ascii_lowercase());

    match &*ext {
        #[cfg(feature = "gif")]
        "gif" => gif::Encoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "ico")]
        "ico" => ico::ICOEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        "jpg" | "jpeg" => jpeg::JPEGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "png")]
        "png" => png::PNGEncoder::new(fout).write_image(buf, width, height, color),
        // The PNM family picks a subtype from the specific extension.
        #[cfg(feature = "pnm")]
        "pbm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Bitmap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pgm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Graymap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "ppm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Pixmap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pam" => pnm::PNMEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "bmp")]
        "bmp" => bmp::BMPEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "tiff")]
        "tif" | "tiff" => tiff::TiffEncoder::new(fout)
            .write_image(buf, width, height, color),
        _ => Err(ImageError::Unsupported(ImageFormatHint::from(path).into())),
    }
}
+
/// Save the raw pixel `buf` at `path` using an explicitly supplied `format`;
/// the path's extension is not consulted (compare `save_buffer_impl`).
pub(crate) fn save_buffer_with_format_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
    format: ImageFormat,
) -> ImageResult<()> {
    let fout = &mut BufWriter::new(File::create(path)?);

    match format {
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => gif::Encoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => ico::ICOEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => jpeg::JPEGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "png")]
        image::ImageFormat::Png => png::PNGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => bmp::BMPEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => tiff::TiffEncoder::new(fout)
            .write_image(buf, width, height, color),
        // Encoders compiled out (or write-unsupported formats) end up here.
        format => return Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    }
}
+
+/// Guess format from a path.
+///
+/// Returns `PathError::NoExtension` if the path has no extension or returns a
+/// `PathError::UnknownExtension` containing the extension if it can not be convert to a `str`.
+pub(crate) fn guess_format_from_path_impl(path: &Path) -> Result<ImageFormat, PathError> {
+ let exact_ext = path.extension();
+
+ let ext = exact_ext
+ .and_then(|s| s.to_str())
+ .map(str::to_ascii_lowercase);
+
+ let ext = ext.as_ref()
+ .map(String::as_str);
+
+ Ok(match ext {
+ Some("jpg") | Some("jpeg") => image::ImageFormat::Jpeg,
+ Some("png") => image::ImageFormat::Png,
+ Some("gif") => image::ImageFormat::Gif,
+ Some("webp") => image::ImageFormat::WebP,
+ Some("tif") | Some("tiff") => image::ImageFormat::Tiff,
+ Some("tga") => image::ImageFormat::Tga,
+ Some("dds") => image::ImageFormat::Dds,
+ Some("bmp") => image::ImageFormat::Bmp,
+ Some("ico") => image::ImageFormat::Ico,
+ Some("hdr") => image::ImageFormat::Hdr,
+ Some("pbm") | Some("pam") | Some("ppm") | Some("pgm") => image::ImageFormat::Pnm,
+ // The original extension is used, instead of _format
+ _ => return match exact_ext {
+ None => Err(PathError::NoExtension),
+ Some(os) => Err(PathError::UnknownExtension(os.to_owned())),
+ },
+ })
+}
+
/// Magic-byte signatures for content-based format detection, matched in order
/// against the first bytes of a buffer by `guess_format_impl`.
static MAGIC_BYTES: [(&'static [u8], ImageFormat); 18] = [
    (b"\x89PNG\r\n\x1a\n", ImageFormat::Png),
    (&[0xff, 0xd8, 0xff], ImageFormat::Jpeg),
    (b"GIF89a", ImageFormat::Gif),
    (b"GIF87a", ImageFormat::Gif),
    (b"RIFF", ImageFormat::WebP), // TODO: better magic byte detection, see https://github.com/image-rs/image/issues/660
    (b"MM\x00*", ImageFormat::Tiff),
    (b"II*\x00", ImageFormat::Tiff),
    (b"DDS ", ImageFormat::Dds),
    (b"BM", ImageFormat::Bmp),
    (&[0, 0, 1, 0], ImageFormat::Ico),
    (b"#?RADIANCE", ImageFormat::Hdr),
    // All seven PNM header variants (P1-P7).
    (b"P1", ImageFormat::Pnm),
    (b"P2", ImageFormat::Pnm),
    (b"P3", ImageFormat::Pnm),
    (b"P4", ImageFormat::Pnm),
    (b"P5", ImageFormat::Pnm),
    (b"P6", ImageFormat::Pnm),
    (b"P7", ImageFormat::Pnm),
];
+
+/// Guess image format from memory block
+///
+/// Makes an educated guess about the image format based on the Magic Bytes at the beginning.
+/// TGA is not supported by this function.
+/// This is not to be trusted on the validity of the whole memory block
+pub fn guess_format(buffer: &[u8]) -> ImageResult<ImageFormat> {
+ match guess_format_impl(buffer) {
+ Some(format) => Ok(format),
+ None => Err(ImageError::Unsupported(ImageFormatHint::Unknown.into())),
+ }
+}
+
+pub(crate) fn guess_format_impl(buffer: &[u8]) -> Option<ImageFormat> {
+ for &(signature, format) in &MAGIC_BYTES {
+ if buffer.starts_with(signature) {
+ return Some(format);
+ }
+ }
+
+ None
+}
+
+impl From<PathError> for ImageError {
+ fn from(path: PathError) -> Self {
+ let format_hint = match path {
+ PathError::NoExtension => ImageFormatHint::Unknown,
+ PathError::UnknownExtension(ext) => ImageFormatHint::PathExtension(ext.into()),
+ };
+ ImageError::Unsupported(format_hint.into())
+ }
+}
diff --git a/third_party/rust/image/src/io/mod.rs b/third_party/rust/image/src/io/mod.rs
new file mode 100644
index 0000000000..e7964c7259
--- /dev/null
+++ b/third_party/rust/image/src/io/mod.rs
@@ -0,0 +1,5 @@
+//! Input and output of images.
+mod reader;
+pub(crate) mod free_functions;
+
+pub use self::reader::Reader;
diff --git a/third_party/rust/image/src/io/reader.rs b/third_party/rust/image/src/io/reader.rs
new file mode 100644
index 0000000000..4da41f458b
--- /dev/null
+++ b/third_party/rust/image/src/io/reader.rs
@@ -0,0 +1,210 @@
+use std::fs::File;
+use std::io::{self, BufRead, BufReader, Cursor, Read, Seek, SeekFrom};
+use std::path::Path;
+
+use crate::dynimage::DynamicImage;
+use crate::image::ImageFormat;
+use crate::{ImageError, ImageResult};
+
+use super::free_functions;
+
+/// A multi-format image reader.
+///
+/// Wraps an input reader to facilitate automatic detection of an image's format, appropriate
+/// decoding method, and dispatches into the set of supported [`ImageDecoder`] implementations.
+///
+/// ## Usage
+///
+/// Opening a file, deducing the format based on the file path automatically, and trying to decode
+/// the image contained can be performed by constructing the reader and immediately consuming it.
+///
+/// ```no_run
+/// # use image::ImageError;
+/// # use image::io::Reader;
+/// # fn main() -> Result<(), ImageError> {
+/// let image = Reader::open("path/to/image.png")?
+/// .decode()?;
+/// # Ok(()) }
+/// ```
+///
+/// It is also possible to make a guess based on the content. This is especially handy if the
+/// source is some blob in memory and you have constructed the reader in another way. Here is an
+/// example with a `pnm` black-and-white subformat that encodes its pixel matrix with ascii values.
+///
+#[cfg_attr(feature = "pnm", doc = "```")]
+#[cfg_attr(not(feature = "pnm"), doc = "```no_run")]
+/// # use image::ImageError;
+/// # use image::io::Reader;
+/// # fn main() -> Result<(), ImageError> {
+/// use std::io::Cursor;
+/// use image::ImageFormat;
+///
+/// let raw_data = b"P1 2 2\n\
+/// 0 1\n\
+/// 1 0\n";
+///
+/// let mut reader = Reader::new(Cursor::new(raw_data))
+/// .with_guessed_format()
+/// .expect("Cursor io never fails");
+/// assert_eq!(reader.format(), Some(ImageFormat::Pnm));
+///
+/// let image = reader.decode()?;
+/// # Ok(()) }
+/// ```
+///
+/// As a final fallback or if only a specific format must be used, the reader always allows manual
+/// specification of the supposed image format with [`set_format`].
+///
+/// [`set_format`]: #method.set_format
+/// [`ImageDecoder`]: ../trait.ImageDecoder.html
pub struct Reader<R: Read> {
    /// The wrapped reader; io operations are performed directly on it.
    inner: R,
    /// The format, if one has been set explicitly or deduced from the
    /// path/content.
    format: Option<ImageFormat>,
}
+
impl<R: Read> Reader<R> {
    /// Create a new image reader without a preset format.
    ///
    /// It is possible to guess the format based on the content of the read object with
    /// [`guess_format`], or to set the format directly with [`set_format`].
    ///
    /// [`guess_format`]: #method.guess_format
    /// [`set_format`]: #method.set_format
    pub fn new(reader: R) -> Self {
        Reader {
            inner: reader,
            format: None,
        }
    }

    /// Construct a reader with specified format.
    pub fn with_format(reader: R, format: ImageFormat) -> Self {
        Reader {
            inner: reader,
            format: Some(format),
        }
    }

    /// Get the currently determined format.
    pub fn format(&self) -> Option<ImageFormat> {
        self.format
    }

    /// Supply the format as which to interpret the read image.
    pub fn set_format(&mut self, format: ImageFormat) {
        self.format = Some(format);
    }

    /// Remove the current information on the image format.
    ///
    /// Note that many operations require format information to be present and will return e.g. an
    /// `ImageError::UnsupportedError` when the image format has not been set.
    pub fn clear_format(&mut self) {
        self.format = None;
    }

    /// Unwrap the reader.
    pub fn into_inner(self) -> R {
        self.inner
    }
}
+
+impl Reader<BufReader<File>> {
+ /// Open a file to read, format will be guessed from path.
+ ///
+ /// This will not attempt any io operation on the opened file.
+ ///
+ /// If you want to inspect the content for a better guess on the format, which does not depend
+ /// on file extensions, follow this call with a call to [`guess_format`].
+ ///
+ /// [`guess_format`]: #method.guess_format
+ pub fn open<P>(path: P) -> io::Result<Self> where P: AsRef<Path> {
+ Self::open_impl(path.as_ref())
+ }
+
+ fn open_impl(path: &Path) -> io::Result<Self> {
+ let file = File::open(path)?;
+ Ok(Reader {
+ inner: BufReader::new(file),
+ format: ImageFormat::from_path(path).ok(),
+ })
+ }
+}
+
impl<R: BufRead + Seek> Reader<R> {
    /// Make a format guess based on the content, replacing it on success.
    ///
    /// Returns `Ok` with the guess if no io error occurs. Additionally, replaces the current
    /// format if the guess was successful. If the guess was unable to determine a format then
    /// the current format of the reader is unchanged.
    ///
    /// Returns an error if the underlying reader fails. The format is unchanged. The error is a
    /// `std::io::Error` and not `ImageError` since the only error case is an error when the
    /// underlying reader seeks.
    ///
    /// When an error occurs, the reader may not have been properly reset and it is potentially
    /// hazardous to continue with more io.
    ///
    /// ## Usage
    ///
    /// This supplements the path based type deduction from [`open`] with content based deduction.
    /// This is more common in Linux and UNIX operating systems and also helpful if the path can
    /// not be directly controlled.
    ///
    /// ```no_run
    /// # use image::ImageError;
    /// # use image::io::Reader;
    /// # fn main() -> Result<(), ImageError> {
    /// let image = Reader::open("image.unknown")?
    ///     .with_guessed_format()?
    ///     .decode()?;
    /// # Ok(()) }
    /// ```
    pub fn with_guessed_format(mut self) -> io::Result<Self> {
        let format = self.guess_format()?;
        // Replace format if found, keep current state if not.
        self.format = format.or(self.format);
        Ok(self)
    }

    /// Peek at up to 16 leading bytes and match them against the known magic
    /// numbers, restoring the stream position afterwards.
    fn guess_format(&mut self) -> io::Result<Option<ImageFormat>> {
        let mut start = [0; 16];

        // Save current offset, read start, restore offset.
        let cur = self.inner.seek(SeekFrom::Current(0))?;
        let len = io::copy(
            // Accept shorter files but read at most 16 bytes.
            &mut self.inner.by_ref().take(16),
            &mut Cursor::new(&mut start[..]))?;
        self.inner.seek(SeekFrom::Start(cur))?;

        Ok(free_functions::guess_format_impl(&start[..len as usize]))
    }

    /// Read the image dimensions.
    ///
    /// Uses the current format to construct the correct reader for the format.
    ///
    /// If no format was determined, returns an `ImageError::UnsupportedError`.
    pub fn into_dimensions(mut self) -> ImageResult<(u32, u32)> {
        let format = self.require_format()?;
        free_functions::image_dimensions_with_format_impl(self.inner, format)
    }

    /// Read the image (replaces `load`).
    ///
    /// Uses the current format to construct the correct reader for the format.
    ///
    /// If no format was determined, returns an `ImageError::UnsupportedError`.
    pub fn decode(mut self) -> ImageResult<DynamicImage> {
        let format = self.require_format()?;
        free_functions::load(self.inner, format)
    }

    /// Return the configured format, or error when none was set or guessed.
    fn require_format(&mut self) -> ImageResult<ImageFormat> {
        self.format.ok_or_else(||
            ImageError::UnsupportedError("Unable to determine image format".into()))
    }
}
diff --git a/third_party/rust/image/src/jpeg/decoder.rs b/third_party/rust/image/src/jpeg/decoder.rs
new file mode 100644
index 0000000000..0403a3fbbd
--- /dev/null
+++ b/third_party/rust/image/src/jpeg/decoder.rs
@@ -0,0 +1,136 @@
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::color::ColorType;
+use crate::image::ImageDecoder;
+use crate::error::{ImageError, ImageResult};
+
/// JPEG decoder
pub struct JpegDecoder<R> {
    /// The underlying jpeg-decoder instance.
    decoder: jpeg::Decoder<R>,
    /// Image info captured after the header read, with CMYK rewritten to RGB
    /// (see `JpegDecoder::new`).
    metadata: jpeg::ImageInfo,
}
+
impl<R: Read> JpegDecoder<R> {
    /// Create a new decoder that decodes from the stream ```r```
    pub fn new(r: R) -> ImageResult<JpegDecoder<R>> {
        let mut decoder = jpeg::Decoder::new(r);

        // Parse the header up front so that `info()` is available.
        decoder.read_info().map_err(ImageError::from_jpeg)?;
        let mut metadata = decoder.info().unwrap();

        // We convert CMYK data to RGB before returning it to the user.
        if metadata.pixel_format == jpeg::PixelFormat::CMYK32 {
            metadata.pixel_format = jpeg::PixelFormat::RGB24;
        }

        Ok(JpegDecoder {
            decoder,
            metadata,
        })
    }
}
+
/// Wrapper struct around a `Cursor<Vec<u8>>`
pub struct JpegReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
impl<R> Read for JpegReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // Fast path: if nothing has been read yet and the caller's buffer is
        // empty, hand over the decoded vector wholesale via swap instead of
        // copying it. Subsequent reads see the (now empty) swapped-in vector.
        if self.0.position() == 0 && buf.is_empty() {
            mem::swap(buf, self.0.get_mut());
            Ok(buf.len())
        } else {
            self.0.read_to_end(buf)
        }
    }
}
+
impl<'a, R: 'a + Read> ImageDecoder<'a> for JpegDecoder<R> {
    type Reader = JpegReader<R>;

    fn dimensions(&self) -> (u32, u32) {
        (u32::from(self.metadata.width), u32::from(self.metadata.height))
    }

    fn color_type(&self) -> ColorType {
        ColorType::from_jpeg(self.metadata.pixel_format)
    }

    fn into_reader(mut self) -> ImageResult<Self::Reader> {
        let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?;
        // Convert CMYK output to RGB here, matching the metadata rewrite
        // performed in `JpegDecoder::new`.
        data = match self.decoder.info().unwrap().pixel_format {
            jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data),
            _ => data,
        };

        Ok(JpegReader(Cursor::new(data), PhantomData))
    }

    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        // The caller must size `buf` exactly; `total_bytes` is presumably
        // derived from dimensions and color type (trait-provided) — confirm.
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));

        let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?;
        // Same CMYK-to-RGB conversion as in `into_reader`.
        data = match self.decoder.info().unwrap().pixel_format {
            jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data),
            _ => data,
        };

        buf.copy_from_slice(&data);
        Ok(())
    }
}
+
/// Convert a buffer of CMYK samples (4 bytes per pixel) into RGB
/// (3 bytes per pixel).
///
/// Trailing bytes that do not form a complete 4-byte pixel are ignored;
/// the previous `chunks(4)` version panicked (`pixel[3]` out of bounds)
/// on such input.
fn cmyk_to_rgb(input: &[u8]) -> Vec<u8> {
    // Exactly 3 output bytes per complete 4-byte input pixel.
    let mut output = Vec::with_capacity((input.len() / 4) * 3);

    // `chunks_exact` guarantees 4-byte chunks, so the indexing below cannot
    // go out of bounds, and it lets the optimizer drop the bounds checks.
    for pixel in input.chunks_exact(4) {
        let c = f32::from(pixel[0]) / 255.0;
        let m = f32::from(pixel[1]) / 255.0;
        let y = f32::from(pixel[2]) / 255.0;
        let k = f32::from(pixel[3]) / 255.0;

        // CMYK -> CMY: fold the key (black) component into each channel.
        let c = c * (1.0 - k) + k;
        let m = m * (1.0 - k) + k;
        let y = y * (1.0 - k) + k;

        // CMY -> RGB: invert subtractive ink coverage into additive light.
        let r = (1.0 - c) * 255.0;
        let g = (1.0 - m) * 255.0;
        let b = (1.0 - y) * 255.0;

        output.push(r as u8);
        output.push(g as u8);
        output.push(b as u8);
    }

    output
}
+
impl ColorType {
    /// Map a jpeg-decoder pixel format onto this crate's `ColorType`.
    fn from_jpeg(pixel_format: jpeg::PixelFormat) -> ColorType {
        use jpeg::PixelFormat::*;
        match pixel_format {
            L8 => ColorType::L8,
            RGB24 => ColorType::Rgb8,
            // `JpegDecoder::new` rewrites CMYK32 metadata to RGB24, so this
            // variant should never reach here.
            CMYK32 => panic!(),
        }
    }
}
+
impl ImageError {
    /// Translate a jpeg-decoder error into this crate's error type.
    fn from_jpeg(err: jpeg::Error) -> ImageError {
        use jpeg::Error::*;
        match err {
            Format(desc) => ImageError::FormatError(desc),
            Unsupported(desc) => ImageError::UnsupportedError(format!("{:?}", desc)),
            Io(err) => ImageError::IoError(err),
            // Internal decoder errors carry no structure; keep the message.
            Internal(err) => ImageError::FormatError(err.to_string()),
        }
    }
}
diff --git a/third_party/rust/image/src/jpeg/encoder.rs b/third_party/rust/image/src/jpeg/encoder.rs
new file mode 100644
index 0000000000..6b7ce5c270
--- /dev/null
+++ b/third_party/rust/image/src/jpeg/encoder.rs
@@ -0,0 +1,917 @@
+#![allow(clippy::too_many_arguments)]
+
+use byteorder::{BigEndian, WriteBytesExt};
+use crate::error::{ImageError, ImageResult};
+use crate::math::utils::clamp;
+use num_iter::range_step;
+use std::io::{self, Write};
+
+use crate::color;
+use crate::image::ImageEncoder;
+
+use super::entropy::build_huff_lut;
+use super::transform;
+
+// Markers
+// Baseline DCT
+static SOF0: u8 = 0xC0;
+// Huffman Tables
+static DHT: u8 = 0xC4;
+// Start of Image (standalone)
+static SOI: u8 = 0xD8;
+// End of image (standalone)
+static EOI: u8 = 0xD9;
+// Start of Scan
+static SOS: u8 = 0xDA;
+// Quantization Tables
+static DQT: u8 = 0xDB;
+// Application segments start and end
+static APP0: u8 = 0xE0;
+
+// section K.1
+// table K.1
+#[rustfmt::skip]
+static STD_LUMA_QTABLE: [u8; 64] = [
+ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99,
+];
+
+// table K.2
+#[rustfmt::skip]
+static STD_CHROMA_QTABLE: [u8; 64] = [
+ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+];
+
+// section K.3
+// Code lengths and values for table K.3
+static STD_LUMA_DC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+];
+
+static STD_LUMA_DC_VALUES: [u8; 12] = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+];
+
+// Code lengths and values for table K.4
+static STD_CHROMA_DC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+];
+
+static STD_CHROMA_DC_VALUES: [u8; 12] = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+];
+
+// Code lengths and values for table k.5
+static STD_LUMA_AC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D,
+];
+
+static STD_LUMA_AC_VALUES: [u8; 162] = [
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
+ 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA,
+];
+
+// Code lengths and values for table k.6
+static STD_CHROMA_AC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+];
+static STD_CHROMA_AC_VALUES: [u8; 162] = [
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0,
+ 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26,
+ 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3,
+ 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA,
+ 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA,
+];
+
+static DCCLASS: u8 = 0;
+static ACCLASS: u8 = 1;
+
+static LUMADESTINATION: u8 = 0;
+static CHROMADESTINATION: u8 = 1;
+
+static LUMAID: u8 = 1;
+static CHROMABLUEID: u8 = 2;
+static CHROMAREDID: u8 = 3;
+
+/// The permutation of dct coefficients.
+#[rustfmt::skip]
+static UNZIGZAG: [u8; 64] = [
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63,
+];
+
/// A representation of a JPEG component
///
/// Describes one color component (Y, Cb or Cr) as written into the
/// SOF and SOS headers: its id, sampling factors and table selectors.
#[derive(Copy, Clone)]
struct Component {
    /// The Component's identifier
    id: u8,

    /// Horizontal sampling factor
    h: u8,

    /// Vertical sampling factor
    v: u8,

    /// The quantization table selector
    tq: u8,

    /// Index to the Huffman DC Table
    dc_table: u8,

    /// Index to the AC Huffman Table
    ac_table: u8,

    /// The dc prediction of the component
    /// (currently unused — the encoder threads DC predictors through local
    /// variables in `encode_gray`/`encode_rgb` instead, hence the underscore)
    _dc_pred: i32,
}
+
/// Bit-level writer for the entropy-coded portion of the JPEG stream.
pub(crate) struct BitWriter<'a, W: 'a> {
    w: &'a mut W,
    // Pending bits, left-aligned: the next bit to be emitted is bit 31.
    accumulator: u32,
    // Number of valid bits currently held in `accumulator`.
    nbits: u8,
}
+
impl<'a, W: Write + 'a> BitWriter<'a, W> {
    fn new(w: &'a mut W) -> Self {
        BitWriter {
            w,
            accumulator: 0,
            nbits: 0,
        }
    }

    /// Appends the `size` low bits of `bits` to the stream, MSB first.
    ///
    /// Bits accumulate at the top of a 32-bit accumulator and are flushed a
    /// byte at a time. An emitted 0xFF byte is followed by a 0x00 stuffing
    /// byte so entropy-coded data can never be mistaken for a marker.
    fn write_bits(&mut self, bits: u16, size: u8) -> io::Result<()> {
        if size == 0 {
            return Ok(());
        }

        // Place the new bits directly below the `nbits` already pending.
        self.accumulator |= u32::from(bits) << (32 - (self.nbits + size)) as usize;
        self.nbits += size;

        while self.nbits >= 8 {
            // Take the top byte of the accumulator.
            let byte = (self.accumulator & (0xFFFF_FFFFu32 << 24)) >> 24;
            self.w.write_all(&[byte as u8])?;

            // JPEG byte stuffing.
            if byte == 0xFF {
                self.w.write_all(&[0x00])?;
            }

            self.nbits -= 8;
            self.accumulator <<= 8;
        }

        Ok(())
    }

    /// Pads the stream to a byte boundary with 1-bits (JPEG fill bits),
    /// so a marker can follow.
    fn pad_byte(&mut self) -> io::Result<()> {
        self.write_bits(0x7F, 7)
    }

    /// Writes the Huffman code for symbol `val`; `table` maps a symbol to
    /// its (code length, code) pair as produced by `build_huff_lut`.
    fn huffman_encode(&mut self, val: u8, table: &[(u8, u16)]) -> io::Result<()> {
        let (size, code) = table[val as usize];

        // Length 17 is the "unassigned symbol" sentinel from build_huff_lut.
        if size > 16 {
            panic!("bad huffman value");
        }

        self.write_bits(code, size)
    }

    /// Entropy-codes one quantized 8x8 block (JPEG Annex F, figure F.2).
    ///
    /// The DC coefficient is coded as the difference from `prevdc`; AC
    /// coefficients are run-length coded in zig-zag order. Returns this
    /// block's DC value so the caller can thread it into the next block.
    fn write_block(
        &mut self,
        block: &[i32],
        prevdc: i32,
        dctable: &[(u8, u16)],
        actable: &[(u8, u16)],
    ) -> io::Result<i32> {
        // Differential DC encoding
        let dcval = block[0];
        let diff = dcval - prevdc;
        let (size, value) = encode_coefficient(diff);

        self.huffman_encode(size, dctable)?;
        self.write_bits(value, size)?;

        // Figure F.2
        let mut zero_run = 0;
        let mut k = 0usize;

        loop {
            k += 1;

            if block[UNZIGZAG[k] as usize] == 0 {
                if k == 63 {
                    // 0x00 is the EOB (end-of-block) symbol.
                    self.huffman_encode(0x00, actable)?;
                    break;
                }

                zero_run += 1;
            } else {
                // 0xF0 is ZRL: a run of 16 zero coefficients.
                while zero_run > 15 {
                    self.huffman_encode(0xF0, actable)?;
                    zero_run -= 16;
                }

                let (size, value) = encode_coefficient(block[UNZIGZAG[k] as usize]);
                // Run length in the high nibble, magnitude category in the low.
                let symbol = (zero_run << 4) | size;

                self.huffman_encode(symbol, actable)?;
                self.write_bits(value, size)?;

                zero_run = 0;

                if k == 63 {
                    break;
                }
            }
        }

        Ok(dcval)
    }

    /// Writes a marker segment: 0xFF, the marker byte, then (if present) the
    /// payload preceded by its length. The length field counts its own two
    /// bytes, per the JPEG segment format.
    fn write_segment(&mut self, marker: u8, data: Option<&[u8]>) -> io::Result<()> {
        self.w.write_all(&[0xFF])?;
        self.w.write_all(&[marker])?;

        if let Some(b) = data {
            self.w.write_u16::<BigEndian>(b.len() as u16 + 2)?;
            self.w.write_all(b)?;
        }
        Ok(())
    }
}
+
+/// Represents a unit in which the density of an image is measured
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum PixelDensityUnit {
+ /// Represents the absence of a unit, the values indicate only a
+ /// [pixel aspect ratio](https://en.wikipedia.org/wiki/Pixel_aspect_ratio)
+ PixelAspectRatio,
+
+ /// Pixels per inch (2.54 cm)
+ Inches,
+
+ /// Pixels per centimeter
+ Centimeters,
+}
+
+/// Represents the pixel density of an image
+///
+/// For example, a 300 DPI image is represented by:
+///
+/// ```rust
+/// use image::jpeg::*;
+/// let hdpi = PixelDensity::dpi(300);
+/// assert_eq!(hdpi, PixelDensity {density: (300,300), unit: PixelDensityUnit::Inches})
+/// ```
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct PixelDensity {
+ /// A couple of values for (Xdensity, Ydensity)
+ pub density: (u16, u16),
+ /// The unit in which the density is measured
+ pub unit: PixelDensityUnit,
+}
+
impl PixelDensity {
    /// Creates the most common pixel density type:
    /// the horizontal and the vertical density are equal,
    /// and measured in pixels per inch.
    pub fn dpi(density: u16) -> Self {
        PixelDensity {
            // Same value on both axes: square pixels at `density` DPI.
            density: (density, density),
            unit: PixelDensityUnit::Inches,
        }
    }
}
+
impl Default for PixelDensity {
    /// Returns a pixel density with a pixel aspect ratio of 1
    /// (no physical unit is recorded in that case).
    fn default() -> Self {
        PixelDensity {
            density: (1, 1),
            unit: PixelDensityUnit::PixelAspectRatio,
        }
    }
}
+
/// The representation of a JPEG encoder
pub struct JPEGEncoder<'a, W: 'a> {
    writer: BitWriter<'a, W>,

    // Component descriptions written into the SOF/SOS headers.
    components: Vec<Component>,
    // Quality-scaled quantization tables, back to back:
    // luma (64 bytes) followed by chroma (64 bytes).
    tables: Vec<u8>,

    // Huffman lookup tables: symbol -> (code length, code).
    luma_dctable: Vec<(u8, u16)>,
    luma_actable: Vec<(u8, u16)>,
    chroma_dctable: Vec<(u8, u16)>,
    chroma_actable: Vec<(u8, u16)>,

    pixel_density: PixelDensity,
}
+
impl<'a, W: Write> JPEGEncoder<'a, W> {
    /// Create a new encoder that writes its output to ```w```
    pub fn new(w: &mut W) -> JPEGEncoder<W> {
        // Quality 75 is the conventional default trade-off.
        JPEGEncoder::new_with_quality(w, 75)
    }

    /// Create a new encoder that writes its output to ```w```, and has
    /// the quality parameter ```quality``` with a value in the range 1-100
    /// where 1 is the worst and 100 is the best.
    pub fn new_with_quality(w: &mut W, quality: u8) -> JPEGEncoder<W> {
        // Huffman lookup tables (symbol -> (code length, code)) built from
        // the standard tables in Annex K of the JPEG spec.
        let ld = build_huff_lut(&STD_LUMA_DC_CODE_LENGTHS, &STD_LUMA_DC_VALUES);
        let la = build_huff_lut(&STD_LUMA_AC_CODE_LENGTHS, &STD_LUMA_AC_VALUES);

        let cd = build_huff_lut(&STD_CHROMA_DC_CODE_LENGTHS, &STD_CHROMA_DC_VALUES);
        let ca = build_huff_lut(&STD_CHROMA_AC_CODE_LENGTHS, &STD_CHROMA_AC_VALUES);

        // Y, Cb and Cr components, all declared with 1x1 sampling factors,
        // i.e. no chroma subsampling.
        let components = vec![
            Component {
                id: LUMAID,
                h: 1,
                v: 1,
                tq: LUMADESTINATION,
                dc_table: LUMADESTINATION,
                ac_table: LUMADESTINATION,
                _dc_pred: 0,
            },
            Component {
                id: CHROMABLUEID,
                h: 1,
                v: 1,
                tq: CHROMADESTINATION,
                dc_table: CHROMADESTINATION,
                ac_table: CHROMADESTINATION,
                _dc_pred: 0,
            },
            Component {
                id: CHROMAREDID,
                h: 1,
                v: 1,
                tq: CHROMADESTINATION,
                dc_table: CHROMADESTINATION,
                ac_table: CHROMADESTINATION,
                _dc_pred: 0,
            },
        ];

        // Derive our quantization table scaling value using the libjpeg algorithm
        let scale = u32::from(clamp(quality, 1, 100));
        let scale = if scale < 50 {
            5000 / scale
        } else {
            200 - scale * 2
        };

        // Scale the standard tables; entries are clamped to 1..=255 so a very
        // high quality setting cannot produce a zero quantizer.
        let mut tables = Vec::new();
        let scale_value = |&v: &u8| {
            let value = (u32::from(v) * scale + 50) / 100;

            clamp(value, 1, u32::from(u8::max_value())) as u8
        };
        tables.extend(STD_LUMA_QTABLE.iter().map(&scale_value));
        tables.extend(STD_CHROMA_QTABLE.iter().map(&scale_value));

        JPEGEncoder {
            writer: BitWriter::new(w),

            components,
            tables,

            luma_dctable: ld,
            luma_actable: la,
            chroma_dctable: cd,
            chroma_actable: ca,

            pixel_density: PixelDensity::default(),
        }
    }

    /// Set the pixel density of the images the encoder will encode.
    /// If this method is not called, then a default pixel aspect ratio of 1x1 will be applied,
    /// and no DPI information will be stored in the image.
    pub fn set_pixel_density(&mut self, pixel_density: PixelDensity) {
        self.pixel_density = pixel_density;
    }

    /// Encodes the image ```image```
    /// that has dimensions ```width``` and ```height```
    /// and ```ColorType``` ```c```
    ///
    /// The image is encoded without chroma subsampling: all components are
    /// declared with 1x1 sampling factors in `new_with_quality`. (An earlier
    /// version of this comment claimed 4:2:2, which did not match the
    /// component setup.)
    pub fn encode(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        c: color::ColorType,
    ) -> ImageResult<()> {
        // 1-2 channels are encoded as grayscale, 3-4 as YCbCr; any alpha
        // channel is dropped (see encode_gray/encode_rgb).
        let n = c.channel_count();
        let num_components = if n == 1 || n == 2 { 1 } else { 3 };

        self.writer.write_segment(SOI, None)?;

        let mut buf = Vec::new();

        build_jfif_header(&mut buf, self.pixel_density);
        self.writer.write_segment(APP0, Some(&buf))?;

        // NOTE(review): dimensions are truncated to 16 bits here; an image
        // larger than 65535 pixels on a side would be written with wrong
        // dimensions — confirm callers reject such sizes.
        build_frame_header(
            &mut buf,
            8,
            width as u16,
            height as u16,
            &self.components[..num_components],
        );
        self.writer.write_segment(SOF0, Some(&buf))?;

        // Grayscale output only needs the luma quantization table.
        assert_eq!(self.tables.len() / 64, 2);
        let numtables = if num_components == 1 { 1 } else { 2 };

        for (i, table) in self.tables.chunks(64).enumerate().take(numtables) {
            build_quantization_segment(&mut buf, 8, i as u8, table);
            self.writer.write_segment(DQT, Some(&buf))?;
        }

        build_huffman_segment(
            &mut buf,
            DCCLASS,
            LUMADESTINATION,
            &STD_LUMA_DC_CODE_LENGTHS,
            &STD_LUMA_DC_VALUES,
        );
        self.writer.write_segment(DHT, Some(&buf))?;

        build_huffman_segment(
            &mut buf,
            ACCLASS,
            LUMADESTINATION,
            &STD_LUMA_AC_CODE_LENGTHS,
            &STD_LUMA_AC_VALUES,
        );
        self.writer.write_segment(DHT, Some(&buf))?;

        if num_components == 3 {
            build_huffman_segment(
                &mut buf,
                DCCLASS,
                CHROMADESTINATION,
                &STD_CHROMA_DC_CODE_LENGTHS,
                &STD_CHROMA_DC_VALUES,
            );
            self.writer.write_segment(DHT, Some(&buf))?;

            build_huffman_segment(
                &mut buf,
                ACCLASS,
                CHROMADESTINATION,
                &STD_CHROMA_AC_CODE_LENGTHS,
                &STD_CHROMA_AC_VALUES,
            );
            self.writer.write_segment(DHT, Some(&buf))?;
        }

        build_scan_header(&mut buf, &self.components[..num_components]);
        self.writer.write_segment(SOS, Some(&buf))?;

        match c {
            color::ColorType::Rgb8 => {
                self.encode_rgb(image, width as usize, height as usize, 3)?
            }
            color::ColorType::Rgba8 => {
                self.encode_rgb(image, width as usize, height as usize, 4)?
            }
            color::ColorType::L8 => {
                self.encode_gray(image, width as usize, height as usize, 1)?
            }
            color::ColorType::La8 => {
                self.encode_gray(image, width as usize, height as usize, 2)?
            }
            _ => {
                return Err(ImageError::UnsupportedColor(c.into()))
            }
        };

        // Flush the remaining bits (fill bits are 1s) and close the stream.
        self.writer.pad_byte()?;
        self.writer.write_segment(EOI, None)?;
        Ok(())
    }

    /// Entropy-codes a grayscale image as a single luma component.
    ///
    /// `bpp` is the bytes per source pixel; only the first byte of each pixel
    /// is copied (see `copy_blocks_gray`), so La8 input drops its alpha byte.
    fn encode_gray(
        &mut self,
        image: &[u8],
        width: usize,
        height: usize,
        bpp: usize,
    ) -> io::Result<()> {
        let mut yblock = [0u8; 64];
        let mut y_dcprev = 0;
        let mut dct_yblock = [0i32; 64];

        // Process the image one 8x8 block at a time.
        for y in range_step(0, height, 8) {
            for x in range_step(0, width, 8) {
                // RGB -> YCbCr
                copy_blocks_gray(image, x, y, width, bpp, &mut yblock);

                // Level shift and fdct
                // Coeffs are scaled by 8
                transform::fdct(&yblock, &mut dct_yblock);

                // Quantization: undo the x8 scale, divide by the luma table.
                for (i, dct) in dct_yblock.iter_mut().enumerate().take(64) {
                    *dct = ((*dct / 8) as f32 / f32::from(self.tables[i])).round() as i32;
                }

                let la = &*self.luma_actable;
                let ld = &*self.luma_dctable;

                y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
            }
        }

        Ok(())
    }

    /// Entropy-codes an RGB(A) image as Y, Cb and Cr components.
    ///
    /// `bpp` is the bytes per source pixel (3 for RGB, 4 for RGBA — the
    /// fourth byte is ignored by `copy_blocks_ycbcr`).
    fn encode_rgb(
        &mut self,
        image: &[u8],
        width: usize,
        height: usize,
        bpp: usize,
    ) -> io::Result<()> {
        // DC predictors are threaded across blocks, one per component.
        let mut y_dcprev = 0;
        let mut cb_dcprev = 0;
        let mut cr_dcprev = 0;

        let mut dct_yblock = [0i32; 64];
        let mut dct_cb_block = [0i32; 64];
        let mut dct_cr_block = [0i32; 64];

        let mut yblock = [0u8; 64];
        let mut cb_block = [0u8; 64];
        let mut cr_block = [0u8; 64];

        for y in range_step(0, height, 8) {
            for x in range_step(0, width, 8) {
                // RGB -> YCbCr
                copy_blocks_ycbcr(
                    image,
                    x,
                    y,
                    width,
                    bpp,
                    &mut yblock,
                    &mut cb_block,
                    &mut cr_block,
                );

                // Level shift and fdct
                // Coeffs are scaled by 8
                transform::fdct(&yblock, &mut dct_yblock);
                transform::fdct(&cb_block, &mut dct_cb_block);
                transform::fdct(&cr_block, &mut dct_cr_block);

                // Quantization: luma uses tables[..64], chroma tables[64..].
                for i in 0usize..64 {
                    dct_yblock[i] =
                        ((dct_yblock[i] / 8) as f32 / f32::from(self.tables[i])).round() as i32;
                    dct_cb_block[i] = ((dct_cb_block[i] / 8) as f32
                        / f32::from(self.tables[64..][i]))
                    .round() as i32;
                    dct_cr_block[i] = ((dct_cr_block[i] / 8) as f32
                        / f32::from(self.tables[64..][i]))
                    .round() as i32;
                }

                let la = &*self.luma_actable;
                let ld = &*self.luma_dctable;
                let cd = &*self.chroma_dctable;
                let ca = &*self.chroma_actable;

                y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
                cb_dcprev = self.writer.write_block(&dct_cb_block, cb_dcprev, cd, ca)?;
                cr_dcprev = self.writer.write_block(&dct_cr_block, cr_dcprev, cd, ca)?;
            }
        }

        Ok(())
    }
}
+
impl<'a, W: Write> ImageEncoder for JPEGEncoder<'a, W> {
    /// Trait entry point; forwards directly to [`JPEGEncoder::encode`].
    fn write_image(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: color::ColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }
}
+
+fn build_jfif_header(m: &mut Vec<u8>, density: PixelDensity) {
+ m.clear();
+
+ let _ = write!(m, "JFIF");
+ let _ = m.write_all(&[0]);
+ let _ = m.write_all(&[0x01]);
+ let _ = m.write_all(&[0x02]);
+ let _ = m.write_all(&[match density.unit {
+ PixelDensityUnit::PixelAspectRatio => 0x00,
+ PixelDensityUnit::Inches => 0x01,
+ PixelDensityUnit::Centimeters => 0x02,
+ }]);
+ let _ = m.write_u16::<BigEndian>(density.density.0);
+ let _ = m.write_u16::<BigEndian>(density.density.1);
+ let _ = m.write_all(&[0]);
+ let _ = m.write_all(&[0]);
+}
+
+fn build_frame_header(
+ m: &mut Vec<u8>,
+ precision: u8,
+ width: u16,
+ height: u16,
+ components: &[Component],
+) {
+ m.clear();
+
+ let _ = m.write_all(&[precision]);
+ let _ = m.write_u16::<BigEndian>(height);
+ let _ = m.write_u16::<BigEndian>(width);
+ let _ = m.write_all(&[components.len() as u8]);
+
+ for &comp in components.iter() {
+ let _ = m.write_all(&[comp.id]);
+ let hv = (comp.h << 4) | comp.v;
+ let _ = m.write_all(&[hv]);
+ let _ = m.write_all(&[comp.tq]);
+ }
+}
+
+fn build_scan_header(m: &mut Vec<u8>, components: &[Component]) {
+ m.clear();
+
+ let _ = m.write_all(&[components.len() as u8]);
+
+ for &comp in components.iter() {
+ let _ = m.write_all(&[comp.id]);
+ let tables = (comp.dc_table << 4) | comp.ac_table;
+ let _ = m.write_all(&[tables]);
+ }
+
+ // spectral start and end, approx. high and low
+ let _ = m.write_all(&[0]);
+ let _ = m.write_all(&[63]);
+ let _ = m.write_all(&[0]);
+}
+
/// Builds a DHT segment payload into `m`, replacing its contents.
///
/// `numcodes` must hold exactly 16 entries (codes per bit length 1..=16) and
/// they must sum to `values.len()`, one symbol per code.
fn build_huffman_segment(
    m: &mut Vec<u8>,
    class: u8,
    destination: u8,
    numcodes: &[u8],
    values: &[u8],
) {
    m.clear();

    // Tc/Th byte: table class (DC = 0, AC = 1) and destination id.
    m.push((class << 4) | destination);

    assert_eq!(numcodes.len(), 16);
    m.extend_from_slice(numcodes);

    let total: usize = numcodes.iter().map(|&n| usize::from(n)).sum();
    assert_eq!(total, values.len());

    m.extend_from_slice(values);
}
+
+fn build_quantization_segment(m: &mut Vec<u8>, precision: u8, identifier: u8, qtable: &[u8]) {
+ assert_eq!(qtable.len() % 64, 0);
+ m.clear();
+
+ let p = if precision == 8 { 0 } else { 1 };
+
+ let pqtq = (p << 4) | identifier;
+ let _ = m.write_all(&[pqtq]);
+
+ for i in 0usize..64 {
+ let _ = m.write_all(&[qtable[UNZIGZAG[i] as usize]]);
+ }
+}
+
/// Splits a DCT coefficient into its (category, magnitude bits) pair as used
/// by JPEG entropy coding (section F.1.2.1).
///
/// The category is the number of bits needed to represent |coefficient|;
/// negative values are stored as `value - 1` truncated to that many bits.
fn encode_coefficient(coefficient: i32) -> (u8, u16) {
    let magnitude = coefficient.abs() as u16;
    let num_bits = (16 - magnitude.leading_zeros()) as u8;

    let mask = (1u16 << num_bits) - 1;

    let val = if coefficient < 0 {
        (coefficient - 1) as u16 & mask
    } else {
        coefficient as u16 & mask
    };

    (num_bits, val)
}
+
/// Converts one full-range RGB sample to YCbCr; chroma is centered on 128.
///
/// The float expressions are kept exactly as in the reference so results are
/// bit-identical; the final `as u8` casts truncate toward zero and saturate
/// at the type bounds.
fn rgb_to_ycbcr(r: u8, g: u8, b: u8) -> (u8, u8, u8) {
    let (rf, gf, bf) = (f32::from(r), f32::from(g), f32::from(b));

    let luma = 0.299f32 * rf + 0.587f32 * gf + 0.114f32 * bf;
    let chroma_b = -0.1687f32 * rf - 0.3313f32 * gf + 0.5f32 * bf + 128f32;
    let chroma_r = 0.5f32 * rf - 0.4187f32 * gf - 0.0813f32 * bf + 128f32;

    (luma as u8, chroma_b as u8, chroma_r as u8)
}
+
/// Reads `s[index]`, clamping out-of-range indices to the final byte.
///
/// Used when copying 8x8 blocks that overlap the image edge; note the clamp
/// replicates the last byte of the whole buffer. Panics on an empty slice,
/// matching the original behavior.
fn value_at(s: &[u8], index: usize) -> u8 {
    match s.get(index) {
        Some(&v) => v,
        None => s[s.len() - 1],
    }
}
+
/// Copies the 8x8 pixel block at (`x0`, `y0`) out of an interleaved RGB(A)
/// image and converts it into separate Y, Cb and Cr 8x8 sample blocks.
///
/// `bpp` is bytes per pixel; only the first three channels are read, so an
/// alpha channel is ignored. Reads past the end of `source` (blocks that
/// overlap the right or bottom edge) are clamped by `value_at` to the last
/// byte of the whole buffer — an approximation, not a per-row clamp.
fn copy_blocks_ycbcr(
    source: &[u8],
    x0: usize,
    y0: usize,
    width: usize,
    bpp: usize,
    yb: &mut [u8; 64],
    cbb: &mut [u8; 64],
    crb: &mut [u8; 64],
) {
    for y in 0usize..8 {
        // Byte offset of row y0 + y.
        let ystride = (y0 + y) * bpp * width;

        for x in 0usize..8 {
            // Byte offset of column x0 + x within the row.
            let xstride = x0 * bpp + x * bpp;

            let r = value_at(source, ystride + xstride);
            let g = value_at(source, ystride + xstride + 1);
            let b = value_at(source, ystride + xstride + 2);

            let (yc, cb, cr) = rgb_to_ycbcr(r, g, b);

            yb[y * 8 + x] = yc;
            cbb[y * 8 + x] = cb;
            crb[y * 8 + x] = cr;
        }
    }
}
+
/// Copies the 8x8 block of gray samples at (`x0`, `y0`) into `gb`.
///
/// `bpp` is bytes per pixel; only the first byte of each pixel is read, so
/// an La8 alpha byte is skipped. Out-of-range reads at image edges are
/// clamped by `value_at` to the buffer's final byte.
fn copy_blocks_gray(
    source: &[u8],
    x0: usize,
    y0: usize,
    width: usize,
    bpp: usize,
    gb: &mut [u8; 64],
) {
    for y in 0usize..8 {
        // Byte offset of row y0 + y.
        let ystride = (y0 + y) * bpp * width;

        for x in 0usize..8 {
            let xstride = x0 * bpp + x * bpp;
            gb[y * 8 + x] = value_at(source, ystride + xstride);
        }
    }
}
+
#[cfg(test)]
mod tests {
    use super::super::JpegDecoder;
    use super::{JPEGEncoder, PixelDensity, build_jfif_header};
    use crate::color::ColorType;
    use crate::image::ImageDecoder;
    use std::io::Cursor;

    // Helper: fully decode an in-memory JPEG back into raw pixel bytes,
    // panicking on any failure.
    fn decode(encoded: &[u8]) -> Vec<u8> {
        let decoder = JpegDecoder::new(Cursor::new(encoded))
            .expect("Could not decode image");

        let mut decoded = vec![0; decoder.total_bytes() as usize];
        decoder.read_image(&mut decoded).expect("Could not decode image");
        decoded
    }

    #[test]
    fn roundtrip_sanity_check() {
        // create a 1x1 8-bit image buffer containing a single red pixel
        let img = [255u8, 0, 0];

        // encode it into a memory buffer
        let mut encoded_img = Vec::new();
        {
            let mut encoder = JPEGEncoder::new_with_quality(&mut encoded_img, 100);
            encoder
                .encode(&img, 1, 1, ColorType::Rgb8)
                .expect("Could not encode image");
        }

        // decode it from the memory buffer
        {
            let decoded = decode(&encoded_img);
            // note that, even with the encode quality set to 100, we do not get the same image
            // back. Therefore, we're going to assert that it's at least red-ish:
            assert_eq!(3, decoded.len());
            assert!(decoded[0] > 0x80);
            assert!(decoded[1] < 0x80);
            assert!(decoded[2] < 0x80);
        }
    }

    #[test]
    fn grayscale_roundtrip_sanity_check() {
        // create a 2x2 8-bit image buffer containing a white diagonal
        let img = [255u8, 0, 0, 255];

        // encode it into a memory buffer
        let mut encoded_img = Vec::new();
        {
            let mut encoder = JPEGEncoder::new_with_quality(&mut encoded_img, 100);
            encoder
                .encode(&img, 2, 2, ColorType::L8)
                .expect("Could not encode image");
        }

        // decode it from the memory buffer
        {
            let decoded = decode(&encoded_img);
            // note that, even with the encode quality set to 100, we do not get the same image
            // back. Therefore, we're going to assert that the diagonal is at least white-ish:
            assert_eq!(4, decoded.len());
            assert!(decoded[0] > 0x80);
            assert!(decoded[1] < 0x80);
            assert!(decoded[2] < 0x80);
            assert!(decoded[3] > 0x80);
        }
    }

    #[test]
    fn jfif_header_density_check() {
        let mut buffer = Vec::new();
        build_jfif_header(&mut buffer, PixelDensity::dpi(300));
        assert_eq!(buffer, vec![
            b'J', b'F', b'I', b'F',
            0, 1, 2, // JFIF version 1.02 (major 1, minor 2)
            1, // density is in dpi
            300u16.to_be_bytes()[0], 300u16.to_be_bytes()[1],
            300u16.to_be_bytes()[0], 300u16.to_be_bytes()[1],
            0, 0, // No thumbnail
        ]
        );
    }
}
diff --git a/third_party/rust/image/src/jpeg/entropy.rs b/third_party/rust/image/src/jpeg/entropy.rs
new file mode 100644
index 0000000000..417ebd65a7
--- /dev/null
+++ b/third_party/rust/image/src/jpeg/entropy.rs
@@ -0,0 +1,61 @@
+/// Given an array containing the number of codes of each code length,
+/// this function generates the huffman codes lengths and their respective
+/// code lengths as specified by the JPEG spec.
/// Given an array containing the number of codes of each code length,
/// this function generates the huffman codes lengths and their respective
/// code lengths as specified by the JPEG spec.
///
/// Returns `(huffsize, huffcode)`: for index `k`, the bit length and code of
/// the `k`-th symbol. A size of 0 terminates the meaningful entries.
fn derive_codes_and_sizes(bits: &[u8]) -> (Vec<u8>, Vec<u16>) {
    let mut huffsize = vec![0u8; 256];
    let mut huffcode = vec![0u16; 256];

    let mut k = 0;
    let mut j;

    // Annex C.2
    // Figure C.1
    // Generate table of individual code lengths
    for i in 0u8..16 {
        j = 0;

        // `bits[i]` codes have bit length `i + 1`.
        while j < bits[usize::from(i)] {
            huffsize[k] = i + 1;
            k += 1;
            j += 1;
        }
    }

    huffsize[k] = 0;

    // Annex C.2
    // Figure C.2
    // Generate table of huffman codes
    k = 0;
    let mut code = 0u16;
    let mut size = huffsize[0];

    while huffsize[k] != 0 {
        huffcode[k] = code;
        code += 1;
        k += 1;

        if huffsize[k] == size {
            continue;
        }

        // When the code length grows by `diff`, Annex C appends that many
        // zero bits to the running code (left shift).
        // FIXME there is something wrong with this code
        // NOTE(review): `diff` wraps when the terminating 0 entry is reached
        // (huffsize[k] == 0 < size); `code` is then reset to 0, but the loop
        // exits on the next check, so this appears harmless for the standard
        // Annex K tables — confirm against Annex C before reusing with
        // arbitrary custom tables.
        let diff = huffsize[k].wrapping_sub(size);
        code = if diff < 16 { code << diff as usize } else { 0 };

        size = size.wrapping_add(diff);
    }

    (huffsize, huffcode)
}
+
+pub(crate) fn build_huff_lut(bits: &[u8], huffval: &[u8]) -> Vec<(u8, u16)> {
+ let mut lut = vec![(17u8, 0u16); 256];
+ let (huffsize, huffcode) = derive_codes_and_sizes(bits);
+
+ for (i, &v) in huffval.iter().enumerate() {
+ lut[v as usize] = (huffsize[i], huffcode[i]);
+ }
+
+ lut
+}
diff --git a/third_party/rust/image/src/jpeg/mod.rs b/third_party/rust/image/src/jpeg/mod.rs
new file mode 100644
index 0000000000..4d9bf7a8d6
--- /dev/null
+++ b/third_party/rust/image/src/jpeg/mod.rs
@@ -0,0 +1,16 @@
+//! Decoding and Encoding of JPEG Images
+//!
+//! JPEG (Joint Photographic Experts Group) is an image format that supports lossy compression.
+//! This module implements the Baseline JPEG standard.
+//!
+//! # Related Links
+//! * <http://www.w3.org/Graphics/JPEG/itu-t81.pdf> - The JPEG specification
+//!
+
+pub use self::decoder::JpegDecoder;
+pub use self::encoder::{JPEGEncoder, PixelDensity, PixelDensityUnit};
+
+mod decoder;
+mod encoder;
+mod entropy;
+mod transform;
diff --git a/third_party/rust/image/src/jpeg/transform.rs b/third_party/rust/image/src/jpeg/transform.rs
new file mode 100644
index 0000000000..1ed687016a
--- /dev/null
+++ b/third_party/rust/image/src/jpeg/transform.rs
@@ -0,0 +1,196 @@
+/*
+fdct is a Rust translation of jfdctint.c from the
+Independent JPEG Group's libjpeg version 9a
+obtained from http://www.ijg.org/files/jpegsr9a.zip
+It comes with the following conditions of distribution and use:
+
+ In plain English:
+
+ 1. We don't promise that this software works. (But if you find any bugs,
+ please let us know!)
+ 2. You can use this software for whatever you want. You don't have to pay us.
+ 3. You may not pretend that you wrote this software. If you use it in a
+ program, you must acknowledge somewhere in your documentation that
+ you've used the IJG code.
+
+ In legalese:
+
+ The authors make NO WARRANTY or representation, either express or implied,
+ with respect to this software, its quality, accuracy, merchantability, or
+ fitness for a particular purpose. This software is provided "AS IS", and you,
+ its user, assume the entire risk as to its quality and accuracy.
+
+ This software is copyright (C) 1991-2014, Thomas G. Lane, Guido Vollbeding.
+ All Rights Reserved except as specified below.
+
+ Permission is hereby granted to use, copy, modify, and distribute this
+ software (or portions thereof) for any purpose, without fee, subject to these
+ conditions:
+ (1) If any part of the source code for this software is distributed, then this
+ README file must be included, with this copyright and no-warranty notice
+ unaltered; and any additions, deletions, or changes to the original files
+ must be clearly indicated in accompanying documentation.
+ (2) If only executable code is distributed, then the accompanying
+ documentation must state that "this software is based in part on the work of
+ the Independent JPEG Group".
+ (3) Permission for use of this software is granted only if the user accepts
+ full responsibility for any undesirable consequences; the authors accept
+ NO LIABILITY for damages of any kind.
+
+ These conditions apply to any software derived from or based on the IJG code,
+ not just to the unmodified library. If you use our work, you ought to
+ acknowledge us.
+
+ Permission is NOT granted for the use of any IJG author's name or company name
+ in advertising or publicity relating to this software or products derived from
+ it. This software may be referred to only as "the Independent JPEG Group's
+ software".
+
+ We specifically permit and encourage the use of this software as the basis of
+ commercial products, provided that all warranty or liability claims are
+ assumed by the product vendor.
+*/
+
+static CONST_BITS: i32 = 13;
+static PASS1_BITS: i32 = 2;
+
+static FIX_0_298631336: i32 = 2446;
+static FIX_0_390180644: i32 = 3196;
+static FIX_0_541196100: i32 = 4433;
+static FIX_0_765366865: i32 = 6270;
+static FIX_0_899976223: i32 = 7373;
+static FIX_1_175875602: i32 = 9633;
+static FIX_1_501321110: i32 = 12_299;
+static FIX_1_847759065: i32 = 15_137;
+static FIX_1_961570560: i32 = 16_069;
+static FIX_2_053119869: i32 = 16_819;
+static FIX_2_562915447: i32 = 20_995;
+static FIX_3_072711026: i32 = 25_172;
+
/// Forward DCT of one 8x8 block, using the scaled-integer algorithm from
/// libjpeg's jfdctint.c (see the license notice above).
///
/// `samples` holds 64 unsigned 8-bit values in row-major order; `coeffs`
/// receives the 64 coefficients. Results are scaled up by an overall factor
/// of 8 relative to a true DCT; the caller divides this back out during
/// quantization.
pub(crate) fn fdct(samples: &[u8], coeffs: &mut [i32]) {
    // Pass 1: process rows.
    // Results are scaled by sqrt(8) compared to a true DCT
    // furthermore we scale the results by 2**PASS1_BITS
    for y in 0usize..8 {
        let y0 = y * 8;

        // Even part
        let t0 = i32::from(samples[y0]) + i32::from(samples[y0 + 7]);
        let t1 = i32::from(samples[y0 + 1]) + i32::from(samples[y0 + 6]);
        let t2 = i32::from(samples[y0 + 2]) + i32::from(samples[y0 + 5]);
        let t3 = i32::from(samples[y0 + 3]) + i32::from(samples[y0 + 4]);

        let t10 = t0 + t3;
        let t12 = t0 - t3;
        let t11 = t1 + t2;
        let t13 = t1 - t2;

        let t0 = i32::from(samples[y0]) - i32::from(samples[y0 + 7]);
        let t1 = i32::from(samples[y0 + 1]) - i32::from(samples[y0 + 6]);
        let t2 = i32::from(samples[y0 + 2]) - i32::from(samples[y0 + 5]);
        let t3 = i32::from(samples[y0 + 3]) - i32::from(samples[y0 + 4]);

        // Apply unsigned -> signed conversion
        // (the `- 8 * 128` level-shifts the eight samples summed into t10+t11)
        coeffs[y0] = (t10 + t11 - 8 * 128) << PASS1_BITS as usize;
        coeffs[y0 + 4] = (t10 - t11) << PASS1_BITS as usize;

        let mut z1 = (t12 + t13) * FIX_0_541196100;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;

        coeffs[y0 + 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS - PASS1_BITS) as usize;

        // Odd part
        let t12 = t0 + t2;
        let t13 = t1 + t3;

        let mut z1 = (t12 + t13) * FIX_1_175875602;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;

        let mut t12 = t12 * (-FIX_0_390180644);
        let mut t13 = t13 * (-FIX_1_961570560);
        t12 += z1;
        t13 += z1;

        let z1 = (t0 + t3) * (-FIX_0_899976223);
        let mut t0 = t0 * FIX_1_501321110;
        let mut t3 = t3 * FIX_0_298631336;
        t0 += z1 + t12;
        t3 += z1 + t13;

        let z1 = (t1 + t2) * (-FIX_2_562915447);
        let mut t1 = t1 * FIX_3_072711026;
        let mut t2 = t2 * FIX_2_053119869;
        t1 += z1 + t13;
        t2 += z1 + t12;

        coeffs[y0 + 1] = t0 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 3] = t1 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 5] = t2 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 7] = t3 >> (CONST_BITS - PASS1_BITS) as usize;
    }

    // Pass 2: process columns
    // We remove the PASS1_BITS scaling but leave the results scaled up an
    // overall factor of 8
    for x in (0usize..8).rev() {
        // Even part
        let t0 = coeffs[x] + coeffs[x + 8 * 7];
        let t1 = coeffs[x + 8] + coeffs[x + 8 * 6];
        let t2 = coeffs[x + 8 * 2] + coeffs[x + 8 * 5];
        let t3 = coeffs[x + 8 * 3] + coeffs[x + 8 * 4];

        // Add fudge factor here for final descale
        let t10 = t0 + t3 + (1 << (PASS1_BITS - 1) as usize);
        let t12 = t0 - t3;
        let t11 = t1 + t2;
        let t13 = t1 - t2;

        let t0 = coeffs[x] - coeffs[x + 8 * 7];
        let t1 = coeffs[x + 8] - coeffs[x + 8 * 6];
        let t2 = coeffs[x + 8 * 2] - coeffs[x + 8 * 5];
        let t3 = coeffs[x + 8 * 3] - coeffs[x + 8 * 4];

        coeffs[x] = (t10 + t11) >> PASS1_BITS as usize;
        coeffs[x + 8 * 4] = (t10 - t11) >> PASS1_BITS as usize;

        let mut z1 = (t12 + t13) * FIX_0_541196100;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as usize;

        coeffs[x + 8 * 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS + PASS1_BITS) as usize;

        // Odd part
        let t12 = t0 + t2;
        let t13 = t1 + t3;

        let mut z1 = (t12 + t13) * FIX_1_175875602;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;

        let mut t12 = t12 * (-FIX_0_390180644);
        let mut t13 = t13 * (-FIX_1_961570560);
        t12 += z1;
        t13 += z1;

        let z1 = (t0 + t3) * (-FIX_0_899976223);
        let mut t0 = t0 * FIX_1_501321110;
        let mut t3 = t3 * FIX_0_298631336;
        t0 += z1 + t12;
        t3 += z1 + t13;

        let z1 = (t1 + t2) * (-FIX_2_562915447);
        let mut t1 = t1 * FIX_3_072711026;
        let mut t2 = t2 * FIX_2_053119869;
        t1 += z1 + t13;
        t2 += z1 + t12;

        coeffs[x + 8] = t0 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 3] = t1 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 5] = t2 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 7] = t3 >> (CONST_BITS + PASS1_BITS) as usize;
    }
}
diff --git a/third_party/rust/image/src/lib.rs b/third_party/rust/image/src/lib.rs
new file mode 100644
index 0000000000..4ad0118768
--- /dev/null
+++ b/third_party/rust/image/src/lib.rs
@@ -0,0 +1,141 @@
+//! This crate provides native rust implementations of
+//! image encoders and decoders and basic image manipulation
+//! functions.
+
+#![warn(missing_docs)]
+#![warn(unused_qualifications)]
+#![deny(unreachable_pub)]
+#![deny(deprecated)]
+#![deny(missing_copy_implementations)]
+#![cfg_attr(all(test, feature = "benchmarks"), feature(test))]
+// it's a bit of a pain otherwise
+#![allow(clippy::many_single_char_names)]
+
+#[cfg(all(test, feature = "benchmarks"))]
+extern crate test;
+
+#[cfg(test)]
+#[macro_use]
+extern crate quickcheck;
+
+use std::io::Write;
+
+pub use crate::color::{ColorType, ExtendedColorType};
+
+pub use crate::color::{Luma, LumaA, Rgb, Rgba, Bgr, Bgra};
+
+pub use crate::error::{ImageError, ImageResult};
+
+pub use crate::image::{AnimationDecoder,
+ GenericImage,
+ GenericImageView,
+ ImageDecoder,
+ ImageDecoderExt,
+ ImageEncoder,
+ ImageFormat,
+ ImageOutputFormat,
+ Progress,
+ // Iterators
+ Pixels,
+ SubImage};
+
+pub use crate::buffer::{ConvertBuffer,
+ GrayAlphaImage,
+ GrayImage,
+ // Image types
+ ImageBuffer,
+ Pixel,
+ RgbImage,
+ RgbaImage,
+ };
+
+pub use crate::flat::FlatSamples;
+
+// Traits
+pub use crate::traits::Primitive;
+
+// Opening and loading images
+pub use crate::io::free_functions::{guess_format, load};
+pub use crate::dynimage::{load_from_memory, load_from_memory_with_format, open,
+ save_buffer, save_buffer_with_format, image_dimensions};
+
+pub use crate::dynimage::DynamicImage;
+
+pub use crate::animation::{Delay, Frame, Frames};
+
+// More detailed error type
+pub mod error;
+
+// Math utils
+pub mod math;
+
+// Image processing functions
+pub mod imageops;
+
+// Io bindings
+pub mod io;
+
+// Buffer representations for ffi.
+pub mod flat;
+
+// Image codecs
+#[cfg(feature = "bmp")]
+pub mod bmp;
+#[cfg(feature = "dds")]
+pub mod dds;
+#[cfg(feature = "dxt")]
+pub mod dxt;
+#[cfg(feature = "gif")]
+pub mod gif;
+#[cfg(feature = "hdr")]
+pub mod hdr;
+#[cfg(feature = "ico")]
+pub mod ico;
+#[cfg(feature = "jpeg")]
+pub mod jpeg;
+#[cfg(feature = "png")]
+pub mod png;
+#[cfg(feature = "pnm")]
+pub mod pnm;
+#[cfg(feature = "tga")]
+pub mod tga;
+#[cfg(feature = "tiff")]
+pub mod tiff;
+#[cfg(feature = "webp")]
+pub mod webp;
+
+mod animation;
+mod buffer;
+mod color;
+mod dynimage;
+mod image;
+mod traits;
+mod utils;
+
// Can't use the macro-call itself within the `doc` attribute. So force it to eval it as part of
// the macro invocation.
//
// The inspiration for the macro and implementation is from
// <https://github.com/GuillaumeGomez/doc-comment>
//
// MIT License
//
// Copyright (c) 2018 Guillaume Gomez
macro_rules! insert_as_doc {
    { $content:expr } => {
        // Attach the string as the documentation of an otherwise empty
        // `extern` block; rustdoc then treats it like a regular doc comment,
        // so code examples inside it become doctests.
        #[doc = $content] extern { }
    }
}

// Provides the README.md as doc, to ensure the example works!
insert_as_doc!(include_str!("../README.md"));
+
// Copies data from `src` to `dst`
//
// Panics if the length of `dst` is less than the length of `src`.
#[inline]
fn copy_memory(src: &[u8], dst: &mut [u8]) {
    let len_src = src.len();
    assert!(dst.len() >= len_src);
    // Plain memcpy into the front of `dst`; any tail bytes stay untouched.
    dst[..len_src].copy_from_slice(src);
}
diff --git a/third_party/rust/image/src/math/mod.rs b/third_party/rust/image/src/math/mod.rs
new file mode 100644
index 0000000000..4b862b5a48
--- /dev/null
+++ b/third_party/rust/image/src/math/mod.rs
@@ -0,0 +1,6 @@
//! Mathematical helper functions and types.
// NEUQUANT neural-network color quantizer.
pub mod nq;
// Generic numeric helpers such as `clamp`.
pub mod utils;

// Plain rectangle type, re-exported at this module's root.
mod rect;
pub use self::rect::Rect;
diff --git a/third_party/rust/image/src/math/nq.rs b/third_party/rust/image/src/math/nq.rs
new file mode 100644
index 0000000000..a6a502dffc
--- /dev/null
+++ b/third_party/rust/image/src/math/nq.rs
@@ -0,0 +1,409 @@
+//! NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
+//! See "Kohonen neural networks for optimal colour quantization"
+//! in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
+//! for a discussion of the algorithm.
+//! See also <https://scientificgems.wordpress.com/stuff/neuquant-fast-high-quality-image-quantization/>
+
+/* NeuQuant Neural-Net Quantization Algorithm
+ * ------------------------------------------
+ *
+ * Copyright (c) 1994 Anthony Dekker
+ *
+ * NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
+ * See "Kohonen neural networks for optimal colour quantization"
+ * in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
+ * for a discussion of the algorithm.
+ * See also https://scientificgems.wordpress.com/stuff/neuquant-fast-high-quality-image-quantization/
+ *
+ * Any party obtaining a copy of these files from the author, directly or
+ * indirectly, is granted, free of charge, a full and unrestricted irrevocable,
+ * world-wide, paid up, royalty-free, nonexclusive right and license to deal
+ * in this software and documentation files (the "Software"), including without
+ * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons who receive
+ * copies from any such party to do so, with the only requirement being
+ * that this copyright notice remain intact.
+ *
+ *
+ * Incorporated bugfixes and alpha channel handling from pngnq
+ * http://pngnq.sourceforge.net
+ *
+ */
+
+use crate::math::utils::clamp;
+use std::cmp::{max, min};
+
// Number of byte channels per pixel (RGBA).
const CHANNELS: usize = 4;

const RADIUS_DEC: i32 = 30; // factor of 1/30 each cycle

const ALPHA_BIASSHIFT: i32 = 10; // alpha starts at 1
const INIT_ALPHA: i32 = 1 << ALPHA_BIASSHIFT; // biased by 10 bits

// Learning-rate constants for the frequency/bias bookkeeping in `contest`.
const GAMMA: f64 = 1024.0;
const BETA: f64 = 1.0 / GAMMA;
const BETAGAMMA: f64 = BETA * GAMMA;

// four primes near 500 - assume no image has a length so large
// that it is divisible by all four primes
//
// FIX: the third entry was 478, which is even (2 * 239) and not prime,
// breaking the stated invariant. A composite step can share a factor with
// the pixel count, so the sampling loop in `learn` would keep revisiting
// the same subset of pixels instead of covering the image. Dekker's
// original algorithm uses 487 here.
const PRIMES: [usize; 4] = [499, 491, 487, 503];
+
// A quadruple of per-channel values. Used both for the floating point
// neurons being trained and for the integer color map entries.
#[derive(Clone, Copy)]
struct Quad<T> {
    r: T,
    g: T,
    b: T,
    a: T,
}

// Floating point node while training vs. rounded palette entry for lookup.
type Neuron = Quad<f64>;
type Color = Quad<i32>;

/// Neural network color quantizer
pub struct NeuQuant {
    // The neurons being trained; `netsize` entries.
    network: Vec<Neuron>,
    // Rounded, clamped snapshot of `network`, rebuilt after training.
    colormap: Vec<Color>,
    // For each green value 0..=255, a starting index into the
    // green-sorted `colormap` (filled in by `build_netindex`).
    netindex: Vec<usize>,
    bias: Vec<f64>, // bias and freq arrays for learning
    freq: Vec<f64>,
    // Sampling factor: training visits `pixel_count / samplefac` pixels.
    samplefac: i32,
    // Number of colors in the palette.
    netsize: usize,
}
+
impl NeuQuant {
    /// Creates a new neural network and trains it with the supplied data
    ///
    /// `samplefac` is the sampling factor (1 visits every pixel, larger
    /// values visit a 1/`samplefac` subset — faster but less accurate),
    /// `colors` is the palette size to learn, and `pixels` is raw data
    /// with 4 bytes (RGBA) per pixel.
    pub fn new(samplefac: i32, colors: usize, pixels: &[u8]) -> Self {
        let netsize = colors;
        let mut this = NeuQuant {
            network: Vec::with_capacity(netsize),
            colormap: Vec::with_capacity(netsize),
            netindex: vec![0; 256],
            bias: Vec::with_capacity(netsize),
            freq: Vec::with_capacity(netsize),
            samplefac,
            netsize,
        };
        this.init(pixels);
        this
    }

    /// Initializes the neural network and trains it with the supplied data
    ///
    /// Clears all learned state, seeds the network with an even gray ramp,
    /// then runs the learning pass and rebuilds the color map and the
    /// green-sorted search index.
    pub fn init(&mut self, pixels: &[u8]) {
        self.network.clear();
        self.colormap.clear();
        self.bias.clear();
        self.freq.clear();
        let freq = (self.netsize as f64).recip();
        for i in 0..self.netsize {
            // Seed neuron i on the gray diagonal, spread evenly over 0..256.
            let tmp = (i as f64) * 256.0 / (self.netsize as f64);
            // Sets alpha values at 0 for dark pixels.
            let a = if i < 16 { i as f64 * 16.0 } else { 255.0 };
            self.network.push(Neuron {
                r: tmp,
                g: tmp,
                b: tmp,
                a,
            });
            self.colormap.push(Color {
                r: 0,
                g: 0,
                b: 0,
                a: 255,
            });
            self.freq.push(freq);
            self.bias.push(0.0);
        }
        self.learn(pixels);
        self.build_colormap();
        self.build_netindex();
    }

    /// Maps the pixel in-place to the best-matching color in the color map
    ///
    /// `pixel` must be exactly 4 bytes (RGBA); it is overwritten with the
    /// chosen palette entry.
    #[inline(always)]
    pub fn map_pixel(&self, pixel: &mut [u8]) {
        assert_eq!(pixel.len(), 4);
        match (pixel[0], pixel[1], pixel[2], pixel[3]) {
            (r, g, b, a) => {
                let i = self.search_netindex(b, g, r, a);
                pixel[0] = self.colormap[i].r as u8;
                pixel[1] = self.colormap[i].g as u8;
                pixel[2] = self.colormap[i].b as u8;
                pixel[3] = self.colormap[i].a as u8;
            }
        }
    }

    /// Finds the best-matching index in the color map for `pixel`
    ///
    /// `pixel` must be exactly 4 bytes (RGBA).
    #[inline(always)]
    pub fn index_of(&self, pixel: &[u8]) -> usize {
        assert_eq!(pixel.len(), 4);
        match (pixel[0], pixel[1], pixel[2], pixel[3]) {
            (r, g, b, a) => self.search_netindex(b, g, r, a),
        }
    }

    /// Move neuron i towards biased (a,b,g,r) by factor alpha
    fn alter_single(&mut self, alpha: f64, i: i32, quad: Quad<f64>) {
        let n = &mut self.network[i as usize];
        n.b -= alpha * (n.b - quad.b);
        n.g -= alpha * (n.g - quad.g);
        n.r -= alpha * (n.r - quad.r);
        n.a -= alpha * (n.a - quad.a);
    }

    /// Move neuron adjacent neurons towards biased (a,b,g,r) by factor alpha
    ///
    /// Walks outwards from `i` in both directions up to `rad` neighbours,
    /// attenuating `alpha` quadratically with the distance `q`.
    fn alter_neighbour(&mut self, alpha: f64, rad: i32, i: i32, quad: Quad<f64>) {
        let lo = max(i - rad, 0);
        let hi = min(i + rad, self.netsize as i32);
        let mut j = i + 1;
        let mut k = i - 1;
        let mut q = 0;

        while (j < hi) || (k > lo) {
            let rad_sq = f64::from(rad) * f64::from(rad);
            // Quadratic falloff: full alpha at distance 0, zero at `rad`.
            let alpha = (alpha * (rad_sq - f64::from(q) * f64::from(q))) / rad_sq;
            q += 1;
            if j < hi {
                let p = &mut self.network[j as usize];
                p.b -= alpha * (p.b - quad.b);
                p.g -= alpha * (p.g - quad.g);
                p.r -= alpha * (p.r - quad.r);
                p.a -= alpha * (p.a - quad.a);
                j += 1;
            }
            if k > lo {
                let p = &mut self.network[k as usize];
                p.b -= alpha * (p.b - quad.b);
                p.g -= alpha * (p.g - quad.g);
                p.r -= alpha * (p.r - quad.r);
                p.a -= alpha * (p.a - quad.a);
                k -= 1;
            }
        }
    }

    /// Search for biased BGR values
    /// finds closest neuron (min dist) and updates freq
    /// finds best neuron (min dist-bias) and returns position
    /// for frequently chosen neurons, freq[i] is high and bias[i] is negative
    /// bias[i] = gamma*((1/self.netsize)-freq[i])
    fn contest(&mut self, b: f64, g: f64, r: f64, a: f64) -> i32 {
        use std::f64;

        let mut bestd = f64::MAX;
        let mut bestbiasd: f64 = bestd;
        let mut bestpos = -1;
        let mut bestbiaspos: i32 = bestpos;

        for i in 0..self.netsize {
            let bestbiasd_biased = bestbiasd + self.bias[i];
            let mut dist;
            let n = &self.network[i];
            // Partial L1 distance first (b and r only) so clearly worse
            // candidates can be rejected without summing all channels.
            dist = (n.b - b).abs();
            dist += (n.r - r).abs();
            if dist < bestd || dist < bestbiasd_biased {
                dist += (n.g - g).abs();
                dist += (n.a - a).abs();
                if dist < bestd {
                    bestd = dist;
                    bestpos = i as i32;
                }
                let biasdist = dist - self.bias[i];
                if biasdist < bestbiasd {
                    bestbiasd = biasdist;
                    bestbiaspos = i as i32;
                }
            }
            // Decay every neuron's frequency and grow its bias; the winner
            // below gets the opposite adjustment so it is picked less often.
            self.freq[i] -= BETA * self.freq[i];
            self.bias[i] += BETAGAMMA * self.freq[i];
        }
        self.freq[bestpos as usize] += BETA;
        self.bias[bestpos as usize] -= BETAGAMMA;
        bestbiaspos
    }

    /// Main learning loop
    /// Note: the number of learning cycles is crucial and the parameters are not
    /// optimized for net sizes < 26 or > 256. 1064 colors seems to work fine
    fn learn(&mut self, pixels: &[u8]) {
        let initrad: i32 = self.netsize as i32 / 8; // for 256 cols, radius starts at 32
        let radiusbiasshift: i32 = 6;
        let radiusbias: i32 = 1 << radiusbiasshift;
        let init_bias_radius: i32 = initrad * radiusbias;
        let mut bias_radius = init_bias_radius;
        let alphadec = 30 + ((self.samplefac - 1) / 3);
        let lengthcount = pixels.len() / CHANNELS;
        let samplepixels = lengthcount / self.samplefac as usize;
        // learning cycles
        let n_cycles = match self.netsize >> 1 {
            n if n <= 100 => 100,
            n => n,
        };
        // Samples per cycle; alpha and radius decay once per `delta` samples.
        let delta = match samplepixels / n_cycles {
            0 => 1,
            n => n,
        };
        let mut alpha = INIT_ALPHA;

        let mut rad = bias_radius >> radiusbiasshift;
        if rad <= 1 {
            rad = 0
        };

        // Step through the image with a prime stride that does not divide
        // the pixel count, so successive samples cover the whole image.
        let mut pos = 0;
        let step = *PRIMES
            .iter()
            .find(|&&prime| lengthcount % prime != 0)
            .unwrap_or(&PRIMES[3]);

        let mut i = 0;
        while i < samplepixels {
            let (r, g, b, a) = {
                let p = &pixels[CHANNELS * pos..][..CHANNELS];
                (
                    f64::from(p[0]),
                    f64::from(p[1]),
                    f64::from(p[2]),
                    f64::from(p[3]),
                )
            };

            let j = self.contest(b, g, r, a);

            let alpha_ = (1.0 * f64::from(alpha)) / f64::from(INIT_ALPHA);
            self.alter_single(alpha_, j, Quad { b, g, r, a });
            if rad > 0 {
                self.alter_neighbour(alpha_, rad, j, Quad { b, g, r, a })
            };

            pos += step;
            while pos >= lengthcount {
                pos -= lengthcount
            }

            i += 1;
            if i % delta == 0 {
                // End of a cycle: shrink learning rate and neighbourhood.
                alpha -= alpha / alphadec;
                bias_radius -= bias_radius / RADIUS_DEC;
                rad = bias_radius >> radiusbiasshift;
                if rad <= 1 {
                    rad = 0
                };
            }
        }
    }

    /// initializes the color map
    ///
    /// Rounds each trained neuron and clamps it into the 0..=255 range.
    fn build_colormap(&mut self) {
        for i in 0usize..self.netsize {
            self.colormap[i].b = clamp(self.network[i].b.round() as i32, 0, 255);
            self.colormap[i].g = clamp(self.network[i].g.round() as i32, 0, 255);
            self.colormap[i].r = clamp(self.network[i].r.round() as i32, 0, 255);
            self.colormap[i].a = clamp(self.network[i].a.round() as i32, 0, 255);
        }
    }

    /// Insertion sort of network and building of netindex[0..255]
    ///
    /// Sorts `colormap` by the green channel (selection sort) and records,
    /// for each green value, where to start scanning in `search_netindex`.
    fn build_netindex(&mut self) {
        let mut previouscol = 0;
        let mut startpos = 0;

        for i in 0..self.netsize {
            let mut p = self.colormap[i];
            let mut q;
            let mut smallpos = i;
            let mut smallval = p.g as usize; // index on g
                                             // find smallest in i..netsize-1
            for j in (i + 1)..self.netsize {
                q = self.colormap[j];
                if (q.g as usize) < smallval {
                    // index on g
                    smallpos = j;
                    smallval = q.g as usize; // index on g
                }
            }
            q = self.colormap[smallpos];
            // swap p (i) and q (smallpos) entries
            if i != smallpos {
                ::std::mem::swap(&mut p, &mut q);
                self.colormap[i] = p;
                self.colormap[smallpos] = q;
            }
            // smallval entry is now in position i
            if smallval != previouscol {
                self.netindex[previouscol] = (startpos + i) >> 1;
                for j in (previouscol + 1)..smallval {
                    self.netindex[j] = i
                }
                previouscol = smallval;
                startpos = i;
            }
        }
        let max_netpos = self.netsize - 1;
        self.netindex[previouscol] = (startpos + max_netpos) >> 1;
        for j in (previouscol + 1)..256 {
            self.netindex[j] = max_netpos
        } // really 256
    }

    /// Search for best matching color
    ///
    /// Starts near the entry whose green value matches `g` (via `netindex`)
    /// and scans outwards in both directions, stopping a direction as soon
    /// as the green distance alone exceeds the best full distance found.
    fn search_netindex(&self, b: u8, g: u8, r: u8, a: u8) -> usize {
        let mut bestd = 1 << 30; // 2^30 ~ 1e9, effectively "infinity"
        let mut best = 0;
        // start at netindex[g] and work outwards
        let mut i = self.netindex[g as usize];
        let mut j = if i > 0 { i - 1 } else { 0 };

        while (i < self.netsize) || (j > 0) {
            if i < self.netsize {
                let p = self.colormap[i];
                let mut e = p.g - i32::from(g);
                let mut dist = e * e; // index key
                if dist >= bestd {
                    break;
                } else {
                    e = p.b - i32::from(b);
                    dist += e * e;
                    if dist < bestd {
                        e = p.r - i32::from(r);
                        dist += e * e;
                        if dist < bestd {
                            e = p.a - i32::from(a);
                            dist += e * e;
                            if dist < bestd {
                                bestd = dist;
                                best = i;
                            }
                        }
                    }
                    i += 1;
                }
            }
            if j > 0 {
                let p = self.colormap[j];
                let mut e = p.g - i32::from(g);
                let mut dist = e * e; // index key
                if dist >= bestd {
                    break;
                } else {
                    e = p.b - i32::from(b);
                    dist += e * e;
                    if dist < bestd {
                        e = p.r - i32::from(r);
                        dist += e * e;
                        if dist < bestd {
                            e = p.a - i32::from(a);
                            dist += e * e;
                            if dist < bestd {
                                bestd = dist;
                                best = j;
                            }
                        }
                    }
                    j -= 1;
                }
            }
        }
        best
    }
}
diff --git a/third_party/rust/image/src/math/rect.rs b/third_party/rust/image/src/math/rect.rs
new file mode 100644
index 0000000000..74696be238
--- /dev/null
+++ b/third_party/rust/image/src/math/rect.rs
@@ -0,0 +1,12 @@
/// A Rectangle defined by its top left corner, width and height.
///
/// This is a plain data type with public fields and no enforced
/// invariants; callers are responsible for keeping it within the bounds
/// of whatever it refers to.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Rect {
    /// The x coordinate of the top left corner.
    pub x: u32,
    /// The y coordinate of the top left corner.
    pub y: u32,
    /// The rectangle's width.
    pub width: u32,
    /// The rectangle's height.
    pub height: u32,
}
diff --git a/third_party/rust/image/src/math/utils.rs b/third_party/rust/image/src/math/utils.rs
new file mode 100644
index 0000000000..1ddea5dbc2
--- /dev/null
+++ b/third_party/rust/image/src/math/utils.rs
@@ -0,0 +1,24 @@
+//! Shared mathematical utility functions.
+
/// Cut value to be inside given range
///
/// Returns `min` when `a` lies below the range, `max` when it lies above
/// it, and `a` itself otherwise.
///
/// ```
/// use image::math::utils;
///
/// assert_eq!(utils::clamp(-5, 0, 10), 0);
/// assert_eq!(utils::clamp( 6, 0, 10), 6);
/// assert_eq!(utils::clamp(15, 0, 10), 10);
/// ```
#[inline]
pub fn clamp<N>(a: N, min: N, max: N) -> N
where
    N: PartialOrd,
{
    if a < min {
        min
    } else if a > max {
        max
    } else {
        a
    }
}
diff --git a/third_party/rust/image/src/png.rs b/third_party/rust/image/src/png.rs
new file mode 100644
index 0000000000..d4b083e094
--- /dev/null
+++ b/third_party/rust/image/src/png.rs
@@ -0,0 +1,333 @@
+//! Decoding and Encoding of PNG Images
+//!
+//! PNG (Portable Network Graphics) is an image format that supports lossless compression.
+//!
+//! # Related Links
+//! * <http://www.w3.org/TR/PNG/> - The PNG Specification
+//!
+
+use std::convert::TryFrom;
+use std::io::{self, Read, Write};
+
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{DecodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::image::{ImageDecoder, ImageEncoder, ImageFormat};
+
/// PNG Reader
///
/// This reader will try to read the png one row at a time,
/// however for interlaced png files this is not possible and
/// these are therefore read at once.
pub struct PNGReader<R: Read> {
    reader: png::Reader<R>,
    // Pending decoded bytes: the whole image for interlaced files,
    // otherwise the unconsumed remainder of the current row.
    buffer: Vec<u8>,
    // Read cursor into `buffer`.
    index: usize,
}

impl<R: Read> PNGReader<R> {
    // Wraps an already-initialized `png::Reader`, eagerly decoding the
    // whole frame when the image is interlaced (see the type docs).
    fn new(mut reader: png::Reader<R>) -> ImageResult<PNGReader<R>> {
        let len = reader.output_buffer_size();
        // Since interlaced images do not come in
        // scanline order it is almost impossible to
        // read them in a streaming fashion, however
        // this shouldn't be a too big of a problem
        // as most interlaced images should fit in memory.
        let buffer = if reader.info().interlaced {
            let mut buffer = vec![0; len];
            reader.next_frame(&mut buffer).map_err(ImageError::from_png)?;
            buffer
        } else {
            Vec::new()
        };

        Ok(PNGReader {
            reader,
            buffer,
            index: 0,
        })
    }
}

impl<R: Read> Read for PNGReader<R> {
    fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
        // io::Write::write for slice cannot fail
        // (writing into `&mut [u8]` copies as much as fits and advances
        // the slice, so `buf` always points at the still-unfilled tail).
        let readed = buf.write(&self.buffer[self.index..]).unwrap();

        let mut bytes = readed;
        self.index += readed;

        // Internal buffer exhausted but `buf` may still have room: pull
        // further rows directly from the decoder.
        while self.index >= self.buffer.len() {
            match self.reader.next_row()? {
                Some(row) => {
                    // Faster to copy directly to external buffer
                    let readed = buf.write(row).unwrap();
                    bytes += readed;

                    // Whatever part of the row did not fit is kept for the
                    // next call.
                    self.buffer = (&row[readed..]).to_owned();
                    self.index = 0;
                }
                None => return Ok(bytes)
            }
        }

        Ok(bytes)
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // NOTE(review): this re-emits the *entire* pending buffer even when
        // `read` already consumed a prefix of it (`index > 0`), which would
        // duplicate those bytes. Harmless for the usual read-all-at-once
        // path, but worth confirming against mixed read()/read_to_end use.
        let mut bytes = self.buffer.len();
        buf.extend_from_slice(&self.buffer);
        self.buffer = Vec::new();
        self.index = 0;

        while let Some(row) = self.reader.next_row()? {
            buf.extend_from_slice(row);
            bytes += row.len();
        }

        Ok(bytes)
    }
}
+
/// PNG decoder
pub struct PngDecoder<R: Read> {
    // Output color type as reported by the reader after transformations
    // have been applied (see `new`).
    color_type: ColorType,
    reader: png::Reader<R>,
}

impl<R: Read> PngDecoder<R> {
    /// Creates a new decoder that decodes from the stream ```r```
    pub fn new(r: R) -> ImageResult<PngDecoder<R>> {
        let limits = png::Limits {
            bytes: usize::max_value(),
        };
        let mut decoder = png::Decoder::new_with_limits(r, limits);
        // By default the PNG decoder will scale 16 bpc to 8 bpc, so custom
        // transformations must be set. EXPAND preserves the default behavior
        // expanding bpc < 8 to 8 bpc.
        decoder.set_transformations(png::Transformations::EXPAND);
        let (_, mut reader) = decoder.read_info().map_err(ImageError::from_png)?;
        let (color_type, bits) = reader.output_color_type();
        // Map the post-transformation (color, bit depth) pair onto our
        // ColorType; anything we cannot represent is rejected up front.
        let color_type = match (color_type, bits) {
            (png::ColorType::Grayscale, png::BitDepth::Eight) => ColorType::L8,
            (png::ColorType::Grayscale, png::BitDepth::Sixteen) => ColorType::L16,
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight) => ColorType::La8,
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen) => ColorType::La16,
            (png::ColorType::RGB, png::BitDepth::Eight) => ColorType::Rgb8,
            (png::ColorType::RGB, png::BitDepth::Sixteen) => ColorType::Rgb16,
            (png::ColorType::RGBA, png::BitDepth::Eight) => ColorType::Rgba8,
            (png::ColorType::RGBA, png::BitDepth::Sixteen) => ColorType::Rgba16,

            // Sub-byte depths should have been widened by EXPAND above;
            // any that still reach us are reported as unsupported rather
            // than decoded incorrectly.
            (png::ColorType::Grayscale, png::BitDepth::One) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::L1)),
            (png::ColorType::GrayscaleAlpha, png::BitDepth::One) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::La1)),
            (png::ColorType::RGB, png::BitDepth::One) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgb1)),
            (png::ColorType::RGBA, png::BitDepth::One) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgba1)),

            (png::ColorType::Grayscale, png::BitDepth::Two) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::L2)),
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Two) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::La2)),
            (png::ColorType::RGB, png::BitDepth::Two) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgb2)),
            (png::ColorType::RGBA, png::BitDepth::Two) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgba2)),

            (png::ColorType::Grayscale, png::BitDepth::Four) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::L4)),
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Four) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::La4)),
            (png::ColorType::RGB, png::BitDepth::Four) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgb4)),
            (png::ColorType::RGBA, png::BitDepth::Four) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Rgba4)),

            (png::ColorType::Indexed, bits) =>
                return Err(ImageError::UnsupportedColor(ExtendedColorType::Unknown(bits as u8))),
        };

        Ok(PngDecoder { color_type, reader })
    }
}

impl<'a, R: 'a + Read> ImageDecoder<'a> for PngDecoder<R> {
    type Reader = PNGReader<R>;

    fn dimensions(&self) -> (u32, u32) {
        self.reader.info().size()
    }

    fn color_type(&self) -> ColorType {
        self.color_type
    }

    fn into_reader(self) -> ImageResult<Self::Reader> {
        PNGReader::new(self.reader)
    }

    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        use byteorder::{BigEndian, ByteOrder, NativeEndian};

        // Caller must provide a buffer of exactly the advertised size.
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        self.reader.next_frame(buf).map_err(ImageError::from_png)?;
        // PNG images are big endian. For 16 bit per channel and larger types,
        // the buffer may need to be reordered to native endianness per the
        // contract of `read_image`.
        // TODO: assumes equal channel bit depth.
        let bpc = self.color_type().bytes_per_pixel() / self.color_type().channel_count();
        match bpc {
            1 => (), // No reodering necessary for u8
            2 => buf.chunks_mut(2).for_each(|c| {
                let v = BigEndian::read_u16(c);
                NativeEndian::write_u16(c, v)
            }),
            // `new` only produces 8- or 16-bit color types, so bpc is 1 or 2.
            _ => unreachable!(),
        }
        Ok(())
    }

    fn scanline_bytes(&self) -> u64 {
        // Length in bytes of one output row as produced by the decoder.
        let width = self.reader.info().width;
        self.reader.output_line_size(width) as u64
    }
}
+
/// PNG encoder
pub struct PNGEncoder<W: Write> {
    // Destination for the encoded PNG stream.
    w: W,
}

impl<W: Write> PNGEncoder<W> {
    /// Create a new encoder that writes its output to ```w```
    pub fn new(w: W) -> PNGEncoder<W> {
        PNGEncoder { w }
    }

    /// Encodes the image ```image```
    /// that has dimensions ```width``` and ```height```
    /// and ```ColorType``` ```c```
    ///
    /// `data` must already be in big-endian byte order for 16-bit color
    /// types (see `write_image` below, which handles the conversion).
    pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> {
        // Translate our ColorType into the png crate's (color, bit depth)
        // pair; color types not representable in PNG are rejected.
        let (ct, bits) = match color {
            ColorType::L8 => (png::ColorType::Grayscale, png::BitDepth::Eight),
            ColorType::L16 => (png::ColorType::Grayscale,png::BitDepth::Sixteen),
            ColorType::La8 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight),
            ColorType::La16 => (png::ColorType::GrayscaleAlpha,png::BitDepth::Sixteen),
            ColorType::Rgb8 => (png::ColorType::RGB, png::BitDepth::Eight),
            ColorType::Rgb16 => (png::ColorType::RGB,png::BitDepth::Sixteen),
            ColorType::Rgba8 => (png::ColorType::RGBA, png::BitDepth::Eight),
            ColorType::Rgba16 => (png::ColorType::RGBA,png::BitDepth::Sixteen),
            _ => return Err(ImageError::UnsupportedColor(color.into())),
        };

        let mut encoder = png::Encoder::new(self.w, width, height);
        encoder.set_color(ct);
        encoder.set_depth(bits);
        let mut writer = encoder.write_header().map_err(|e| ImageError::IoError(e.into()))?;
        writer.write_image_data(data).map_err(|e| ImageError::IoError(e.into()))
    }
}

impl<W: Write> ImageEncoder for PNGEncoder<W> {
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ColorType,
    ) -> ImageResult<()> {
        use byteorder::{BigEndian, ByteOrder, NativeEndian};

        // PNG images are big endian. For 16 bit per channel and larger types,
        // the buffer may need to be reordered to big endian per the
        // contract of `write_image`.
        // TODO: assumes equal channel bit depth.
        let bpc = color_type.bytes_per_pixel() / color_type.channel_count();
        match bpc {
            1 => self.encode(buf, width, height, color_type), // No reodering necessary for u8
            2 => {
                // Because the buffer is immutable and the PNG encoder does not
                // yet take Write/Read traits, create a temporary buffer for
                // big endian reordering.
                let mut reordered = vec![0; buf.len()];
                buf.chunks(2)
                    .zip(reordered.chunks_mut(2))
                    .for_each(|(b, r)| BigEndian::write_u16(r, NativeEndian::read_u16(b)));
                self.encode(&reordered, width, height, color_type)
            },
            // Supported color types are 8- or 16-bit, so bpc is 1 or 2.
            _ => unreachable!(),
        }
    }
}
+
impl ImageError {
    // Translates the png crate's error type into this crate's `ImageError`,
    // preserving the underlying error as the source where possible.
    fn from_png(err: png::DecodingError) -> ImageError {
        use png::DecodingError::*;
        match err {
            IoError(err) => ImageError::IoError(err),
            Format(message) => ImageError::Decoding(DecodingError::with_message(
                ImageFormat::Png.into(),
                message.into_owned(),
            )),
            LimitsExceeded => ImageError::InsufficientMemory,
            // Other is used when the buffer to `Reader::next_frame` is too small.
            Other(message) => ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::Generic(message.into_owned())
            )),
            // Keep the original error as the source so callers can downcast
            // (see the `underlying_error` test below).
            err @ InvalidSignature
            | err @ CrcMismatch { .. }
            | err @ CorruptFlateStream => {
                ImageError::Decoding(DecodingError::new(
                    ImageFormat::Png.into(),
                    err,
                ))
            }
        }
    }
}
+
#[cfg(test)]
mod tests {
    use crate::image::ImageDecoder;
    use std::io::Read;
    use super::*;

    // Decodes a known 2000x1000 RGB8 fixture through the streaming reader
    // and checks that exactly width * height * 3 bytes come out.
    #[test]
    fn ensure_no_decoder_off_by_one() {
        let dec = PngDecoder::new(std::fs::File::open("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png").unwrap())
            .expect("Unable to read PNG file (does it exist?)");

        assert_eq![(2000, 1000), dec.dimensions()];

        assert_eq![
            ColorType::Rgb8,
            dec.color_type(),
            "Image MUST have the Rgb8 format"
        ];

        let correct_bytes = dec
            .into_reader()
            .expect("Unable to read file")
            .bytes()
            .map(|x| x.expect("Unable to read byte"))
            .collect::<Vec<u8>>();

        assert_eq![6_000_000, correct_bytes.len()];
    }

    // Corrupts the PNG signature and checks that the resulting error still
    // exposes the png crate's error through `Error::source`.
    #[test]
    fn underlying_error() {
        use std::error::Error;

        let mut not_png = std::fs::read("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png").unwrap();
        not_png[0] = 0;

        let error = PngDecoder::new(&not_png[..]).err().unwrap();
        let _ = error
            .source()
            .unwrap()
            .downcast_ref::<png::DecodingError>()
            .expect("Caused by a png error");
    }
}
diff --git a/third_party/rust/image/src/pnm/autobreak.rs b/third_party/rust/image/src/pnm/autobreak.rs
new file mode 100644
index 0000000000..cea2cd8f2b
--- /dev/null
+++ b/third_party/rust/image/src/pnm/autobreak.rs
@@ -0,0 +1,124 @@
+//! Insert line breaks between written buffers when they would overflow the line length.
+use std::io;
+
// The pnm standard says to insert line breaks after 70 characters. Assumes that no line breaks
// are actually written. We have to be careful to fully commit buffers or not commit them at all,
// otherwise we might insert a newline in the middle of a token.
pub(crate) struct AutoBreak<W: io::Write> {
    inner: W,
    max_line_len: usize,
    pending: Vec<u8>,
    newline_queued: bool,
    poisoned: bool, // see https://github.com/rust-lang/rust/issues/30888
}

impl<W: io::Write> AutoBreak<W> {
    // Wraps `writer`, inserting a line break whenever appending another
    // buffer would push the current line past `line_capacity` bytes.
    pub(crate) fn new(writer: W, line_capacity: usize) -> Self {
        AutoBreak {
            inner: writer,
            max_line_len: line_capacity,
            // One extra byte of room for the inserted b'\n'.
            pending: Vec::with_capacity(line_capacity + 1),
            newline_queued: false,
            poisoned: false,
        }
    }

    // Pushes the buffered line to the wrapped writer, retrying partial
    // writes (same strategy as std's BufWriter).
    fn flush_buf(&mut self) -> io::Result<()> {
        let total = self.pending.len();
        let mut committed = 0;
        let mut outcome = Ok(());
        while committed < total {
            self.poisoned = true;
            let attempt = self.inner.write(&self.pending[committed..]);
            self.poisoned = false;
            match attempt {
                Ok(0) => {
                    outcome = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                    break;
                }
                Ok(n) => committed += n,
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => {
                    outcome = Err(e);
                    break;
                }
            }
        }
        if committed > 0 {
            self.pending.drain(..committed);
        }
        outcome
    }
}

impl<W: io::Write> io::Write for AutoBreak<W> {
    fn write(&mut self, buffer: &[u8]) -> io::Result<usize> {
        // A break queued by a previous (failed) flush must be committed
        // before any new token data is appended.
        if self.newline_queued {
            self.flush()?;
            self.newline_queued = false;
        }

        // Break the current line if appending `buffer` would overflow it;
        // `buffer` itself is never split across lines.
        if !self.pending.is_empty() && self.pending.len() + buffer.len() > self.max_line_len {
            self.pending.push(b'\n');
            self.newline_queued = true;
            self.flush()?;
            self.newline_queued = false;
        }

        self.pending.extend_from_slice(buffer);
        Ok(buffer.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.flush_buf()?;
        self.inner.flush()
    }
}

impl<W: io::Write> Drop for AutoBreak<W> {
    fn drop(&mut self) {
        if !self.poisoned {
            let _r = self.flush_buf();
            // internal writer flushed automatically by Drop
        }
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Two writes that each exactly fill the line: a break must be inserted
    // between them, and the trailing line flushed on drop.
    #[test]
    fn test_aligned_writes() {
        let mut output = Vec::new();

        {
            let mut writer = AutoBreak::new(&mut output, 10);
            writer.write_all(b"0123456789").unwrap();
            writer.write_all(b"0123456789").unwrap();
        }

        assert_eq!(output.as_slice(), b"0123456789\n0123456789");
    }

    // Writes longer than the capacity go onto their own line unbroken;
    // a break only separates whole buffers, never splits one.
    #[test]
    fn test_greater_writes() {
        let mut output = Vec::new();

        {
            let mut writer = AutoBreak::new(&mut output, 10);
            writer.write_all(b"012").unwrap();
            writer.write_all(b"345").unwrap();
            writer.write_all(b"0123456789").unwrap();
            writer.write_all(b"012345678910").unwrap();
            writer.write_all(b"_").unwrap();
        }

        assert_eq!(output.as_slice(), b"012345\n0123456789\n012345678910\n_");
    }
}
diff --git a/third_party/rust/image/src/pnm/decoder.rs b/third_party/rust/image/src/pnm/decoder.rs
new file mode 100644
index 0000000000..85c8e43787
--- /dev/null
+++ b/third_party/rust/image/src/pnm/decoder.rs
@@ -0,0 +1,1104 @@
+use std::convert::TryFrom;
+use std::io::{self, BufRead, BufReader, Cursor, Read};
+use std::str::{self, FromStr};
+use std::fmt::Display;
+use std::marker::PhantomData;
+use std::mem;
+
+use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader};
+use super::{HeaderRecord, PNMHeader, PNMSubtype, SampleEncoding};
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{ImageError, ImageResult};
+use crate::image::{self, ImageDecoder};
+use crate::utils;
+
+use byteorder::{BigEndian, ByteOrder, NativeEndian};
+
+/// Dynamic representation, represents all decodable (sample, depth) combinations.
+#[derive(Clone, Copy)]
+enum TupleType {
+    /// 1-bit samples packed 8-per-byte (PBM raster; 1 = black).
+    PbmBit,
+    /// Black/white stored one byte per sample, values restricted to 0/1 (PAM).
+    BWBit,
+    /// 8-bit grayscale.
+    GrayU8,
+    /// 16-bit grayscale (big-endian on the wire).
+    GrayU16,
+    /// 8-bit RGB.
+    RGBU8,
+    /// 16-bit RGB (big-endian on the wire).
+    RGBU16,
+}
+
+/// Strategy trait: decodes one on-the-wire sample representation into the
+/// native output buffer layout.
+trait Sample {
+    /// Number of raster bytes occupied by a `width`×`height` image with
+    /// `samples` samples per pixel.
+    fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize>;
+
+    /// It is guaranteed that `bytes.len() == bytelen(width, height, samples)`
+    fn from_bytes(bytes: &[u8], width: u32, height: u32, samples: u32)
+        -> ImageResult<Vec<u8>>;
+
+    /// Decodes whitespace-separated ASCII sample values from `reader`.
+    fn from_ascii(reader: &mut dyn Read, width: u32, height: u32, samples: u32)
+        -> ImageResult<Vec<u8>>;
+}
+
+// Zero-sized marker types selecting a `Sample` decoding strategy.
+struct U8; // plain 8-bit samples
+struct U16; // big-endian 16-bit samples
+struct PbmBit; // bit-packed PBM raster
+struct BWBit; // byte-per-sample black/white (PAM BLACKANDWHITE)
+
+/// Maps a parsed header to the concrete `TupleType` used for decoding,
+/// or errors when the (depth, maxval) combination is unsupported.
+trait DecodableImageHeader {
+    fn tuple_type(&self) -> ImageResult<TupleType>;
+}
+
+/// PNM decoder
+pub struct PnmDecoder<R> {
+    // Buffered source; header parsing consumes bytes up to the raster.
+    reader: BufReader<R>,
+    // Fully parsed header (dimensions, maxval, subtype).
+    header: PNMHeader,
+    // Decoding strategy derived from the header.
+    tuple: TupleType,
+}
+
+impl<R: Read> PnmDecoder<R> {
+    /// Create a new decoder that decodes from the stream ```read```
+    ///
+    /// Reads the two-byte magic (`P1`..`P7`) and then the full header for
+    /// the corresponding subtype. Fails with a `FormatError` on an unknown
+    /// magic and propagates I/O errors.
+    pub fn new(read: R) -> ImageResult<PnmDecoder<R>> {
+        let mut buf = BufReader::new(read);
+        let magic = buf.read_magic_constant()?;
+        if magic[0] != b'P' {
+            return Err(ImageError::FormatError(
+                format!("Expected magic constant for pnm, P1 through P7 instead of {:?}", magic),
+            ));
+        }
+
+        // Second magic byte selects the subtype and sample encoding.
+        let subtype = match magic[1] {
+            b'1' => PNMSubtype::Bitmap(SampleEncoding::Ascii),
+            b'2' => PNMSubtype::Graymap(SampleEncoding::Ascii),
+            b'3' => PNMSubtype::Pixmap(SampleEncoding::Ascii),
+            b'4' => PNMSubtype::Bitmap(SampleEncoding::Binary),
+            b'5' => PNMSubtype::Graymap(SampleEncoding::Binary),
+            b'6' => PNMSubtype::Pixmap(SampleEncoding::Binary),
+            b'7' => PNMSubtype::ArbitraryMap,
+            _ => {
+                return Err(ImageError::FormatError(
+                    format!("Expected magic constant for pnm, P1 through P7 instead of {:?}", magic),
+                ));
+            }
+        };
+
+        match subtype {
+            PNMSubtype::Bitmap(enc) => PnmDecoder::read_bitmap_header(buf, enc),
+            PNMSubtype::Graymap(enc) => PnmDecoder::read_graymap_header(buf, enc),
+            PNMSubtype::Pixmap(enc) => PnmDecoder::read_pixmap_header(buf, enc),
+            PNMSubtype::ArbitraryMap => PnmDecoder::read_arbitrary_header(buf),
+        }
+    }
+
+    /// Extract the reader and header after an image has been read.
+    pub fn into_inner(self) -> (R, PNMHeader) {
+        (self.reader.into_inner(), self.header)
+    }
+
+    // PBM: bit-packed samples, tuple type is always `PbmBit`.
+    fn read_bitmap_header(
+        mut reader: BufReader<R>,
+        encoding: SampleEncoding,
+    ) -> ImageResult<PnmDecoder<R>> {
+        let header = reader.read_bitmap_header(encoding)?;
+        Ok(PnmDecoder {
+            reader,
+            tuple: TupleType::PbmBit,
+            header: PNMHeader {
+                decoded: HeaderRecord::Bitmap(header),
+                encoded: None,
+            },
+        })
+    }
+
+    // PGM: tuple type (8- vs 16-bit) depends on the header's maxwhite.
+    fn read_graymap_header(
+        mut reader: BufReader<R>,
+        encoding: SampleEncoding,
+    ) -> ImageResult<PnmDecoder<R>> {
+        let header = reader.read_graymap_header(encoding)?;
+        let tuple_type = header.tuple_type()?;
+        Ok(PnmDecoder {
+            reader,
+            tuple: tuple_type,
+            header: PNMHeader {
+                decoded: HeaderRecord::Graymap(header),
+                encoded: None,
+            },
+        })
+    }
+
+    // PPM: tuple type (8- vs 16-bit RGB) depends on the header's maxval.
+    fn read_pixmap_header(
+        mut reader: BufReader<R>,
+        encoding: SampleEncoding,
+    ) -> ImageResult<PnmDecoder<R>> {
+        let header = reader.read_pixmap_header(encoding)?;
+        let tuple_type = header.tuple_type()?;
+        Ok(PnmDecoder {
+            reader,
+            tuple: tuple_type,
+            header: PNMHeader {
+                decoded: HeaderRecord::Pixmap(header),
+                encoded: None,
+            },
+        })
+    }
+
+    // PAM: tuple type derived from TUPLTYPE/DEPTH/MAXVAL lines.
+    fn read_arbitrary_header(mut reader: BufReader<R>) -> ImageResult<PnmDecoder<R>> {
+        let header = reader.read_arbitrary_header()?;
+        let tuple_type = header.tuple_type()?;
+        Ok(PnmDecoder {
+            reader,
+            tuple: tuple_type,
+            header: PNMHeader {
+                decoded: HeaderRecord::Arbitrary(header),
+                encoded: None,
+            },
+        })
+    }
+}
+
+/// Header-parsing primitives layered on top of any `BufRead`.
+/// All methods consume exactly the bytes that belong to the header.
+trait HeaderReader: BufRead {
+    /// Reads the two magic constant bytes
+    fn read_magic_constant(&mut self) -> ImageResult<[u8; 2]> {
+        let mut magic: [u8; 2] = [0, 0];
+        self.read_exact(&mut magic)
+            .map_err(ImageError::IoError)?;
+        Ok(magic)
+    }
+
+    /// Reads a string as well as a single whitespace after it, ignoring comments
+    fn read_next_string(&mut self) -> ImageResult<String> {
+        let mut bytes = Vec::new();
+
+        // pair input bytes with a bool mask to remove comments
+        // `partof` is true while the byte is outside a `#`-comment; a comment
+        // runs from `#` to the next CR or LF.
+        let mark_comments = self.bytes().scan(true, |partof, read| {
+            let byte = match read {
+                Err(err) => return Some((*partof, Err(err))),
+                Ok(byte) => byte,
+            };
+            let cur_enabled = *partof && byte != b'#';
+            let next_enabled = cur_enabled || (byte == b'\r' || byte == b'\n');
+            *partof = next_enabled;
+            Some((cur_enabled, Ok(byte)))
+        });
+
+        for (_, byte) in mark_comments.filter(|ref e| e.0) {
+            match byte {
+                // The 6 POSIX whitespace characters: skip leading ones,
+                // terminate the token on the first trailing one.
+                Ok(b'\t') | Ok(b'\n') | Ok(b'\x0b') | Ok(b'\x0c') | Ok(b'\r') | Ok(b' ') => {
+                    if !bytes.is_empty() {
+                        break; // We're done as we already have some content
+                    }
+                }
+                Ok(byte) if !byte.is_ascii() => {
+                    return Err(ImageError::FormatError(
+                        format!("Non ascii character {} in header", byte),
+                    ));
+                },
+                Ok(byte) => {
+                    bytes.push(byte);
+                },
+                Err(_) => break,
+            }
+        }
+
+        if bytes.is_empty() {
+            return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into()));
+        }
+
+        if !bytes.as_slice().is_ascii() {
+            // We have only filled the buffer with characters for which `byte.is_ascii()` holds.
+            unreachable!("Non ascii character should have returned sooner")
+        }
+
+        let string = String::from_utf8(bytes)
+            // We checked the precondition ourselves a few lines before, `bytes.as_slice().is_ascii()`.
+            .unwrap_or_else(|_| unreachable!("Only ascii characters should be decoded"));
+
+        Ok(string)
+    }
+
+    /// Read the next line
+    fn read_next_line(&mut self) -> ImageResult<String> {
+        let mut buffer = String::new();
+        self.read_line(&mut buffer)
+            .map_err(ImageError::IoError)?;
+        Ok(buffer)
+    }
+
+    /// Reads the next whitespace-delimited token and parses it as `u32`.
+    fn read_next_u32(&mut self) -> ImageResult<u32> {
+        let s = self.read_next_string()?;
+        s.parse::<u32>()
+            .map_err(|err| ImageError::FormatError(
+                format!("Error parsing number {} in preamble: {}", s, err)
+            ))
+    }
+
+    /// PBM header: `<width> <height>` (no maxval; samples are 1-bit).
+    fn read_bitmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<BitmapHeader> {
+        let width = self.read_next_u32()?;
+        let height = self.read_next_u32()?;
+        Ok(BitmapHeader {
+            encoding,
+            width,
+            height,
+        })
+    }
+
+    /// PGM header: identical layout to PPM, so reuse the pixmap parser and
+    /// rename `maxval` to `maxwhite`.
+    fn read_graymap_header(&mut self, encoding: SampleEncoding) -> ImageResult<GraymapHeader> {
+        self.read_pixmap_header(encoding).map(
+            |PixmapHeader {
+                 encoding,
+                 width,
+                 height,
+                 maxval,
+             }| GraymapHeader {
+                encoding,
+                width,
+                height,
+                maxwhite: maxval,
+            },
+        )
+    }
+
+    /// PPM header: `<width> <height> <maxval>`.
+    fn read_pixmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<PixmapHeader> {
+        let width = self.read_next_u32()?;
+        let height = self.read_next_u32()?;
+        let maxval = self.read_next_u32()?;
+        Ok(PixmapHeader {
+            encoding,
+            width,
+            height,
+            maxval,
+        })
+    }
+
+    /// PAM (P7) header: line-oriented `KEY value` pairs terminated by
+    /// `ENDHDR`. HEIGHT/WIDTH/DEPTH/MAXVAL are each required exactly once;
+    /// TUPLTYPE lines may repeat and are concatenated with spaces.
+    fn read_arbitrary_header(&mut self) -> ImageResult<ArbitraryHeader> {
+        // The magic must be followed directly by a newline.
+        match self.bytes().next() {
+            None => return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into())),
+            Some(Err(io)) => return Err(ImageError::IoError(io)),
+            Some(Ok(b'\n')) => (),
+            Some(Ok(c)) => {
+                return Err(ImageError::FormatError(
+                    format!("Expected newline after P7 magic instead of {}", c),
+                ))
+            }
+        }
+
+        let mut line = String::new();
+        let mut height: Option<u32> = None;
+        let mut width: Option<u32> = None;
+        let mut depth: Option<u32> = None;
+        let mut maxval: Option<u32> = None;
+        let mut tupltype: Option<String> = None;
+        loop {
+            line.truncate(0);
+            let len = self.read_line(&mut line).map_err(ImageError::IoError)?;
+            if len == 0 {
+                return Err(ImageError::FormatError(
+                    "Unexpected end of pnm header".to_string(),
+                ))
+            }
+            if line.as_bytes()[0] == b'#' {
+                continue;
+            }
+            if !line.is_ascii() {
+                return Err(ImageError::FormatError(
+                    "Only ascii characters allowed in pam header".to_string(),
+                ));
+            }
+            #[allow(deprecated)]
+            // NOTE(review): the split index is computed on the untrimmed
+            // `line` but applied to `trim_left()`'s result — these only agree
+            // when the line has no leading whitespace; confirm against spec
+            // inputs before relying on indented header lines.
+            let (identifier, rest) = line.trim_left()
+                .split_at(line.find(char::is_whitespace).unwrap_or_else(|| line.len()));
+            match identifier {
+                "ENDHDR" => break,
+                "HEIGHT" => if height.is_some() {
+                    return Err(ImageError::FormatError("Duplicate HEIGHT line".to_string()));
+                } else {
+                    let h = rest.trim()
+                        .parse::<u32>()
+                        .map_err(|err| ImageError::FormatError(
+                            format!("Invalid height {}: {}", rest, err)
+                        ))?;
+                    height = Some(h);
+                },
+                "WIDTH" => if width.is_some() {
+                    return Err(ImageError::FormatError("Duplicate WIDTH line".to_string()));
+                } else {
+                    let w = rest.trim()
+                        .parse::<u32>()
+                        .map_err(|err| ImageError::FormatError(
+                            format!("Invalid width {}: {}", rest, err)
+                        ))?;
+                    width = Some(w);
+                },
+                "DEPTH" => if depth.is_some() {
+                    return Err(ImageError::FormatError("Duplicate DEPTH line".to_string()));
+                } else {
+                    let d = rest.trim()
+                        .parse::<u32>()
+                        .map_err(|err| ImageError::FormatError(
+                            format!("Invalid depth {}: {}", rest, err)
+                        ))?;
+                    depth = Some(d);
+                },
+                "MAXVAL" => if maxval.is_some() {
+                    return Err(ImageError::FormatError("Duplicate MAXVAL line".to_string()));
+                } else {
+                    let m = rest.trim()
+                        .parse::<u32>()
+                        .map_err(|err| ImageError::FormatError(
+                            format!("Invalid maxval {}: {}", rest, err)
+                        ))?;
+                    maxval = Some(m);
+                },
+                "TUPLTYPE" => {
+                    // Multiple TUPLTYPE lines are concatenated per the spec.
+                    let identifier = rest.trim();
+                    if tupltype.is_some() {
+                        let appended = tupltype.take().map(|mut v| {
+                            v.push(' ');
+                            v.push_str(identifier);
+                            v
+                        });
+                        tupltype = appended;
+                    } else {
+                        tupltype = Some(identifier.to_string());
+                    }
+                }
+                _ => return Err(ImageError::FormatError("Unknown header line".to_string())),
+            }
+        }
+
+        // All four numeric fields are mandatory; report the first missing one.
+        let (h, w, d, m) = match (height, width, depth, maxval) {
+            (None, _, _, _) => {
+                return Err(ImageError::FormatError(
+                    "Expected one HEIGHT line".to_string(),
+                ))
+            }
+            (_, None, _, _) => {
+                return Err(ImageError::FormatError(
+                    "Expected one WIDTH line".to_string(),
+                ))
+            }
+            (_, _, None, _) => {
+                return Err(ImageError::FormatError(
+                    "Expected one DEPTH line".to_string(),
+                ))
+            }
+            (_, _, _, None) => {
+                return Err(ImageError::FormatError(
+                    "Expected one MAXVAL line".to_string(),
+                ))
+            }
+            (Some(h), Some(w), Some(d), Some(m)) => (h, w, d, m),
+        };
+
+        // Well-known tuple type names map to enum variants; anything else is
+        // preserved verbatim as `Custom`.
+        let tupltype = match tupltype {
+            None => None,
+            Some(ref t) if t == "BLACKANDWHITE" => Some(ArbitraryTuplType::BlackAndWhite),
+            Some(ref t) if t == "BLACKANDWHITE_ALPHA" => {
+                Some(ArbitraryTuplType::BlackAndWhiteAlpha)
+            }
+            Some(ref t) if t == "GRAYSCALE" => Some(ArbitraryTuplType::Grayscale),
+            Some(ref t) if t == "GRAYSCALE_ALPHA" => Some(ArbitraryTuplType::GrayscaleAlpha),
+            Some(ref t) if t == "RGB" => Some(ArbitraryTuplType::RGB),
+            Some(ref t) if t == "RGB_ALPHA" => Some(ArbitraryTuplType::RGBAlpha),
+            Some(other) => Some(ArbitraryTuplType::Custom(other)),
+        };
+
+        Ok(ArbitraryHeader {
+            height: h,
+            width: w,
+            depth: d,
+            maxval: m,
+            tupltype,
+        })
+    }
+}
+
+impl<R: Read> HeaderReader for BufReader<R> {}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct PnmReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for PnmReader<R> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        // Fast path: if nothing has been read yet and the caller's buffer is
+        // empty, hand over the decoded vector wholesale instead of copying.
+        if self.0.position() == 0 && buf.is_empty() {
+            mem::swap(buf, self.0.get_mut());
+            Ok(buf.len())
+        } else {
+            self.0.read_to_end(buf)
+        }
+    }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for PnmDecoder<R> {
+    type Reader = PnmReader<R>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        (self.header.width(), self.header.height())
+    }
+
+    /// Color type of the *decoded* buffer (bit formats are expanded to L8).
+    fn color_type(&self) -> ColorType {
+        match self.tuple {
+            TupleType::PbmBit => ColorType::L8,
+            TupleType::BWBit => ColorType::L8,
+            TupleType::GrayU8 => ColorType::L8,
+            TupleType::GrayU16 => ColorType::L16,
+            TupleType::RGBU8 => ColorType::Rgb8,
+            TupleType::RGBU16 => ColorType::Rgb16,
+        }
+    }
+
+    /// Color type as stored in the file (1-bit formats report L1).
+    fn original_color_type(&self) -> ExtendedColorType {
+        match self.tuple {
+            TupleType::PbmBit => ExtendedColorType::L1,
+            TupleType::BWBit => ExtendedColorType::L1,
+            TupleType::GrayU8 => ExtendedColorType::L8,
+            TupleType::GrayU16 => ExtendedColorType::L16,
+            TupleType::RGBU8 => ExtendedColorType::Rgb8,
+            TupleType::RGBU16 => ExtendedColorType::Rgb16,
+        }
+    }
+
+    // Decodes eagerly into memory and serves reads from the resulting vec.
+    fn into_reader(self) -> ImageResult<Self::Reader> {
+        Ok(PnmReader(Cursor::new(image::decoder_to_vec(self)?), PhantomData))
+    }
+
+    /// Decodes the raster into `buf`, dispatching on the tuple type.
+    /// Panics if `buf.len()` does not equal `total_bytes()` (caller contract).
+    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+        buf.copy_from_slice(&match self.tuple {
+            TupleType::PbmBit => self.read_samples::<PbmBit>(1),
+            TupleType::BWBit => self.read_samples::<BWBit>(1),
+            TupleType::RGBU8 => self.read_samples::<U8>(3),
+            TupleType::RGBU16 => self.read_samples::<U16>(3),
+            TupleType::GrayU8 => self.read_samples::<U8>(1),
+            TupleType::GrayU16 => self.read_samples::<U16>(1),
+        }?);
+        Ok(())
+    }
+}
+
+fn err_input_is_too_short() -> ImageError {
+ return ImageError::FormatError(
+ "Not enough data was provided to the Decoder to decode the image".into()
+ )
+}
+
+impl<R: Read> PnmDecoder<R> {
+    /// Reads and decodes the raster using strategy `S` with `components`
+    /// samples per pixel, dispatching on the header's sample encoding.
+    fn read_samples<S: Sample>(&mut self, components: u32) -> ImageResult<Vec<u8>> {
+        match self.subtype().sample_encoding() {
+            SampleEncoding::Binary => {
+                let width = self.header.width();
+                let height = self.header.height();
+                let bytecount = S::bytelen(width, height, components)?;
+                let mut bytes = vec![];
+
+                // `take` caps the read so a malformed stream cannot make us
+                // buffer unbounded data.
+                self.reader
+                    .by_ref()
+                    // This conversion is potentially lossy but unlikely and in that case we error
+                    // later anyways.
+                    .take(bytecount as u64)
+                    .read_to_end(&mut bytes)?;
+
+                if bytes.len() != bytecount {
+                    return Err(err_input_is_too_short());
+                }
+
+                let samples = S::from_bytes(&bytes, width, height, components)?;
+                Ok(samples)
+            }
+            SampleEncoding::Ascii => {
+                let samples = self.read_ascii::<S>(components)?;
+                Ok(samples)
+            }
+        }
+    }
+
+    // Delegates ASCII decoding to the sample strategy.
+    fn read_ascii<Basic: Sample>(&mut self, components: u32) -> ImageResult<Vec<u8>> {
+        Basic::from_ascii(&mut self.reader, self.header.width(), self.header.height(), components)
+    }
+
+    /// Get the pnm subtype, depending on the magic constant contained in the header
+    pub fn subtype(&self) -> PNMSubtype {
+        self.header.subtype()
+    }
+}
+
+/// Reads one whitespace-separated ASCII token from `reader` and parses it
+/// as `T`. Leading POSIX whitespace is skipped; the token ends at the next
+/// whitespace byte (which is consumed).
+fn read_separated_ascii<T: FromStr>(reader: &mut dyn Read) -> ImageResult<T>
+    where T::Err: Display
+{
+    // The 6 POSIX whitespace characters permitted as separators.
+    let is_separator = |v: &u8| match *v {
+        b'\t' | b'\n' | b'\x0b' | b'\x0c' | b'\r' | b' ' => true,
+        _ => false,
+    };
+
+    let token = reader
+        .bytes()
+        .skip_while(|v| v.as_ref().ok().map(&is_separator).unwrap_or(false))
+        .take_while(|v| v.as_ref().ok().map(|c| !is_separator(c)).unwrap_or(false))
+        .collect::<Result<Vec<u8>, _>>()?;
+
+    if !token.is_ascii() {
+        return Err(ImageError::FormatError(
+            "Non ascii character where sample value was expected".to_string(),
+        ));
+    }
+
+    let string = str::from_utf8(&token)
+        // We checked the precondition ourselves a few lines before, `token.is_ascii()`.
+        .unwrap_or_else(|_| unreachable!("Only ascii characters should be decoded"));
+
+    string
+        .parse()
+        .map_err(|err| ImageError::FormatError(format!("Error parsing {} as a sample: {}", string, err)))
+}
+
+impl Sample for U8 {
+    // One byte per sample. NOTE(review): `width * height * samples` is u32
+    // arithmetic and could wrap for pathological dimensions — confirm callers
+    // validate sizes upstream.
+    fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+        Ok((width * height * samples) as usize)
+    }
+
+    // Raw bytes already are the output representation.
+    fn from_bytes(
+        bytes: &[u8],
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        assert_eq!(bytes.len(), Self::bytelen(width, height, samples).unwrap());
+        Ok(bytes.to_vec())
+    }
+
+    // One ASCII token per sample.
+    fn from_ascii(
+        reader: &mut dyn Read,
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        (0..width*height*samples)
+            .map(|_| read_separated_ascii(reader))
+            .collect()
+    }
+}
+
+impl Sample for U16 {
+    // Two bytes per sample on the wire.
+    fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+        Ok((width * height * samples * 2) as usize)
+    }
+
+    // PNM stores 16-bit samples big-endian; convert in place to the native
+    // byte order expected by the output buffer.
+    fn from_bytes(
+        bytes: &[u8],
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        assert_eq!(bytes.len(), Self::bytelen(width, height, samples).unwrap());
+
+        let mut buffer = bytes.to_vec();
+        for chunk in buffer.chunks_mut(2) {
+            let v = BigEndian::read_u16(chunk);
+            NativeEndian::write_u16(chunk, v);
+        }
+        Ok(buffer)
+    }
+
+    // One ASCII token per 16-bit sample, written native-endian.
+    fn from_ascii(
+        reader: &mut dyn Read,
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        let mut buffer = vec![0; (width * height * samples * 2) as usize];
+        for i in 0..(width*height*samples) as usize {
+            let v = read_separated_ascii::<u16>(reader)?;
+            NativeEndian::write_u16(&mut buffer[2*i..][..2], v);
+        }
+        Ok(buffer)
+    }
+}
+
+// The image is encoded in rows of bits, high order bits first. Any bits beyond the row bits should
+// be ignored. Also, contrary to rgb, black pixels are encoded as a 1 while white is 0. This will
+// need to be reversed for the grayscale output.
+impl Sample for PbmBit {
+    // Each row is padded to a whole byte: ceil(width*samples / 8) bytes.
+    fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+        let count = width * samples;
+        let linelen = (count / 8) + ((count % 8) != 0) as u32;
+        Ok((linelen * height) as usize)
+    }
+
+    fn from_bytes(
+        bytes: &[u8],
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        assert_eq!(bytes.len(), Self::bytelen(width, height, samples).unwrap());
+
+        // Expand packed bits to one byte per sample, then invert so that
+        // PBM's 1-is-black becomes grayscale's 0-is-black.
+        let mut expanded = utils::expand_bits(1, width * samples, bytes);
+        for b in expanded.iter_mut() {
+            *b = !*b;
+        }
+        Ok(expanded)
+    }
+
+    fn from_ascii(
+        reader: &mut dyn Read,
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        let count = (width*height*samples) as usize;
+        // ASCII PBM digits need no separators; whitespace is skipped, and the
+        // black/white polarity is inverted here ('0' -> 1, '1' -> 0).
+        let raw_samples = reader.bytes()
+            .filter_map(|ascii| match ascii {
+                Ok(b'0') => Some(Ok(1)),
+                Ok(b'1') => Some(Ok(0)),
+                Err(err) => Some(Err(ImageError::IoError(err))),
+                Ok(b'\t')
+                | Ok(b'\n')
+                | Ok(b'\x0b')
+                | Ok(b'\x0c')
+                | Ok(b'\r')
+                | Ok(b' ') => None,
+                Ok(c) => Some(Err(ImageError::FormatError(
+                    format!("Unexpected character {} within sample raster", c),
+                ))),
+            })
+            // `take` bounds the iteration so trailing garbage after the
+            // raster cannot cause an infinite loop.
+            .take(count)
+            .collect::<ImageResult<Vec<u8>>>()?;
+
+        if raw_samples.len() < count {
+            return Err(err_input_is_too_short())
+        }
+
+        Ok(raw_samples)
+    }
+}
+
+// Encoded just like a normal U8 but we check the values.
+impl Sample for BWBit {
+    fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+        U8::bytelen(width, height, samples)
+    }
+
+    // Decode as plain bytes, then reject any value other than 0 or 1.
+    fn from_bytes(
+        bytes: &[u8],
+        width: u32,
+        height: u32,
+        samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        assert_eq!(bytes.len(), Self::bytelen(width, height, samples).unwrap());
+
+        let values = U8::from_bytes(bytes, width, height, samples)?;
+        if let Some(val) = values.iter().find(|&val| *val > 1) {
+            return Err(ImageError::FormatError(
+                format!("Sample value {} outside of bounds", val),
+            ));
+        };
+        Ok(values)
+    }
+
+    // PAM (the only producer of BWBit) is always binary-encoded.
+    fn from_ascii(
+        _reader: &mut dyn Read,
+        _width: u32,
+        _height: u32,
+        _samples: u32,
+    ) -> ImageResult<Vec<u8>> {
+        unreachable!("BW bits from anymaps are never encoded as ascii")
+    }
+}
+
+// PBM is always 1-bit packed samples.
+impl DecodableImageHeader for BitmapHeader {
+    fn tuple_type(&self) -> ImageResult<TupleType> {
+        Ok(TupleType::PbmBit)
+    }
+}
+
+// PGM: sample width chosen by maxwhite (<=255 -> u8, <=65535 -> u16).
+impl DecodableImageHeader for GraymapHeader {
+    fn tuple_type(&self) -> ImageResult<TupleType> {
+        match self.maxwhite {
+            v if v <= 0xFF => Ok(TupleType::GrayU8),
+            v if v <= 0xFFFF => Ok(TupleType::GrayU16),
+            _ => Err(ImageError::FormatError(
+                "Image maxval is not less or equal to 65535".to_string(),
+            )),
+        }
+    }
+}
+
+// PPM: sample width chosen by maxval (<=255 -> u8, <=65535 -> u16).
+impl DecodableImageHeader for PixmapHeader {
+    fn tuple_type(&self) -> ImageResult<TupleType> {
+        match self.maxval {
+            v if v <= 0xFF => Ok(TupleType::RGBU8),
+            v if v <= 0xFFFF => Ok(TupleType::RGBU16),
+            _ => Err(ImageError::FormatError(
+                "Image maxval is not less or equal to 65535".to_string(),
+            )),
+        }
+    }
+}
+
+// PAM: tuple type resolved from the optional TUPLTYPE combined with the
+// DEPTH/MAXVAL fields; alpha-carrying tuple types are not supported.
+impl DecodableImageHeader for ArbitraryHeader {
+    fn tuple_type(&self) -> ImageResult<TupleType> {
+        match self.tupltype {
+            // No TUPLTYPE: infer from depth alone (gray/rgb, alpha rejected).
+            None if self.depth == 1 => Ok(TupleType::GrayU8),
+            None if self.depth == 2 => Err(ImageError::UnsupportedColor(ExtendedColorType::La8)),
+            None if self.depth == 3 => Ok(TupleType::RGBU8),
+            None if self.depth == 4 => Err(ImageError::UnsupportedColor(ExtendedColorType::Rgba8)),
+
+            Some(ArbitraryTuplType::BlackAndWhite) if self.maxval == 1 && self.depth == 1 => {
+                Ok(TupleType::BWBit)
+            }
+            Some(ArbitraryTuplType::BlackAndWhite) => Err(ImageError::FormatError(
+                "Invalid depth or maxval for tuple type BLACKANDWHITE".to_string(),
+            )),
+
+            Some(ArbitraryTuplType::Grayscale) if self.depth == 1 && self.maxval <= 0xFF => {
+                Ok(TupleType::GrayU8)
+            }
+            Some(ArbitraryTuplType::Grayscale) if self.depth <= 1 && self.maxval <= 0xFFFF => {
+                Ok(TupleType::GrayU16)
+            }
+            Some(ArbitraryTuplType::Grayscale) => Err(ImageError::FormatError(
+                "Invalid depth or maxval for tuple type GRAYSCALE".to_string(),
+            )),
+
+            Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFF => {
+                Ok(TupleType::RGBU8)
+            }
+            Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFFFF => {
+                Ok(TupleType::RGBU16)
+            }
+            Some(ArbitraryTuplType::RGB) => Err(ImageError::FormatError(
+                "Invalid depth for tuple type RGB".to_string(),
+            )),
+
+            Some(ArbitraryTuplType::BlackAndWhiteAlpha) => Err(ImageError::FormatError(
+                "Unsupported color type: BlackAndWhiteAlpha".to_string()
+            )),
+            Some(ArbitraryTuplType::GrayscaleAlpha) => {
+                Err(ImageError::UnsupportedColor(ExtendedColorType::La8))
+            }
+            Some(ArbitraryTuplType::RGBAlpha) => {
+                Err(ImageError::UnsupportedColor(ExtendedColorType::Rgba8))
+            }
+            _ => Err(ImageError::FormatError(
+                "Tuple type not recognized".to_string(),
+            )),
+        }
+    }
+}
+#[cfg(test)]
+mod tests {
+    use super::*;
+    /// Tests reading of a valid blackandwhite pam
+    #[test]
+    fn pam_blackandwhite() {
+        let pamdata = b"P7
+WIDTH 4
+HEIGHT 4
+DEPTH 1
+MAXVAL 1
+TUPLTYPE BLACKANDWHITE
+# Comment line
+ENDHDR
+\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01";
+        let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+        assert_eq!(decoder.dimensions(), (4, 4));
+        assert_eq!(decoder.subtype(), PNMSubtype::ArbitraryMap);
+
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(
+            image,
+            vec![0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00,
+                 0x00, 0x01]
+        );
+        // Also verify the fully decoded header fields.
+        match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Arbitrary(ArbitraryHeader {
+                            width: 4,
+                            height: 4,
+                            maxval: 1,
+                            depth: 1,
+                            tupltype: Some(ArbitraryTuplType::BlackAndWhite),
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    /// Tests reading of a valid grayscale pam
+    #[test]
+    fn pam_grayscale() {
+        let pamdata = b"P7
+WIDTH 4
+HEIGHT 4
+DEPTH 1
+MAXVAL 255
+TUPLTYPE GRAYSCALE
+# Comment line
+ENDHDR
+\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef";
+        let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.dimensions(), (4, 4));
+        assert_eq!(decoder.subtype(), PNMSubtype::ArbitraryMap);
+
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(
+            image,
+            vec![0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+                 0xbe, 0xef]
+        );
+        match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Arbitrary(ArbitraryHeader {
+                            width: 4,
+                            height: 4,
+                            depth: 1,
+                            maxval: 255,
+                            tupltype: Some(ArbitraryTuplType::Grayscale),
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    /// Tests reading of a valid rgb pam
+    #[test]
+    fn pam_rgb() {
+        // Header lines deliberately appear in a non-canonical order; the
+        // parser must accept any ordering before ENDHDR.
+        let pamdata = b"P7
+# Comment line
+MAXVAL 255
+TUPLTYPE RGB
+DEPTH 3
+WIDTH 2
+HEIGHT 2
+ENDHDR
+\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef";
+        let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::Rgb8);
+        assert_eq!(decoder.dimensions(), (2, 2));
+        assert_eq!(decoder.subtype(), PNMSubtype::ArbitraryMap);
+
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(image,
+                   vec![0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef]);
+        match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Arbitrary(ArbitraryHeader {
+                            maxval: 255,
+                            tupltype: Some(ArbitraryTuplType::RGB),
+                            depth: 3,
+                            width: 2,
+                            height: 2,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    #[test]
+    fn pbm_binary() {
+        // The data contains two rows of the image (each line is padded to the full byte). For
+        // comments on its format, see documentation of `impl SampleType for PbmBit`.
+        let pbmbinary = [&b"P4 6 2\n"[..], &[0b01101100 as u8, 0b10110111]].concat();
+        let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+        assert_eq!(decoder.dimensions(), (6, 2));
+        assert_eq!(
+            decoder.subtype(),
+            PNMSubtype::Bitmap(SampleEncoding::Binary)
+        );
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        // Bits are inverted on output: PBM 0 (white) becomes 255.
+        assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]);
+        match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Bitmap(BitmapHeader {
+                            encoding: SampleEncoding::Binary,
+                            width: 6,
+                            height: 2,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    /// A previous infinite loop.
+    #[test]
+    fn pbm_binary_ascii_termination() {
+        use std::io::{Cursor, Error, ErrorKind, Read, Result};
+        // Reader that errors once its static data runs out, instead of
+        // returning Ok(0); the decoder must not spin forever on it.
+        struct FailRead(Cursor<&'static [u8]>);
+
+        impl Read for FailRead {
+            fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+                match self.0.read(buf) {
+                    Ok(n) if n > 0 => Ok(n),
+                    _ => Err(Error::new(
+                        ErrorKind::BrokenPipe,
+                        "Simulated broken pipe error"
+                    )),
+                }
+            }
+        }
+
+        let pbmbinary = FailRead(Cursor::new(b"P1 1 1\n"));
+
+        let decoder = PnmDecoder::new(pbmbinary).unwrap();
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).expect_err("Image is malformed");
+    }
+
+    #[test]
+    fn pbm_ascii() {
+        // The data contains two rows of the image (each line is padded to the full byte). For
+        // comments on its format, see documentation of `impl SampleType for PbmBit`. Tests all
+        // whitespace characters that should be allowed (the 6 characters according to POSIX).
+        let pbmbinary = b"P1 6 2\n 0 1 1 0 1 1\n1 0 1 1 0\t\n\x0b\x0c\r1";
+        let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+        assert_eq!(decoder.dimensions(), (6, 2));
+        assert_eq!(decoder.subtype(), PNMSubtype::Bitmap(SampleEncoding::Ascii));
+
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(image, vec![1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0]);
+        match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Bitmap(BitmapHeader {
+                            encoding: SampleEncoding::Ascii,
+                            width: 6,
+                            height: 2,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    #[test]
+    fn pbm_ascii_nospace() {
+        // The data contains two rows of the image (each line is padded to the full byte). Notably,
+        // it is completely within specification for the ascii data not to contain separating
+        // whitespace for the pbm format or any mix.
+        let pbmbinary = b"P1 6 2\n011011101101";
+        let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+        assert_eq!(decoder.dimensions(), (6, 2));
+        assert_eq!(decoder.subtype(), PNMSubtype::Bitmap(SampleEncoding::Ascii));
+
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(image, vec![1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0]);
+        match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Bitmap(BitmapHeader {
+                            encoding: SampleEncoding::Ascii,
+                            width: 6,
+                            height: 2,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    #[test]
+    fn pgm_binary() {
+        // The data contains two rows of the image (each line is padded to the full byte). For
+        // comments on its format, see documentation of `impl SampleType for PbmBit`.
+        let elements = (0..16).collect::<Vec<_>>();
+        let pbmbinary = [&b"P5 4 4 255\n"[..], &elements].concat();
+        let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.dimensions(), (4, 4));
+        assert_eq!(
+            decoder.subtype(),
+            PNMSubtype::Graymap(SampleEncoding::Binary)
+        );
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(image, elements);
+        match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Graymap(GraymapHeader {
+                            encoding: SampleEncoding::Binary,
+                            width: 4,
+                            height: 4,
+                            maxwhite: 255,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+
+    #[test]
+    fn pgm_ascii() {
+        // The data contains two rows of the image (each line is padded to the full byte). For
+        // comments on its format, see documentation of `impl SampleType for PbmBit`.
+        let pbmbinary = b"P2 4 4 255\n 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15";
+        let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+        assert_eq!(decoder.color_type(), ColorType::L8);
+        assert_eq!(decoder.dimensions(), (4, 4));
+        assert_eq!(
+            decoder.subtype(),
+            PNMSubtype::Graymap(SampleEncoding::Ascii)
+        );
+        let mut image = vec![0; decoder.total_bytes() as usize];
+        decoder.read_image(&mut image).unwrap();
+        assert_eq!(image, (0..16).collect::<Vec<_>>());
+        match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+            (
+                _,
+                PNMHeader {
+                    decoded:
+                        HeaderRecord::Graymap(GraymapHeader {
+                            encoding: SampleEncoding::Ascii,
+                            width: 4,
+                            height: 4,
+                            maxwhite: 255,
+                        }),
+                    encoded: _,
+                },
+            ) => (),
+            _ => panic!("Decoded header is incorrect"),
+        }
+    }
+}
diff --git a/third_party/rust/image/src/pnm/encoder.rs b/third_party/rust/image/src/pnm/encoder.rs
new file mode 100644
index 0000000000..5e9d2ed769
--- /dev/null
+++ b/third_party/rust/image/src/pnm/encoder.rs
@@ -0,0 +1,653 @@
+//! Encoding of PNM Images
+use std::fmt;
+use std::io;
+
+use std::io::Write;
+
+use super::AutoBreak;
+use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader};
+use super::{HeaderRecord, PNMHeader, PNMSubtype, SampleEncoding};
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{ImageError, ImageResult};
+use crate::image::ImageEncoder;
+
+use byteorder::{BigEndian, WriteBytesExt};
+
+enum HeaderStrategy {
+ Dynamic,
+ Subtype(PNMSubtype),
+ Chosen(PNMHeader),
+}
+
+#[derive(Clone, Copy)]
+pub enum FlatSamples<'a> {
+ U8(&'a [u8]),
+ U16(&'a [u16]),
+}
+
+/// Encodes images to any of the `pnm` image formats.
+pub struct PNMEncoder<W: Write> {
+ writer: W,
+ header: HeaderStrategy,
+}
+
+/// Encapsulate the checking system in the type system. None of the fields is actually accessed,
+/// but requiring them forces us to construct the struct validly anyway.
+struct CheckedImageBuffer<'a> {
+ _image: FlatSamples<'a>,
+ _width: u32,
+ _height: u32,
+ _color: ExtendedColorType,
+}
+
+// Check the header against the buffer. Each struct produces the next after a check.
+struct UncheckedHeader<'a> {
+ header: &'a PNMHeader,
+}
+
+struct CheckedDimensions<'a> {
+ unchecked: UncheckedHeader<'a>,
+ width: u32,
+ height: u32,
+}
+
+struct CheckedHeaderColor<'a> {
+ dimensions: CheckedDimensions<'a>,
+ color: ExtendedColorType,
+}
+
+struct CheckedHeader<'a> {
+ color: CheckedHeaderColor<'a>,
+ encoding: TupleEncoding<'a>,
+ _image: CheckedImageBuffer<'a>,
+}
+
+enum TupleEncoding<'a> {
+ PbmBits {
+ samples: FlatSamples<'a>,
+ width: u32,
+ },
+ Ascii {
+ samples: FlatSamples<'a>,
+ },
+ Bytes {
+ samples: FlatSamples<'a>,
+ },
+}
+
+impl<W: Write> PNMEncoder<W> {
+ /// Create new PNMEncoder from the `writer`.
+ ///
+ /// The encoded images will have some `pnm` format. If more control over the image type is
+ /// required, use either one of `with_subtype` or `with_header`. For more information on the
+ /// behaviour, see `with_dynamic_header`.
+ pub fn new(writer: W) -> Self {
+ PNMEncoder {
+ writer,
+ header: HeaderStrategy::Dynamic,
+ }
+ }
+
+ /// Encode a specific pnm subtype image.
+ ///
+ /// The magic number and encoding type will be chosen as provided while the rest of the header
+ /// data will be generated dynamically. Trying to encode incompatible images (e.g. encoding an
+ /// RGB image as Graymap) will result in an error.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_header` and `with_dynamic_header`.
+ pub fn with_subtype(self, subtype: PNMSubtype) -> Self {
+ PNMEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Subtype(subtype),
+ }
+ }
+
+ /// Enforce the use of a chosen header.
+ ///
+ /// While this option gives the most control over the actual written data, the encoding process
+    /// will error in case the header data and image parameters do not agree. It is the user's
+    /// obligation to ensure that the width and height are set accordingly, for example.
+ ///
+ /// Choose this option if you want a lossless decoding/encoding round trip.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_subtype` and `with_dynamic_header`.
+ pub fn with_header(self, header: PNMHeader) -> Self {
+ PNMEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Chosen(header),
+ }
+ }
+
+ /// Create the header dynamically for each image.
+ ///
+ /// This is the default option upon creation of the encoder. With this, most images should be
+ /// encodable but the specific format chosen is out of the users control. The pnm subtype is
+ /// chosen arbitrarily by the library.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_subtype` and `with_header`.
+ pub fn with_dynamic_header(self) -> Self {
+ PNMEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Dynamic,
+ }
+ }
+
+    /// Encode an image whose samples are represented as `u8` or `u16`.
+    ///
+    /// Some `pnm` subtypes are incompatible with some color options; a chosen header is most
+    /// certainly incompatible with any deviation from the original decoded image.
+ pub fn encode<'s, S>(
+ &mut self,
+ image: S,
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()>
+ where
+ S: Into<FlatSamples<'s>>,
+ {
+ let image = image.into();
+ match self.header {
+ HeaderStrategy::Dynamic => self.write_dynamic_header(image, width, height, color.into()),
+ HeaderStrategy::Subtype(subtype) => {
+ self.write_subtyped_header(subtype, image, width, height, color.into())
+ }
+ HeaderStrategy::Chosen(ref header) => {
+ Self::write_with_header(&mut self.writer, header, image, width, height, color.into())
+ }
+ }
+ }
+
+ /// Choose any valid pnm format that the image can be expressed in and write its header.
+ ///
+ /// Returns how the body should be written if successful.
+ fn write_dynamic_header(
+ &mut self,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let depth = u32::from(color.channel_count());
+ let (maxval, tupltype) = match color {
+ ExtendedColorType::L1 => (1, ArbitraryTuplType::BlackAndWhite),
+ ExtendedColorType::L8 => (0xff, ArbitraryTuplType::Grayscale),
+ ExtendedColorType::L16 => (0xffff, ArbitraryTuplType::Grayscale),
+ ExtendedColorType::La1 => (1, ArbitraryTuplType::BlackAndWhiteAlpha),
+ ExtendedColorType::La8 => (0xff, ArbitraryTuplType::GrayscaleAlpha),
+ ExtendedColorType::La16 => (0xffff, ArbitraryTuplType::GrayscaleAlpha),
+ ExtendedColorType::Rgb8 => (0xff, ArbitraryTuplType::RGB),
+ ExtendedColorType::Rgb16 => (0xffff, ArbitraryTuplType::RGB),
+ ExtendedColorType::Rgba8 => (0xff, ArbitraryTuplType::RGBAlpha),
+ ExtendedColorType::Rgba16 => (0xffff, ArbitraryTuplType::RGBAlpha),
+ _ => {
+ return Err(ImageError::UnsupportedColor(color))
+ }
+ };
+
+ let header = PNMHeader {
+ decoded: HeaderRecord::Arbitrary(ArbitraryHeader {
+ width,
+ height,
+ depth,
+ maxval,
+ tupltype: Some(tupltype),
+ }),
+ encoded: None,
+ };
+
+ Self::write_with_header(&mut self.writer, &header, image, width, height, color)
+ }
+
+ /// Try to encode the image with the chosen format, give its corresponding pixel encoding type.
+ fn write_subtyped_header(
+ &mut self,
+ subtype: PNMSubtype,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let header = match (subtype, color) {
+ (PNMSubtype::ArbitraryMap, color) => {
+ return self.write_dynamic_header(image, width, height, color)
+ }
+ (PNMSubtype::Pixmap(encoding), ExtendedColorType::Rgb8) => PNMHeader {
+ decoded: HeaderRecord::Pixmap(PixmapHeader {
+ encoding,
+ width,
+ height,
+ maxval: 255,
+ }),
+ encoded: None,
+ },
+ (PNMSubtype::Graymap(encoding), ExtendedColorType::L8) => PNMHeader {
+ decoded: HeaderRecord::Graymap(GraymapHeader {
+ encoding,
+ width,
+ height,
+ maxwhite: 255,
+ }),
+ encoded: None,
+ },
+ (PNMSubtype::Bitmap(encoding), ExtendedColorType::L8)
+ | (PNMSubtype::Bitmap(encoding), ExtendedColorType::L1) => PNMHeader {
+ decoded: HeaderRecord::Bitmap(BitmapHeader {
+ encoding,
+ width,
+ height,
+ }),
+ encoded: None,
+ },
+ (_, _) => {
+ // FIXME https://github.com/image-rs/image/issues/921
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Color type can not be represented in the chosen format",
+ )))
+ }
+ };
+
+ Self::write_with_header(&mut self.writer, &header, image, width, height, color)
+ }
+
+ /// Try to encode the image with the chosen header, checking if values are correct.
+ ///
+ /// Returns how the body should be written if successful.
+ fn write_with_header(
+ writer: &mut dyn Write,
+ header: &PNMHeader,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let unchecked = UncheckedHeader { header };
+
+ unchecked
+ .check_header_dimensions(width, height)?
+ .check_header_color(color)?
+ .check_sample_values(image)?
+ .write_header(writer)?
+ .write_image(writer)
+ }
+}
+
+impl<W: Write> ImageEncoder for PNMEncoder<W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+impl<'a> CheckedImageBuffer<'a> {
+ fn check(
+ image: FlatSamples<'a>,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<CheckedImageBuffer<'a>> {
+ let components = color.channel_count() as usize;
+ let uwidth = width as usize;
+ let uheight = height as usize;
+ match Some(components)
+ .and_then(|v| v.checked_mul(uwidth))
+ .and_then(|v| v.checked_mul(uheight))
+ {
+ None => Err(ImageError::DimensionError),
+ Some(v) if v == image.len() => Ok(CheckedImageBuffer {
+ _image: image,
+ _width: width,
+ _height: height,
+ _color: color,
+ }),
+ Some(_) => Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ &"Image buffer does not correspond to size and colour".to_string()[..],
+ ))),
+ }
+ }
+}
+
+impl<'a> UncheckedHeader<'a> {
+ fn check_header_dimensions(
+ self,
+ width: u32,
+ height: u32,
+ ) -> ImageResult<CheckedDimensions<'a>> {
+ if self.header.width() != width || self.header.height() != height {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Chosen header does not match Image dimensions",
+ )));
+ }
+
+ Ok(CheckedDimensions {
+ unchecked: self,
+ width,
+ height,
+ })
+ }
+}
+
+impl<'a> CheckedDimensions<'a> {
+ // Check color compatibility with the header. This will only error when we are certain that
+    // the combination is bogus (e.g. combining Pixmap and Palette) but allows uncertain
+    // combinations (basically an ArbitraryTuplType::Custom with any color of fitting depth).
+ fn check_header_color(self, color: ExtendedColorType) -> ImageResult<CheckedHeaderColor<'a>> {
+ let components = u32::from(color.channel_count());
+
+ match *self.unchecked.header {
+ PNMHeader {
+ decoded: HeaderRecord::Bitmap(_),
+ ..
+ } => match color {
+ ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "PBM format only support luma color types",
+ )))
+ }
+ },
+ PNMHeader {
+ decoded: HeaderRecord::Graymap(_),
+ ..
+ } => match color {
+ ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "PGM format only support luma color types",
+ )))
+ }
+ },
+ PNMHeader {
+ decoded: HeaderRecord::Pixmap(_),
+ ..
+ } => match color {
+ ExtendedColorType::Rgb8 => (),
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "PPM format only support ExtendedColorType::Rgb8",
+ )))
+ }
+ },
+ PNMHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ depth,
+ ref tupltype,
+ ..
+ }),
+ ..
+ } => match (tupltype, color) {
+ (&Some(ArbitraryTuplType::BlackAndWhite), ExtendedColorType::L1) => (),
+ (&Some(ArbitraryTuplType::BlackAndWhiteAlpha), ExtendedColorType::La8) => (),
+
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L1) => (),
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L8) => (),
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L16) => (),
+ (&Some(ArbitraryTuplType::GrayscaleAlpha), ExtendedColorType::La8) => (),
+
+ (&Some(ArbitraryTuplType::RGB), ExtendedColorType::Rgb8) => (),
+ (&Some(ArbitraryTuplType::RGBAlpha), ExtendedColorType::Rgba8) => (),
+
+ (&None, _) if depth == components => (),
+ (&Some(ArbitraryTuplType::Custom(_)), _) if depth == components => (),
+ _ if depth != components => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("Depth mismatch: header {} vs. color {}", depth, components),
+ )))
+ }
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Invalid color type for selected PAM color type",
+ )))
+ }
+ },
+ }
+
+ Ok(CheckedHeaderColor {
+ dimensions: self,
+ color,
+ })
+ }
+}
+
+impl<'a> CheckedHeaderColor<'a> {
+ fn check_sample_values(self, image: FlatSamples<'a>) -> ImageResult<CheckedHeader<'a>> {
+ let header_maxval = match self.dimensions.unchecked.header.decoded {
+ HeaderRecord::Bitmap(_) => 1,
+ HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
+ HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
+ HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
+ };
+
+ // We trust the image color bit count to be correct at least.
+ let max_sample = match self.color {
+ ExtendedColorType::Unknown(n) if n <= 16 => (1 << n) - 1,
+ ExtendedColorType::L1 => 1,
+ ExtendedColorType::L8
+ | ExtendedColorType::La8
+ | ExtendedColorType::Rgb8
+ | ExtendedColorType::Rgba8
+ | ExtendedColorType::Bgr8
+ | ExtendedColorType::Bgra8
+ => 0xff,
+ ExtendedColorType::L16
+ | ExtendedColorType::La16
+ | ExtendedColorType::Rgb16
+ | ExtendedColorType::Rgba16
+ => 0xffff,
+ ExtendedColorType::__NonExhaustive(marker) => match marker._private {},
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Unsupported target color type",
+ )))
+ }
+ };
+
+ // Avoid the performance heavy check if possible, e.g. if the header has been chosen by us.
+ if header_maxval < max_sample && !image.all_smaller(header_maxval) {
+        // FIXME https://github.com/image-rs/image/issues/921: no ImageError variant seems
+        // appropriate in this situation; UnsupportedHeaderFormat, maybe?
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Sample value greater than allowed for chosen header",
+ )));
+ }
+
+ let encoding = image.encoding_for(&self.dimensions.unchecked.header.decoded);
+
+ let image = CheckedImageBuffer::check(
+ image,
+ self.dimensions.width,
+ self.dimensions.height,
+ self.color,
+ )?;
+
+ Ok(CheckedHeader {
+ color: self,
+ encoding,
+ _image: image,
+ })
+ }
+}
+
+impl<'a> CheckedHeader<'a> {
+ fn write_header(self, writer: &mut dyn Write) -> ImageResult<TupleEncoding<'a>> {
+ self.header().write(writer)?;
+ Ok(self.encoding)
+ }
+
+ fn header(&self) -> &PNMHeader {
+ self.color.dimensions.unchecked.header
+ }
+}
+
+struct SampleWriter<'a>(&'a mut dyn Write);
+
+impl<'a> SampleWriter<'a> {
+ fn write_samples_ascii<V>(self, samples: V) -> io::Result<()>
+ where
+ V: Iterator,
+ V::Item: fmt::Display,
+ {
+ let mut auto_break_writer = AutoBreak::new(self.0, 70);
+ for value in samples {
+ write!(auto_break_writer, "{} ", value)?;
+ }
+ auto_break_writer.flush()
+ }
+
+ fn write_pbm_bits<V>(self, samples: &[V], width: u32) -> io::Result<()>
+ /* Default gives 0 for all primitives. TODO: replace this with `Zeroable` once it hits stable */
+ where
+ V: Default + Eq + Copy,
+ {
+ // The length of an encoded scanline
+ let line_width = (width - 1) / 8 + 1;
+
+ // We'll be writing single bytes, so buffer
+ let mut line_buffer = Vec::with_capacity(line_width as usize);
+
+ for line in samples.chunks(width as usize) {
+ for byte_bits in line.chunks(8) {
+ let mut byte = 0u8;
+ for i in 0..8 {
+ // Black pixels are encoded as 1s
+ if let Some(&v) = byte_bits.get(i) {
+ if v == V::default() {
+ byte |= 1u8 << (7 - i)
+ }
+ }
+ }
+ line_buffer.push(byte)
+ }
+ self.0.write_all(line_buffer.as_slice())?;
+ line_buffer.clear();
+ }
+
+ self.0.flush()
+ }
+}
+
+impl<'a> FlatSamples<'a> {
+ fn len(&self) -> usize {
+ match *self {
+ FlatSamples::U8(arr) => arr.len(),
+ FlatSamples::U16(arr) => arr.len(),
+ }
+ }
+
+ fn all_smaller(&self, max_val: u32) -> bool {
+ match *self {
+ FlatSamples::U8(arr) => arr.iter().any(|&val| u32::from(val) > max_val),
+ FlatSamples::U16(arr) => arr.iter().any(|&val| u32::from(val) > max_val),
+ }
+ }
+
+ fn encoding_for(&self, header: &HeaderRecord) -> TupleEncoding<'a> {
+ match *header {
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Binary,
+ width,
+ ..
+ }) => TupleEncoding::PbmBits {
+ samples: *self,
+ width,
+ },
+
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ }) => TupleEncoding::Ascii { samples: *self },
+
+ HeaderRecord::Arbitrary(_) => TupleEncoding::Bytes { samples: *self },
+
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ })
+ | HeaderRecord::Pixmap(PixmapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ }) => TupleEncoding::Ascii { samples: *self },
+
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Binary,
+ ..
+ })
+ | HeaderRecord::Pixmap(PixmapHeader {
+ encoding: SampleEncoding::Binary,
+ ..
+ }) => TupleEncoding::Bytes { samples: *self },
+ }
+ }
+}
+
+impl<'a> From<&'a [u8]> for FlatSamples<'a> {
+ fn from(samples: &'a [u8]) -> Self {
+ FlatSamples::U8(samples)
+ }
+}
+
+impl<'a> From<&'a [u16]> for FlatSamples<'a> {
+ fn from(samples: &'a [u16]) -> Self {
+ FlatSamples::U16(samples)
+ }
+}
+
+impl<'a> TupleEncoding<'a> {
+ fn write_image(&self, writer: &mut dyn Write) -> ImageResult<()> {
+ match *self {
+ TupleEncoding::PbmBits {
+ samples: FlatSamples::U8(samples),
+ width,
+ } => SampleWriter(writer)
+ .write_pbm_bits(samples, width)
+ .map_err(ImageError::IoError),
+ TupleEncoding::PbmBits {
+ samples: FlatSamples::U16(samples),
+ width,
+ } => SampleWriter(writer)
+ .write_pbm_bits(samples, width)
+ .map_err(ImageError::IoError),
+
+ TupleEncoding::Bytes {
+ samples: FlatSamples::U8(samples),
+ } => writer.write_all(samples).map_err(ImageError::IoError),
+ TupleEncoding::Bytes {
+ samples: FlatSamples::U16(samples),
+ } => samples
+ .iter()
+ .map(|&sample| {
+ writer
+ .write_u16::<BigEndian>(sample)
+ .map_err(ImageError::IoError)
+ })
+ .collect(),
+
+ TupleEncoding::Ascii {
+ samples: FlatSamples::U8(samples),
+ } => SampleWriter(writer)
+ .write_samples_ascii(samples.iter())
+ .map_err(ImageError::IoError),
+ TupleEncoding::Ascii {
+ samples: FlatSamples::U16(samples),
+ } => SampleWriter(writer)
+ .write_samples_ascii(samples.iter())
+ .map_err(ImageError::IoError),
+ }
+ }
+}
diff --git a/third_party/rust/image/src/pnm/header.rs b/third_party/rust/image/src/pnm/header.rs
new file mode 100644
index 0000000000..927d8fa64c
--- /dev/null
+++ b/third_party/rust/image/src/pnm/header.rs
@@ -0,0 +1,348 @@
+use std::io;
+
+/// The kind of encoding used to store sample values
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum SampleEncoding {
+ /// Samples are unsigned binary integers in big endian
+ Binary,
+
+ /// Samples are encoded as decimal ascii strings separated by whitespace
+ Ascii,
+}
+
+/// Denotes the category of the magic number
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum PNMSubtype {
+ /// Magic numbers P1 and P4
+ Bitmap(SampleEncoding),
+
+ /// Magic numbers P2 and P5
+ Graymap(SampleEncoding),
+
+ /// Magic numbers P3 and P6
+ Pixmap(SampleEncoding),
+
+ /// Magic number P7
+ ArbitraryMap,
+}
+
+/// Stores the complete header data of a file.
+///
+/// Internally, provides mechanisms for lossless reencoding. After reading a file with the decoder
+/// it is possible to recover the header and construct an encoder. Using the encoder on the just
+/// loaded image should result in a byte copy of the original file (for single image pnms without
+/// additional trailing data).
+pub struct PNMHeader {
+ pub(crate) decoded: HeaderRecord,
+ pub(crate) encoded: Option<Vec<u8>>,
+}
+
+pub(crate) enum HeaderRecord {
+ Bitmap(BitmapHeader),
+ Graymap(GraymapHeader),
+ Pixmap(PixmapHeader),
+ Arbitrary(ArbitraryHeader),
+}
+
+/// Header produced by a `pbm` file ("Portable Bit Map")
+#[derive(Clone, Copy, Debug)]
+pub struct BitmapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+}
+
+/// Header produced by a `pgm` file ("Portable Gray Map")
+#[derive(Clone, Copy, Debug)]
+pub struct GraymapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Maximum sample value within the image
+ pub maxwhite: u32,
+}
+
+/// Header produced by a `ppm` file ("Portable Pixel Map")
+#[derive(Clone, Copy, Debug)]
+pub struct PixmapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Maximum sample value within the image
+ pub maxval: u32,
+}
+
+/// Header produced by a `pam` file ("Portable Arbitrary Map")
+#[derive(Clone, Debug)]
+pub struct ArbitraryHeader {
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Number of color channels
+ pub depth: u32,
+
+ /// Maximum sample value within the image
+ pub maxval: u32,
+
+ /// Color interpretation of image pixels
+ pub tupltype: Option<ArbitraryTuplType>,
+}
+
+/// Standardized tuple type specifiers in the header of a `pam`.
+#[derive(Clone, Debug)]
+pub enum ArbitraryTuplType {
+ /// Pixels are either black (0) or white (1)
+ BlackAndWhite,
+
+ /// Pixels are either black (0) or white (1) and a second alpha channel
+ BlackAndWhiteAlpha,
+
+ /// Pixels represent the amount of white
+ Grayscale,
+
+ /// Grayscale with an additional alpha channel
+ GrayscaleAlpha,
+
+ /// Three channels: Red, Green, Blue
+ RGB,
+
+ /// Four channels: Red, Green, Blue, Alpha
+ RGBAlpha,
+
+ /// An image format which is not standardized
+ Custom(String),
+}
+
+impl PNMSubtype {
+ /// Get the two magic constant bytes corresponding to this format subtype.
+ pub fn magic_constant(self) -> &'static [u8; 2] {
+ match self {
+ PNMSubtype::Bitmap(SampleEncoding::Ascii) => b"P1",
+ PNMSubtype::Graymap(SampleEncoding::Ascii) => b"P2",
+ PNMSubtype::Pixmap(SampleEncoding::Ascii) => b"P3",
+ PNMSubtype::Bitmap(SampleEncoding::Binary) => b"P4",
+ PNMSubtype::Graymap(SampleEncoding::Binary) => b"P5",
+ PNMSubtype::Pixmap(SampleEncoding::Binary) => b"P6",
+ PNMSubtype::ArbitraryMap => b"P7",
+ }
+ }
+
+ /// Whether samples are stored as binary or as decimal ascii
+ pub fn sample_encoding(self) -> SampleEncoding {
+ match self {
+ PNMSubtype::ArbitraryMap => SampleEncoding::Binary,
+ PNMSubtype::Bitmap(enc) => enc,
+ PNMSubtype::Graymap(enc) => enc,
+ PNMSubtype::Pixmap(enc) => enc,
+ }
+ }
+}
+
+impl PNMHeader {
+ /// Retrieve the format subtype from which the header was created.
+ pub fn subtype(&self) -> PNMSubtype {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { encoding, .. }) => PNMSubtype::Bitmap(encoding),
+ HeaderRecord::Graymap(GraymapHeader { encoding, .. }) => PNMSubtype::Graymap(encoding),
+ HeaderRecord::Pixmap(PixmapHeader { encoding, .. }) => PNMSubtype::Pixmap(encoding),
+ HeaderRecord::Arbitrary(ArbitraryHeader { .. }) => PNMSubtype::ArbitraryMap,
+ }
+ }
+
+ /// The width of the image this header is for.
+ pub fn width(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { width, .. }) => width,
+ HeaderRecord::Graymap(GraymapHeader { width, .. }) => width,
+ HeaderRecord::Pixmap(PixmapHeader { width, .. }) => width,
+ HeaderRecord::Arbitrary(ArbitraryHeader { width, .. }) => width,
+ }
+ }
+
+ /// The height of the image this header is for.
+ pub fn height(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { height, .. }) => height,
+ HeaderRecord::Graymap(GraymapHeader { height, .. }) => height,
+ HeaderRecord::Pixmap(PixmapHeader { height, .. }) => height,
+ HeaderRecord::Arbitrary(ArbitraryHeader { height, .. }) => height,
+ }
+ }
+
+ /// The biggest value a sample can have. In other words, the colour resolution.
+ pub fn maximal_sample(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { .. }) => 1,
+ HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
+ HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
+ HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
+ }
+ }
+
+ /// Retrieve the underlying bitmap header if any
+ pub fn as_bitmap(&self) -> Option<&BitmapHeader> {
+ match self.decoded {
+ HeaderRecord::Bitmap(ref bitmap) => Some(bitmap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying graymap header if any
+ pub fn as_graymap(&self) -> Option<&GraymapHeader> {
+ match self.decoded {
+ HeaderRecord::Graymap(ref graymap) => Some(graymap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying pixmap header if any
+ pub fn as_pixmap(&self) -> Option<&PixmapHeader> {
+ match self.decoded {
+ HeaderRecord::Pixmap(ref pixmap) => Some(pixmap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying arbitrary header if any
+ pub fn as_arbitrary(&self) -> Option<&ArbitraryHeader> {
+ match self.decoded {
+ HeaderRecord::Arbitrary(ref arbitrary) => Some(arbitrary),
+ _ => None,
+ }
+ }
+
+ /// Write the header back into a binary stream
+ pub fn write(&self, writer: &mut dyn io::Write) -> io::Result<()> {
+ writer.write_all(self.subtype().magic_constant())?;
+ match *self {
+ PNMHeader {
+ encoded: Some(ref content),
+ ..
+ } => writer.write_all(content),
+ PNMHeader {
+ decoded:
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {}", width, height),
+ PNMHeader {
+ decoded:
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ maxwhite,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {} {}", width, height, maxwhite),
+ PNMHeader {
+ decoded:
+ HeaderRecord::Pixmap(PixmapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ maxval,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {} {}", width, height, maxval),
+ PNMHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ width,
+ height,
+ depth,
+ maxval,
+ ref tupltype,
+ }),
+ ..
+ } => {
+ #[allow(unused_assignments)]
+ // Declared here so its lifetime exceeds the matching. This is a trivial
+ // constructor, no allocation takes place and in the custom case we must allocate
+ // regardless due to borrow. Still, the warnings checker does pick this up :/
+ // Could use std::borrow::Cow instead but that really doesn't achieve anything but
+ // increasing type complexity.
+ let mut custom_fallback = String::new();
+
+ let tupltype = match *tupltype {
+ None => "",
+ Some(ArbitraryTuplType::BlackAndWhite) => "TUPLTYPE BLACKANDWHITE\n",
+ Some(ArbitraryTuplType::BlackAndWhiteAlpha) => "TUPLTYPE BLACKANDWHITE_ALPHA\n",
+ Some(ArbitraryTuplType::Grayscale) => "TUPLTYPE GRAYSCALE\n",
+ Some(ArbitraryTuplType::GrayscaleAlpha) => "TUPLTYPE GRAYSCALE_ALPHA\n",
+ Some(ArbitraryTuplType::RGB) => "TUPLTYPE RGB\n",
+ Some(ArbitraryTuplType::RGBAlpha) => "TUPLTYPE RGB_ALPHA\n",
+ Some(ArbitraryTuplType::Custom(ref custom)) => {
+ custom_fallback = format!("TUPLTYPE {}", custom);
+ &custom_fallback
+ }
+ };
+
+ writeln!(
+ writer,
+ "\nWIDTH {}\nHEIGHT {}\nDEPTH {}\nMAXVAL {}\n{}ENDHDR",
+ width, height, depth, maxval, tupltype
+ )
+ }
+ }
+ }
+}
+
+impl From<BitmapHeader> for PNMHeader {
+ fn from(header: BitmapHeader) -> Self {
+ PNMHeader {
+ decoded: HeaderRecord::Bitmap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<GraymapHeader> for PNMHeader {
+ fn from(header: GraymapHeader) -> Self {
+ PNMHeader {
+ decoded: HeaderRecord::Graymap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<PixmapHeader> for PNMHeader {
+ fn from(header: PixmapHeader) -> Self {
+ PNMHeader {
+ decoded: HeaderRecord::Pixmap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<ArbitraryHeader> for PNMHeader {
+ fn from(header: ArbitraryHeader) -> Self {
+ PNMHeader {
+ decoded: HeaderRecord::Arbitrary(header),
+ encoded: None,
+ }
+ }
+}
diff --git a/third_party/rust/image/src/pnm/mod.rs b/third_party/rust/image/src/pnm/mod.rs
new file mode 100644
index 0000000000..f1c568f13a
--- /dev/null
+++ b/third_party/rust/image/src/pnm/mod.rs
@@ -0,0 +1,149 @@
+//! Decoding of netpbm image formats (pbm, pgm, ppm and pam).
+//!
+//! The formats pbm, pgm and ppm are fully supported. The pam decoder recognizes the tuple types
+//! `BLACKANDWHITE`, `GRAYSCALE` and `RGB` and explicitly recognizes but rejects their `_ALPHA`
+//! variants for now, as alpha color types are unsupported.
+use self::autobreak::AutoBreak;
+pub use self::decoder::PnmDecoder;
+pub use self::encoder::PNMEncoder;
+use self::header::HeaderRecord;
+pub use self::header::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader,
+ PixmapHeader};
+pub use self::header::{PNMHeader, PNMSubtype, SampleEncoding};
+
+mod autobreak;
+mod decoder;
+mod encoder;
+mod header;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use byteorder::{ByteOrder, NativeEndian};
+ use crate::color::ColorType;
+ use crate::image::ImageDecoder;
+
+ fn execute_roundtrip_default(buffer: &[u8], width: u32, height: u32, color: ColorType) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PNMEncoder::new(&mut encoded_buffer);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image.as_slice(), buffer);
+ }
+
+ fn execute_roundtrip_with_subtype(
+ buffer: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ subtype: PNMSubtype,
+ ) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PNMEncoder::new(&mut encoded_buffer).with_subtype(subtype);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(header.subtype(), subtype);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image.as_slice(), buffer);
+ }
+
+ fn execute_roundtrip_u16(buffer: &[u16], width: u32, height: u32, color: ColorType) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PNMEncoder::new(&mut encoded_buffer);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ let mut buffer_u8 = vec![0; buffer.len() * 2];
+ NativeEndian::write_u16_into(buffer, &mut buffer_u8[..]);
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image, buffer_u8);
+ }
+
+ #[test]
+ fn roundtrip_rgb() {
+ #[rustfmt::skip]
+ let buf: [u8; 27] = [
+ 0, 0, 0,
+ 0, 0, 255,
+ 0, 255, 0,
+ 0, 255, 255,
+ 255, 0, 0,
+ 255, 0, 255,
+ 255, 255, 0,
+ 255, 255, 255,
+ 255, 255, 255,
+ ];
+ execute_roundtrip_default(&buf, 3, 3, ColorType::Rgb8);
+ execute_roundtrip_with_subtype(&buf, 3, 3, ColorType::Rgb8, PNMSubtype::ArbitraryMap);
+ execute_roundtrip_with_subtype(
+ &buf,
+ 3,
+ 3,
+ ColorType::Rgb8,
+ PNMSubtype::Pixmap(SampleEncoding::Binary),
+ );
+ execute_roundtrip_with_subtype(
+ &buf,
+ 3,
+ 3,
+ ColorType::Rgb8,
+ PNMSubtype::Pixmap(SampleEncoding::Ascii),
+ );
+ }
+
+ #[test]
+ fn roundtrip_u16() {
+ let buf: [u16; 6] = [0, 1, 0xFFFF, 0x1234, 0x3412, 0xBEAF];
+
+ execute_roundtrip_u16(&buf, 6, 1, ColorType::L16);
+ }
+}
diff --git a/third_party/rust/image/src/tga/decoder.rs b/third_party/rust/image/src/tga/decoder.rs
new file mode 100644
index 0000000000..29145e1fb3
--- /dev/null
+++ b/third_party/rust/image/src/tga/decoder.rs
@@ -0,0 +1,529 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::io;
+use std::io::{Read, Seek};
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult};
+use crate::image::{ImageDecoder, ImageReadBuffer};
+
+/// TGA image-type codes (field 3 of the file header).
+enum ImageType {
+    NoImageData = 0,
+    /// Uncompressed images
+    RawColorMap = 1,
+    RawTrueColor = 2,
+    RawGrayScale = 3,
+    /// Run length encoded images
+    RunColorMap = 9,
+    RunTrueColor = 10,
+    RunGrayScale = 11,
+    /// Any code not listed above.
+    Unknown,
+}
+
+impl ImageType {
+    /// Create a new image type from a u8
+    fn new(img_type: u8) -> ImageType {
+        match img_type {
+            0 => ImageType::NoImageData,
+
+            1 => ImageType::RawColorMap,
+            2 => ImageType::RawTrueColor,
+            3 => ImageType::RawGrayScale,
+
+            9 => ImageType::RunColorMap,
+            10 => ImageType::RunTrueColor,
+            11 => ImageType::RunGrayScale,
+
+            _ => ImageType::Unknown,
+        }
+    }
+
+    /// Check if the image format uses colors as opposed to gray scale
+    fn is_color(&self) -> bool {
+        match *self {
+            ImageType::RawColorMap
+            | ImageType::RawTrueColor
+            | ImageType::RunTrueColor
+            | ImageType::RunColorMap => true,
+            _ => false,
+        }
+    }
+
+    /// Does the image use a color map
+    fn is_color_mapped(&self) -> bool {
+        match *self {
+            ImageType::RawColorMap | ImageType::RunColorMap => true,
+            _ => false,
+        }
+    }
+
+    /// Is the image run length encoded
+    fn is_encoded(&self) -> bool {
+        match *self {
+            ImageType::RunColorMap | ImageType::RunTrueColor | ImageType::RunGrayScale => true,
+            _ => false,
+        }
+    }
+}
+
+/// Header used by TGA image files
+///
+/// Field order mirrors the fixed 18-byte TGA header; multi-byte fields are
+/// stored little-endian on disk (see `from_reader`).
+#[derive(Debug)]
+struct Header {
+    id_length: u8,      // length of ID string
+    map_type: u8,       // color map type
+    image_type: u8,     // image type code
+    map_origin: u16,    // starting index of map
+    map_length: u16,    // length of map
+    map_entry_size: u8, // size of map entries in bits
+    x_origin: u16,      // x-origin of image
+    y_origin: u16,      // y-origin of image
+    image_width: u16,   // width of image
+    image_height: u16,  // height of image
+    pixel_depth: u8,    // bits per pixel
+    image_desc: u8,     // image descriptor
+}
+
+impl Header {
+    /// Create a header with all values set to zero
+    fn new() -> Header {
+        Header {
+            id_length: 0,
+            map_type: 0,
+            image_type: 0,
+            map_origin: 0,
+            map_length: 0,
+            map_entry_size: 0,
+            x_origin: 0,
+            y_origin: 0,
+            image_width: 0,
+            image_height: 0,
+            pixel_depth: 0,
+            image_desc: 0,
+        }
+    }
+
+    /// Load the header with values from the reader
+    ///
+    /// Reads exactly the 18 header bytes; any I/O error is propagated.
+    fn from_reader(r: &mut dyn Read) -> ImageResult<Header> {
+        Ok(Header {
+            id_length: r.read_u8()?,
+            map_type: r.read_u8()?,
+            image_type: r.read_u8()?,
+            map_origin: r.read_u16::<LittleEndian>()?,
+            map_length: r.read_u16::<LittleEndian>()?,
+            map_entry_size: r.read_u8()?,
+            x_origin: r.read_u16::<LittleEndian>()?,
+            y_origin: r.read_u16::<LittleEndian>()?,
+            image_width: r.read_u16::<LittleEndian>()?,
+            image_height: r.read_u16::<LittleEndian>()?,
+            pixel_depth: r.read_u8()?,
+            image_desc: r.read_u8()?,
+        })
+    }
+}
+
+/// In-memory copy of the TGA color map (palette), stored as raw bytes.
+struct ColorMap {
+    /// sizes in bytes
+    start_offset: usize,
+    entry_size: usize,
+    bytes: Vec<u8>,
+}
+
+impl ColorMap {
+    /// Read `num_entries` palette entries of `bits_per_entry` bits each from `r`.
+    /// Entry size is rounded up to whole bytes.
+    pub(crate) fn from_reader(
+        r: &mut dyn Read,
+        start_offset: u16,
+        num_entries: u16,
+        bits_per_entry: u8,
+    ) -> ImageResult<ColorMap> {
+        let bytes_per_entry = (bits_per_entry as usize + 7) / 8;
+
+        let mut bytes = vec![0; bytes_per_entry * num_entries as usize];
+        r.read_exact(&mut bytes)?;
+
+        Ok(ColorMap {
+            entry_size: bytes_per_entry,
+            start_offset: start_offset as usize,
+            bytes,
+        })
+    }
+
+    /// Get one entry from the color map
+    ///
+    /// NOTE(review): the slice below is unchecked, so an out-of-range `index`
+    /// (e.g. from a malformed color-mapped file) panics rather than erroring.
+    pub(crate) fn get(&self, index: usize) -> &[u8] {
+        let entry = self.start_offset + self.entry_size * index;
+        &self.bytes[entry..entry + self.entry_size]
+    }
+}
+
+/// The representation of a TGA decoder
+pub struct TgaDecoder<R> {
+    r: R,
+
+    // Cached from the header by `read_header` (width/height in pixels,
+    // pixel depth rounded up to whole bytes).
+    width: usize,
+    height: usize,
+    bytes_per_pixel: usize,
+    has_loaded_metadata: bool,
+
+    image_type: ImageType,
+    color_type: ColorType,
+
+    header: Header,
+    color_map: Option<ColorMap>,
+
+    // Used in read_scanline
+    line_read: Option<usize>,
+    line_remain_buff: Vec<u8>,
+}
+
+impl<R: Read + Seek> TgaDecoder<R> {
+    /// Create a new decoder that decodes from the stream `r`
+    ///
+    /// Eagerly parses the header, image ID, and color map so that
+    /// dimensions and color type are available immediately.
+    pub fn new(r: R) -> ImageResult<TgaDecoder<R>> {
+        let mut decoder = TgaDecoder {
+            r,
+
+            width: 0,
+            height: 0,
+            bytes_per_pixel: 0,
+            has_loaded_metadata: false,
+
+            image_type: ImageType::Unknown,
+            color_type: ColorType::L8,
+
+            header: Header::new(),
+            color_map: None,
+
+            line_read: None,
+            line_remain_buff: Vec::new(),
+        };
+        decoder.read_metadata()?;
+        Ok(decoder)
+    }
+
+    /// Parse the file header and cache the derived width, height and
+    /// bytes-per-pixel (bit depth rounded up to whole bytes).
+    fn read_header(&mut self) -> ImageResult<()> {
+        self.header = Header::from_reader(&mut self.r)?;
+        self.image_type = ImageType::new(self.header.image_type);
+        self.width = self.header.image_width as usize;
+        self.height = self.header.image_height as usize;
+        self.bytes_per_pixel = (self.header.pixel_depth as usize + 7) / 8;
+        Ok(())
+    }
+
+    /// Read all metadata sections once; subsequent calls are no-ops.
+    fn read_metadata(&mut self) -> ImageResult<()> {
+        if !self.has_loaded_metadata {
+            self.read_header()?;
+            self.read_image_id()?;
+            self.read_color_map()?;
+            self.read_color_information()?;
+            self.has_loaded_metadata = true;
+        }
+        Ok(())
+    }
+
+    /// Loads the color information for the decoder
+    ///
+    /// To keep things simple, we won't handle bit depths that aren't divisible
+    /// by 8 and are less than 32.
+    fn read_color_information(&mut self) -> ImageResult<()> {
+        if self.header.pixel_depth % 8 != 0 {
+            return Err(ImageError::UnsupportedError(
+                "Bit depth must be divisible by 8".to_string(),
+            ));
+        }
+        // NOTE(review): the check rejects depths strictly greater than 32,
+        // so 32 itself is allowed even though the message says "less than 32".
+        if self.header.pixel_depth > 32 {
+            return Err(ImageError::UnsupportedError(
+                "Bit depth must be less than 32".to_string(),
+            ));
+        }
+
+        // Low nibble of the image descriptor holds the alpha-channel bit count.
+        let num_alpha_bits = self.header.image_desc & 0b1111;
+
+        // For color-mapped images the effective depth is the palette entry
+        // size, not the (index) pixel depth.
+        let other_channel_bits = if self.header.map_type != 0 {
+            self.header.map_entry_size
+        } else {
+            if num_alpha_bits > self.header.pixel_depth {
+                return Err(ImageError::UnsupportedError(
+                    format!("Color format not supported. Alpha bits: {}", num_alpha_bits),
+                ));
+            }
+
+            self.header.pixel_depth - num_alpha_bits
+        };
+        let color = self.image_type.is_color();
+
+        match (num_alpha_bits, other_channel_bits, color) {
+            // really, the encoding is BGR and BGRA, this is fixed
+            // up with `TgaDecoder::reverse_encoding`.
+            (0, 32, true) => self.color_type = ColorType::Rgba8,
+            (8, 24, true) => self.color_type = ColorType::Rgba8,
+            (0, 24, true) => self.color_type = ColorType::Rgb8,
+            (8, 8, false) => self.color_type = ColorType::La8,
+            (0, 8, false) => self.color_type = ColorType::L8,
+            _ => {
+                return Err(ImageError::UnsupportedError(format!(
+                    "Color format not supported. Bit depth: {}, Alpha bits: {}",
+                    other_channel_bits, num_alpha_bits
+                )))
+            }
+        }
+        Ok(())
+    }
+
+    /// Read the image id field
+    ///
+    /// We're not interested in this field, so this function skips it if it
+    /// is present
+    fn read_image_id(&mut self) -> ImageResult<()> {
+        self.r
+            .seek(io::SeekFrom::Current(i64::from(self.header.id_length)))?;
+        Ok(())
+    }
+
+    /// Read the color map section when the header declares one (map_type == 1).
+    fn read_color_map(&mut self) -> ImageResult<()> {
+        if self.header.map_type == 1 {
+            self.color_map = Some(ColorMap::from_reader(
+                &mut self.r,
+                self.header.map_origin,
+                self.header.map_length,
+                self.header.map_entry_size,
+            )?);
+        }
+        Ok(())
+    }
+
+    /// Expands indices into its mapped color
+    ///
+    /// NOTE(review): unwraps `self.color_map`, so this panics if called for a
+    /// file whose image type claims to be color-mapped but has no map section.
+    fn expand_color_map(&self, pixel_data: &[u8]) -> Vec<u8> {
+        #[inline]
+        fn bytes_to_index(bytes: &[u8]) -> usize {
+            // Interpret the pixel's bytes as a big-endian palette index.
+            let mut result = 0usize;
+            for byte in bytes.iter() {
+                result = result << 8 | *byte as usize;
+            }
+            result
+        }
+
+        let bytes_per_entry = (self.header.map_entry_size as usize + 7) / 8;
+        let mut result = Vec::with_capacity(self.width * self.height * bytes_per_entry);
+
+        let color_map = self.color_map.as_ref().unwrap();
+
+        for chunk in pixel_data.chunks(self.bytes_per_pixel) {
+            let index = bytes_to_index(chunk);
+            result.extend(color_map.get(index).iter().cloned());
+        }
+
+        result
+    }
+
+    /// Reads a run length encoded data for given number of bytes
+    ///
+    /// NOTE(review): the `take(..).read_to_end(..)` calls tolerate short reads,
+    /// so a truncated stream can yield fewer bytes than requested without error.
+    fn read_encoded_data(&mut self, num_bytes: usize) -> io::Result<Vec<u8>> {
+        let mut pixel_data = Vec::with_capacity(num_bytes);
+
+        while pixel_data.len() < num_bytes {
+            let run_packet = self.r.read_u8()?;
+            // If the highest bit in `run_packet` is set, then we repeat pixels
+            //
+            // Note: the TGA format adds 1 to both counts because having a count
+            // of 0 would be pointless.
+            if (run_packet & 0x80) != 0 {
+                // high bit set, so we will repeat the data
+                let repeat_count = ((run_packet & !0x80) + 1) as usize;
+                let mut data = Vec::with_capacity(self.bytes_per_pixel);
+                self.r
+                    .by_ref()
+                    .take(self.bytes_per_pixel as u64)
+                    .read_to_end(&mut data)?;
+                for _ in 0usize..repeat_count {
+                    pixel_data.extend(data.iter().cloned());
+                }
+            } else {
+                // not set, so `run_packet+1` is the number of non-encoded pixels
+                let num_raw_bytes = (run_packet + 1) as usize * self.bytes_per_pixel;
+                self.r
+                    .by_ref()
+                    .take(num_raw_bytes as u64)
+                    .read_to_end(&mut pixel_data)?;
+            }
+        }
+
+        Ok(pixel_data)
+    }
+
+    /// Reads a run length encoded packet
+    fn read_all_encoded_data(&mut self) -> ImageResult<Vec<u8>> {
+        let num_bytes = self.width * self.height * self.bytes_per_pixel;
+
+        Ok(self.read_encoded_data(num_bytes)?)
+    }
+
+    /// Reads a run length encoded line
+    ///
+    /// RLE packets may straddle scanlines, so bytes left over from a previous
+    /// call are served first and any surplus is stashed in `line_remain_buff`.
+    fn read_encoded_line(&mut self) -> io::Result<Vec<u8>> {
+        let line_num_bytes = self.width * self.bytes_per_pixel;
+        let remain_len = self.line_remain_buff.len();
+
+        if remain_len >= line_num_bytes {
+            let remain_buf = self.line_remain_buff.clone();
+
+            self.line_remain_buff = remain_buf[line_num_bytes..].to_vec();
+            return Ok(remain_buf[0..line_num_bytes].to_vec());
+        }
+
+        let num_bytes = line_num_bytes - remain_len;
+
+        let line_data = self.read_encoded_data(num_bytes)?;
+
+        let mut pixel_data = Vec::with_capacity(line_num_bytes);
+        pixel_data.append(&mut self.line_remain_buff);
+        pixel_data.extend_from_slice(&line_data[..num_bytes]);
+
+        // put the remain data to line_remain_buff
+        self.line_remain_buff = line_data[num_bytes..].to_vec();
+
+        Ok(pixel_data)
+    }
+
+    /// Reverse from BGR encoding to RGB encoding
+    ///
+    /// TGA files are stored in the BGRA encoding. This function swaps
+    /// the blue and red bytes in the `pixels` array.
+    fn reverse_encoding(&mut self, pixels: &mut [u8]) {
+        // We only need to reverse the encoding of color images
+        match self.color_type {
+            ColorType::Rgb8 | ColorType::Rgba8 => {
+                for chunk in pixels.chunks_mut(self.bytes_per_pixel) {
+                    chunk.swap(0, 2);
+                }
+            }
+            _ => {}
+        }
+    }
+
+    /// Flip the image vertically depending on the screen origin bit
+    ///
+    /// The bit in position 5 of the image descriptor byte is the screen origin bit.
+    /// If it's 1, the origin is in the top left corner.
+    /// If it's 0, the origin is in the bottom left corner.
+    /// This function checks the bit, and if it's 0, flips the image vertically.
+    fn flip_vertically(&mut self, pixels: &mut [u8]) {
+        if self.is_flipped_vertically() {
+            let num_bytes = pixels.len();
+
+            let width_bytes = num_bytes / self.height;
+
+            // Flip the image vertically.
+            for vertical_index in 0..(self.height / 2) {
+                let vertical_target = (self.height - vertical_index) * width_bytes - width_bytes;
+
+                for horizontal_index in 0..width_bytes {
+                    let source = vertical_index * width_bytes + horizontal_index;
+                    let target = vertical_target + horizontal_index;
+
+                    pixels.swap(target, source);
+                }
+            }
+        }
+    }
+
+    /// Check whether the image is vertically flipped
+    ///
+    /// The bit in position 5 of the image descriptor byte is the screen origin bit.
+    /// If it's 1, the origin is in the top left corner.
+    /// If it's 0, the origin is in the bottom left corner.
+    /// Returns `true` (i.e. a flip is needed) when the bit is 0.
+    fn is_flipped_vertically(&self) -> bool {
+        let screen_origin_bit = 0b10_0000 & self.header.image_desc != 0;
+        !screen_origin_bit
+    }
+
+    /// Decode a single scanline into `buf`, returning the number of bytes
+    /// written (0 once all `height` lines have been read).
+    ///
+    /// NOTE(review): `buf` must be at least one scanline long; the
+    /// `copy_from_slice` below panics otherwise.
+    fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        if let Some(line_read) = self.line_read {
+            if line_read == self.height {
+                return Ok(0);
+            }
+        }
+
+        // read the pixels from the data region
+        let mut pixel_data = if self.image_type.is_encoded() {
+            self.read_encoded_line()?
+        } else {
+            let num_raw_bytes = self.width * self.bytes_per_pixel;
+            let mut buf = vec![0; num_raw_bytes];
+            self.r.by_ref().read_exact(&mut buf)?;
+            buf
+        };
+
+        // expand the indices using the color map if necessary
+        if self.image_type.is_color_mapped() {
+            pixel_data = self.expand_color_map(&pixel_data)
+        }
+        self.reverse_encoding(&mut pixel_data);
+
+        // copy to the output buffer
+        buf[..pixel_data.len()].copy_from_slice(&pixel_data);
+
+        self.line_read = Some(self.line_read.unwrap_or(0) + 1);
+
+        Ok(pixel_data.len())
+    }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TgaDecoder<R> {
+    type Reader = TGAReader<R>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        (self.width as u32, self.height as u32)
+    }
+
+    fn color_type(&self) -> ColorType {
+        self.color_type
+    }
+
+    fn scanline_bytes(&self) -> u64 {
+        // This cannot overflow because TGA has a maximum width of u16::MAX_VALUE and
+        // `bytes_per_pixel` is a u8.
+        u64::from(self.color_type.bytes_per_pixel()) * self.width as u64
+    }
+
+    fn into_reader(self) -> ImageResult<Self::Reader> {
+        Ok(TGAReader {
+            buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()),
+            decoder: self,
+        })
+    }
+
+    /// Decode the whole image into `buf`, which must be exactly
+    /// `total_bytes()` long (asserted below).
+    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+        // read the pixels from the data region
+        let len = if self.image_type.is_encoded() {
+            let pixel_data = self.read_all_encoded_data()?;
+            buf[0..pixel_data.len()].copy_from_slice(&pixel_data);
+            pixel_data.len()
+        } else {
+            let num_raw_bytes = self.width * self.height * self.bytes_per_pixel;
+            self.r.by_ref().read_exact(&mut buf[0..num_raw_bytes])?;
+            num_raw_bytes
+        };
+
+        // expand the indices using the color map if necessary
+        // NOTE(review): `copy_from_slice` requires the expanded palette data to
+        // be exactly `buf.len()` bytes; a malformed file could panic here.
+        if self.image_type.is_color_mapped() {
+            let pixel_data = self.expand_color_map(&buf[0..len]);
+            buf.copy_from_slice(&pixel_data);
+        }
+
+        self.reverse_encoding(buf);
+
+        self.flip_vertically(buf);
+
+        Ok(())
+    }
+}
+
+/// Streaming reader over a `TgaDecoder`, produced by `into_reader`.
+/// Pulls one scanline at a time through an `ImageReadBuffer`.
+pub struct TGAReader<R> {
+    buffer: ImageReadBuffer,
+    decoder: TgaDecoder<R>,
+}
+impl<R: Read + Seek> Read for TGAReader<R> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let decoder = &mut self.decoder;
+        self.buffer.read(buf, |buf| decoder.read_scanline(buf))
+    }
+}
diff --git a/third_party/rust/image/src/tga/mod.rs b/third_party/rust/image/src/tga/mod.rs
new file mode 100644
index 0000000000..af185871e7
--- /dev/null
+++ b/third_party/rust/image/src/tga/mod.rs
@@ -0,0 +1,13 @@
+//! Decoding of TGA Images
+//!
+//! # Related Links
+//! <http://googlesites.inequation.org/tgautilities>
+
+/// A decoder for TGA images
+///
+/// Currently this decoder does not support 8, 15 and 16 bit color images.
+pub use self::decoder::TgaDecoder;
+
+//TODO add 8, 15, 16 bit color support
+
+mod decoder;
diff --git a/third_party/rust/image/src/tiff.rs b/third_party/rust/image/src/tiff.rs
new file mode 100644
index 0000000000..6a556257f4
--- /dev/null
+++ b/third_party/rust/image/src/tiff.rs
@@ -0,0 +1,175 @@
+//! Decoding and Encoding of TIFF Images
+//!
+//! TIFF (Tagged Image File Format) is a versatile image format that supports
+//! lossless and lossy compression.
+//!
+//! # Related Links
+//! * <http://partners.adobe.com/public/developer/tiff/index.html> - The TIFF specification
+
+extern crate tiff;
+
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Write, Seek};
+use std::marker::PhantomData;
+use std::mem;
+
+use byteorder::{NativeEndian, ByteOrder};
+
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{ImageError, ImageResult};
+use crate::image::{ImageDecoder, ImageEncoder};
+use crate::utils::vec_u16_into_u8;
+
+/// Decoder for TIFF images.
+///
+/// Thin wrapper around `tiff::decoder::Decoder` that caches the dimensions
+/// and translated `ColorType` determined at construction time.
+pub struct TiffDecoder<R>
+    where R: Read + Seek
+{
+    dimensions: (u32, u32),
+    color_type: ColorType,
+    inner: tiff::decoder::Decoder<R>,
+}
+
+impl<R> TiffDecoder<R>
+    where R: Read + Seek
+{
+    /// Create a new TiffDecoder.
+    ///
+    /// Reads the TIFF IFD eagerly and maps the inner crate's color type onto
+    /// this crate's `ColorType`; unsupported bit depths are reported as
+    /// `UnsupportedColor` with the total bits-per-pixel as the unknown value.
+    pub fn new(r: R) -> Result<TiffDecoder<R>, ImageError> {
+        let mut inner = tiff::decoder::Decoder::new(r).map_err(ImageError::from_tiff)?;
+        let dimensions = inner.dimensions().map_err(ImageError::from_tiff)?;
+        let color_type = match inner.colortype().map_err(ImageError::from_tiff)? {
+            tiff::ColorType::Gray(8) => ColorType::L8,
+            tiff::ColorType::Gray(16) => ColorType::L16,
+            tiff::ColorType::GrayA(8) => ColorType::La8,
+            tiff::ColorType::GrayA(16) => ColorType::La16,
+            tiff::ColorType::RGB(8) => ColorType::Rgb8,
+            tiff::ColorType::RGB(16) => ColorType::Rgb16,
+            tiff::ColorType::RGBA(8) => ColorType::Rgba8,
+            tiff::ColorType::RGBA(16) => ColorType::Rgba16,
+
+            tiff::ColorType::Palette(n) | tiff::ColorType::Gray(n) =>
+                return Err(ImageError::UnsupportedColor(ExtendedColorType::Unknown(n))),
+            tiff::ColorType::GrayA(n) =>
+                return Err(ImageError::UnsupportedColor(ExtendedColorType::Unknown(n*2))),
+            tiff::ColorType::RGB(n) =>
+                return Err(ImageError::UnsupportedColor(ExtendedColorType::Unknown(n*3))),
+            tiff::ColorType::RGBA(n) | tiff::ColorType::CMYK(n) =>
+                return Err(ImageError::UnsupportedColor(ExtendedColorType::Unknown(n*4))),
+        };
+
+        Ok(TiffDecoder {
+            dimensions,
+            color_type,
+            inner,
+        })
+    }
+}
+
+impl ImageError {
+    /// Translate a `tiff` crate error into this crate's error type.
+    fn from_tiff(err: tiff::TiffError) -> ImageError {
+        match err {
+            tiff::TiffError::IoError(err) => ImageError::IoError(err),
+            tiff::TiffError::FormatError(desc) => ImageError::FormatError(desc.to_string()),
+            tiff::TiffError::UnsupportedError(desc) => ImageError::UnsupportedError(desc.to_string()),
+            tiff::TiffError::LimitsExceeded => ImageError::InsufficientMemory,
+        }
+    }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct TiffReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for TiffReader<R> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        // Fast path: if nothing has been read yet and the destination is
+        // empty, hand over the whole backing vector without copying.
+        if self.0.position() == 0 && buf.is_empty() {
+            mem::swap(buf, self.0.get_mut());
+            Ok(buf.len())
+        } else {
+            self.0.read_to_end(buf)
+        }
+    }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TiffDecoder<R> {
+    type Reader = TiffReader<R>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        self.dimensions
+    }
+
+    fn color_type(&self) -> ColorType {
+        self.color_type
+    }
+
+    fn into_reader(mut self) -> ImageResult<Self::Reader> {
+        // Decode the full image up front; 16-bit results are flattened to
+        // native-endian bytes.
+        let buf = match self.inner.read_image().map_err(ImageError::from_tiff)? {
+            tiff::decoder::DecodingResult::U8(v) => v,
+            tiff::decoder::DecodingResult::U16(v) => vec_u16_into_u8(v),
+        };
+
+        Ok(TiffReader(Cursor::new(buf), PhantomData))
+    }
+
+    /// Decode the whole image into `buf`, which must be exactly
+    /// `total_bytes()` long (asserted below).
+    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+        match self.inner.read_image().map_err(ImageError::from_tiff)? {
+            tiff::decoder::DecodingResult::U8(v) => {
+                buf.copy_from_slice(&v);
+            }
+            tiff::decoder::DecodingResult::U16(v) => {
+                // 16-bit samples are serialized in native endianness.
+                NativeEndian::write_u16_into(&v, buf);
+            }
+        }
+        Ok(())
+    }
+}
+
+/// Encoder for tiff images
+pub struct TiffEncoder<W> {
+    // Destination writer; must also implement `Seek` for encoding (see impl).
+    w: W,
+}
+
+// Utility to simplify and deduplicate error handling during 16-bit encoding.
+// Reinterprets a byte slice as u16s without copying; fails (as an I/O
+// `InvalidData` error) when the slice is misaligned or has odd length.
+fn u8_slice_as_u16(buf: &[u8]) -> ImageResult<&[u16]> {
+    bytemuck::try_cast_slice(buf)
+        // If the buffer is not aligned or the correct length for a u16 slice, err.
+        .map_err(|_| ImageError::IoError(std::io::ErrorKind::InvalidData.into()))
+}
+
+impl<W: Write + Seek> TiffEncoder<W> {
+    /// Create a new encoder that writes its output to `w`
+    pub fn new(w: W) -> TiffEncoder<W> {
+        TiffEncoder { w }
+    }
+
+    /// Encodes the image `image` that has dimensions `width` and `height` and `ColorType` `c`.
+    ///
+    /// 16-bit types assume the buffer is native endian.
+    ///
+    /// Returns `UnsupportedColor` for color types the `tiff` crate cannot
+    /// write (e.g. La8/La16).
+    pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> {
+        let mut encoder = tiff::encoder::TiffEncoder::new(self.w).map_err(ImageError::from_tiff)?;
+        match color {
+            ColorType::L8 => encoder.write_image::<tiff::encoder::colortype::Gray8>(width, height, data),
+            ColorType::Rgb8 => encoder.write_image::<tiff::encoder::colortype::RGB8>(width, height, data),
+            ColorType::Rgba8 => encoder.write_image::<tiff::encoder::colortype::RGBA8>(width, height, data),
+            ColorType::L16 => encoder.write_image::<tiff::encoder::colortype::Gray16>(width, height, &u8_slice_as_u16(data)?),
+            ColorType::Rgb16 => encoder.write_image::<tiff::encoder::colortype::RGB16>(width, height, &u8_slice_as_u16(data)?),
+            ColorType::Rgba16 => encoder.write_image::<tiff::encoder::colortype::RGBA16>(width, height, &u8_slice_as_u16(data)?),
+            _ => return Err(ImageError::UnsupportedColor(color.into()))
+        }.map_err(ImageError::from_tiff)?;
+
+        Ok(())
+    }
+}
+
+impl<W: Write + Seek> ImageEncoder for TiffEncoder<W> {
+    /// Delegates to `TiffEncoder::encode` to satisfy the generic
+    /// `ImageEncoder` interface.
+    fn write_image(
+        self,
+        buf: &[u8],
+        width: u32,
+        height: u32,
+        color_type: ColorType,
+    ) -> ImageResult<()> {
+        self.encode(buf, width, height, color_type)
+    }
+}
diff --git a/third_party/rust/image/src/traits.rs b/third_party/rust/image/src/traits.rs
new file mode 100644
index 0000000000..6e558d4a13
--- /dev/null
+++ b/third_party/rust/image/src/traits.rs
@@ -0,0 +1,75 @@
+//! This module provides useful traits that were deprecated in rust
+
+// Note copied from the stdlib under MIT license
+
+use num_traits::{Bounded, Num, NumCast};
+use std::ops::AddAssign;
+
+/// Types which are safe to treat as an immutable byte slice in a pixel layout
+/// for image encoding.
+///
+/// Sealed via the private `seals` supertrait so only the impls below exist.
+pub trait EncodableLayout: seals::EncodableLayout {
+    /// Get the bytes of this value.
+    fn as_bytes(&self) -> &[u8];
+}
+
+impl EncodableLayout for [u8] {
+    fn as_bytes(&self) -> &[u8] {
+        bytemuck::cast_slice(self)
+    }
+}
+
+impl EncodableLayout for [u16] {
+    fn as_bytes(&self) -> &[u8] {
+        // Native-endian reinterpretation of the u16 samples as bytes.
+        bytemuck::cast_slice(self)
+    }
+}
+
+/// Primitive trait from old stdlib
+///
+/// Marker for numeric sample types usable as pixel channels.
+pub trait Primitive: Copy + NumCast + Num + PartialOrd<Self> + Clone + Bounded {}
+
+impl Primitive for usize {}
+impl Primitive for u8 {}
+impl Primitive for u16 {}
+impl Primitive for u32 {}
+impl Primitive for u64 {}
+impl Primitive for isize {}
+impl Primitive for i8 {}
+impl Primitive for i16 {}
+impl Primitive for i32 {}
+impl Primitive for i64 {}
+impl Primitive for f32 {}
+impl Primitive for f64 {}
+
+/// An Enlargable::Larger value should be enough to calculate
+/// the sum (average) of a few hundred or thousand Enlargeable values.
+pub trait Enlargeable: Sized + Bounded + NumCast {
+    type Larger: Primitive + AddAssign + 'static;
+
+    /// Convert a `Larger` accumulator back to `Self`, saturating at
+    /// `Self::max_value()` instead of overflowing.
+    fn clamp_from(n: Self::Larger) -> Self {
+        // Note: Only unsigned value types supported.
+        if n > NumCast::from(Self::max_value()).unwrap() {
+            Self::max_value()
+        } else {
+            NumCast::from(n).unwrap()
+        }
+    }
+}
+
+impl Enlargeable for u8 {
+    type Larger = u32;
+}
+impl Enlargeable for u16 {
+    type Larger = u32;
+}
+impl Enlargeable for u32 {
+    type Larger = u64;
+}
+
+
+/// Private module for supertraits of sealed traits.
+mod seals {
+    // Supertrait of the public `EncodableLayout`; being private, it prevents
+    // downstream crates from adding further impls.
+    pub trait EncodableLayout {}
+
+    impl EncodableLayout for [u8] {}
+    impl EncodableLayout for [u16] {}
+}
diff --git a/third_party/rust/image/src/utils/mod.rs b/third_party/rust/image/src/utils/mod.rs
new file mode 100644
index 0000000000..3728443646
--- /dev/null
+++ b/third_party/rust/image/src/utils/mod.rs
@@ -0,0 +1,127 @@
+//! Utilities
+
+use byteorder::{NativeEndian, ByteOrder};
+use num_iter::range_step;
+use std::mem;
+use std::iter::repeat;
+
+// Expand sub-byte packed samples in place, back to front, calling `func` with
+// each unpacked sample and the destination slot of `channels` bytes.
+// Processing in reverse lets input and output share `buf` without clobbering
+// unread packed data. `bit_depth` is assumed to divide 8 — TODO confirm at
+// call sites.
+#[inline(always)]
+pub(crate) fn expand_packed<F>(buf: &mut [u8], channels: usize, bit_depth: u8, mut func: F)
+where
+    F: FnMut(u8, &mut [u8]),
+{
+    let pixels = buf.len() / channels * bit_depth as usize;
+    let extra = pixels % 8;
+    let entries = pixels / 8 + match extra {
+        0 => 0,
+        _ => 1,
+    };
+    let mask = ((1u16 << bit_depth) - 1) as u8;
+    let i = (0..entries)
+        .rev() // Reverse iterator
+        .flat_map(|idx|
+            // This has to be reversed to
+            range_step(0, 8, bit_depth)
+                .zip(repeat(idx))
+        )
+        .skip(extra);
+    let channels = channels as isize;
+    let j = range_step(buf.len() as isize - channels, -channels, -channels);
+    //let j = range_step(0, buf.len(), channels).rev(); // ideal solution;
+    for ((shift, i), j) in i.zip(j) {
+        let pixel = (buf[i] & (mask << shift)) >> shift;
+        func(pixel, &mut buf[j as usize..(j + channels) as usize])
+    }
+}
+
+/// Expand a buffer of packed 1, 2, or 4 bits integers into u8's. Assumes that
+/// every `row_size` entries there are padding bits up to the next byte boundary.
+/// Each unpacked sample is rescaled to the full 0..=255 range.
+pub(crate) fn expand_bits(bit_depth: u8, row_size: u32, buf: &[u8]) -> Vec<u8> {
+    // Note: this conversion assumes that the scanlines begin on byte boundaries
+    let mask = (1u8 << bit_depth as usize) - 1;
+    let scaling_factor = 255 / ((1 << bit_depth as usize) - 1);
+    let bit_width = row_size * u32::from(bit_depth);
+    // Number of trailing padding samples per row (0 when rows are byte-aligned).
+    let skip = if bit_width % 8 == 0 {
+        0
+    } else {
+        (8 - bit_width % 8) / u32::from(bit_depth)
+    };
+    let row_len = row_size + skip;
+    let mut p = Vec::new();
+    let mut i = 0;
+    for v in buf {
+        // Walk the packed samples within the byte from most- to least-significant.
+        for shift in num_iter::range_step_inclusive(8i8-(bit_depth as i8), 0, -(bit_depth as i8)) {
+            // skip the pixels that can be neglected because scanlines should
+            // start at byte boundaries
+            if i % (row_len as usize) < (row_size as usize) {
+                let pixel = (v & mask << shift as usize) >> shift as usize;
+                p.push(pixel * scaling_factor);
+            }
+            i += 1;
+        }
+    }
+    p
+}
+
+/// Convert an owned `Vec<u16>` into its native-endian byte representation.
+pub(crate) fn vec_u16_into_u8(vec: Vec<u16>) -> Vec<u8> {
+    // Do this way until we find a way to not alloc/dealloc but get llvm to realloc instead.
+    vec_u16_copy_u8(&vec)
+}
+
+/// Copy a `&[u16]` into a freshly allocated native-endian byte vector
+/// (twice the length of the input).
+pub(crate) fn vec_u16_copy_u8(vec: &[u16]) -> Vec<u8> {
+    let mut new = vec![0; vec.len() * mem::size_of::<u16>()];
+    NativeEndian::write_u16_into(&vec[..], &mut new[..]);
+    new
+}
+
+
+/// A marker struct for __NonExhaustive enums.
+///
+/// This is an empty type that can not be constructed. When an enum contains a tuple variant that
+/// includes this type the optimizer can statically determine that the branch is never taken while
+/// at the same time the matching of the branch is required.
+///
+/// The effect is thus very similar to the actual `#[non_exhaustive]` attribute with no runtime
+/// costs. Also note that we use a dirty trick to not only hide this type from the doc but make it
+/// inaccessible. The visibility in this module is pub but the module itself is not and the
+/// top-level crate never exports the type.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub struct NonExhaustiveMarker {
+    /// Allows this crate, and this crate only, to match on the impossibility of this variant.
+    pub(crate) _private: Empty,
+}
+
+// Uninhabited type: having no variants, no value of it can ever exist.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) enum Empty { }
+
+#[cfg(test)]
+mod test {
+    // Exercises `expand_bits` row-padding handling for several bit depths and
+    // skip amounts (pixels are rescaled to 0/255 for depth 1, etc.).
+    #[test]
+    fn gray_to_luma8_skip() {
+        let check = |bit_depth, w, from, to| {
+            assert_eq!(
+                super::expand_bits(bit_depth, w, from),
+                to);
+        };
+        // Bit depth 1, skip is more than half a byte
+        check(
+            1, 10,
+            &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
+            vec![255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255]);
+        // Bit depth 2, skip is more than half a byte
+        check(
+            2, 5,
+            &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
+            vec![255, 255, 0, 0, 255, 0, 0, 255, 255, 255]);
+        // Bit depth 2, skip is 0
+        check(
+            2, 4,
+            &[0b11110000, 0b00001111],
+            vec![255, 255, 0, 0, 0, 0, 255, 255]);
+        // Bit depth 4, skip is half a byte
+        check(
+            4, 1,
+            &[0b11110011, 0b00001100],
+            vec![255, 0]);
+    }
+}
diff --git a/third_party/rust/image/src/webp/decoder.rs b/third_party/rust/image/src/webp/decoder.rs
new file mode 100644
index 0000000000..b67a16013a
--- /dev/null
+++ b/third_party/rust/image/src/webp/decoder.rs
@@ -0,0 +1,138 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::default::Default;
+use std::io::{self, Cursor, Read};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::image::ImageDecoder;
+use crate::error::{ImageError, ImageResult};
+
+use crate::color;
+
+use super::vp8::Frame;
+use super::vp8::Vp8Decoder;
+
+/// WebP Image format decoder. Currently only supports the luma channel (meaning that decoded
+/// images will be grayscale).
+pub struct WebPDecoder<R> {
+    r: R,
+    // Fully decoded VP8 frame; populated by `read_metadata`.
+    frame: Frame,
+    have_frame: bool,
+}
+
+impl<R: Read> WebPDecoder<R> {
+    /// Create a new WebPDecoder from the Reader ```r```.
+    /// This function takes ownership of the Reader.
+    ///
+    /// Decodes the entire VP8 frame eagerly so dimensions are available
+    /// immediately.
+    pub fn new(r: R) -> ImageResult<WebPDecoder<R>> {
+        let f: Frame = Default::default();
+
+        let mut decoder = WebPDecoder {
+            r,
+            have_frame: false,
+            frame: f,
+        };
+        decoder.read_metadata()?;
+        Ok(decoder)
+    }
+
+    /// Validate the "RIFF"/"WEBP" container signature and return the declared
+    /// chunk size.
+    fn read_riff_header(&mut self) -> ImageResult<u32> {
+        let mut riff = Vec::with_capacity(4);
+        self.r.by_ref().take(4).read_to_end(&mut riff)?;
+        let size = self.r.read_u32::<LittleEndian>()?;
+        let mut webp = Vec::with_capacity(4);
+        self.r.by_ref().take(4).read_to_end(&mut webp)?;
+
+        if &*riff != b"RIFF" {
+            return Err(ImageError::FormatError(
+                "Invalid RIFF signature.".to_string(),
+            ));
+        }
+
+        if &*webp != b"WEBP" {
+            return Err(ImageError::FormatError(
+                "Invalid WEBP signature.".to_string(),
+            ));
+        }
+
+        Ok(size)
+    }
+
+    /// Validate the "VP8 " chunk tag (lossy bitstream); its declared length is
+    /// read but otherwise ignored.
+    fn read_vp8_header(&mut self) -> ImageResult<()> {
+        let mut vp8 = Vec::with_capacity(4);
+        self.r.by_ref().take(4).read_to_end(&mut vp8)?;
+
+        if &*vp8 != b"VP8 " {
+            return Err(ImageError::FormatError(
+                "Invalid VP8 signature.".to_string(),
+            ));
+        }
+
+        let _len = self.r.read_u32::<LittleEndian>()?;
+
+        Ok(())
+    }
+
+    /// Read the remaining stream and decode it as a VP8 frame into `self.frame`.
+    fn read_frame(&mut self) -> ImageResult<()> {
+        let mut framedata = Vec::new();
+        self.r.read_to_end(&mut framedata)?;
+        let m = io::Cursor::new(framedata);
+
+        let mut v = Vp8Decoder::new(m);
+        let frame = v.decode_frame()?;
+
+        self.frame = frame.clone();
+
+        Ok(())
+    }
+
+    /// Parse container headers and decode the frame once; later calls no-op.
+    fn read_metadata(&mut self) -> ImageResult<()> {
+        if !self.have_frame {
+            self.read_riff_header()?;
+            self.read_vp8_header()?;
+            self.read_frame()?;
+
+            self.have_frame = true;
+        }
+
+        Ok(())
+    }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct WebpReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for WebpReader<R> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        // Fast path: if nothing has been read yet and the destination is
+        // empty, hand over the whole backing vector without copying.
+        if self.0.position() == 0 && buf.is_empty() {
+            mem::swap(buf, self.0.get_mut());
+            Ok(buf.len())
+        } else {
+            self.0.read_to_end(buf)
+        }
+    }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for WebPDecoder<R> {
+    type Reader = WebpReader<R>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        (u32::from(self.frame.width), u32::from(self.frame.height))
+    }
+
+    fn color_type(&self) -> color::ColorType {
+        // Only the luma plane is decoded, so output is always 8-bit grayscale.
+        color::ColorType::L8
+    }
+
+    fn into_reader(self) -> ImageResult<Self::Reader> {
+        // Frame is already decoded; serve the luma buffer directly.
+        Ok(WebpReader(Cursor::new(self.frame.ybuf), PhantomData))
+    }
+
+    /// Copy the decoded luma plane into `buf`, which must be exactly
+    /// `total_bytes()` long (asserted below).
+    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+        buf.copy_from_slice(&self.frame.ybuf);
+        Ok(())
+    }
+}
diff --git a/third_party/rust/image/src/webp/mod.rs b/third_party/rust/image/src/webp/mod.rs
new file mode 100644
index 0000000000..24f57ee4d2
--- /dev/null
+++ b/third_party/rust/image/src/webp/mod.rs
@@ -0,0 +1,8 @@
+//! Decoding of WebP Images
+
+pub use self::decoder::WebPDecoder;
+
+mod decoder;
+mod transform;
+
+pub mod vp8;
diff --git a/third_party/rust/image/src/webp/transform.rs b/third_party/rust/image/src/webp/transform.rs
new file mode 100644
index 0000000000..3b3ef5a2a8
--- /dev/null
+++ b/third_party/rust/image/src/webp/transform.rs
@@ -0,0 +1,77 @@
// Fixed-point multipliers used by the inverse DCT (RFC 6386 section 14.3):
// CONST1 / 2^16 ~= sqrt(2)*cos(pi/8) - 1, CONST2 / 2^16 ~= sqrt(2)*sin(pi/8).
static CONST1: i64 = 20091;
static CONST2: i64 = 35468;

/// Inverse 4x4 DCT (RFC 6386 section 14.3), applied in place to a row-major
/// 4x4 coefficient block. A column pass is followed by a row pass that also
/// performs the final rounding (`+4 >> 3`).
pub(crate) fn idct4x4(block: &mut [i32]) {
    // Column pass. Values are widened to i64 before multiplying because the
    // intermediate products may overflow i32.
    for col in 0..4usize {
        let c0 = i64::from(block[col]);
        let c1 = i64::from(block[4 + col]);
        let c2 = i64::from(block[8 + col]);
        let c3 = i64::from(block[12 + col]);

        let even_sum = c0 + c2;
        let even_diff = c0 - c2;
        let odd_diff = ((c1 * CONST2) >> 16) - (c3 + ((c3 * CONST1) >> 16));
        let odd_sum = (c1 + ((c1 * CONST1) >> 16)) + ((c3 * CONST2) >> 16);

        block[col] = (even_sum + odd_sum) as i32;
        block[4 + col] = (even_diff + odd_diff) as i32;
        block[8 + col] = (even_diff - odd_diff) as i32;
        block[12 + col] = (even_sum - odd_sum) as i32;
    }

    // Row pass, including the final rounding shift.
    for row in 0..4usize {
        let base = 4 * row;
        let r0 = i64::from(block[base]);
        let r1 = i64::from(block[base + 1]);
        let r2 = i64::from(block[base + 2]);
        let r3 = i64::from(block[base + 3]);

        let even_sum = r0 + r2;
        let even_diff = r0 - r2;
        let odd_diff = ((r1 * CONST2) >> 16) - (r3 + ((r3 * CONST1) >> 16));
        let odd_sum = (r1 + ((r1 * CONST1) >> 16)) + ((r3 * CONST2) >> 16);

        block[base] = ((even_sum + odd_sum + 4) >> 3) as i32;
        block[base + 1] = ((even_diff + odd_diff + 4) >> 3) as i32;
        block[base + 2] = ((even_diff - odd_diff + 4) >> 3) as i32;
        block[base + 3] = ((even_sum - odd_sum + 4) >> 3) as i32;
    }
}
+
// 14.3
/// Inverse Walsh-Hadamard transform (RFC 6386 section 14.3), applied in
/// place to the 4x4 block of second-order (Y2) DC coefficients.
pub(crate) fn iwht4x4(block: &mut [i32]) {
    // Column pass.
    for col in 0..4usize {
        let t0 = block[col] + block[12 + col];
        let t1 = block[4 + col] + block[8 + col];
        let t2 = block[4 + col] - block[8 + col];
        let t3 = block[col] - block[12 + col];

        block[col] = t0 + t1;
        block[4 + col] = t2 + t3;
        block[8 + col] = t0 - t1;
        block[12 + col] = t3 - t2;
    }

    // Row pass, including the final rounding shift.
    for row in 0..4usize {
        let base = 4 * row;
        let t0 = block[base] + block[base + 3];
        let t1 = block[base + 1] + block[base + 2];
        let t2 = block[base + 1] - block[base + 2];
        let t3 = block[base] - block[base + 3];

        block[base] = (t0 + t1 + 3) >> 3;
        block[base + 1] = (t2 + t3 + 3) >> 3;
        block[base + 2] = (t0 - t1 + 3) >> 3;
        block[base + 3] = (t3 - t2 + 3) >> 3;
    }
}
diff --git a/third_party/rust/image/src/webp/vp8.rs b/third_party/rust/image/src/webp/vp8.rs
new file mode 100644
index 0000000000..ed1da1ee76
--- /dev/null
+++ b/third_party/rust/image/src/webp/vp8.rs
@@ -0,0 +1,2003 @@
+//! An implementation of the VP8 Video Codec
+//!
+//! This module contains a partial implementation of the
+//! VP8 video format as defined in RFC-6386.
+//!
+//! It decodes Keyframes only sans Loop Filtering.
+//! VP8 is the underpinning of the WebP image format
+//!
+//! # Related Links
+//! * [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide
+//! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview
+//!   of the VP8 format
+//!
+
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::default::Default;
+use std::cmp;
+use std::io::Read;
+
+use super::transform;
+use crate::{ImageError, ImageResult};
+
+use crate::math::utils::clamp;
+
+const MAX_SEGMENTS: usize = 4;
+const NUM_DCT_TOKENS: usize = 12;
+
+// Prediction modes
+const DC_PRED: i8 = 0;
+const V_PRED: i8 = 1;
+const H_PRED: i8 = 2;
+const TM_PRED: i8 = 3;
+const B_PRED: i8 = 4;
+
+const B_DC_PRED: i8 = 0;
+const B_TM_PRED: i8 = 1;
+const B_VE_PRED: i8 = 2;
+const B_HE_PRED: i8 = 3;
+const B_LD_PRED: i8 = 4;
+const B_RD_PRED: i8 = 5;
+const B_VR_PRED: i8 = 6;
+const B_VL_PRED: i8 = 7;
+const B_HD_PRED: i8 = 8;
+const B_HU_PRED: i8 = 9;
+
+// Prediction mode enum
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum LumaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+
+ /// Each Y subblock is independently predicted.
+ B = B_PRED,
+}
+
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum ChromaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+}
+
// Intra prediction modes for independently predicted 4x4 luma subblocks
// (the "B_PRED" subblock modes of RFC 6386 section 11.2).
#[repr(i8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum IntraMode {
    /// Predict from the average of the above row and left column.
    DC = B_DC_PRED,
    /// "TrueMotion": propagate second differences.
    TM = B_TM_PRED,
    /// Vertical prediction.
    VE = B_VE_PRED,
    /// Horizontal prediction.
    HE = B_HE_PRED,
    /// Left-down diagonal prediction.
    LD = B_LD_PRED,
    /// Right-down diagonal prediction.
    RD = B_RD_PRED,
    /// Vertical-right diagonal prediction.
    VR = B_VR_PRED,
    /// Vertical-left diagonal prediction.
    VL = B_VL_PRED,
    /// Horizontal-down diagonal prediction.
    HD = B_HD_PRED,
    /// Horizontal-up diagonal prediction.
    HU = B_HU_PRED,
}
+
+type Prob = u8;
+
+static SEGMENT_ID_TREE: [i8; 6] = [2, 4, -0, -1, -2, -3];
+
+// Section 11.2
+// Tree for determining the keyframe luma intra prediction modes:
+static KEYFRAME_YMODE_TREE: [i8; 8] = [-B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED];
+
+// Default probabilities for decoding the keyframe luma modes
+static KEYFRAME_YMODE_PROBS: [Prob; 4] = [145, 156, 163, 128];
+
+// Tree for determining the keyframe B_PRED mode:
+static KEYFRAME_BPRED_MODE_TREE: [i8; 18] = [
+ -B_DC_PRED, 2, -B_TM_PRED, 4, -B_VE_PRED, 6, 8, 12, -B_HE_PRED, 10, -B_RD_PRED, -B_VR_PRED,
+ -B_LD_PRED, 14, -B_VL_PRED, 16, -B_HD_PRED, -B_HU_PRED,
+];
+
+// Probabilities for the BPRED_MODE_TREE
+static KEYFRAME_BPRED_MODE_PROBS: [[[u8; 9]; 10]; 10] = [
+ [
+ [231, 120, 48, 89, 115, 113, 120, 152, 112],
+ [152, 179, 64, 126, 170, 118, 46, 70, 95],
+ [175, 69, 143, 80, 85, 82, 72, 155, 103],
+ [56, 58, 10, 171, 218, 189, 17, 13, 152],
+ [144, 71, 10, 38, 171, 213, 144, 34, 26],
+ [114, 26, 17, 163, 44, 195, 21, 10, 173],
+ [121, 24, 80, 195, 26, 62, 44, 64, 85],
+ [170, 46, 55, 19, 136, 160, 33, 206, 71],
+ [63, 20, 8, 114, 114, 208, 12, 9, 226],
+ [81, 40, 11, 96, 182, 84, 29, 16, 36],
+ ],
+ [
+ [134, 183, 89, 137, 98, 101, 106, 165, 148],
+ [72, 187, 100, 130, 157, 111, 32, 75, 80],
+ [66, 102, 167, 99, 74, 62, 40, 234, 128],
+ [41, 53, 9, 178, 241, 141, 26, 8, 107],
+ [104, 79, 12, 27, 217, 255, 87, 17, 7],
+ [74, 43, 26, 146, 73, 166, 49, 23, 157],
+ [65, 38, 105, 160, 51, 52, 31, 115, 128],
+ [87, 68, 71, 44, 114, 51, 15, 186, 23],
+ [47, 41, 14, 110, 182, 183, 21, 17, 194],
+ [66, 45, 25, 102, 197, 189, 23, 18, 22],
+ ],
+ [
+ [88, 88, 147, 150, 42, 46, 45, 196, 205],
+ [43, 97, 183, 117, 85, 38, 35, 179, 61],
+ [39, 53, 200, 87, 26, 21, 43, 232, 171],
+ [56, 34, 51, 104, 114, 102, 29, 93, 77],
+ [107, 54, 32, 26, 51, 1, 81, 43, 31],
+ [39, 28, 85, 171, 58, 165, 90, 98, 64],
+ [34, 22, 116, 206, 23, 34, 43, 166, 73],
+ [68, 25, 106, 22, 64, 171, 36, 225, 114],
+ [34, 19, 21, 102, 132, 188, 16, 76, 124],
+ [62, 18, 78, 95, 85, 57, 50, 48, 51],
+ ],
+ [
+ [193, 101, 35, 159, 215, 111, 89, 46, 111],
+ [60, 148, 31, 172, 219, 228, 21, 18, 111],
+ [112, 113, 77, 85, 179, 255, 38, 120, 114],
+ [40, 42, 1, 196, 245, 209, 10, 25, 109],
+ [100, 80, 8, 43, 154, 1, 51, 26, 71],
+ [88, 43, 29, 140, 166, 213, 37, 43, 154],
+ [61, 63, 30, 155, 67, 45, 68, 1, 209],
+ [142, 78, 78, 16, 255, 128, 34, 197, 171],
+ [41, 40, 5, 102, 211, 183, 4, 1, 221],
+ [51, 50, 17, 168, 209, 192, 23, 25, 82],
+ ],
+ [
+ [125, 98, 42, 88, 104, 85, 117, 175, 82],
+ [95, 84, 53, 89, 128, 100, 113, 101, 45],
+ [75, 79, 123, 47, 51, 128, 81, 171, 1],
+ [57, 17, 5, 71, 102, 57, 53, 41, 49],
+ [115, 21, 2, 10, 102, 255, 166, 23, 6],
+ [38, 33, 13, 121, 57, 73, 26, 1, 85],
+ [41, 10, 67, 138, 77, 110, 90, 47, 114],
+ [101, 29, 16, 10, 85, 128, 101, 196, 26],
+ [57, 18, 10, 102, 102, 213, 34, 20, 43],
+ [117, 20, 15, 36, 163, 128, 68, 1, 26],
+ ],
+ [
+ [138, 31, 36, 171, 27, 166, 38, 44, 229],
+ [67, 87, 58, 169, 82, 115, 26, 59, 179],
+ [63, 59, 90, 180, 59, 166, 93, 73, 154],
+ [40, 40, 21, 116, 143, 209, 34, 39, 175],
+ [57, 46, 22, 24, 128, 1, 54, 17, 37],
+ [47, 15, 16, 183, 34, 223, 49, 45, 183],
+ [46, 17, 33, 183, 6, 98, 15, 32, 183],
+ [65, 32, 73, 115, 28, 128, 23, 128, 205],
+ [40, 3, 9, 115, 51, 192, 18, 6, 223],
+ [87, 37, 9, 115, 59, 77, 64, 21, 47],
+ ],
+ [
+ [104, 55, 44, 218, 9, 54, 53, 130, 226],
+ [64, 90, 70, 205, 40, 41, 23, 26, 57],
+ [54, 57, 112, 184, 5, 41, 38, 166, 213],
+ [30, 34, 26, 133, 152, 116, 10, 32, 134],
+ [75, 32, 12, 51, 192, 255, 160, 43, 51],
+ [39, 19, 53, 221, 26, 114, 32, 73, 255],
+ [31, 9, 65, 234, 2, 15, 1, 118, 73],
+ [88, 31, 35, 67, 102, 85, 55, 186, 85],
+ [56, 21, 23, 111, 59, 205, 45, 37, 192],
+ [55, 38, 70, 124, 73, 102, 1, 34, 98],
+ ],
+ [
+ [102, 61, 71, 37, 34, 53, 31, 243, 192],
+ [69, 60, 71, 38, 73, 119, 28, 222, 37],
+ [68, 45, 128, 34, 1, 47, 11, 245, 171],
+ [62, 17, 19, 70, 146, 85, 55, 62, 70],
+ [75, 15, 9, 9, 64, 255, 184, 119, 16],
+ [37, 43, 37, 154, 100, 163, 85, 160, 1],
+ [63, 9, 92, 136, 28, 64, 32, 201, 85],
+ [86, 6, 28, 5, 64, 255, 25, 248, 1],
+ [56, 8, 17, 132, 137, 255, 55, 116, 128],
+ [58, 15, 20, 82, 135, 57, 26, 121, 40],
+ ],
+ [
+ [164, 50, 31, 137, 154, 133, 25, 35, 218],
+ [51, 103, 44, 131, 131, 123, 31, 6, 158],
+ [86, 40, 64, 135, 148, 224, 45, 183, 128],
+ [22, 26, 17, 131, 240, 154, 14, 1, 209],
+ [83, 12, 13, 54, 192, 255, 68, 47, 28],
+ [45, 16, 21, 91, 64, 222, 7, 1, 197],
+ [56, 21, 39, 155, 60, 138, 23, 102, 213],
+ [85, 26, 85, 85, 128, 128, 32, 146, 171],
+ [18, 11, 7, 63, 144, 171, 4, 4, 246],
+ [35, 27, 10, 146, 174, 171, 12, 26, 128],
+ ],
+ [
+ [190, 80, 35, 99, 180, 80, 126, 54, 45],
+ [85, 126, 47, 87, 176, 51, 41, 20, 32],
+ [101, 75, 128, 139, 118, 146, 116, 128, 85],
+ [56, 41, 15, 176, 236, 85, 37, 9, 62],
+ [146, 36, 19, 30, 171, 255, 97, 27, 20],
+ [71, 30, 17, 119, 118, 255, 17, 18, 138],
+ [101, 38, 60, 138, 55, 70, 43, 26, 142],
+ [138, 45, 61, 62, 219, 1, 81, 188, 64],
+ [32, 41, 20, 117, 151, 142, 20, 21, 163],
+ [112, 19, 12, 61, 195, 128, 48, 4, 24],
+ ],
+];
+
+// Section 11.4 Tree for determining macroblock the chroma mode
+static KEYFRAME_UV_MODE_TREE: [i8; 6] = [-DC_PRED, 2, -V_PRED, 4, -H_PRED, -TM_PRED];
+
+// Probabilities for determining macroblock mode
+static KEYFRAME_UV_MODE_PROBS: [Prob; 3] = [142, 114, 183];
+
+// Section 13.4
+type TokenProbTables = [[[[Prob; NUM_DCT_TOKENS - 1]; 3]; 8]; 4];
+
+// Probabilities that a token's probability will be updated
+static COEFF_UPDATE_PROBS: TokenProbTables = [
+ [
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255],
+ [250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255],
+ [234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255],
+ [251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+];
+
+// Section 13.5
+// Default Probabilities for tokens
+static COEFF_PROBS: TokenProbTables = [
+ [
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128],
+ [189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128],
+ [106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128],
+ [181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128],
+ [78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128],
+ ],
+ [
+ [1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128],
+ [184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128],
+ [77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128],
+ [170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128],
+ [37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128],
+ [207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128],
+ [102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128],
+ [177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128],
+ [80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62],
+ [131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1],
+ [68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128],
+ ],
+ [
+ [1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128],
+ [184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128],
+ [81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128],
+ ],
+ [
+ [1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128],
+ [99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128],
+ [23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128],
+ ],
+ [
+ [1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128],
+ [109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128],
+ [44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128],
+ [94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128],
+ [22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128],
+ ],
+ [
+ [1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128],
+ [124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128],
+ [35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128],
+ [121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128],
+ [45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128],
+ ],
+ [
+ [1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128],
+ [203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128],
+ [175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128],
+ [73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128],
+ ],
+ [
+ [1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128],
+ [239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128],
+ [155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128],
+ [201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128],
+ [69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128],
+ ],
+ [
+ [1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128],
+ [223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128],
+ [141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128],
+ [149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128],
+ [213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128],
+ [55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255],
+ [126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128],
+ [61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128],
+ ],
+ [
+ [1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128],
+ [166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128],
+ [39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128],
+ ],
+ [
+ [1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128],
+ [124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128],
+ [24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128],
+ ],
+ [
+ [1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128],
+ [149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128],
+ [28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128],
+ ],
+ [
+ [1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128],
+ [123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128],
+ [20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128],
+ ],
+ [
+ [1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128],
+ [168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128],
+ [47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128],
+ [141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128],
+ [42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+];
+
+// DCT Tokens
+const DCT_0: i8 = 0;
+const DCT_1: i8 = 1;
+const DCT_2: i8 = 2;
+const DCT_3: i8 = 3;
+const DCT_4: i8 = 4;
+const DCT_CAT1: i8 = 5;
+const DCT_CAT2: i8 = 6;
+const DCT_CAT3: i8 = 7;
+const DCT_CAT4: i8 = 8;
+const DCT_CAT5: i8 = 9;
+const DCT_CAT6: i8 = 10;
+const DCT_EOB: i8 = 11;
+
+static DCT_TOKEN_TREE: [i8; 22] = [
+ -DCT_EOB, 2, -DCT_0, 4, -DCT_1, 6, 8, 12, -DCT_2, 10, -DCT_3, -DCT_4, 14, 16, -DCT_CAT1,
+ -DCT_CAT2, 18, 20, -DCT_CAT3, -DCT_CAT4, -DCT_CAT5, -DCT_CAT6,
+];
+
+static PROB_DCT_CAT: [[Prob; 12]; 6] = [
+ [159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [165, 145, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0],
+ [180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0],
+ [254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0],
+];
+
+static DCT_CAT_BASE: [u8; 6] = [5, 7, 11, 19, 35, 67];
+static COEFF_BANDS: [u8; 16] = [0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7];
+
+#[rustfmt::skip]
+static DC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 16, 17, 17,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 91, 93, 95, 96, 98, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 122, 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 143, 145, 148, 151, 154, 157,
+];
+
+#[rustfmt::skip]
+static AC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 60,
+ 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92,
+ 94, 96, 98, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 119, 122, 125, 128,
+ 131, 134, 137, 140, 143, 146, 149, 152,
+ 155, 158, 161, 164, 167, 170, 173, 177,
+ 181, 185, 189, 193, 197, 201, 205, 209,
+ 213, 217, 221, 225, 229, 234, 239, 245,
+ 249, 254, 259, 264, 269, 274, 279, 284,
+];
+
+static ZIGZAG: [u8; 16] = [0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15];
+
// Boolean (arithmetic) decoder state, as described in RFC 6386 section 7.
struct BoolReader {
    // Compressed partition data being decoded.
    buf: Vec<u8>,
    // Next byte of `buf` to be loaded during renormalization.
    index: usize,

    // Current size of the coding interval; held in [128, 255] after `init`.
    range: u32,
    // Leading (most significant) bits of the value being decoded.
    value: u32,
    // Number of shifts performed since the last byte was loaded into `value`.
    bit_count: u8,
}
+
impl BoolReader {
    // Creates an empty reader; `init` must be called before decoding.
    pub(crate) fn new() -> BoolReader {
        BoolReader {
            buf: Vec::new(),
            range: 0,
            value: 0,
            bit_count: 0,
            index: 0,
        }
    }

    // Primes the decoder with a compressed partition (RFC 6386 section 7.3):
    // the first two bytes seed `value`, and `range` starts at 255.
    pub(crate) fn init(&mut self, buf: Vec<u8>) -> ImageResult<()> {
        if buf.len() < 2 {
            return Err(ImageError::FormatError(
                "Expected at least 2 bytes of decoder initialization data".into()));
        }

        self.buf = buf;
        // Direct access safe, since length has just been validated.
        self.value = (u32::from(self.buf[0]) << 8) | u32::from(self.buf[1]);
        self.index = 2;
        self.range = 255;
        self.bit_count = 0;

        Ok(())
    }

    // Decodes one boolean. Per RFC 6386 section 7.2, `probability`/256 is
    // (approximately) the probability of the decoded bit being false.
    pub(crate) fn read_bool(&mut self, probability: u8) -> bool {
        // Split the current interval in proportion to `probability`.
        let split = 1 + (((self.range - 1) * u32::from(probability)) >> 8);
        let bigsplit = split << 8;

        let retval = if self.value >= bigsplit {
            self.range -= split;
            self.value -= bigsplit;
            true
        } else {
            self.range = split;
            false
        };

        // Renormalize: keep `range` in [128, 255], loading a fresh input
        // byte into `value` after every 8 shifts.
        while self.range < 128 {
            self.value <<= 1;
            self.range <<= 1;
            self.bit_count += 1;

            if self.bit_count == 8 {
                self.bit_count = 0;

                // If no more bits are available, just don't do anything.
                // This strategy is suggested in the reference implementation of RFC6386 (p.135)
                if self.index < self.buf.len() {
                    self.value |= u32::from(self.buf[self.index]);
                    self.index += 1;
                }
            }
        }

        retval
    }

    // Reads `n` "unencoded" bits (each decoded at probability 128/256),
    // most significant bit first.
    pub(crate) fn read_literal(&mut self, n: u8) -> u8 {
        let mut v = 0u8;
        let mut n = n;

        while n != 0 {
            v = (v << 1) + self.read_bool(128u8) as u8;
            n -= 1;
        }

        v
    }

    // Reads an `n`-bit magnitude followed by a sign bit (1 = negative).
    pub(crate) fn read_magnitude_and_sign(&mut self, n: u8) -> i32 {
        let magnitude = self.read_literal(n);
        let sign = self.read_literal(1);

        if sign == 1 {
            -i32::from(magnitude)
        } else {
            i32::from(magnitude)
        }
    }

    // Walks a token tree encoded as pairs of i8 entries: a positive entry is
    // the index of the next pair, a non-positive entry is the negated leaf
    // value. `probs[index >> 1]` gives the probability for each branch.
    pub(crate) fn read_with_tree(&mut self, tree: &[i8], probs: &[Prob], start: isize) -> i8 {
        let mut index = start;

        loop {
            let a = self.read_bool(probs[index as usize >> 1]);
            let b = index + a as isize;
            index = tree[b as usize] as isize;

            if index <= 0 {
                break;
            }
        }

        -index as i8
    }

    // Reads a single even-probability bit as a flag.
    pub(crate) fn read_flag(&mut self) -> bool {
        0 != self.read_literal(1)
    }
}
+
// Per-macroblock prediction state.
#[derive(Default, Clone, Copy)]
struct MacroBlock {
    // Intra mode of each 4x4 luma subblock; only meaningful when
    // `luma_mode` is `LumaMode::B`.
    bpred: [IntraMode; 16],
    // NOTE(review): appears to hold per-plane coefficient-decoding context
    // for neighboring blocks — confirm against the token decoding code.
    complexity: [u8; 9],
    // Intra prediction mode for the 16x16 luma plane.
    luma_mode: LumaMode,
    // Intra prediction mode shared by both chroma planes.
    chroma_mode: ChromaMode,
    // Index into the decoder's `segment` table (0..MAX_SEGMENTS).
    segmentid: u8,
}
+
+/// A Representation of the last decoded video frame
+#[derive(Default, Debug, Clone)]
+pub struct Frame {
+ /// The width of the luma plane
+ pub width: u16,
+
+ /// The height of the luma plane
+ pub height: u16,
+
+ /// The luma plane of the frame
+ pub ybuf: Vec<u8>,
+
+ /// Indicates whether this frame is a keyframe
+ pub keyframe: bool,
+
+ version: u8,
+
+ /// Indicates whether this frame is intended for display
+ pub for_display: bool,
+
+ // Section 9.2
+ /// The pixel type of the frame as defined by Section 9.2
+ /// of the VP8 Specification
+ pub pixel_type: u8,
+
+ // Section 9.4 and 15
+ filter: u8,
+ filter_level: u8,
+ sharpness_level: u8,
+}
+
// Per-segment dequantization factors and adjustment levels (sections 9.3
// and 9.6). The six quantizer fields are filled in by
// `read_quantization_indices` from the DC_QUANT / AC_QUANT tables.
#[derive(Clone, Copy, Default)]
struct Segment {
    // Luma DC quantization factor.
    ydc: i16,
    // Luma AC quantization factor.
    yac: i16,

    // Second-order (Y2) DC quantization factor.
    y2dc: i16,
    // Second-order (Y2) AC quantization factor.
    y2ac: i16,

    // Chroma DC quantization factor.
    uvdc: i16,
    // Chroma AC quantization factor.
    uvac: i16,

    // When true, `quantizer_level` (and `loopfilter_level`) are deltas
    // applied on top of the frame-wide values rather than absolute levels.
    delta_values: bool,

    // Raw segment quantizer level read from the frame header.
    quantizer_level: i8,
    // Raw segment loop-filter level read from the frame header.
    loopfilter_level: i8,
}
+
+/// VP8 Decoder
+///
+/// Only decodes keyframes
+pub struct Vp8Decoder<R> {
+ r: R,
+ b: BoolReader,
+
+ mbwidth: u16,
+ mbheight: u16,
+
+ frame: Frame,
+
+ segments_enabled: bool,
+ segments_update_map: bool,
+ segment: [Segment; MAX_SEGMENTS],
+
+ partitions: [BoolReader; 8],
+ num_partitions: u8,
+
+ segment_tree_probs: [Prob; 3],
+ token_probs: Box<TokenProbTables>,
+
+ // Section 9.10
+ prob_intra: Prob,
+
+ // Section 9.11
+ prob_skip_false: Option<Prob>,
+
+ top: Vec<MacroBlock>,
+ left: MacroBlock,
+
+ top_border: Vec<u8>,
+ left_border: Vec<u8>,
+}
+
+impl<R: Read> Vp8Decoder<R> {
+ /// Create a new decoder.
+ /// The reader must present a raw vp8 bitstream to the decoder
+ pub fn new(r: R) -> Vp8Decoder<R> {
+ let f = Frame::default();
+ let s = Segment::default();
+ let m = MacroBlock::default();
+
+ Vp8Decoder {
+ r,
+ b: BoolReader::new(),
+
+ mbwidth: 0,
+ mbheight: 0,
+
+ frame: f,
+ segments_enabled: false,
+ segments_update_map: false,
+ segment: [s; MAX_SEGMENTS],
+
+ partitions: [
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ ],
+
+ num_partitions: 1,
+
+ segment_tree_probs: [255u8; 3],
+ token_probs: Box::new(COEFF_PROBS),
+
+ // Section 9.10
+ prob_intra: 0u8,
+
+ // Section 9.11
+ prob_skip_false: None,
+
+ top: Vec::new(),
+ left: m,
+
+ top_border: Vec::new(),
+ left_border: Vec::new(),
+ }
+ }
+
    // Section 13.4: conditionally overwrite entries of the DCT token
    // probability tables. Each entry is replaced with a freshly read 8-bit
    // literal only when a bool decoded with the matching
    // COEFF_UPDATE_PROBS entry comes up true.
    fn update_token_probabilities(&mut self) {
        for (i, is) in COEFF_UPDATE_PROBS.iter().enumerate() {
            for (j, js) in is.iter().enumerate() {
                for (k, ks) in js.iter().enumerate() {
                    for (t, prob) in ks.iter().enumerate().take(NUM_DCT_TOKENS - 1) {
                        if self.b.read_bool(*prob) {
                            let v = self.b.read_literal(8);
                            self.token_probs[i][j][k][t] = v;
                        }
                    }
                }
            }
        }
    }
+
    // Sets up the boolean decoders for the `n` DCT coefficient partitions.
    // The sizes of the first `n - 1` partitions are stored as 3-byte
    // little-endian integers; the final partition consumes whatever remains
    // of the input stream.
    fn init_partitions(&mut self, n: usize) -> ImageResult<()> {
        if n > 1 {
            let mut sizes = vec![0; 3 * n - 3];
            self.r.read_exact(sizes.as_mut_slice())?;

            for (i, s) in sizes.chunks(3).enumerate() {
                // `{s}` moves the chunk slice so `read_u24` can consume it.
                let size = {s}.read_u24::<LittleEndian>()
                    .expect("Reading from &[u8] can't fail and the chunk is complete");

                let mut buf = vec![0; size as usize];
                self.r.read_exact(buf.as_mut_slice())?;

                self.partitions[i].init(buf)?;
            }
        }

        // Last (or only) partition: the rest of the stream.
        let mut buf = Vec::new();
        self.r.read_to_end(&mut buf)?;
        self.partitions[n - 1].init(buf)?;

        Ok(())
    }
+
    // Section 9.6: reads the quantizer indices from the frame header and
    // derives the per-segment dequantization factors via the DC_QUANT /
    // AC_QUANT lookup tables.
    fn read_quantization_indices(&mut self) {
        // Table lookups clamp the index into the valid range [0, 127].
        fn dc_quant(index: i32) -> i16 {
            DC_QUANT[clamp(index, 0, 127) as usize]
        }

        fn ac_quant(index: i32) -> i16 {
            AC_QUANT[clamp(index, 0, 127) as usize]
        }

        // Frame-wide base index, followed by optional signed deltas for
        // each coefficient class (each guarded by a presence flag).
        let yac_abs = self.b.read_literal(7);
        let ydc_delta = if self.b.read_flag() {
            self.b.read_magnitude_and_sign(4)
        } else {
            0
        };

        let y2dc_delta = if self.b.read_flag() {
            self.b.read_magnitude_and_sign(4)
        } else {
            0
        };

        let y2ac_delta = if self.b.read_flag() {
            self.b.read_magnitude_and_sign(4)
        } else {
            0
        };

        let uvdc_delta = if self.b.read_flag() {
            self.b.read_magnitude_and_sign(4)
        } else {
            0
        };

        let uvac_delta = if self.b.read_flag() {
            self.b.read_magnitude_and_sign(4)
        } else {
            0
        };

        // Without segmentation only segment 0 is used.
        let n = if self.segments_enabled {
            MAX_SEGMENTS
        } else {
            1
        };
        for i in 0usize..n {
            // When `delta_values` is set the segment's quantizer level is an
            // offset from the frame-wide `yac_abs`; otherwise it is absolute.
            let base = i32::from(if !self.segment[i].delta_values {
                i16::from(self.segment[i].quantizer_level)
            } else {
                i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs)
            });

            self.segment[i].ydc = dc_quant(base + ydc_delta);
            self.segment[i].yac = ac_quant(base);

            self.segment[i].y2dc = dc_quant(base + y2dc_delta) * 2;
            // The intermediate result (max`284*155`) can be larger than the `i16` range.
            self.segment[i].y2ac = (i32::from(ac_quant(base + y2ac_delta)) * 155 / 100) as i16;

            self.segment[i].uvdc = dc_quant(base + uvdc_delta);
            self.segment[i].uvac = ac_quant(base + uvac_delta);

            // Spec-mandated limits: y2ac has a floor of 8, uvdc a cap of 132.
            if self.segment[i].y2ac < 8 {
                self.segment[i].y2ac = 8;
            }

            if self.segment[i].uvdc > 132 {
                self.segment[i].uvdc = 132;
            }
        }
    }
+
    /// Reads the loop filter delta adjustments (RFC 6386 section 9.4).
    /// The deltas are parsed only to keep the bit reader in sync with the
    /// bitstream; the decoded values are discarded (`_delta` is unused).
    fn read_loop_filter_adjustments(&mut self) {
        // Deltas are only present when the update flag is set.
        if self.b.read_flag() {
            // Four per-reference-frame filter level deltas.
            for _i in 0usize..4 {
                let ref_frame_delta_update_flag = self.b.read_flag();

                let _delta = if ref_frame_delta_update_flag {
                    self.b.read_magnitude_and_sign(6)
                } else {
                    0i32
                };
            }

            // Four per-macroblock-mode filter level deltas.
            for _i in 0usize..4 {
                let mb_mode_delta_update_flag = self.b.read_flag();

                let _delta = if mb_mode_delta_update_flag {
                    self.b.read_magnitude_and_sign(6)
                } else {
                    0i32
                };
            }
        }
    }
+
    /// Reads the segmentation update data from the frame header: per-segment
    /// quantizer / loop filter values and the probabilities used to decode
    /// per-macroblock segment ids.
    fn read_segment_updates(&mut self) {
        // Section 9.3
        self.segments_update_map = self.b.read_flag();
        let update_segment_feature_data = self.b.read_flag();

        if update_segment_feature_data {
            // Whether the per-segment values below are absolute or deltas;
            // stored inverted as `delta_values`.
            let segment_feature_mode = self.b.read_flag();

            for i in 0usize..MAX_SEGMENTS {
                self.segment[i].delta_values = !segment_feature_mode;
            }

            // Per-segment quantizer level: signed 7-bit value, 0 when absent.
            for i in 0usize..MAX_SEGMENTS {
                let update = self.b.read_flag();

                self.segment[i].quantizer_level = if update {
                    self.b.read_magnitude_and_sign(7)
                } else {
                    0i32
                } as i8;
            }

            // Per-segment loop filter level: signed 6-bit value, 0 when absent.
            for i in 0usize..MAX_SEGMENTS {
                let update = self.b.read_flag();

                self.segment[i].loopfilter_level = if update {
                    self.b.read_magnitude_and_sign(6)
                } else {
                    0i32
                } as i8;
            }
        }

        if self.segments_update_map {
            // Probabilities for the segment id tree, defaulting to 255.
            for i in 0usize..3 {
                let update = self.b.read_flag();

                self.segment_tree_probs[i] = if update { self.b.read_literal(8) } else { 255 };
            }
        }
    }
+
    /// Parses the uncompressed frame tag and the first (header) partition,
    /// initialising all per-frame decoder state (RFC 6386 section 9).
    /// Only keyframes are supported; other frames return `UnsupportedError`.
    fn read_frame_header(&mut self) -> ImageResult<()> {
        // 3-byte frame tag: bit 0 = keyframe flag (0 means keyframe),
        // bits 1-3 = version, bit 4 = show-frame, bits 5.. = size of the
        // first compressed partition.
        let tag = self.r.read_u24::<LittleEndian>()?;

        self.frame.keyframe = tag & 1 == 0;
        self.frame.version = ((tag >> 1) & 7) as u8;
        self.frame.for_display = (tag >> 4) & 1 != 0;

        let first_partition_size = tag >> 5;

        if self.frame.keyframe {
            // Keyframes carry a fixed start code followed by the dimensions.
            let mut tag = [0u8; 3];
            self.r.read_exact(&mut tag)?;

            if tag != [0x9d, 0x01, 0x2a] {
                return Err(ImageError::FormatError(
                    format!("Invalid magic bytes {:?} for vp8", tag)))
            }

            // Width and height use the low 14 bits; the upper two bits
            // (a scale code) are masked off here.
            let w = self.r.read_u16::<LittleEndian>()?;
            let h = self.r.read_u16::<LittleEndian>()?;

            self.frame.width = w & 0x3FFF;
            self.frame.height = h & 0x3FFF;

            self.top = init_top_macroblocks(self.frame.width as usize);
            // Almost always the first macro block, except when non exists (i.e. `width == 0`)
            self.left = self.top.get(0).cloned()
                .unwrap_or_else(MacroBlock::default);

            // Frame dimensions in macroblocks, rounded up.
            self.mbwidth = (self.frame.width + 15) / 16;
            self.mbheight = (self.frame.height + 15) / 16;

            self.frame.ybuf = vec![0u8; self.frame.width as usize * self.frame.height as usize];

            // Prediction borders start at the default values 127 (above)
            // and 129 (left); see also `create_border`.
            self.top_border = vec![127u8; self.frame.width as usize + 4 + 16];
            self.left_border = vec![129u8; 1 + 16];
        }

        let mut buf = vec![0; first_partition_size as usize];
        self.r.read_exact(&mut buf)?;

        // initialise binary decoder
        self.b.init(buf)?;

        if self.frame.keyframe {
            let color_space = self.b.read_literal(1);
            self.frame.pixel_type = self.b.read_literal(1);

            if color_space != 0 {
                return Err(ImageError::FormatError(
                    "Only YUV color space is specified.".to_string()))
            }
        }

        self.segments_enabled = self.b.read_flag();
        if self.segments_enabled {
            self.read_segment_updates();
        }

        // Loop filter parameters.
        self.frame.filter = self.b.read_literal(1);
        self.frame.filter_level = self.b.read_literal(6);
        self.frame.sharpness_level = self.b.read_literal(3);

        let lf_adjust_enable = self.b.read_flag();
        if lf_adjust_enable {
            self.read_loop_filter_adjustments();
        }

        // 1, 2, 4 or 8 coefficient partitions.
        self.num_partitions = (1usize << self.b.read_literal(2) as usize) as u8;
        let num_partitions = self.num_partitions as usize;
        self.init_partitions(num_partitions)?;

        self.read_quantization_indices();

        if !self.frame.keyframe {
            // 9.7 refresh golden frame and altref frame
            return Err(ImageError::UnsupportedError(
                "Frames that are not keyframes are not supported".into()))
            // FIXME: support this?
        } else {
            // Refresh entropy probs ?????
            let _ = self.b.read_literal(1);
        }

        self.update_token_probabilities();

        // Optional probability used for the per-macroblock skip flag.
        let mb_no_skip_coeff = self.b.read_literal(1);
        self.prob_skip_false = if mb_no_skip_coeff == 1 {
            Some(self.b.read_literal(8))
        } else {
            None
        };

        if !self.frame.keyframe {
            // 9.10 remaining frame data
            self.prob_intra = 0;

            return Err(ImageError::UnsupportedError(
                "Frames that are not keyframes are not supported".into()))
            // FIXME: support this?
        } else {
            // Reset motion vectors
        }

        Ok(())
    }
+
+ fn read_macroblock_header(&mut self, mbx: usize) -> ImageResult<(bool, MacroBlock)> {
+ let mut mb = MacroBlock::default();
+
+ mb.segmentid = if self.segments_enabled && self.segments_update_map {
+ self.b
+ .read_with_tree(&SEGMENT_ID_TREE, &self.segment_tree_probs, 0) as u8
+ } else {
+ 0
+ };
+
+ let skip_coeff = if self.prob_skip_false.is_some() {
+ self.b.read_bool(*self.prob_skip_false.as_ref().unwrap())
+ } else {
+ false
+ };
+
+ let inter_predicted = if !self.frame.keyframe {
+ self.b.read_bool(self.prob_intra)
+ } else {
+ false
+ };
+
+ if inter_predicted {
+ return Err(ImageError::UnsupportedError(
+ "VP8 inter prediction is not implemented yet".into()));
+ }
+
+ if self.frame.keyframe {
+ // intra prediction
+ let luma = self.b
+ .read_with_tree(&KEYFRAME_YMODE_TREE, &KEYFRAME_YMODE_PROBS, 0);
+ mb.luma_mode = LumaMode::from_i8(luma)
+ .ok_or_else(|| ImageError::FormatError(
+ format!("Invalid luma prediction mode {}", luma))
+ )?;
+
+ match mb.luma_mode.into_intra() {
+ // `LumaMode::B` - This is predicted individually
+ None => {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let top = self.top[mbx].bpred[12 + x];
+ let left = self.left.bpred[y];
+ let intra = self.b.read_with_tree(
+ &KEYFRAME_BPRED_MODE_TREE,
+ &KEYFRAME_BPRED_MODE_PROBS[top as usize][left as usize],
+ 0,
+ );
+ let bmode = IntraMode::from_i8(intra)
+ .ok_or_else(|| ImageError::FormatError(
+ format!("Invalid intra prediction mode {}", intra))
+ )?;
+ mb.bpred[x + y * 4] = bmode;
+
+ self.top[mbx].bpred[12 + x] = bmode;
+ self.left.bpred[y] = bmode;
+ }
+ }
+ },
+ Some(mode) => {
+ for i in 0usize..4 {
+ mb.bpred[12 + i] = mode;
+ self.left.bpred[i] = mode;
+ }
+ }
+ }
+
+ let chroma = self.b
+ .read_with_tree(&KEYFRAME_UV_MODE_TREE, &KEYFRAME_UV_MODE_PROBS, 0);
+ mb.chroma_mode = ChromaMode::from_i8(chroma)
+ .ok_or_else(|| ImageError::FormatError(
+ format!("Invalid chroma prediction mode {}", chroma))
+ )?;
+ }
+
+ self.top[mbx].chroma_mode = mb.chroma_mode;
+ self.top[mbx].luma_mode = mb.luma_mode;
+ self.top[mbx].bpred = mb.bpred;
+
+ Ok((skip_coeff, mb))
+ }
+
    /// Performs luma intra prediction for one macroblock, adds the residue,
    /// and writes the reconstructed 16x16 pixels (clipped to the frame
    /// dimensions) into `self.frame.ybuf`.
    fn intra_predict(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) {
        // Workspace layout: one border row/column plus 4 extra above-right
        // columns around the 16x16 block, hence stride 21.
        let stride = 1usize + 16 + 4;
        let w = self.frame.width as usize;
        let mw = self.mbwidth as usize;
        let mut ws = create_border(mbx, mby, mw, &self.top_border, &self.left_border);

        match mb.luma_mode {
            LumaMode::V => predict_vpred(&mut ws, 16, 1, 1, stride),
            LumaMode::H => predict_hpred(&mut ws, 16, 1, 1, stride),
            LumaMode::TM => predict_tmpred(&mut ws, 16, 1, 1, stride),
            LumaMode::DC => predict_dcpred(&mut ws, 16, stride, mby != 0, mbx != 0),
            LumaMode::B => predict_4x4(&mut ws, stride, &mb.bpred, resdata),
        }

        // For whole-block modes the residue is added per 4x4 subblock here;
        // `predict_4x4` already does this itself for `LumaMode::B`.
        if mb.luma_mode != LumaMode::B {
            for y in 0usize..4 {
                for x in 0usize..4 {
                    let i = x + y * 4;
                    let rb = &resdata[i * 16..i * 16 + 16];
                    let y0 = 1 + y * 4;
                    let x0 = 1 + x * 4;

                    add_residue(&mut ws, rb, y0, x0, stride);
                }
            }
        }

        // Save the reconstructed bottom row / right column as the prediction
        // border for the macroblocks below and to the right.
        self.left_border[0] = ws[16];

        for i in 0usize..16 {
            self.top_border[mbx * 16 + i] = ws[16 * stride + 1 + i];
            self.left_border[i + 1] = ws[(i + 1) * stride + 16];
        }

        // Length is the remainder to the border, but maximally the current chunk.
        let ylength = cmp::min(self.frame.height as usize - mby*16, 16);
        let xlength = cmp::min(self.frame.width as usize - mbx*16, 16);

        for y in 0usize..ylength {
            for x in 0usize..xlength {
                self.frame.ybuf[(mby * 16 + y) * w + mbx * 16 + x] = ws[(1 + y) * stride + 1 + x];
            }
        }
    }
+
+ fn read_coefficients(
+ &mut self,
+ block: &mut [i32],
+ p: usize,
+ plane: usize,
+ complexity: usize,
+ dcq: i16,
+ acq: i16,
+ ) -> bool {
+ let first = if plane == 0 { 1usize } else { 0usize };
+ let probs = &self.token_probs[plane];
+ let tree = &DCT_TOKEN_TREE;
+
+ let mut complexity = complexity;
+ let mut has_coefficients = false;
+ let mut skip = false;
+
+ for i in first..16usize {
+ let table = &probs[COEFF_BANDS[i] as usize][complexity];
+
+ let token = if !skip {
+ self.partitions[p].read_with_tree(tree, table, 0)
+ } else {
+ self.partitions[p].read_with_tree(tree, table, 2)
+ };
+
+ let mut abs_value = i32::from(match token {
+ DCT_EOB => break,
+
+ DCT_0 => {
+ skip = true;
+ has_coefficients = true;
+ complexity = 0;
+ continue;
+ }
+
+ literal @ DCT_1..=DCT_4 => i16::from(literal),
+
+ category @ DCT_CAT1..=DCT_CAT6 => {
+ let t = PROB_DCT_CAT[(category - DCT_CAT1) as usize];
+
+ let mut extra = 0i16;
+ let mut j = 0;
+
+ while t[j] > 0 {
+ extra = extra + extra + self.partitions[p].read_bool(t[j]) as i16;
+ j += 1;
+ }
+
+ i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra
+ }
+
+ c => panic!(format!("unknown token: {}", c)),
+ });
+
+ skip = false;
+
+ complexity = if abs_value == 0 {
+ 0
+ } else if abs_value == 1 {
+ 1
+ } else {
+ 2
+ };
+
+ if self.partitions[p].read_bool(128) {
+ abs_value = -abs_value;
+ }
+
+ block[ZIGZAG[i] as usize] =
+ abs_value * i32::from(if ZIGZAG[i] > 0 { acq } else { dcq });
+
+ has_coefficients = true;
+ }
+
+ has_coefficients
+ }
+
    /// Reads the residual data of one macroblock from partition `p` and
    /// returns the 24 inverse-transformed 4x4 blocks (16 luma, 4 U, 4 V)
    /// as one flat array of 384 values.
    fn read_residual_data(&mut self, mb: &MacroBlock, mbx: usize, p: usize) -> [i32; 384] {
        let sindex = mb.segmentid as usize;
        let mut blocks = [0i32; 384];
        // Token plane: 3 = luma including DC (`LumaMode::B`), otherwise 1 =
        // the second-order Y2 block that carries the luma DC coefficients.
        let mut plane = if mb.luma_mode == LumaMode::B { 3 } else { 1 };

        if plane == 1 {
            // Decode the Y2 block, inverse-WHT it, and scatter its values
            // into the DC slot of each of the 16 luma blocks.
            let complexity = self.top[mbx].complexity[0] + self.left.complexity[0];
            let mut block = [0i32; 16];
            let dcq = self.segment[sindex].y2dc;
            let acq = self.segment[sindex].y2ac;
            let n = self.read_coefficients(&mut block, p, plane, complexity as usize, dcq, acq);

            self.left.complexity[0] = if n { 1 } else { 0 };
            self.top[mbx].complexity[0] = if n { 1 } else { 0 };

            transform::iwht4x4(&mut block);

            for k in 0usize..16 {
                blocks[16 * k] = block[k];
            }

            // The luma blocks are now read without their DC coefficient.
            plane = 0;
        }

        // 16 luma blocks; `complexity` tracks the left/above
        // non-zero-coefficient context per 4-pixel column/row.
        for y in 0usize..4 {
            let mut left = self.left.complexity[y + 1];
            for x in 0usize..4 {
                let i = x + y * 4;
                let block = &mut blocks[i * 16..i * 16 + 16];

                let complexity = self.top[mbx].complexity[x + 1] + left;
                let dcq = self.segment[sindex].ydc;
                let acq = self.segment[sindex].yac;

                let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);

                if block[0] != 0 || n {
                    transform::idct4x4(block);
                }

                left = if n { 1 } else { 0 };
                self.top[mbx].complexity[x + 1] = if n { 1 } else { 0 };
            }

            self.left.complexity[y + 1] = left;
        }

        // Chroma: blocks 16..20 are U (complexity rows starting at 5),
        // blocks 20..24 are V (complexity rows starting at 7).
        plane = 2;

        for &j in &[5usize, 7usize] {
            for y in 0usize..2 {
                let mut left = self.left.complexity[y + j];

                for x in 0usize..2 {
                    let i = x + y * 2 + if j == 5 { 16 } else { 20 };
                    let block = &mut blocks[i * 16..i * 16 + 16];

                    let complexity = self.top[mbx].complexity[x + j] + left;
                    let dcq = self.segment[sindex].uvdc;
                    let acq = self.segment[sindex].uvac;

                    let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);
                    if block[0] != 0 || n {
                        transform::idct4x4(block);
                    }

                    left = if n { 1 } else { 0 };
                    self.top[mbx].complexity[x + j] = if n { 1 } else { 0 };
                }

                self.left.complexity[y + j] = left;
            }
        }

        blocks
    }
+
    /// Decodes the current frame and returns a reference to it
    pub fn decode_frame(&mut self) -> ImageResult<&Frame> {
        self.read_frame_header()?;

        for mby in 0..self.mbheight as usize {
            // Macroblock rows are assigned round-robin to the coefficient partitions.
            let p = mby % self.num_partitions as usize;
            // Reset the left prediction context at the start of each row.
            self.left = MacroBlock::default();

            for mbx in 0..self.mbwidth as usize {
                let (skip, mb) = self.read_macroblock_header(mbx)?;
                let blocks = if !skip {
                    self.read_residual_data(&mb, mbx, p)
                } else {
                    // Skipped macroblock: clear the non-zero-coefficient
                    // context and use an all-zero residual.
                    if mb.luma_mode != LumaMode::B {
                        self.left.complexity[0] = 0;
                        self.top[mbx].complexity[0] = 0;
                    }

                    for i in 1usize..9 {
                        self.left.complexity[i] = 0;
                        self.top[mbx].complexity[i] = 0;
                    }

                    [0i32; 384]
                };

                self.intra_predict(mbx, mby, &mb, &blocks);
            }

            // Reset the left border pixels to the default value for the next row.
            self.left_border = vec![129u8; 1 + 16];
        }

        Ok(&self.frame)
    }
+}
+
+impl LumaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => LumaMode::DC,
+ V_PRED => LumaMode::V,
+ H_PRED => LumaMode::H,
+ TM_PRED => LumaMode::TM,
+ B_PRED => LumaMode::B,
+ _ => return None,
+ })
+ }
+
+ fn into_intra(self) -> Option<IntraMode> {
+ Some(match self {
+ LumaMode::DC => IntraMode::DC,
+ LumaMode::V => IntraMode::VE,
+ LumaMode::H => IntraMode::HE,
+ LumaMode::TM => IntraMode::TM,
+ LumaMode::B => return None,
+ })
+ }
+}
+
+impl Default for LumaMode {
+ fn default() -> Self {
+ LumaMode::DC
+ }
+}
+
+impl ChromaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => ChromaMode::DC,
+ V_PRED => ChromaMode::V,
+ H_PRED => ChromaMode::H,
+ TM_PRED => ChromaMode::TM,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for ChromaMode {
+ fn default() -> Self {
+ ChromaMode::DC
+ }
+}
+
+impl IntraMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ B_DC_PRED => IntraMode::DC,
+ B_TM_PRED => IntraMode::TM,
+ B_VE_PRED => IntraMode::VE,
+ B_HE_PRED => IntraMode::HE,
+ B_LD_PRED => IntraMode::LD,
+ B_RD_PRED => IntraMode::RD,
+ B_VR_PRED => IntraMode::VR,
+ B_VL_PRED => IntraMode::VL,
+ B_HD_PRED => IntraMode::HD,
+ B_HU_PRED => IntraMode::HU,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for IntraMode {
+ fn default() -> Self {
+ IntraMode::DC
+ }
+}
+
+fn init_top_macroblocks(width: usize) -> Vec<MacroBlock> {
+ let mb_width = (width + 15) / 16;
+
+ let mb = MacroBlock {
+ // Section 11.3 #3
+ bpred: [IntraMode::DC; 16],
+ luma_mode: LumaMode::DC,
+ .. MacroBlock::default()
+ };
+
+ vec![mb; mb_width]
+}
+
/// Builds the 21x17 prediction workspace for one macroblock: the above row
/// (with 4 extra above-right pixels), the left column and the corner pixel,
/// taken from the saved borders or the default values 127/129 at frame edges.
fn create_border(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8]) -> [u8; 357] {
    let stride = 1usize + 16 + 4;
    let mut ws = [0u8; (1 + 16) * (1 + 16 + 4)];

    // A: the row above the block (16 pixels + 4 above-right pixels).
    {
        let above = &mut ws[1..stride];
        if mby == 0 {
            // Top row of the frame: default border value.
            for px in above.iter_mut() {
                *px = 127;
            }
        } else {
            above[..16].copy_from_slice(&top[mbx * 16..mbx * 16 + 16]);

            if mbx == mbw - 1 {
                // Rightmost macroblock: replicate the last pixel.
                for px in &mut above[16..] {
                    *px = top[mbx * 16 + 15];
                }
            } else {
                above[16..].copy_from_slice(&top[mbx * 16 + 16..mbx * 16 + 20]);
            }
        }
    }

    // Replicate the above-right pixels next to rows 4, 8 and 12 for the
    // subblock predictors.
    for &row in &[4usize, 8, 12] {
        for i in 17usize..stride {
            ws[row * stride + i] = ws[i];
        }
    }

    // L: the column to the left of the block.
    for i in 0usize..16 {
        ws[(i + 1) * stride] = if mbx == 0 { 129 } else { left[i + 1] };
    }

    // P: the corner pixel above and to the left.
    ws[0] = if mby == 0 {
        127
    } else if mbx == 0 {
        129
    } else {
        left[0]
    };

    ws
}
+
/// Rounded 3-tap average weighting the middle pixel double:
/// `(left + 2*this + right + 2) / 4`.
fn avg3(left: u8, this: u8, right: u8) -> u8 {
    let sum = u16::from(left) + 2 * u16::from(this) + u16::from(right);
    ((sum + 2) / 4) as u8
}
+
/// Rounded 2-tap average: `(this + right + 1) / 2`.
fn avg2(this: u8, right: u8) -> u8 {
    ((u16::from(this) + u16::from(right) + 1) / 2) as u8
}
+
+fn add_residue(pblock: &mut [u8], rblock: &[i32], y0: usize, x0: usize, stride: usize) {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let a = rblock[x + y * 4];
+ let b = pblock[(y0 + y) * stride + x0 + x];
+ let c = clamp(a + i32::from(b), 0, 255) as u8;
+ pblock[(y0 + y) * stride + x0 + x] = c;
+ }
+ }
+}
+
+fn predict_4x4(ws: &mut [u8], stride: usize, modes: &[IntraMode], resdata: &[i32]) {
+ for sby in 0usize..4 {
+ for sbx in 0usize..4 {
+ let i = sbx + sby * 4;
+ let y0 = sby * 4 + 1;
+ let x0 = sbx * 4 + 1;
+ let rb = &resdata[i * 16..i * 16 + 16];
+
+ match modes[i] {
+ IntraMode::TM => predict_tmpred(ws, 4, x0, y0, stride),
+ IntraMode::VE => predict_bvepred(ws, x0, y0, stride),
+ IntraMode::HE => predict_bhepred(ws, x0, y0, stride),
+ IntraMode::DC => predict_bdcpred(ws, x0, y0, stride),
+ IntraMode::LD => predict_bldpred(ws, x0, y0, stride),
+ IntraMode::RD => predict_brdpred(ws, x0, y0, stride),
+ IntraMode::VR => predict_bvrpred(ws, x0, y0, stride),
+ IntraMode::VL => predict_bvlpred(ws, x0, y0, stride),
+ IntraMode::HD => predict_bhdpred(ws, x0, y0, stride),
+ IntraMode::HU => predict_bhupred(ws, x0, y0, stride),
+ }
+
+ add_residue(ws, rb, y0, x0, stride);
+ }
+ }
+}
+
/// Vertical prediction: every row of the block copies the row directly
/// above it, propagating the border row down the whole block.
fn predict_vpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
    for row in 0usize..size {
        for col in 0usize..size {
            let above = a[(x0 + col) + stride * (y0 + row - 1)];
            a[(x0 + col) + stride * (y0 + row)] = above;
        }
    }
}
+
/// Horizontal prediction: every pixel copies its left neighbour,
/// propagating the border column across the whole block.
fn predict_hpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
    for row in 0usize..size {
        for col in 0usize..size {
            let left = a[(x0 + col - 1) + stride * (y0 + row)];
            a[(x0 + col) + stride * (y0 + row)] = left;
        }
    }
}
+
/// DC prediction: fills the size x size block (at offset 1,1 in the
/// workspace) with the rounded average of the available above/left border
/// pixels, or 128 when neither border is available.
fn predict_dcpred(a: &mut [u8], size: usize, stride: usize, above: bool, left: bool) {
    let mut sum = 0u32;
    let mut shf = if size == 8 { 2 } else { 3 };

    if left {
        for row in 0usize..size {
            sum += u32::from(a[(row + 1) * stride]);
        }
        shf += 1;
    }

    if above {
        for col in 0usize..size {
            sum += u32::from(a[col + 1]);
        }
        shf += 1;
    }

    // Round-to-nearest; with no border at all default to mid-gray.
    let dcval = if left || above {
        (sum + (1 << (shf - 1))) >> shf
    } else {
        128
    };

    for row in 0usize..size {
        for col in 0usize..size {
            a[(col + 1) + stride * (row + 1)] = dcval as u8;
        }
    }
}
+
+fn predict_tmpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ let pred = i32::from(a[(y0 + y) * stride + x0 - 1])
+ + i32::from(a[(y0 - 1) * stride + x0 + x])
+ - i32::from(a[(y0 - 1) * stride + x0 - 1]);
+
+ a[(x + x0) + stride * (y + y0)] = clamp(pred, 0, 255) as u8;
+ }
+ }
+}
+
/// DC prediction for a 4x4 subblock: the rounded average of the four left
/// and four above border pixels.
fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let mut sum = 4u32; // rounding bias for the >> 3 below
    for i in 0usize..4 {
        sum += u32::from(a[(y0 + i) * stride + x0 - 1]);
        sum += u32::from(a[(y0 - 1) * stride + x0 + i]);
    }
    let dc = (sum >> 3) as u8;

    for row in 0usize..4 {
        for col in 0usize..4 {
            a[(y0 + row) * stride + x0 + col] = dc;
        }
    }
}
+
/// The pixel diagonally above and to the left of the subblock at (x0, y0).
fn topleft_pixel(a: &[u8], x0: usize, y0: usize, stride: usize) -> u8 {
    let row_above = (y0 - 1) * stride;
    a[row_above + x0 - 1]
}
+
/// The eight pixels in the row directly above the subblock, left to right
/// (the last four extend past the subblock to its above-right).
fn top_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8, u8, u8, u8, u8) {
    let base = (y0 - 1) * stride + x0;
    let row = &a[base..base + 8];

    (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])
}
+
/// The four pixels in the column directly left of the subblock, top to bottom.
fn left_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8) {
    let mut col = [0u8; 4];
    for (i, px) in col.iter_mut().enumerate() {
        *px = a[(y0 + i) * stride + x0 - 1];
    }

    (col[0], col[1], col[2], col[3])
}
+
/// The nine edge pixels around the subblock's top-left corner, ordered
/// e0..e8: left column bottom-to-top (e0..e3), the corner (e4), then the
/// above row left-to-right (e5..e8).
fn edge_pixels(
    a: &[u8],
    x0: usize,
    y0: usize,
    stride: usize,
) -> (u8, u8, u8, u8, u8, u8, u8, u8, u8) {
    let above = (y0 - 1) * stride + x0;
    let left_col = x0 - 1;

    (
        a[(y0 + 3) * stride + left_col], // e0
        a[(y0 + 2) * stride + left_col], // e1
        a[(y0 + 1) * stride + left_col], // e2
        a[y0 * stride + left_col],       // e3
        a[above - 1],                    // e4 (corner)
        a[above],                        // e5
        a[above + 1],                    // e6
        a[above + 2],                    // e7
        a[above + 3],                    // e8
    )
}
+
+fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg3(p, a0, a1);
+ a[(y0 + 1) * stride + x0] = avg3(p, a0, a1);
+ a[(y0 + 2) * stride + x0] = avg3(p, a0, a1);
+ a[(y0 + 3) * stride + x0] = avg3(p, a0, a1);
+
+ a[y0 * stride + x0 + 1] = avg3(a0, a1, a2);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(a0, a1, a2);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(a0, a1, a2);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(a0, a1, a2);
+
+ a[y0 * stride + x0 + 2] = avg3(a1, a2, a3);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(a1, a2, a3);
+ a[(y0 + 2) * stride + x0 + 2] = avg3(a1, a2, a3);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(a1, a2, a3);
+
+ a[y0 * stride + x0 + 3] = avg3(a2, a3, a4);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(a2, a3, a4);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(a2, a3, a4);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(a2, a3, a4);
+}
+
+fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg3(p, l0, l1);
+ a[y0 * stride + x0 + 1] = avg3(p, l0, l1);
+ a[y0 * stride + x0 + 2] = avg3(p, l0, l1);
+ a[y0 * stride + x0 + 3] = avg3(p, l0, l1);
+
+ a[(y0 + 1) * stride + x0] = avg3(l0, l1, l2);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(l0, l1, l2);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(l0, l1, l2);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(l0, l1, l2);
+
+ a[(y0 + 2) * stride + x0] = avg3(l1, l2, l3);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(l1, l2, l3);
+ a[(y0 + 2) * stride + x0 + 2] = avg3(l1, l2, l3);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(l1, l2, l3);
+
+ a[(y0 + 3) * stride + x0] = avg3(l2, l3, l3);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(l2, l3, l3);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(l2, l3, l3);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(l2, l3, l3);
+}
+
+fn predict_bldpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg3(a0, a1, a2);
+ a[y0 * stride + x0 + 1] = avg3(a1, a2, a3);
+ a[(y0 + 1) * stride + x0] = avg3(a1, a2, a3);
+ a[y0 * stride + x0 + 2] = avg3(a2, a3, a4);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(a2, a3, a4);
+ a[(y0 + 2) * stride + x0] = avg3(a2, a3, a4);
+ a[y0 * stride + x0 + 3] = avg3(a3, a4, a5);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(a3, a4, a5);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(a3, a4, a5);
+ a[(y0 + 3) * stride + x0] = avg3(a3, a4, a5);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(a4, a5, a6);
+ a[(y0 + 2) * stride + x0 + 2] = avg3(a4, a5, a6);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(a4, a5, a6);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(a5, a6, a7);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(a5, a6, a7);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(a6, a7, a7);
+}
+
+fn predict_brdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);
+
+ a[(y0 + 3) * stride + x0] = avg3(e0, e1, e2);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(e1, e2, e3);
+ a[(y0 + 2) * stride + x0] = avg3(e1, e2, e3);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(e2, e3, e4);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(e2, e3, e4);
+ a[(y0 + 1) * stride + x0] = avg3(e2, e3, e4);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(e3, e4, e5);
+ a[(y0 + 2) * stride + x0 + 2] = avg3(e3, e4, e5);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(e3, e4, e5);
+ a[y0 * stride + x0] = avg3(e3, e4, e5);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(e4, e5, e6);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(e4, e5, e6);
+ a[y0 * stride + x0 + 1] = avg3(e4, e5, e6);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(e5, e6, e7);
+ a[y0 * stride + x0 + 2] = avg3(e5, e6, e7);
+ a[y0 * stride + x0 + 3] = avg3(e6, e7, e8);
+}
+
/// B_VR_PRED: predicts the 4x4 subblock in a down-right direction at half-pel
/// slope, mixing 2-tap (`avg2`) and 3-tap (`avg3`) filters of the edge
/// pixels `e1..e8`. Assignments are grouped by source pixels; pixels two
/// rows apart on the same diagonal share a value.
fn predict_bvrpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (_, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);

    a[(y0 + 3) * stride + x0] = avg3(e1, e2, e3);
    a[(y0 + 2) * stride + x0] = avg3(e2, e3, e4);
    a[(y0 + 3) * stride + x0 + 1] = avg3(e3, e4, e5);
    a[(y0 + 1) * stride + x0] = avg3(e3, e4, e5);
    a[(y0 + 2) * stride + x0 + 1] = avg2(e4, e5);
    a[y0 * stride + x0] = avg2(e4, e5);
    a[(y0 + 3) * stride + x0 + 2] = avg3(e4, e5, e6);
    a[(y0 + 1) * stride + x0 + 1] = avg3(e4, e5, e6);
    a[(y0 + 2) * stride + x0 + 2] = avg2(e5, e6);
    a[y0 * stride + x0 + 1] = avg2(e5, e6);
    a[(y0 + 3) * stride + x0 + 3] = avg3(e5, e6, e7);
    a[(y0 + 1) * stride + x0 + 2] = avg3(e5, e6, e7);
    a[(y0 + 2) * stride + x0 + 3] = avg2(e6, e7);
    a[y0 * stride + x0 + 2] = avg2(e6, e7);
    a[(y0 + 1) * stride + x0 + 3] = avg3(e6, e7, e8);
    a[y0 * stride + x0 + 3] = avg2(e7, e8);
}
+
/// B_VL_PRED: predicts the 4x4 subblock in a down-left direction at half-pel
/// slope from the above row `a0..a7`, mixing 2-tap (`avg2`) and 3-tap
/// (`avg3`) filters. Assignments are grouped by source pixels; pixels two
/// rows apart on the same diagonal share a value.
fn predict_bvlpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);

    a[y0 * stride + x0] = avg2(a0, a1);
    a[(y0 + 1) * stride + x0] = avg3(a0, a1, a2);
    a[(y0 + 2) * stride + x0] = avg2(a1, a2);
    a[y0 * stride + x0 + 1] = avg2(a1, a2);
    a[(y0 + 1) * stride + x0 + 1] = avg3(a1, a2, a3);
    a[(y0 + 3) * stride + x0] = avg3(a1, a2, a3);
    a[(y0 + 2) * stride + x0 + 1] = avg2(a2, a3);
    a[y0 * stride + x0 + 2] = avg2(a2, a3);
    a[(y0 + 3) * stride + x0 + 1] = avg3(a2, a3, a4);
    a[(y0 + 1) * stride + x0 + 2] = avg3(a2, a3, a4);
    a[(y0 + 2) * stride + x0 + 2] = avg2(a3, a4);
    a[y0 * stride + x0 + 3] = avg2(a3, a4);
    a[(y0 + 3) * stride + x0 + 2] = avg3(a3, a4, a5);
    a[(y0 + 1) * stride + x0 + 3] = avg3(a3, a4, a5);
    a[(y0 + 2) * stride + x0 + 3] = avg3(a4, a5, a6);
    a[(y0 + 3) * stride + x0 + 3] = avg3(a5, a6, a7);
}
+
/// B_HD_PRED: predicts the 4x4 subblock in a right-down direction at
/// half-pel slope from the edge pixels `e0..e7`, mixing 2-tap (`avg2`) and
/// 3-tap (`avg3`) filters. Assignments are grouped by source pixels; pixels
/// two columns apart on the same diagonal share a value.
fn predict_bhdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (e0, e1, e2, e3, e4, e5, e6, e7, _) = edge_pixels(a, x0, y0, stride);

    a[(y0 + 3) * stride + x0] = avg2(e0, e1);
    a[(y0 + 3) * stride + x0 + 1] = avg3(e0, e1, e2);
    a[(y0 + 2) * stride + x0] = avg2(e1, e2);
    a[(y0 + 3) * stride + x0 + 2] = avg2(e1, e2);
    a[(y0 + 2) * stride + x0 + 1] = avg3(e1, e2, e3);
    a[(y0 + 3) * stride + x0 + 3] = avg3(e1, e2, e3);
    a[(y0 + 2) * stride + x0 + 2] = avg2(e2, e3);
    a[(y0 + 1) * stride + x0] = avg2(e2, e3);
    a[(y0 + 2) * stride + x0 + 3] = avg3(e2, e3, e4);
    a[(y0 + 1) * stride + x0 + 1] = avg3(e2, e3, e4);
    a[(y0 + 1) * stride + x0 + 2] = avg2(e3, e4);
    a[y0 * stride + x0] = avg2(e3, e4);
    a[(y0 + 1) * stride + x0 + 3] = avg3(e3, e4, e5);
    a[y0 * stride + x0 + 1] = avg3(e3, e4, e5);
    a[y0 * stride + x0 + 2] = avg3(e4, e5, e6);
    a[y0 * stride + x0 + 3] = avg3(e5, e6, e7);
}
+
/// B_HU_PRED: predicts the 4x4 subblock in a right-up direction at half-pel
/// slope from the left column `l0..l3`, mixing 2-tap (`avg2`) and 3-tap
/// (`avg3`) filters. Positions past the last left pixel are filled with `l3`.
fn predict_bhupred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);

    a[y0 * stride + x0] = avg2(l0, l1);
    a[y0 * stride + x0 + 1] = avg3(l0, l1, l2);
    a[y0 * stride + x0 + 2] = avg2(l1, l2);
    a[(y0 + 1) * stride + x0] = avg2(l1, l2);
    a[y0 * stride + x0 + 3] = avg3(l1, l2, l3);
    a[(y0 + 1) * stride + x0 + 1] = avg3(l1, l2, l3);
    a[(y0 + 1) * stride + x0 + 2] = avg2(l2, l3);
    a[(y0 + 2) * stride + x0] = avg2(l2, l3);
    a[(y0 + 1) * stride + x0 + 3] = avg3(l2, l3, l3);
    a[(y0 + 2) * stride + x0 + 1] = avg3(l2, l3, l3);
    a[(y0 + 2) * stride + x0 + 2] = l3;
    a[(y0 + 2) * stride + x0 + 3] = l3;
    a[(y0 + 3) * stride + x0] = l3;
    a[(y0 + 3) * stride + x0 + 1] = l3;
    a[(y0 + 3) * stride + x0 + 2] = l3;
    a[(y0 + 3) * stride + x0 + 3] = l3;
}