path: root/gfx/angle/checkout/src/image_util/loadimage.inc
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /gfx/angle/checkout/src/image_util/loadimage.inc
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/angle/checkout/src/image_util/loadimage.inc')
-rw-r--r--  gfx/angle/checkout/src/image_util/loadimage.inc  |  201
1 file changed, 201 insertions, 0 deletions
diff --git a/gfx/angle/checkout/src/image_util/loadimage.inc b/gfx/angle/checkout/src/image_util/loadimage.inc
new file mode 100644
index 0000000000..58c0b9faf5
--- /dev/null
+++ b/gfx/angle/checkout/src/image_util/loadimage.inc
@@ -0,0 +1,201 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "common/mathutil.h"
+
+#include <string.h>
+
+namespace angle
+{
+
+namespace priv
+{
+
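+// Returns a pointer to the start of row y in depth slice z, given row and depth pitches in bytes.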
+template <typename T>
+inline T *OffsetDataPointer(uint8_t *data, size_t y, size_t z, size_t rowPitch, size_t depthPitch)
+{
+ return reinterpret_cast<T*>(data + (y * rowPitch) + (z * depthPitch));
+}
+
+template <typename T>
+inline const T *OffsetDataPointer(const uint8_t *data, size_t y, size_t z, size_t rowPitch, size_t depthPitch)
+{
+ return reinterpret_cast<const T*>(data + (y * rowPitch) + (z * depthPitch));
+}
+
+} // namespace priv
+
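+// Copies a width x height x depth image of componentCount components of 'type' from input to
+// output, collapsing to one memcpy per layer (or per image) when the pitches are tightly packed.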
+template <typename type, size_t componentCount>
+inline void LoadToNative(size_t width, size_t height, size_t depth,
+ const uint8_t *input, size_t inputRowPitch, size_t inputDepthPitch,
+ uint8_t *output, size_t outputRowPitch, size_t outputDepthPitch)
+{
+ const size_t rowSize = width * sizeof(type) * componentCount;
+ const size_t layerSize = rowSize * height;
+ const size_t imageSize = layerSize * depth;
+
+ if (layerSize == inputDepthPitch && layerSize == outputDepthPitch)
+ {
+ ASSERT(rowSize == inputRowPitch && rowSize == outputRowPitch);
+ memcpy(output, input, imageSize);
+ }
+ else if (rowSize == inputRowPitch && rowSize == outputRowPitch)
+ {
+ for (size_t z = 0; z < depth; z++)
+ {
+ const type *source = priv::OffsetDataPointer<type>(input, 0, z, inputRowPitch, inputDepthPitch);
+ type *dest = priv::OffsetDataPointer<type>(output, 0, z, outputRowPitch, outputDepthPitch);
+
+ memcpy(dest, source, layerSize);
+ }
+ }
+ else
+ {
+ for (size_t z = 0; z < depth; z++)
+ {
+ for (size_t y = 0; y < height; y++)
+ {
+ const type *source = priv::OffsetDataPointer<type>(input, y, z, inputRowPitch, inputDepthPitch);
+ type *dest = priv::OffsetDataPointer<type>(output, y, z, outputRowPitch, outputDepthPitch);
+ memcpy(dest, source, width * sizeof(type) * componentCount);
+ }
+ }
+ }
+}
+
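+// Expands three-component texels to four components, writing the constant bit pattern
+// 'fourthComponentBits' into the added fourth channel.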
+template <typename type, uint32_t fourthComponentBits>
+inline void LoadToNative3To4(size_t width, size_t height, size_t depth,
+ const uint8_t *input, size_t inputRowPitch, size_t inputDepthPitch,
+ uint8_t *output, size_t outputRowPitch, size_t outputDepthPitch)
+{
+ const type fourthValue = gl::bitCast<type>(fourthComponentBits);
+
+ for (size_t z = 0; z < depth; z++)
+ {
+ for (size_t y = 0; y < height; y++)
+ {
+ const type *source = priv::OffsetDataPointer<type>(input, y, z, inputRowPitch, inputDepthPitch);
+ type *dest = priv::OffsetDataPointer<type>(output, y, z, outputRowPitch, outputDepthPitch);
+ for (size_t x = 0; x < width; x++)
+ {
+ memcpy(&dest[x * 4], &source[x * 3], sizeof(type) * 3);
+ dest[x * 4 + 3] = fourthValue;
+ }
+ }
+ }
+}
+
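+// Converts each 32-bit float component to a 16-bit half float.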
+template <size_t componentCount>
+inline void Load32FTo16F(size_t width, size_t height, size_t depth,
+ const uint8_t *input, size_t inputRowPitch, size_t inputDepthPitch,
+ uint8_t *output, size_t outputRowPitch, size_t outputDepthPitch)
+{
+ const size_t elementWidth = componentCount * width;
+
+ for (size_t z = 0; z < depth; z++)
+ {
+ for (size_t y = 0; y < height; y++)
+ {
+ const float *source = priv::OffsetDataPointer<float>(input, y, z, inputRowPitch, inputDepthPitch);
+ uint16_t *dest = priv::OffsetDataPointer<uint16_t>(output, y, z, outputRowPitch, outputDepthPitch);
+
+ for (size_t x = 0; x < elementWidth; x++)
+ {
+ dest[x] = gl::float32ToFloat16(source[x]);
+ }
+ }
+ }
+}
+
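+// Copies block-compressed data one row of blocks at a time, rounding the image
+// dimensions up to whole blocks.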
+template <size_t blockWidth, size_t blockHeight, size_t blockDepth, size_t blockSize>
+inline void LoadCompressedToNative(size_t width, size_t height, size_t depth,
+ const uint8_t *input, size_t inputRowPitch, size_t inputDepthPitch,
+ uint8_t *output, size_t outputRowPitch, size_t outputDepthPitch)
+{
+ const size_t columns = (width + (blockWidth - 1)) / blockWidth;
+ const size_t rows = (height + (blockHeight - 1)) / blockHeight;
+ const size_t layers = (depth + (blockDepth - 1)) / blockDepth;
+
+ for (size_t z = 0; z < layers; ++z)
+ {
+ for (size_t y = 0; y < rows; ++y)
+ {
+ const uint8_t *source = priv::OffsetDataPointer<uint8_t>(input, y, z, inputRowPitch, inputDepthPitch);
+ uint8_t *dest = priv::OffsetDataPointer<uint8_t>(output, y, z, outputRowPitch, outputDepthPitch);
+ memcpy(dest, source, columns * blockSize);
+ }
+ }
+}
+
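+// Fills every texel with the constant four-component value given by the template bit patterns.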
+template <typename type, uint32_t firstBits, uint32_t secondBits, uint32_t thirdBits, uint32_t fourthBits>
+inline void Initialize4ComponentData(size_t width, size_t height, size_t depth,
+ uint8_t *output, size_t outputRowPitch, size_t outputDepthPitch)
+{
+ type writeValues[4] =
+ {
+ gl::bitCast<type>(firstBits),
+ gl::bitCast<type>(secondBits),
+ gl::bitCast<type>(thirdBits),
+ gl::bitCast<type>(fourthBits),
+ };
+
+ for (size_t z = 0; z < depth; z++)
+ {
+ for (size_t y = 0; y < height; y++)
+ {
+ type *destRow = priv::OffsetDataPointer<type>(output, y, z, outputRowPitch, outputDepthPitch);
+ for (size_t x = 0; x < width; x++)
+ {
+ type* destPixel = destRow + x * 4;
+
+ // This could potentially be optimized by generating an entire row of initialization
+ // data and copying row by row instead of pixel by pixel.
+ memcpy(destPixel, writeValues, sizeof(type) * 4);
+ }
+ }
+ }
+}
+
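+// Converts ASTC-compressed blocks of the given block dimensions to RGBA8 by delegating to
+// LoadASTCToRGBA8Inner.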
+template <size_t blockWidth, size_t blockHeight>
+inline void LoadASTCToRGBA8(size_t width,
+ size_t height,
+ size_t depth,
+ const uint8_t *input,
+ size_t inputRowPitch,
+ size_t inputDepthPitch,
+ uint8_t *output,
+ size_t outputRowPitch,
+ size_t outputDepthPitch)
+{
+ LoadASTCToRGBA8Inner(width, height, depth, blockWidth, blockHeight, input, inputRowPitch,
+ inputDepthPitch, output, outputRowPitch, outputDepthPitch);
+}
+
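+// Expands paletted texels (4- or 8-bit indices into a 16-, 24-, or 32-bit color palette) to
+// RGBA8 by delegating to LoadPalettedToRGBA8Impl.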
+template <uint32_t indexBits, uint32_t redBlueBits, uint32_t greenBits, uint32_t alphaBits>
+inline void LoadPalettedToRGBA8(size_t width,
+ size_t height,
+ size_t depth,
+ const uint8_t *input,
+ size_t inputRowPitch,
+ size_t inputDepthPitch,
+ uint8_t *output,
+ size_t outputRowPitch,
+ size_t outputDepthPitch)
+{
+ static_assert(indexBits == 4 || indexBits == 8);
+ static_assert(redBlueBits == 4 || redBlueBits == 5 || redBlueBits == 8);
+ static_assert(greenBits == 4 || greenBits == 5 || greenBits == 6 || greenBits == 8);
+ static_assert(alphaBits == 0 || alphaBits == 1 || alphaBits == 4 || alphaBits == 8);
+ constexpr uint32_t colorBits = 2 * redBlueBits + greenBits + alphaBits;
+ static_assert(colorBits == 16 || colorBits == 24 || colorBits == 32);
+
+ LoadPalettedToRGBA8Impl(width, height, depth,
+ indexBits, redBlueBits, greenBits, alphaBits,
+ input, inputRowPitch, inputDepthPitch,
+ output, outputRowPitch, outputDepthPitch);
+}
+
+} // namespace angle