author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 19:33:14 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 19:33:14 +0000
commit	36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree	105e8c98ddea1c1e4784a60a5a6410fa416be2de /media/ffvpx/libavutil/mem_internal.h
parent	Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--	media/ffvpx/libavutil/mem_internal.h	138
1 file changed, 138 insertions, 0 deletions
diff --git a/media/ffvpx/libavutil/mem_internal.h b/media/ffvpx/libavutil/mem_internal.h
new file mode 100644
index 0000000000..2448c606f1
--- /dev/null
+++ b/media/ffvpx/libavutil/mem_internal.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MEM_INTERNAL_H
+#define AVUTIL_MEM_INTERNAL_H
+
+#include "config.h"
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "macros.h"
+#include "mem.h"
+#include "version.h"
+
+/**
+ * @def DECLARE_ALIGNED(n,t,v)
+ * Declare a variable that is aligned in memory.
+ *
+ * @code{.c}
+ * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
+ * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
+ *
+ * // The default-alignment equivalent would be
+ * uint16_t aligned_int = 42;
+ * uint8_t aligned_array[128];
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_ALIGNED(n,t,v)
+ * Declare an aligned variable appropriate for use in inline assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_CONST(n,t,v)
+ * Declare a static constant aligned variable appropriate for use in inline
+ * assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__DJGPP__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+#elif defined(__GNUC__) || defined(__clang__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
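+
+/* Expansion sketch (illustrative, derived from the branches above):
+ * under GCC/Clang,
+ *     DECLARE_ALIGNED(16, uint16_t, x);
+ * becomes
+ *     uint16_t __attribute__ ((aligned (16))) x;
+ * and under MSVC
+ *     __declspec(align(16)) uint16_t x;
+ * Note that the final fallback branch provides no extra alignment at all.
+ */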
+
+// Some broken preprocessors need a second expansion
+// to be forced to tokenize __VA_ARGS__
+#define E1(x) x
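+/* E.g. with a preprocessor that passes __VA_ARGS__ through as a single
+ * argument (MSVC's traditional preprocessor is the usual suspect), the
+ * E1() wrapper forces a second scan so that LOCAL_ALIGNED_A/_D below
+ * still receive s and o as separate parameters. */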
+
+#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
+ uint8_t la_##v[sizeof(t s o) + (a)]; \
+ t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
+
+#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
+ DECLARE_ALIGNED(a, t, la_##v) s o; \
+ t (*v) o = la_##v
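+
+/* Sketch of what each variant produces (derived from the macros above):
+ * LOCAL_ALIGNED_A over-allocates a plain byte buffer on the stack and
+ * rounds the pointer up to the next a-byte boundary with FFALIGN, while
+ * LOCAL_ALIGNED_D relies on DECLARE_ALIGNED. For instance
+ *     LOCAL_ALIGNED_A(16, uint8_t, buf, [2], [16],)
+ * expands to
+ *     uint8_t la_buf[sizeof(uint8_t [2][16]) + (16)];
+ *     uint8_t (*buf)[16] = (void *)FFALIGN((uintptr_t)la_buf, 16);
+ */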
+
+#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
+#endif
+
+#endif /* AVUTIL_MEM_INTERNAL_H */