Diffstat (limited to 'media/libvpx/libvpx/vpx_ports')
26 files changed, 2483 insertions, 0 deletions
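The files added below provide per-architecture runtime capability queries (arm_cpu_caps(), x86_simd_caps(), mips_cpu_caps(), ppc_simd_caps(), loongarch_cpu_caps()), each returning a bitmask of HAS_* flags; the ARM, x86, and PPC variants also honor the VPX_SIMD_CAPS / VPX_SIMD_CAPS_MASK environment overrides. As a rough consumer-side sketch (illustrative only, not part of this patch; the SAD kernels are hypothetical stand-ins for real RTCD entries), such a mask is typically checked once at init to select a SIMD code path:

/* Illustrative sketch only -- not part of this patch. Assumes an x86 build;
 * my_sad_c() / my_sad_avx2() are hypothetical stand-ins for real kernels. */
#include "vpx_ports/x86.h" /* x86_simd_caps(), HAS_AVX2 */

typedef unsigned int (*sad_fn)(const unsigned char *a, const unsigned char *b,
                               int n);

static unsigned int my_sad_c(const unsigned char *a, const unsigned char *b,
                             int n) {
  unsigned int sum = 0;
  int i;
  for (i = 0; i < n; ++i) sum += (a[i] > b[i]) ? a[i] - b[i] : b[i] - a[i];
  return sum;
}

/* A real build would point this at an AVX2 kernel; the C path stands in here. */
static unsigned int my_sad_avx2(const unsigned char *a, const unsigned char *b,
                                int n) {
  return my_sad_c(a, b, n);
}

static sad_fn pick_sad(void) {
  const int caps = x86_simd_caps(); /* honors VPX_SIMD_CAPS[_MASK] overrides */
  return (caps & HAS_AVX2) ? my_sad_avx2 : my_sad_c;
}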
diff --git a/media/libvpx/libvpx/vpx_ports/arm.h b/media/libvpx/libvpx/vpx_ports/arm.h new file mode 100644 index 0000000000..6458a2c5b0 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/arm.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_ARM_H_ +#define VPX_VPX_PORTS_ARM_H_ +#include <stdlib.h> +#include "vpx_config.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*ARMv5TE "Enhanced DSP" instructions.*/ +#define HAS_EDSP 0x01 +/*ARMv6 "Parallel" or "Media" instructions.*/ +#define HAS_MEDIA 0x02 +/*ARMv7 optional NEON instructions.*/ +#define HAS_NEON 0x04 + +int arm_cpu_caps(void); + +// Earlier gcc compilers have issues with some neon intrinsics +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 4 && \ + __GNUC_MINOR__ <= 6 +#define VPX_INCOMPATIBLE_GCC +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_ARM_H_ diff --git a/media/libvpx/libvpx/vpx_ports/arm_cpudetect.c b/media/libvpx/libvpx/vpx_ports/arm_cpudetect.c new file mode 100644 index 0000000000..4f9d480ade --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/arm_cpudetect.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <stdlib.h> +#include <string.h> + +#include "./vpx_config.h" +#include "vpx_ports/arm.h" + +#ifdef WINAPI_FAMILY +#include <winapifamily.h> +#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +#define getenv(x) NULL +#endif +#endif + +static int arm_cpu_env_flags(int *flags) { + char *env; + env = getenv("VPX_SIMD_CAPS"); + if (env && *env) { + *flags = (int)strtol(env, NULL, 0); + return 0; + } + *flags = 0; + return -1; +} + +static int arm_cpu_env_mask(void) { + char *env; + env = getenv("VPX_SIMD_CAPS_MASK"); + return env && *env ? (int)strtol(env, NULL, 0) : ~0; +} + +#if !CONFIG_RUNTIME_CPU_DETECT + +int arm_cpu_caps(void) { + /* This function should actually be a no-op. There is no way to adjust any of + * these because the RTCD tables do not exist: the functions are called + * statically */ + int flags; + int mask; + if (!arm_cpu_env_flags(&flags)) { + return flags; + } + mask = arm_cpu_env_mask(); +#if HAVE_NEON || HAVE_NEON_ASM + flags |= HAS_NEON; +#endif /* HAVE_NEON || HAVE_NEON_ASM */ + return flags & mask; +} + +#elif defined(_MSC_VER) /* end !CONFIG_RUNTIME_CPU_DETECT */ +/*For GetExceptionCode() and EXCEPTION_ILLEGAL_INSTRUCTION.*/ +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#ifndef WIN32_EXTRA_LEAN +#define WIN32_EXTRA_LEAN +#endif +#include <windows.h> + +int arm_cpu_caps(void) { + int flags; + int mask; + if (!arm_cpu_env_flags(&flags)) { + return flags; + } + mask = arm_cpu_env_mask(); +/* MSVC has no inline __asm support for ARM, but it does let you __emit + * instructions via their assembled hex code. 
+ * All of these instructions should be essentially nops. + */ +#if HAVE_NEON || HAVE_NEON_ASM + if (mask & HAS_NEON) { + __try { + /*VORR q0,q0,q0*/ + __emit(0xF2200150); + flags |= HAS_NEON; + } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) { + /*Ignore exception.*/ + } + } +#endif /* HAVE_NEON || HAVE_NEON_ASM */ + return flags & mask; +} + +#elif defined(__ANDROID__) /* end _MSC_VER */ +#include <cpu-features.h> + +int arm_cpu_caps(void) { + int flags; + int mask; + uint64_t features; + if (!arm_cpu_env_flags(&flags)) { + return flags; + } + mask = arm_cpu_env_mask(); + features = android_getCpuFeatures(); + +#if HAVE_NEON || HAVE_NEON_ASM + if (features & ANDROID_CPU_ARM_FEATURE_NEON) flags |= HAS_NEON; +#endif /* HAVE_NEON || HAVE_NEON_ASM */ + return flags & mask; +} + +#elif defined(__linux__) /* end __ANDROID__ */ + +#include <stdio.h> + +int arm_cpu_caps(void) { + FILE *fin; + int flags; + int mask; + if (!arm_cpu_env_flags(&flags)) { + return flags; + } + mask = arm_cpu_env_mask(); + /* Reading /proc/self/auxv would be easier, but that doesn't work reliably + * on Android. + * This also means that detection will fail in Scratchbox. + */ + fin = fopen("/proc/cpuinfo", "r"); + if (fin != NULL) { + /* 512 should be enough for anybody (it's even enough for all the flags + * that x86 has accumulated... so far). + */ + char buf[512]; + while (fgets(buf, 511, fin) != NULL) { +#if HAVE_NEON || HAVE_NEON_ASM + if (memcmp(buf, "Features", 8) == 0) { + char *p; + p = strstr(buf, " neon"); + if (p != NULL && (p[5] == ' ' || p[5] == '\n')) { + flags |= HAS_NEON; + } + } +#endif /* HAVE_NEON || HAVE_NEON_ASM */ + } + fclose(fin); + } + return flags & mask; +} +#else /* end __linux__ */ +#error \ + "--enable-runtime-cpu-detect selected, but no CPU detection method " \ +"available for your platform. Reconfigure with --disable-runtime-cpu-detect." +#endif diff --git a/media/libvpx/libvpx/vpx_ports/asmdefs_mmi.h b/media/libvpx/libvpx/vpx_ports/asmdefs_mmi.h new file mode 100644 index 0000000000..400a51cc32 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/asmdefs_mmi.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VPX_VPX_PORTS_ASMDEFS_MMI_H_ +#define VPX_VPX_PORTS_ASMDEFS_MMI_H_ + +#include "./vpx_config.h" +#include "vpx/vpx_integer.h" + +#if HAVE_MMI + +#if HAVE_MIPS64 +#define mips_reg int64_t +#define MMI_ADDU(reg1, reg2, reg3) \ + "daddu " #reg1 ", " #reg2 ", " #reg3 " \n\t" + +#define MMI_ADDIU(reg1, reg2, immediate) \ + "daddiu " #reg1 ", " #reg2 ", " #immediate " \n\t" + +#define MMI_ADDI(reg1, reg2, immediate) \ + "daddi " #reg1 ", " #reg2 ", " #immediate " \n\t" + +#define MMI_SUBU(reg1, reg2, reg3) \ + "dsubu " #reg1 ", " #reg2 ", " #reg3 " \n\t" + +#define MMI_L(reg, addr, bias) \ + "ld " #reg ", " #bias "(" #addr ") \n\t" + +#define MMI_SRL(reg1, reg2, shift) \ + "ssrld " #reg1 ", " #reg2 ", " #shift " \n\t" + +#define MMI_SLL(reg1, reg2, shift) \ + "dsll " #reg1 ", " #reg2 ", " #shift " \n\t" + +#define MMI_MTC1(reg, fp) \ + "dmtc1 " #reg ", " #fp " \n\t" + +#define MMI_LI(reg, immediate) \ + "dli " #reg ", " #immediate " \n\t" + +#else +#define mips_reg int32_t +#define MMI_ADDU(reg1, reg2, reg3) \ + "addu " #reg1 ", " #reg2 ", " #reg3 " \n\t" + +#define MMI_ADDIU(reg1, reg2, immediate) \ + "addiu " #reg1 ", " #reg2 ", " #immediate " \n\t" + +#define MMI_ADDI(reg1, reg2, immediate) \ + "addi " #reg1 ", " #reg2 ", " #immediate " \n\t" + +#define MMI_SUBU(reg1, reg2, reg3) \ + "subu " #reg1 ", " #reg2 ", " #reg3 " \n\t" + +#define MMI_L(reg, addr, bias) \ + "lw " #reg ", " #bias "(" #addr ") \n\t" + +#define MMI_SRL(reg1, reg2, shift) \ + "ssrlw " #reg1 ", " #reg2 ", " #shift " \n\t" + +#define MMI_SLL(reg1, reg2, shift) \ + "sll " #reg1 ", " #reg2 ", " #shift " \n\t" + +#define MMI_MTC1(reg, fp) \ + "mtc1 " #reg ", " #fp " \n\t" + +#define MMI_LI(reg, immediate) \ + "li " #reg ", " #immediate " \n\t" + +#endif /* HAVE_MIPS64 */ + +#endif /* HAVE_MMI */ + +#endif // VPX_VPX_PORTS_ASMDEFS_MMI_H_ diff --git a/media/libvpx/libvpx/vpx_ports/bitops.h b/media/libvpx/libvpx/vpx_ports/bitops.h new file mode 100644 index 0000000000..1b5cdaa6dd --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/bitops.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_BITOPS_H_ +#define VPX_VPX_PORTS_BITOPS_H_ + +#include <assert.h> + +#include "vpx_ports/msvc.h" + +#ifdef _MSC_VER +#if defined(_M_X64) || defined(_M_IX86) +#include <intrin.h> +#define USE_MSC_INTRINSICS +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// These versions of get_lsb() and get_msb() are only valid when n != 0 +// because all of the optimized versions are undefined when n == 0: +// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html + +// use GNU builtins where available. 
+#if defined(__GNUC__) && \ + ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4) +static INLINE int get_lsb(unsigned int n) { + assert(n != 0); + return __builtin_ctz(n); +} + +static INLINE int get_msb(unsigned int n) { + assert(n != 0); + return 31 ^ __builtin_clz(n); +} +#elif defined(USE_MSC_INTRINSICS) +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) + +static INLINE int get_lsb(unsigned int n) { + unsigned long first_set_bit; // NOLINT(runtime/int) + _BitScanForward(&first_set_bit, n); + return first_set_bit; +} + +static INLINE int get_msb(unsigned int n) { + unsigned long first_set_bit; + assert(n != 0); + _BitScanReverse(&first_set_bit, n); + return first_set_bit; +} +#undef USE_MSC_INTRINSICS +#else +static INLINE int get_lsb(unsigned int n) { + int i; + assert(n != 0); + for (i = 0; i < 32 && !(n & 1); ++i) n >>= 1; + return i; +} + +// Returns (int)floor(log2(n)). n must be > 0. +static INLINE int get_msb(unsigned int n) { + int log = 0; + unsigned int value = n; + int i; + + assert(n != 0); + + for (i = 4; i >= 0; --i) { + const int shift = (1 << i); + const unsigned int x = value >> shift; + if (x != 0) { + value = x; + log += shift; + } + } + return log; +} +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_BITOPS_H_ diff --git a/media/libvpx/libvpx/vpx_ports/compiler_attributes.h b/media/libvpx/libvpx/vpx_ports/compiler_attributes.h new file mode 100644 index 0000000000..4b468749b8 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/compiler_attributes.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2020 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_COMPILER_ATTRIBUTES_H_ +#define VPX_VPX_PORTS_COMPILER_ATTRIBUTES_H_ + +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif // !defined(__has_feature) + +#if !defined(__has_attribute) +#define __has_attribute(x) 0 +#endif // !defined(__has_attribute) + +//------------------------------------------------------------------------------ +// Sanitizer attributes. + +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +#define VPX_WITH_ASAN 1 +#else +#define VPX_WITH_ASAN 0 +#endif // __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) + +#if defined(__clang__) && __has_attribute(no_sanitize) +// Both of these have defined behavior and are used in certain operations or +// optimizations thereof. There are cases where an overflow may be unintended, +// however, so use of these attributes should be done with care. +#define VPX_NO_UNSIGNED_OVERFLOW_CHECK \ + __attribute__((no_sanitize("unsigned-integer-overflow"))) +#if __clang_major__ >= 12 +#define VPX_NO_UNSIGNED_SHIFT_CHECK \ + __attribute__((no_sanitize("unsigned-shift-base"))) +#endif // __clang__ >= 12 +#endif // __clang__ + +#ifndef VPX_NO_UNSIGNED_OVERFLOW_CHECK +#define VPX_NO_UNSIGNED_OVERFLOW_CHECK +#endif +#ifndef VPX_NO_UNSIGNED_SHIFT_CHECK +#define VPX_NO_UNSIGNED_SHIFT_CHECK +#endif + +//------------------------------------------------------------------------------ +// Variable attributes. 
+ +#if __has_attribute(uninitialized) +// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for +// the specified variable. +// +// -ftrivial-auto-var-init is security risk mitigation feature, so attribute +// should not be used "just in case", but only to fix real performance +// bottlenecks when other approaches do not work. In general the compiler is +// quite effective at eliminating unneeded initializations introduced by the +// flag, e.g. when they are followed by actual initialization by a program. +// However if compiler optimization fails and code refactoring is hard, the +// attribute can be used as a workaround. +#define VPX_UNINITIALIZED __attribute__((uninitialized)) +#else +#define VPX_UNINITIALIZED +#endif // __has_attribute(uninitialized) + +#endif // VPX_VPX_PORTS_COMPILER_ATTRIBUTES_H_ diff --git a/media/libvpx/libvpx/vpx_ports/emmintrin_compat.h b/media/libvpx/libvpx/vpx_ports/emmintrin_compat.h new file mode 100644 index 0000000000..d6cc68ee4d --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/emmintrin_compat.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2012 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_EMMINTRIN_COMPAT_H_ +#define VPX_VPX_PORTS_EMMINTRIN_COMPAT_H_ + +#if defined(__GNUC__) && __GNUC__ < 4 +/* From emmintrin.h (gcc 4.5.3) */ +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. */ +extern __inline __m128 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castpd_ps(__m128d __A) { + return (__m128)__A; +} + +extern __inline __m128i + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castpd_si128(__m128d __A) { + return (__m128i)__A; +} + +extern __inline __m128d + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castps_pd(__m128 __A) { + return (__m128d)__A; +} + +extern __inline __m128i + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castps_si128(__m128 __A) { + return (__m128i)__A; +} + +extern __inline __m128 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castsi128_ps(__m128i __A) { + return (__m128)__A; +} + +extern __inline __m128d + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castsi128_pd(__m128i __A) { + return (__m128d)__A; +} +#endif + +#endif // VPX_VPX_PORTS_EMMINTRIN_COMPAT_H_ diff --git a/media/libvpx/libvpx/vpx_ports/emms_mmx.asm b/media/libvpx/libvpx/vpx_ports/emms_mmx.asm new file mode 100644 index 0000000000..b31b25ebde --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/emms_mmx.asm @@ -0,0 +1,18 @@ +; +; Copyright (c) 2010 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. 
+; + + +%include "vpx_ports/x86_abi_support.asm" + +section .text +globalsym(vpx_clear_system_state) +sym(vpx_clear_system_state): + emms + ret diff --git a/media/libvpx/libvpx/vpx_ports/emms_mmx.c b/media/libvpx/libvpx/vpx_ports/emms_mmx.c new file mode 100644 index 0000000000..f1036b98ed --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/emms_mmx.c @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2018 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <mmintrin.h> + +#include "vpx_ports/system_state.h" + +void vpx_clear_system_state() { _mm_empty(); } diff --git a/media/libvpx/libvpx/vpx_ports/float_control_word.asm b/media/libvpx/libvpx/vpx_ports/float_control_word.asm new file mode 100644 index 0000000000..bb75b7a31f --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/float_control_word.asm @@ -0,0 +1,33 @@ +; +; Copyright (c) 2010 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. +; + + +%include "vpx_ports/x86_abi_support.asm" + +section .text + +%if LIBVPX_YASM_WIN64 +globalsym(vpx_winx64_fldcw) +sym(vpx_winx64_fldcw): + sub rsp, 8 + mov [rsp], rcx ; win x64 specific + fldcw [rsp] + add rsp, 8 + ret + + +globalsym(vpx_winx64_fstcw) +sym(vpx_winx64_fstcw): + sub rsp, 8 + fstcw [rsp] + mov rax, [rsp] + add rsp, 8 + ret +%endif diff --git a/media/libvpx/libvpx/vpx_ports/loongarch.h b/media/libvpx/libvpx/vpx_ports/loongarch.h new file mode 100644 index 0000000000..d93ff9f5f0 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/loongarch.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * Contributed by Jin Bo <jinbo@loongson.cn> + * Contributed by Lu Wang <wanglu@loongson.cn> + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_LOONGARCH_H_ +#define VPX_VPX_PORTS_LOONGARCH_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define HAS_LSX 0x01 +#define HAS_LASX 0x02 + +int loongarch_cpu_caps(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_LOONGARCH_H_ diff --git a/media/libvpx/libvpx/vpx_ports/loongarch_cpudetect.c b/media/libvpx/libvpx/vpx_ports/loongarch_cpudetect.c new file mode 100644 index 0000000000..7b4322d35e --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/loongarch_cpudetect.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * Contributed by Jin Bo <jinbo@loongson.cn> + * Contributed by Lu Wang <wanglu@loongson.cn> + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "./vpx_config.h" +#include "vpx_ports/loongarch.h" + +#define LOONGARCH_CFG2 0x02 +#define LOONGARCH_CFG2_LSX (1 << 6) +#define LOONGARCH_CFG2_LASX (1 << 7) + +#if CONFIG_RUNTIME_CPU_DETECT +#if defined(__loongarch__) && defined(__linux__) +int loongarch_cpu_caps(void) { + int reg = 0; + int flag = 0; + + __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(reg) : "r"(LOONGARCH_CFG2)); + if (reg & LOONGARCH_CFG2_LSX) flag |= HAS_LSX; + + if (reg & LOONGARCH_CFG2_LASX) flag |= HAS_LASX; + + return flag; +} +#else /* end __loongarch__ && __linux__ */ +#error \ + "--enable-runtime-cpu-detect selected, but no CPU detection method " \ +"available for your platform. Reconfigure with --disable-runtime-cpu-detect." +#endif +#else /* end CONFIG_RUNTIME_CPU_DETECT */ +int loongarch_cpu_caps(void) { return 0; } +#endif diff --git a/media/libvpx/libvpx/vpx_ports/mem.h b/media/libvpx/libvpx/vpx_ports/mem.h new file mode 100644 index 0000000000..5eccfe8f50 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/mem.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_MEM_H_ +#define VPX_VPX_PORTS_MEM_H_ + +#include "vpx_config.h" +#include "vpx/vpx_integer.h" + +#if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C) +#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n))) +#elif defined(_MSC_VER) +#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val +#else +#warning No alignment directives known for this compiler. +#define DECLARE_ALIGNED(n, typ, val) typ val +#endif + +#if HAVE_NEON && defined(_MSC_VER) +#define __builtin_prefetch(x) +#endif + +/* Shift down with rounding */ +#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n)) +#define ROUND64_POWER_OF_TWO(value, n) (((value) + (1ULL << ((n)-1))) >> (n)) + +#define ALIGN_POWER_OF_TWO(value, n) \ + (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) + +#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1)) +#define CAST_TO_SHORTPTR(x) ((uint16_t *)((uintptr_t)(x))) +#if CONFIG_VP9_HIGHBITDEPTH +#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1)) +#define CAST_TO_BYTEPTR(x) ((uint8_t *)((uintptr_t)(x))) +#endif // CONFIG_VP9_HIGHBITDEPTH + +#endif // VPX_VPX_PORTS_MEM_H_ diff --git a/media/libvpx/libvpx/vpx_ports/mem_ops.h b/media/libvpx/libvpx/vpx_ports/mem_ops.h new file mode 100644 index 0000000000..b17015e7ec --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/mem_ops.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VPX_VPX_PORTS_MEM_OPS_H_ +#define VPX_VPX_PORTS_MEM_OPS_H_ + +/* \file + * \brief Provides portable memory access primitives + * + * This function provides portable primitives for getting and setting of + * signed and unsigned integers in 16, 24, and 32 bit sizes. The operations + * can be performed on unaligned data regardless of hardware support for + * unaligned accesses. + * + * The type used to pass the integral values may be changed by defining + * MEM_VALUE_T with the appropriate type. The type given must be an integral + * numeric type. + * + * The actual functions instantiated have the MEM_VALUE_T type name pasted + * on to the symbol name. This allows the developer to instantiate these + * operations for multiple types within the same translation unit. This is + * of somewhat questionable utility, but the capability exists nonetheless. + * Users not making use of this functionality should call the functions + * without the type name appended, and the preprocessor will take care of + * it. + * + * NOTE: This code is not supported on platforms where char > 1 octet ATM. + */ + +#ifndef MAU_T +/* Minimum Access Unit for this target */ +#define MAU_T unsigned char +#endif + +#ifndef MEM_VALUE_T +#define MEM_VALUE_T int +#endif + +#undef MEM_VALUE_T_SZ_BITS +#define MEM_VALUE_T_SZ_BITS (sizeof(MEM_VALUE_T) << 3) + +#undef mem_ops_wrap_symbol +#define mem_ops_wrap_symbol(fn) mem_ops_wrap_symbol2(fn, MEM_VALUE_T) +#undef mem_ops_wrap_symbol2 +#define mem_ops_wrap_symbol2(fn, typ) mem_ops_wrap_symbol3(fn, typ) +#undef mem_ops_wrap_symbol3 +#define mem_ops_wrap_symbol3(fn, typ) fn##_as_##typ + +/* + * Include aligned access routines + */ +#define INCLUDED_BY_MEM_OPS_H +#include "mem_ops_aligned.h" +#undef INCLUDED_BY_MEM_OPS_H + +#undef mem_get_be16 +#define mem_get_be16 mem_ops_wrap_symbol(mem_get_be16) +static unsigned MEM_VALUE_T mem_get_be16(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = mem[0] << 8; + val |= mem[1]; + return val; +} + +#undef mem_get_be24 +#define mem_get_be24 mem_ops_wrap_symbol(mem_get_be24) +static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = mem[0] << 16; + val |= mem[1] << 8; + val |= mem[2]; + return val; +} + +#undef mem_get_be32 +#define mem_get_be32 mem_ops_wrap_symbol(mem_get_be32) +static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = ((unsigned MEM_VALUE_T)mem[0]) << 24; + val |= mem[1] << 16; + val |= mem[2] << 8; + val |= mem[3]; + return val; +} + +#undef mem_get_le16 +#define mem_get_le16 mem_ops_wrap_symbol(mem_get_le16) +static unsigned MEM_VALUE_T mem_get_le16(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = mem[1] << 8; + val |= mem[0]; + return val; +} + +#undef mem_get_le24 +#define mem_get_le24 mem_ops_wrap_symbol(mem_get_le24) +static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + return val; +} + +#undef mem_get_le32 +#define mem_get_le32 mem_ops_wrap_symbol(mem_get_le32) +static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) { + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; + + val = ((unsigned MEM_VALUE_T)mem[3]) << 24; + val |= mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + 
return val; +} + +#define mem_get_s_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) { \ + const MAU_T *mem = (const MAU_T *)vmem; \ + signed MEM_VALUE_T val = mem_get_##end##sz(mem); \ + return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz); \ + } + +/* clang-format off */ +#undef mem_get_sbe16 +#define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16) +mem_get_s_generic(be, 16) + +#undef mem_get_sbe24 +#define mem_get_sbe24 mem_ops_wrap_symbol(mem_get_sbe24) +mem_get_s_generic(be, 24) + +#undef mem_get_sbe32 +#define mem_get_sbe32 mem_ops_wrap_symbol(mem_get_sbe32) +mem_get_s_generic(be, 32) + +#undef mem_get_sle16 +#define mem_get_sle16 mem_ops_wrap_symbol(mem_get_sle16) +mem_get_s_generic(le, 16) + +#undef mem_get_sle24 +#define mem_get_sle24 mem_ops_wrap_symbol(mem_get_sle24) +mem_get_s_generic(le, 24) + +#undef mem_get_sle32 +#define mem_get_sle32 mem_ops_wrap_symbol(mem_get_sle32) +mem_get_s_generic(le, 32) + +#undef mem_put_be16 +#define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16) +static VPX_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 8) & 0xff); + mem[1] = (MAU_T)((val >> 0) & 0xff); +} + +#undef mem_put_be24 +#define mem_put_be24 mem_ops_wrap_symbol(mem_put_be24) +static VPX_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 16) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); + mem[2] = (MAU_T)((val >> 0) & 0xff); +} + +#undef mem_put_be32 +#define mem_put_be32 mem_ops_wrap_symbol(mem_put_be32) +static VPX_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 24) & 0xff); + mem[1] = (MAU_T)((val >> 16) & 0xff); + mem[2] = (MAU_T)((val >> 8) & 0xff); + mem[3] = (MAU_T)((val >> 0) & 0xff); +} + +#undef mem_put_le16 +#define mem_put_le16 mem_ops_wrap_symbol(mem_put_le16) +static VPX_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); +} + +#undef mem_put_le24 +#define mem_put_le24 mem_ops_wrap_symbol(mem_put_le24) +static VPX_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); + mem[2] = (MAU_T)((val >> 16) & 0xff); +} + +#undef mem_put_le32 +#define mem_put_le32 mem_ops_wrap_symbol(mem_put_le32) +static VPX_INLINE void mem_put_le32(void *vmem, MEM_VALUE_T val) { + MAU_T *mem = (MAU_T *)vmem; + + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); + mem[2] = (MAU_T)((val >> 16) & 0xff); + mem[3] = (MAU_T)((val >> 24) & 0xff); +} +/* clang-format on */ +#endif // VPX_VPX_PORTS_MEM_OPS_H_ diff --git a/media/libvpx/libvpx/vpx_ports/mem_ops_aligned.h b/media/libvpx/libvpx/vpx_ports/mem_ops_aligned.h new file mode 100644 index 0000000000..8649b87623 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/mem_ops_aligned.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VPX_VPX_PORTS_MEM_OPS_ALIGNED_H_ +#define VPX_VPX_PORTS_MEM_OPS_ALIGNED_H_ + +#include "vpx/vpx_integer.h" + +/* \file + * \brief Provides portable memory access primitives for operating on aligned + * data + * + * This file is split from mem_ops.h for easier maintenance. See mem_ops.h + * for a more detailed description of these primitives. + */ +#ifndef INCLUDED_BY_MEM_OPS_H +#error Include mem_ops.h, not mem_ops_aligned.h directly. +#endif + +/* Architectures that provide instructions for doing this byte swapping + * could redefine these macros. + */ +#define swap_endian_16(val, raw) \ + do { \ + val = (uint16_t)(((raw >> 8) & 0x00ff) | ((raw << 8) & 0xff00)); \ + } while (0) +#define swap_endian_32(val, raw) \ + do { \ + val = ((raw >> 24) & 0x000000ff) | ((raw >> 8) & 0x0000ff00) | \ + ((raw << 8) & 0x00ff0000) | ((raw << 24) & 0xff000000); \ + } while (0) +#define swap_endian_16_se(val, raw) \ + do { \ + swap_endian_16(val, raw); \ + val = ((val << 16) >> 16); \ + } while (0) +#define swap_endian_32_se(val, raw) swap_endian_32(val, raw) + +#define mem_get_ne_aligned_generic(end, sz) \ + static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \ + const void *vmem) { \ + const uint##sz##_t *mem = (const uint##sz##_t *)vmem; \ + return *mem; \ + } + +#define mem_get_sne_aligned_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \ + const void *vmem) { \ + const int##sz##_t *mem = (const int##sz##_t *)vmem; \ + return *mem; \ + } + +#define mem_get_se_aligned_generic(end, sz) \ + static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \ + const void *vmem) { \ + const uint##sz##_t *mem = (const uint##sz##_t *)vmem; \ + unsigned MEM_VALUE_T val, raw = *mem; \ + swap_endian_##sz(val, raw); \ + return val; \ + } + +#define mem_get_sse_aligned_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \ + const void *vmem) { \ + const int##sz##_t *mem = (const int##sz##_t *)vmem; \ + unsigned MEM_VALUE_T val, raw = *mem; \ + swap_endian_##sz##_se(val, raw); \ + return val; \ + } + +#define mem_put_ne_aligned_generic(end, sz) \ + static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem, \ + MEM_VALUE_T val) { \ + uint##sz##_t *mem = (uint##sz##_t *)vmem; \ + *mem = (uint##sz##_t)val; \ + } + +#define mem_put_se_aligned_generic(end, sz) \ + static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem, \ + MEM_VALUE_T val) { \ + uint##sz##_t *mem = (uint##sz##_t *)vmem, raw; \ + swap_endian_##sz(raw, val); \ + *mem = (uint##sz##_t)raw; \ + } + +#include "vpx_config.h" +#if CONFIG_BIG_ENDIAN +#define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be, sz) +#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be, sz) +#define mem_get_le_aligned_generic(sz) mem_get_se_aligned_generic(le, sz) +#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le, sz) +#define mem_put_be_aligned_generic(sz) mem_put_ne_aligned_generic(be, sz) +#define mem_put_le_aligned_generic(sz) mem_put_se_aligned_generic(le, sz) +#else +#define mem_get_be_aligned_generic(sz) mem_get_se_aligned_generic(be, sz) +#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be, sz) +#define mem_get_le_aligned_generic(sz) mem_get_ne_aligned_generic(le, sz) +#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le, sz) +#define mem_put_be_aligned_generic(sz) mem_put_se_aligned_generic(be, sz) +#define mem_put_le_aligned_generic(sz) mem_put_ne_aligned_generic(le, 
sz) +#endif + +/* clang-format off */ +#undef mem_get_be16_aligned +#define mem_get_be16_aligned mem_ops_wrap_symbol(mem_get_be16_aligned) +mem_get_be_aligned_generic(16) + +#undef mem_get_be32_aligned +#define mem_get_be32_aligned mem_ops_wrap_symbol(mem_get_be32_aligned) +mem_get_be_aligned_generic(32) + +#undef mem_get_le16_aligned +#define mem_get_le16_aligned mem_ops_wrap_symbol(mem_get_le16_aligned) +mem_get_le_aligned_generic(16) + +#undef mem_get_le32_aligned +#define mem_get_le32_aligned mem_ops_wrap_symbol(mem_get_le32_aligned) +mem_get_le_aligned_generic(32) + +#undef mem_get_sbe16_aligned +#define mem_get_sbe16_aligned mem_ops_wrap_symbol(mem_get_sbe16_aligned) +mem_get_sbe_aligned_generic(16) + +#undef mem_get_sbe32_aligned +#define mem_get_sbe32_aligned mem_ops_wrap_symbol(mem_get_sbe32_aligned) +mem_get_sbe_aligned_generic(32) + +#undef mem_get_sle16_aligned +#define mem_get_sle16_aligned mem_ops_wrap_symbol(mem_get_sle16_aligned) +mem_get_sle_aligned_generic(16) + +#undef mem_get_sle32_aligned +#define mem_get_sle32_aligned mem_ops_wrap_symbol(mem_get_sle32_aligned) +mem_get_sle_aligned_generic(32) + +#undef mem_put_be16_aligned +#define mem_put_be16_aligned mem_ops_wrap_symbol(mem_put_be16_aligned) +mem_put_be_aligned_generic(16) + +#undef mem_put_be32_aligned +#define mem_put_be32_aligned mem_ops_wrap_symbol(mem_put_be32_aligned) +mem_put_be_aligned_generic(32) + +#undef mem_put_le16_aligned +#define mem_put_le16_aligned mem_ops_wrap_symbol(mem_put_le16_aligned) +mem_put_le_aligned_generic(16) + +#undef mem_put_le32_aligned +#define mem_put_le32_aligned mem_ops_wrap_symbol(mem_put_le32_aligned) +mem_put_le_aligned_generic(32) + +#undef mem_get_ne_aligned_generic +#undef mem_get_se_aligned_generic +#undef mem_get_sne_aligned_generic +#undef mem_get_sse_aligned_generic +#undef mem_put_ne_aligned_generic +#undef mem_put_se_aligned_generic +#undef swap_endian_16 +#undef swap_endian_32 +#undef swap_endian_16_se +#undef swap_endian_32_se +/* clang-format on */ + +#endif // VPX_VPX_PORTS_MEM_OPS_ALIGNED_H_ diff --git a/media/libvpx/libvpx/vpx_ports/mips.h b/media/libvpx/libvpx/vpx_ports/mips.h new file mode 100644 index 0000000000..439de754fd --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/mips.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2020 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_MIPS_H_ +#define VPX_VPX_PORTS_MIPS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define HAS_MMI 0x01 +#define HAS_MSA 0x02 + +int mips_cpu_caps(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_MIPS_H_ diff --git a/media/libvpx/libvpx/vpx_ports/mips_cpudetect.c b/media/libvpx/libvpx/vpx_ports/mips_cpudetect.c new file mode 100644 index 0000000000..e0eca2d48d --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/mips_cpudetect.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include <stdio.h> +#include <string.h> +#include "./vpx_config.h" +#include "vpx_ports/mips.h" + +#if CONFIG_RUNTIME_CPU_DETECT +#if defined(__mips__) && defined(__linux__) +int mips_cpu_caps(void) { + char cpuinfo_line[512]; + int flag = 0x0; + FILE *f = fopen("/proc/cpuinfo", "r"); + if (!f) { + // Assume nothing if /proc/cpuinfo is unavailable. + // This will occur for Chrome sandbox for Pepper or Render process. + return 0; + } + while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) { + if (memcmp(cpuinfo_line, "cpu model", 9) == 0) { + // Workaround early kernel without mmi in ASEs line. + if (strstr(cpuinfo_line, "Loongson-3")) { + flag |= HAS_MMI; + } else if (strstr(cpuinfo_line, "Loongson-2K")) { + flag |= HAS_MMI | HAS_MSA; + } + } + if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) { + if (strstr(cpuinfo_line, "loongson-mmi") && + strstr(cpuinfo_line, "loongson-ext")) { + flag |= HAS_MMI; + } + if (strstr(cpuinfo_line, "msa")) { + flag |= HAS_MSA; + } + // ASEs is the last line, so we can break here. + break; + } + } + fclose(f); + return flag; +} +#else /* end __mips__ && __linux__ */ +#error \ + "--enable-runtime-cpu-detect selected, but no CPU detection method " \ +"available for your platform. Reconfigure with --disable-runtime-cpu-detect." +#endif +#else /* end CONFIG_RUNTIME_CPU_DETECT */ +int mips_cpu_caps(void) { return 0; } +#endif diff --git a/media/libvpx/libvpx/vpx_ports/msvc.h b/media/libvpx/libvpx/vpx_ports/msvc.h new file mode 100644 index 0000000000..d58de3535a --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/msvc.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_MSVC_H_ +#define VPX_VPX_PORTS_MSVC_H_ +#ifdef _MSC_VER + +#include "./vpx_config.h" + +#if _MSC_VER < 1900 // VS2015 provides snprintf +#define snprintf _snprintf +#endif // _MSC_VER < 1900 + +#if _MSC_VER < 1800 // VS2013 provides round +#include <math.h> +static INLINE double round(double x) { + if (x < 0) + return ceil(x - 0.5); + else + return floor(x + 0.5); +} +#endif // _MSC_VER < 1800 + +#endif // _MSC_VER +#endif // VPX_VPX_PORTS_MSVC_H_ diff --git a/media/libvpx/libvpx/vpx_ports/ppc.h b/media/libvpx/libvpx/vpx_ports/ppc.h new file mode 100644 index 0000000000..a11f4e8732 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/ppc.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VPX_VPX_PORTS_PPC_H_ +#define VPX_VPX_PORTS_PPC_H_ +#include <stdlib.h> + +#include "./vpx_config.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define HAS_VSX 0x01 + +int ppc_simd_caps(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_PPC_H_ diff --git a/media/libvpx/libvpx/vpx_ports/ppc_cpudetect.c b/media/libvpx/libvpx/vpx_ports/ppc_cpudetect.c new file mode 100644 index 0000000000..374a0271c9 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/ppc_cpudetect.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2017 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <fcntl.h> +#include <unistd.h> +#include <stdint.h> +#include <asm/cputable.h> +#include <linux/auxvec.h> + +#include "./vpx_config.h" +#include "vpx_ports/ppc.h" + +#if CONFIG_RUNTIME_CPU_DETECT +static int cpu_env_flags(int *flags) { + char *env; + env = getenv("VPX_SIMD_CAPS"); + if (env && *env) { + *flags = (int)strtol(env, NULL, 0); + return 0; + } + *flags = 0; + return -1; +} + +static int cpu_env_mask(void) { + char *env; + env = getenv("VPX_SIMD_CAPS_MASK"); + return env && *env ? (int)strtol(env, NULL, 0) : ~0; +} + +int ppc_simd_caps(void) { + int flags; + int mask; + int fd; + ssize_t count; + unsigned int i; + uint64_t buf[64]; + + // If VPX_SIMD_CAPS is set then allow only those capabilities. + if (!cpu_env_flags(&flags)) { + return flags; + } + + mask = cpu_env_mask(); + + fd = open("/proc/self/auxv", O_RDONLY); + if (fd < 0) { + return 0; + } + + while ((count = read(fd, buf, sizeof(buf))) > 0) { + for (i = 0; i < (count / sizeof(*buf)); i += 2) { + if (buf[i] == AT_HWCAP) { +#if HAVE_VSX + if (buf[i + 1] & PPC_FEATURE_HAS_VSX) { + flags |= HAS_VSX; + } +#endif // HAVE_VSX + goto out_close; + } else if (buf[i] == AT_NULL) { + goto out_close; + } + } + } +out_close: + close(fd); + return flags & mask; +} +#else +// If there is no RTCD the function pointers are not used and can not be +// changed. +int ppc_simd_caps(void) { return 0; } +#endif // CONFIG_RUNTIME_CPU_DETECT diff --git a/media/libvpx/libvpx/vpx_ports/static_assert.h b/media/libvpx/libvpx/vpx_ports/static_assert.h new file mode 100644 index 0000000000..f632d9f1e8 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/static_assert.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2020 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_STATIC_ASSERT_H_ +#define VPX_VPX_PORTS_STATIC_ASSERT_H_ + +#if defined(_MSC_VER) +#define VPX_STATIC_ASSERT(boolexp) \ + do { \ + char vpx_static_assert[(boolexp) ? 1 : -1]; \ + (void)vpx_static_assert; \ + } while (0) +#else // !_MSC_VER +#define VPX_STATIC_ASSERT(boolexp) \ + do { \ + struct { \ + unsigned int vpx_static_assert : (boolexp) ? 
1 : -1; \ + } vpx_static_assert; \ + (void)vpx_static_assert; \ + } while (0) +#endif // _MSC_VER + +#endif // VPX_VPX_PORTS_STATIC_ASSERT_H_ diff --git a/media/libvpx/libvpx/vpx_ports/system_state.h b/media/libvpx/libvpx/vpx_ports/system_state.h new file mode 100644 index 0000000000..32ebd0ed8c --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/system_state.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_SYSTEM_STATE_H_ +#define VPX_VPX_PORTS_SYSTEM_STATE_H_ + +#include "./vpx_config.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if (VPX_ARCH_X86 || VPX_ARCH_X86_64) && HAVE_MMX +extern void vpx_clear_system_state(void); +#else +#define vpx_clear_system_state() +#endif // (VPX_ARCH_X86 || VPX_ARCH_X86_64) && HAVE_MMX + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_SYSTEM_STATE_H_ diff --git a/media/libvpx/libvpx/vpx_ports/vpx_once.h b/media/libvpx/libvpx/vpx_ports/vpx_once.h new file mode 100644 index 0000000000..d8a8ed89fe --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/vpx_once.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_VPX_ONCE_H_ +#define VPX_VPX_PORTS_VPX_ONCE_H_ + +#include "vpx_config.h" + +/* Implement a function wrapper to guarantee initialization + * thread-safety for library singletons. + * + * NOTE: These functions use static locks, and can only be + * used with one common argument per compilation unit. So + * + * file1.c: + * vpx_once(foo); + * ... + * vpx_once(foo); + * + * file2.c: + * vpx_once(bar); + * + * will ensure foo() and bar() are each called only once, but in + * + * file1.c: + * vpx_once(foo); + * vpx_once(bar): + * + * bar() will never be called because the lock is used up + * by the call to foo(). + */ + +#if CONFIG_MULTITHREAD && defined(_WIN32) +#include <windows.h> +#include <stdlib.h> +/* Declare a per-compilation-unit state variable to track the progress + * of calling func() only once. This must be at global scope because + * local initializers are not thread-safe in MSVC prior to Visual + * Studio 2015. + * + * As a static, once_state will be zero-initialized as program start. + */ +static LONG once_state; +static void once(void (*func)(void)) { + /* Try to advance once_state from its initial value of 0 to 1. + * Only one thread can succeed in doing so. + */ + if (InterlockedCompareExchange(&once_state, 1, 0) == 0) { + /* We're the winning thread, having set once_state to 1. + * Call our function. */ + func(); + /* Now advance once_state to 2, unblocking any other threads. */ + InterlockedIncrement(&once_state); + return; + } + + /* We weren't the winning thread, but we want to block on + * the state variable so we don't return before func() + * has finished executing elsewhere. 
+ * + * Try to advance once_state from 2 to 2, which is only possible + * after the winning thead advances it from 1 to 2. + */ + while (InterlockedCompareExchange(&once_state, 2, 2) != 2) { + /* State isn't yet 2. Try again. + * + * We are used for singleton initialization functions, + * which should complete quickly. Contention will likewise + * be rare, so it's worthwhile to use a simple but cpu- + * intensive busy-wait instead of successive backoff, + * waiting on a kernel object, or another heavier-weight scheme. + * + * We can at least yield our timeslice. + */ + Sleep(0); + } + + /* We've seen once_state advance to 2, so we know func() + * has been called. And we've left once_state as we found it, + * so other threads will have the same experience. + * + * It's safe to return now. + */ + return; +} + +#elif CONFIG_MULTITHREAD && defined(__OS2__) +#define INCL_DOS +#include <os2.h> +static void once(void (*func)(void)) { + static volatile int done; + + /* If the initialization is complete, return early. */ + if (done) return; + + /* Causes all other threads in the process to block themselves + * and give up their time slice. + */ + DosEnterCritSec(); + + if (!done) { + func(); + done = 1; + } + + /* Restores normal thread dispatching for the current process. */ + DosExitCritSec(); +} + +#elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H +#include <pthread.h> +static void once(void (*func)(void)) { + static pthread_once_t lock = PTHREAD_ONCE_INIT; + pthread_once(&lock, func); +} + +#else +/* No-op version that performs no synchronization. *_rtcd() is idempotent, + * so as long as your platform provides atomic loads/stores of pointers + * no synchronization is strictly necessary. + */ + +static void once(void (*func)(void)) { + static volatile int done; + + if (!done) { + func(); + done = 1; + } +} +#endif + +#endif // VPX_VPX_PORTS_VPX_ONCE_H_ diff --git a/media/libvpx/libvpx/vpx_ports/vpx_ports.mk b/media/libvpx/libvpx/vpx_ports/vpx_ports.mk new file mode 100644 index 0000000000..28cb9cce50 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/vpx_ports.mk @@ -0,0 +1,53 @@ +## +## Copyright (c) 2012 The WebM project authors. All Rights Reserved. +## +## Use of this source code is governed by a BSD-style license +## that can be found in the LICENSE file in the root of the source +## tree. An additional intellectual property rights grant can be found +## in the file PATENTS. All contributing project authors may +## be found in the AUTHORS file in the root of the source tree. +## + + +PORTS_SRCS-yes += vpx_ports.mk + +PORTS_SRCS-yes += bitops.h +PORTS_SRCS-yes += compiler_attributes.h +PORTS_SRCS-yes += mem.h +PORTS_SRCS-yes += msvc.h +PORTS_SRCS-yes += static_assert.h +PORTS_SRCS-yes += system_state.h +PORTS_SRCS-yes += vpx_timer.h + +ifeq ($(VPX_ARCH_X86),yes) +PORTS_SRCS-$(HAVE_MMX) += emms_mmx.c +endif +ifeq ($(VPX_ARCH_X86_64),yes) +# Visual Studio x64 does not support the _mm_empty() intrinsic. 
+PORTS_SRCS-$(HAVE_MMX) += emms_mmx.asm +endif + +ifeq ($(VPX_ARCH_X86_64),yes) +PORTS_SRCS-yes += float_control_word.asm +endif + +ifeq ($(VPX_ARCH_X86)$(VPX_ARCH_X86_64),yes) +PORTS_SRCS-yes += x86.h +PORTS_SRCS-yes += x86_abi_support.asm +endif + +PORTS_SRCS-$(VPX_ARCH_ARM) += arm_cpudetect.c +PORTS_SRCS-$(VPX_ARCH_ARM) += arm.h + +PORTS_SRCS-$(VPX_ARCH_PPC) += ppc_cpudetect.c +PORTS_SRCS-$(VPX_ARCH_PPC) += ppc.h + +PORTS_SRCS-$(VPX_ARCH_MIPS) += mips_cpudetect.c +PORTS_SRCS-$(VPX_ARCH_MIPS) += mips.h + +PORTS_SRCS-$(VPX_ARCH_LOONGARCH) += loongarch_cpudetect.c +PORTS_SRCS-$(VPX_ARCH_LOONGARCH) += loongarch.h + +ifeq ($(VPX_ARCH_MIPS), yes) +PORTS_SRCS-yes += asmdefs_mmi.h +endif diff --git a/media/libvpx/libvpx/vpx_ports/vpx_timer.h b/media/libvpx/libvpx/vpx_ports/vpx_timer.h new file mode 100644 index 0000000000..4934d5296a --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/vpx_timer.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_VPX_PORTS_VPX_TIMER_H_ +#define VPX_VPX_PORTS_VPX_TIMER_H_ + +#include "./vpx_config.h" + +#include "vpx/vpx_integer.h" + +#if CONFIG_OS_SUPPORT + +#if defined(_WIN32) +/* + * Win32 specific includes + */ +#undef NOMINMAX +#define NOMINMAX +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include <windows.h> +#else +/* + * POSIX specific includes + */ +#include <sys/time.h> + +/* timersub is not provided by msys at this time. 
*/ +#ifndef timersub +#define timersub(a, b, result) \ + do { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } while (0) +#endif +#endif + +struct vpx_usec_timer { +#if defined(_WIN32) + LARGE_INTEGER begin, end; +#else + struct timeval begin, end; +#endif +}; + +static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) { +#if defined(_WIN32) + QueryPerformanceCounter(&t->begin); +#else + gettimeofday(&t->begin, NULL); +#endif +} + +static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) { +#if defined(_WIN32) + QueryPerformanceCounter(&t->end); +#else + gettimeofday(&t->end, NULL); +#endif +} + +static INLINE int64_t vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { +#if defined(_WIN32) + LARGE_INTEGER freq, diff; + + diff.QuadPart = t->end.QuadPart - t->begin.QuadPart; + + QueryPerformanceFrequency(&freq); + return diff.QuadPart * 1000000 / freq.QuadPart; +#else + struct timeval diff; + + timersub(&t->end, &t->begin, &diff); + return (int64_t)diff.tv_sec * 1000000 + diff.tv_usec; +#endif +} + +#else /* CONFIG_OS_SUPPORT = 0*/ + +/* Empty timer functions if CONFIG_OS_SUPPORT = 0 */ +#ifndef timersub +#define timersub(a, b, result) +#endif + +struct vpx_usec_timer { + void *dummy; +}; + +static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {} + +static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {} + +static INLINE int vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; } + +#endif /* CONFIG_OS_SUPPORT */ + +#endif // VPX_VPX_PORTS_VPX_TIMER_H_ diff --git a/media/libvpx/libvpx/vpx_ports/x86.h b/media/libvpx/libvpx/vpx_ports/x86.h new file mode 100644 index 0000000000..795fb2923f --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/x86.h @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VPX_VPX_PORTS_X86_H_ +#define VPX_VPX_PORTS_X86_H_ +#include <stdlib.h> + +#if defined(_MSC_VER) +#include <intrin.h> /* For __cpuidex, __rdtsc */ +#endif + +#include "vpx_config.h" +#include "vpx/vpx_integer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + VPX_CPU_UNKNOWN = -1, + VPX_CPU_AMD, + VPX_CPU_AMD_OLD, + VPX_CPU_CENTAUR, + VPX_CPU_CYRIX, + VPX_CPU_INTEL, + VPX_CPU_NEXGEN, + VPX_CPU_NSC, + VPX_CPU_RISE, + VPX_CPU_SIS, + VPX_CPU_TRANSMETA, + VPX_CPU_TRANSMETA_OLD, + VPX_CPU_UMC, + VPX_CPU_VIA, + + VPX_CPU_LAST +} vpx_cpu_t; + +#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__) +#if VPX_ARCH_X86_64 +#define cpuid(func, func2, ax, bx, cx, dx) \ + __asm__ __volatile__("cpuid \n\t" \ + : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)) +#else +#define cpuid(func, func2, ax, bx, cx, dx) \ + __asm__ __volatile__( \ + "mov %%ebx, %%edi \n\t" \ + "cpuid \n\t" \ + "xchg %%edi, %%ebx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)) +#endif +#elif defined(__SUNPRO_C) || \ + defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/ +#if VPX_ARCH_X86_64 +#define cpuid(func, func2, ax, bx, cx, dx) \ + asm volatile( \ + "xchg %rsi, %rbx \n\t" \ + "cpuid \n\t" \ + "movl %ebx, %edi \n\t" \ + "xchg %rsi, %rbx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)) +#else +#define cpuid(func, func2, ax, bx, cx, dx) \ + asm volatile( \ + "pushl %ebx \n\t" \ + "cpuid \n\t" \ + "movl %ebx, %edi \n\t" \ + "popl %ebx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)) +#endif +#else /* end __SUNPRO__ */ +#if VPX_ARCH_X86_64 +#if defined(_MSC_VER) && _MSC_VER > 1500 +#define cpuid(func, func2, a, b, c, d) \ + do { \ + int regs[4]; \ + __cpuidex(regs, func, func2); \ + a = regs[0]; \ + b = regs[1]; \ + c = regs[2]; \ + d = regs[3]; \ + } while (0) +#else +#define cpuid(func, func2, a, b, c, d) \ + do { \ + int regs[4]; \ + __cpuid(regs, func); \ + a = regs[0]; \ + b = regs[1]; \ + c = regs[2]; \ + d = regs[3]; \ + } while (0) +#endif +#else +#define cpuid(func, func2, a, b, c, d) \ + __asm mov eax, func __asm mov ecx, func2 __asm cpuid __asm mov a, \ + eax __asm mov b, ebx __asm mov c, ecx __asm mov d, edx +#endif +#endif /* end others */ + +// NaCl has no support for xgetbv or the raw opcode. +#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__)) +static INLINE uint64_t xgetbv(void) { + const uint32_t ecx = 0; + uint32_t eax, edx; + // Use the raw opcode for xgetbv for compatibility with older toolchains. + __asm__ volatile(".byte 0x0f, 0x01, 0xd0\n" + : "=a"(eax), "=d"(edx) + : "c"(ecx)); + return ((uint64_t)edx << 32) | eax; +} +#elif (defined(_M_X64) || defined(_M_IX86)) && defined(_MSC_FULL_VER) && \ + _MSC_FULL_VER >= 160040219 // >= VS2010 SP1 +#include <immintrin.h> +#define xgetbv() _xgetbv(0) +#elif defined(_MSC_VER) && defined(_M_IX86) +static INLINE uint64_t xgetbv(void) { + uint32_t eax_, edx_; + __asm { + xor ecx, ecx // ecx = 0 + // Use the raw opcode for xgetbv for compatibility with older toolchains. + __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0 + mov eax_, eax + mov edx_, edx + } + return ((uint64_t)edx_ << 32) | eax_; +} +#else +#define xgetbv() 0U // no AVX for older x64 or unrecognized toolchains. 
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1700
+#undef NOMINMAX
+#define NOMINMAX
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
+#define getenv(x) NULL
+#endif
+#endif
+
+#define HAS_MMX 0x001
+#define HAS_SSE 0x002
+#define HAS_SSE2 0x004
+#define HAS_SSE3 0x008
+#define HAS_SSSE3 0x010
+#define HAS_SSE4_1 0x020
+#define HAS_AVX 0x040
+#define HAS_AVX2 0x080
+#define HAS_AVX512 0x100
+#ifndef BIT
+#define BIT(n) (1u << (n))
+#endif
+
+static INLINE int x86_simd_caps(void) {
+  unsigned int flags = 0;
+  unsigned int mask = ~0u;
+  unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
+  char *env;
+  (void)reg_ebx;
+
+  /* See if the CPU capabilities are being overridden by the environment */
+  env = getenv("VPX_SIMD_CAPS");
+
+  if (env && *env) return (int)strtol(env, NULL, 0);
+
+  env = getenv("VPX_SIMD_CAPS_MASK");
+
+  if (env && *env) mask = (unsigned int)strtoul(env, NULL, 0);
+
+  /* Ensure that the CPUID instruction supports extended features */
+  cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);
+
+  if (max_cpuid_val < 1) return 0;
+
+  /* Get the standard feature flags */
+  cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+
+  if (reg_edx & BIT(23)) flags |= HAS_MMX;
+
+  if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
+
+  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
+
+  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
+
+  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
+
+  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
+
+  // bits 27 (OSXSAVE) & 28 (256-bit AVX)
+  if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
+    // Check for OS-support of YMM state. Necessary for AVX and AVX2.
+    if ((xgetbv() & 0x6) == 0x6) {
+      flags |= HAS_AVX;
+
+      if (max_cpuid_val >= 7) {
+        /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
+        cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+
+        if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
+
+        // bits 16 (AVX-512F) & 17 (AVX-512DQ) & 28 (AVX-512CD) &
+        // 30 (AVX-512BW) & 31 (AVX-512VL)
+        if ((reg_ebx & (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31))) ==
+            (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31))) {
+          // Check for OS-support of ZMM and YMM state. Necessary for AVX-512.
+          if ((xgetbv() & 0xe6) == 0xe6) flags |= HAS_AVX512;
+        }
+      }
+    }
+  }
+
+  (void)reg_eax;  // Avoid compiler warning on unused-but-set variable.
+
+  return flags & mask;
+}
+
+// Fine-Grain Measurement Functions
+//
+// If you are timing a small region of code, access the timestamp counter
+// (TSC) via:
+//
+// unsigned int start = x86_tsc_start();
+// ...
+// unsigned int end = x86_tsc_end();
+// unsigned int diff = end - start;
+//
+// The start/end functions introduce a few more instructions than using
+// x86_readtsc directly, but prevent the CPU's out-of-order execution from
+// affecting the measurement (by having earlier/later instructions be evaluated
+// in the time interval). See the white paper, "How to Benchmark Code
+// Execution Times on Intel(R) IA-32 and IA-64 Instruction Set Architectures" by
+// Gabriele Paoloni for more information.
+//
+// If you are timing a large function (CPU time > a couple of seconds), use
+// x86_readtsc64 to read the timestamp counter in a 64-bit integer. The
+// out-of-order leakage that can occur is minimal compared to total runtime.
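// A minimal usage sketch of the start/end discipline described above,
// assuming a hypothetical kernel vpx_example_kernel() (illustrative only,
// not part of this header). The unsigned subtraction stays correct as long
// as the measured region runs for fewer than 2^32 cycles.
//
//   unsigned int start, end, cycles;
//   start = x86_tsc_start();   // cpuid serializes, then rdtsc reads the TSC
//   vpx_example_kernel();      // small region of code under test
//   end = x86_tsc_end();       // rdtscp reads the TSC, then cpuid serializes
//   cycles = end - start;      // elapsed cycles for the region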
+static INLINE unsigned int x86_readtsc(void) { +#if defined(__GNUC__) && __GNUC__ + unsigned int tsc; + __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) :); + return tsc; +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) + unsigned int tsc; + asm volatile("rdtsc\n\t" : "=a"(tsc) :); + return tsc; +#else +#if VPX_ARCH_X86_64 + return (unsigned int)__rdtsc(); +#else + __asm rdtsc; +#endif +#endif +} +// 64-bit CPU cycle counter +static INLINE uint64_t x86_readtsc64(void) { +#if defined(__GNUC__) && __GNUC__ + uint32_t hi, lo; + __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)hi << 32) | lo; +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) + uint_t hi, lo; + asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi)); + return ((uint64_t)hi << 32) | lo; +#else +#if VPX_ARCH_X86_64 + return (uint64_t)__rdtsc(); +#else + __asm rdtsc; +#endif +#endif +} + +// 32-bit CPU cycle counter with a partial fence against out-of-order execution. +static INLINE unsigned int x86_readtscp(void) { +#if defined(__GNUC__) && __GNUC__ + unsigned int tscp; + __asm__ __volatile__("rdtscp\n\t" : "=a"(tscp) :); + return tscp; +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) + unsigned int tscp; + asm volatile("rdtscp\n\t" : "=a"(tscp) :); + return tscp; +#elif defined(_MSC_VER) + unsigned int ui; + return (unsigned int)__rdtscp(&ui); +#else +#if VPX_ARCH_X86_64 + return (unsigned int)__rdtscp(); +#else + __asm rdtscp; +#endif +#endif +} + +static INLINE unsigned int x86_tsc_start(void) { + unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx; + // This call should not be removed. See function notes above. + cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx); + // Avoid compiler warnings on unused-but-set variables. + (void)reg_eax; + (void)reg_ebx; + (void)reg_ecx; + (void)reg_edx; + return x86_readtsc(); +} + +static INLINE unsigned int x86_tsc_end(void) { + uint32_t v = x86_readtscp(); + unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx; + // This call should not be removed. See function notes above. + cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx); + // Avoid compiler warnings on unused-but-set variables. 
+ (void)reg_eax; + (void)reg_ebx; + (void)reg_ecx; + (void)reg_edx; + return v; +} + +#if defined(__GNUC__) && __GNUC__ +#define x86_pause_hint() __asm__ __volatile__("pause \n\t") +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) +#define x86_pause_hint() asm volatile("pause \n\t") +#else +#if VPX_ARCH_X86_64 +#define x86_pause_hint() _mm_pause(); +#else +#define x86_pause_hint() __asm pause +#endif +#endif + +#if defined(__GNUC__) && __GNUC__ +static void x87_set_control_word(unsigned short mode) { + __asm__ __volatile__("fldcw %0" : : "m"(*&mode)); +} +static unsigned short x87_get_control_word(void) { + unsigned short mode; + __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :); + return mode; +} +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) +static void x87_set_control_word(unsigned short mode) { + asm volatile("fldcw %0" : : "m"(*&mode)); +} +static unsigned short x87_get_control_word(void) { + unsigned short mode; + asm volatile("fstcw %0\n\t" : "=m"(*&mode) :); + return mode; +} +#elif VPX_ARCH_X86_64 +/* No fldcw intrinsics on Windows x64, punt to external asm */ +extern void vpx_winx64_fldcw(unsigned short mode); +extern unsigned short vpx_winx64_fstcw(void); +#define x87_set_control_word vpx_winx64_fldcw +#define x87_get_control_word vpx_winx64_fstcw +#else +static void x87_set_control_word(unsigned short mode) { + __asm { fldcw mode } +} +static unsigned short x87_get_control_word(void) { + unsigned short mode; + __asm { fstcw mode } + return mode; +} +#endif + +static INLINE unsigned int x87_set_double_precision(void) { + unsigned int mode = x87_get_control_word(); + // Intel 64 and IA-32 Architectures Developer's Manual: Vol. 1 + // https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-vol-1-manual.pdf + // 8.1.5.2 Precision Control Field + // Bits 8 and 9 (0x300) of the x87 FPU Control Word ("Precision Control") + // determine the number of bits used in floating point calculations. To match + // later SSE instructions restrict x87 operations to Double Precision (0x200). + // Precision PC Field + // Single Precision (24-Bits) 00B + // Reserved 01B + // Double Precision (53-Bits) 10B + // Extended Precision (64-Bits) 11B + x87_set_control_word((mode & ~0x300u) | 0x200u); + return mode; +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VPX_VPX_PORTS_X86_H_ diff --git a/media/libvpx/libvpx/vpx_ports/x86_abi_support.asm b/media/libvpx/libvpx/vpx_ports/x86_abi_support.asm new file mode 100644 index 0000000000..6b2d6b9684 --- /dev/null +++ b/media/libvpx/libvpx/vpx_ports/x86_abi_support.asm @@ -0,0 +1,425 @@ +; +; Copyright (c) 2010 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. +; + + +%include "vpx_config.asm" + +; 32/64 bit compatibility macros +; +; In general, we make the source use 64 bit syntax, then twiddle with it using +; the preprocessor to get the 32 bit syntax on 32 bit platforms. 
+; +%ifidn __OUTPUT_FORMAT__,elf32 +%define ABI_IS_32BIT 1 +%elifidn __OUTPUT_FORMAT__,macho32 +%define ABI_IS_32BIT 1 +%elifidn __OUTPUT_FORMAT__,win32 +%define ABI_IS_32BIT 1 +%elifidn __OUTPUT_FORMAT__,aout +%define ABI_IS_32BIT 1 +%else +%define ABI_IS_32BIT 0 +%endif + +%if ABI_IS_32BIT +%define rax eax +%define rbx ebx +%define rcx ecx +%define rdx edx +%define rsi esi +%define rdi edi +%define rsp esp +%define rbp ebp +%define movsxd mov +%macro movq 2 + %ifidn %1,eax + movd %1,%2 + %elifidn %2,eax + movd %1,%2 + %elifidn %1,ebx + movd %1,%2 + %elifidn %2,ebx + movd %1,%2 + %elifidn %1,ecx + movd %1,%2 + %elifidn %2,ecx + movd %1,%2 + %elifidn %1,edx + movd %1,%2 + %elifidn %2,edx + movd %1,%2 + %elifidn %1,esi + movd %1,%2 + %elifidn %2,esi + movd %1,%2 + %elifidn %1,edi + movd %1,%2 + %elifidn %2,edi + movd %1,%2 + %elifidn %1,esp + movd %1,%2 + %elifidn %2,esp + movd %1,%2 + %elifidn %1,ebp + movd %1,%2 + %elifidn %2,ebp + movd %1,%2 + %else + movq %1,%2 + %endif +%endmacro +%endif + + +; LIBVPX_YASM_WIN64 +; Set LIBVPX_YASM_WIN64 if output is Windows 64bit so the code will work if x64 +; or win64 is defined on the Yasm command line. +%ifidn __OUTPUT_FORMAT__,win64 +%define LIBVPX_YASM_WIN64 1 +%elifidn __OUTPUT_FORMAT__,x64 +%define LIBVPX_YASM_WIN64 1 +%else +%define LIBVPX_YASM_WIN64 0 +%endif + +; Declare groups of platforms +%ifidn __OUTPUT_FORMAT__,elf32 + %define LIBVPX_ELF 1 +%elifidn __OUTPUT_FORMAT__,elfx32 + %define LIBVPX_ELF 1 +%elifidn __OUTPUT_FORMAT__,elf64 + %define LIBVPX_ELF 1 +%else + %define LIBVPX_ELF 0 +%endif + +%ifidn __OUTPUT_FORMAT__,macho32 + %define LIBVPX_MACHO 1 +%elifidn __OUTPUT_FORMAT__,macho64 + %define LIBVPX_MACHO 1 +%else + %define LIBVPX_MACHO 0 +%endif + +; sym() +; Return the proper symbol name for the target ABI. +; +; Certain ABIs, notably MS COFF and Darwin MACH-O, require that symbols +; with C linkage be prefixed with an underscore. +; +%if LIBVPX_ELF || LIBVPX_YASM_WIN64 + %define sym(x) x +%else + ; Mach-O / COFF + %define sym(x) _ %+ x +%endif + +; globalsym() +; Return a global declaration with the proper decoration for the target ABI. +; +; When CHROMIUM is defined, include attributes to hide the symbol from the +; global namespace. +; +; Chromium doesn't like exported global symbols due to symbol clashing with +; plugins among other things. +; +; Requires Chromium's patched copy of yasm: +; http://src.chromium.org/viewvc/chrome?view=rev&revision=73761 +; http://www.tortall.net/projects/yasm/ticket/236 +; or nasm > 2.14. +; +%ifdef CHROMIUM + %ifdef __NASM_VER__ + %if __NASM_VERSION_ID__ < 0x020e0000 ; 2.14 + ; nasm < 2.14 does not support :private_extern directive + %fatal Must use nasm 2.14 or newer + %endif + %endif + + %if LIBVPX_ELF + %define globalsym(x) global sym(x) %+ :function hidden + %elif LIBVPX_MACHO + %define globalsym(x) global sym(x) %+ :private_extern + %else + ; COFF / PE32+ + %define globalsym(x) global sym(x) + %endif +%else + %define globalsym(x) global sym(x) +%endif + +; arg() +; Return the address specification of the given argument +; +%if ABI_IS_32BIT + %define arg(x) [ebp+8+4*x] +%else + ; 64 bit ABI passes arguments in registers. This is a workaround to get up + ; and running quickly. 
Relies on SHADOW_ARGS_TO_STACK + %if LIBVPX_YASM_WIN64 + %define arg(x) [rbp+16+8*x] + %else + %define arg(x) [rbp-8-8*x] + %endif +%endif + +; REG_SZ_BYTES, REG_SZ_BITS +; Size of a register +%if ABI_IS_32BIT +%define REG_SZ_BYTES 4 +%define REG_SZ_BITS 32 +%else +%define REG_SZ_BYTES 8 +%define REG_SZ_BITS 64 +%endif + + +; ALIGN_STACK <alignment> <register> +; This macro aligns the stack to the given alignment (in bytes). The stack +; is left such that the previous value of the stack pointer is the first +; argument on the stack (ie, the inverse of this macro is 'pop rsp.') +; This macro uses one temporary register, which is not preserved, and thus +; must be specified as an argument. +%macro ALIGN_STACK 2 + mov %2, rsp + and rsp, -%1 + lea rsp, [rsp - (%1 - REG_SZ_BYTES)] + push %2 +%endmacro + + +; +; The Microsoft assembler tries to impose a certain amount of type safety in +; its register usage. YASM doesn't recognize these directives, so we just +; %define them away to maintain as much compatibility as possible with the +; original inline assembler we're porting from. +; +%idefine PTR +%idefine XMMWORD +%idefine MMWORD + +; PIC macros +; +%if ABI_IS_32BIT + %if CONFIG_PIC=1 + %ifidn __OUTPUT_FORMAT__,elf32 + %define WRT_PLT wrt ..plt + %macro GET_GOT 1 + extern _GLOBAL_OFFSET_TABLE_ + push %1 + call %%get_got + %%sub_offset: + jmp %%exitGG + %%get_got: + mov %1, [esp] + add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc + ret + %%exitGG: + %undef GLOBAL + %define GLOBAL(x) x + %1 wrt ..gotoff + %undef RESTORE_GOT + %define RESTORE_GOT pop %1 + %endmacro + %elifidn __OUTPUT_FORMAT__,macho32 + %macro GET_GOT 1 + push %1 + call %%get_got + %%get_got: + pop %1 + %undef GLOBAL + %define GLOBAL(x) x + %1 - %%get_got + %undef RESTORE_GOT + %define RESTORE_GOT pop %1 + %endmacro + %endif + %endif + + %ifdef CHROMIUM + %ifidn __OUTPUT_FORMAT__,macho32 + %define HIDDEN_DATA(x) x:private_extern + %else + %define HIDDEN_DATA(x) x + %endif + %else + %define HIDDEN_DATA(x) x + %endif +%else + %macro GET_GOT 1 + %endmacro + %define GLOBAL(x) rel x + %ifidn __OUTPUT_FORMAT__,elf64 + %define WRT_PLT wrt ..plt + %define HIDDEN_DATA(x) x:data hidden + %elifidn __OUTPUT_FORMAT__,elfx32 + %define WRT_PLT wrt ..plt + %define HIDDEN_DATA(x) x:data hidden + %elifidn __OUTPUT_FORMAT__,macho64 + %ifdef CHROMIUM + %define HIDDEN_DATA(x) x:private_extern + %else + %define HIDDEN_DATA(x) x + %endif + %else + %define HIDDEN_DATA(x) x + %endif +%endif +%ifnmacro GET_GOT + %macro GET_GOT 1 + %endmacro + %define GLOBAL(x) x +%endif +%ifndef RESTORE_GOT +%define RESTORE_GOT +%endif +%ifndef WRT_PLT +%define WRT_PLT +%endif + +%if ABI_IS_32BIT + %macro SHADOW_ARGS_TO_STACK 1 + %endm + %define UNSHADOW_ARGS +%else +%if LIBVPX_YASM_WIN64 + %macro SHADOW_ARGS_TO_STACK 1 ; argc + %if %1 > 0 + mov arg(0),rcx + %endif + %if %1 > 1 + mov arg(1),rdx + %endif + %if %1 > 2 + mov arg(2),r8 + %endif + %if %1 > 3 + mov arg(3),r9 + %endif + %endm +%else + %macro SHADOW_ARGS_TO_STACK 1 ; argc + %if %1 > 0 + push rdi + %endif + %if %1 > 1 + push rsi + %endif + %if %1 > 2 + push rdx + %endif + %if %1 > 3 + push rcx + %endif + %if %1 > 4 + push r8 + %endif + %if %1 > 5 + push r9 + %endif + %if %1 > 6 + %assign i %1-6 + %assign off 16 + %rep i + mov rax,[rbp+off] + push rax + %assign off off+8 + %endrep + %endif + %endm +%endif + %define UNSHADOW_ARGS mov rsp, rbp +%endif + +; Win64 ABI requires that XMM6:XMM15 are callee saved +; SAVE_XMM n, [u] +; store registers 6-n on the stack +; if u is specified, use unaligned movs. 
+; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return +; value. Typically we follow this up with 'push rbp' - re-aligning the stack - +; but in some cases this is not done and unaligned movs must be used. +%if LIBVPX_YASM_WIN64 +%macro SAVE_XMM 1-2 a + %if %1 < 6 + %error Only xmm registers 6-15 must be preserved + %else + %assign last_xmm %1 + %define movxmm movdq %+ %2 + %assign xmm_stack_space ((last_xmm - 5) * 16) + sub rsp, xmm_stack_space + %assign i 6 + %rep (last_xmm - 5) + movxmm [rsp + ((i - 6) * 16)], xmm %+ i + %assign i i+1 + %endrep + %endif +%endmacro +%macro RESTORE_XMM 0 + %ifndef last_xmm + %error RESTORE_XMM must be paired with SAVE_XMM n + %else + %assign i last_xmm + %rep (last_xmm - 5) + movxmm xmm %+ i, [rsp +((i - 6) * 16)] + %assign i i-1 + %endrep + add rsp, xmm_stack_space + ; there are a couple functions which return from multiple places. + ; otherwise, we could uncomment these: + ; %undef last_xmm + ; %undef xmm_stack_space + ; %undef movxmm + %endif +%endmacro +%else +%macro SAVE_XMM 1-2 +%endmacro +%macro RESTORE_XMM 0 +%endmacro +%endif + +; Name of the rodata section +; +; .rodata seems to be an elf-ism, as it doesn't work on OSX. +; +%ifidn __OUTPUT_FORMAT__,macho64 +%define SECTION_RODATA section .text +%elifidn __OUTPUT_FORMAT__,macho32 +%macro SECTION_RODATA 0 +section .text +%endmacro +%elifidn __OUTPUT_FORMAT__,aout +%define SECTION_RODATA section .data +%else +%define SECTION_RODATA section .rodata +%endif + + +; Tell GNU ld that we don't require an executable stack. +%ifidn __OUTPUT_FORMAT__,elf32 +section .note.GNU-stack noalloc noexec nowrite progbits +section .text +%elifidn __OUTPUT_FORMAT__,elf64 +section .note.GNU-stack noalloc noexec nowrite progbits +section .text +%elifidn __OUTPUT_FORMAT__,elfx32 +section .note.GNU-stack noalloc noexec nowrite progbits +section .text +%endif + +; On Android platforms use lrand48 when building postproc routines. Prior to L +; rand() was not available. +%if CONFIG_POSTPROC=1 || CONFIG_VP9_POSTPROC=1 +%ifdef __ANDROID__ +extern sym(lrand48) +%define LIBVPX_RAND lrand48 +%else +extern sym(rand) +%define LIBVPX_RAND rand +%endif +%endif ; CONFIG_POSTPROC || CONFIG_VP9_POSTPROC |
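Taken together, the macros above are combined in a fixed prologue/epilogue pattern throughout the library's assembly routines. The following is a minimal sketch of that pattern for a hypothetical three-argument SSE2 routine; the symbol name, argument meanings and body are illustrative only and not taken from the library:

; Illustrative only: hypothetical routine showing how the ABI macros combine.
globalsym(vpx_example_fn_sse2)
sym(vpx_example_fn_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 3            ; spill register args so arg(n) works
    SAVE_XMM 7                        ; Win64 only: preserve xmm6-xmm7
    GET_GOT     rbx                   ; no-op on 64-bit; sets up GOT for 32-bit PIC
    push        rsi                   ; rsi/rdi are callee-saved on Win64
    push        rdi
    ; end prolog

    mov         rsi, arg(0)           ; src pointer
    mov         rdi, arg(1)           ; dst pointer
    movsxd      rcx, dword ptr arg(2) ; element count
    ; ... body ...

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret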