path: root/media/ffvpx/libavutil/cpu.c
Diffstat (limited to 'media/ffvpx/libavutil/cpu.c')
-rw-r--r--  media/ffvpx/libavutil/cpu.c  285
1 file changed, 285 insertions, 0 deletions
diff --git a/media/ffvpx/libavutil/cpu.c b/media/ffvpx/libavutil/cpu.c
new file mode 100644
index 0000000000..48d195168c
--- /dev/null
+++ b/media/ffvpx/libavutil/cpu.c
@@ -0,0 +1,285 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#if HAVE_SCHED_GETAFFINITY
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include "attributes.h"
+#include "cpu.h"
+#include "cpu_internal.h"
+#include "opt.h"
+#include "common.h"
+
+#if HAVE_GETPROCESSAFFINITYMASK || HAVE_WINRT
+#include <windows.h>
+#endif
+#if HAVE_SYSCTL
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static atomic_int cpu_flags = ATOMIC_VAR_INIT(-1);
+static atomic_int cpu_count = ATOMIC_VAR_INIT(-1);
+
+static int get_cpu_flags(void)
+{
+#if ARCH_MIPS
+ return ff_get_cpu_flags_mips();
+#elif ARCH_AARCH64
+ return ff_get_cpu_flags_aarch64();
+#elif ARCH_ARM
+ return ff_get_cpu_flags_arm();
+#elif ARCH_PPC
+ return ff_get_cpu_flags_ppc();
+#elif ARCH_RISCV
+ return ff_get_cpu_flags_riscv();
+#elif ARCH_X86
+ return ff_get_cpu_flags_x86();
+#elif ARCH_LOONGARCH
+ return ff_get_cpu_flags_loongarch();
+#endif
+ return 0;
+}
+
+void av_force_cpu_flags(int arg)
+{
+ if (ARCH_X86 &&
+ (arg & ( AV_CPU_FLAG_3DNOW |
+ AV_CPU_FLAG_3DNOWEXT |
+ AV_CPU_FLAG_MMXEXT |
+ AV_CPU_FLAG_SSE |
+ AV_CPU_FLAG_SSE2 |
+ AV_CPU_FLAG_SSE2SLOW |
+ AV_CPU_FLAG_SSE3 |
+ AV_CPU_FLAG_SSE3SLOW |
+ AV_CPU_FLAG_SSSE3 |
+ AV_CPU_FLAG_SSE4 |
+ AV_CPU_FLAG_SSE42 |
+ AV_CPU_FLAG_AVX |
+ AV_CPU_FLAG_AVXSLOW |
+ AV_CPU_FLAG_XOP |
+ AV_CPU_FLAG_FMA3 |
+ AV_CPU_FLAG_FMA4 |
+ AV_CPU_FLAG_AVX2 |
+ AV_CPU_FLAG_AVX512 ))
+ && !(arg & AV_CPU_FLAG_MMX)) {
+ av_log(NULL, AV_LOG_WARNING, "MMX implied by specified flags\n");
+ arg |= AV_CPU_FLAG_MMX;
+ }
+
+ atomic_store_explicit(&cpu_flags, arg, memory_order_relaxed);
+}
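+
+/* Usage sketch (illustrative only): forcing flags is mostly a debugging aid.
+ * Because av_get_cpu_flags() below re-runs detection whenever the stored
+ * value is -1, the calls behave as annotated:
+ *
+ *     av_force_cpu_flags(0);                                      // disable all optimized paths
+ *     av_force_cpu_flags(av_get_cpu_flags() & ~AV_CPU_FLAG_AVX2); // everything except AVX2
+ *     av_force_cpu_flags(-1);                                     // back to automatic detection
+ */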
+
+int av_get_cpu_flags(void)
+{
+ int flags = atomic_load_explicit(&cpu_flags, memory_order_relaxed);
+ if (flags == -1) {
+ flags = get_cpu_flags();
+ atomic_store_explicit(&cpu_flags, flags, memory_order_relaxed);
+ }
+ return flags;
+}
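+
+/* Usage sketch: callers normally read the flags once and pick an
+ * implementation.  The function-pointer names are hypothetical; only the
+ * flag test itself comes from this API.
+ *
+ *     int cpu_flags = av_get_cpu_flags();
+ *     if (cpu_flags & AV_CPU_FLAG_AVX2)
+ *         dsp->add_pixels = add_pixels_avx2; // hypothetical AVX2 version
+ *     else
+ *         dsp->add_pixels = add_pixels_c;    // hypothetical C fallback
+ */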
+
+int av_parse_cpu_caps(unsigned *flags, const char *s)
+{
+ static const AVOption cpuflags_opts[] = {
+ { "flags" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+#if ARCH_PPC
+ { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC }, .unit = "flags" },
+#elif ARCH_X86
+ { "mmx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" },
+ { "mmx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" },
+ { "mmxext" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" },
+ { "sse" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE }, .unit = "flags" },
+ { "sse2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2 }, .unit = "flags" },
+ { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2SLOW }, .unit = "flags" },
+ { "sse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3 }, .unit = "flags" },
+ { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3SLOW }, .unit = "flags" },
+ { "ssse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSSE3 }, .unit = "flags" },
+ { "atom" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM }, .unit = "flags" },
+ { "sse4.1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE4 }, .unit = "flags" },
+ { "sse4.2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE42 }, .unit = "flags" },
+ { "avx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX }, .unit = "flags" },
+ { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVXSLOW }, .unit = "flags" },
+ { "xop" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_XOP }, .unit = "flags" },
+ { "fma3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA3 }, .unit = "flags" },
+ { "fma4" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA4 }, .unit = "flags" },
+ { "avx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX2 }, .unit = "flags" },
+ { "bmi1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1 }, .unit = "flags" },
+ { "bmi2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI2 }, .unit = "flags" },
+ { "3dnow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOW }, .unit = "flags" },
+ { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOWEXT }, .unit = "flags" },
+ { "cmov", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV }, .unit = "flags" },
+ { "aesni", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AESNI }, .unit = "flags" },
+ { "avx512" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512 }, .unit = "flags" },
+ { "avx512icl", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512ICL }, .unit = "flags" },
+ { "slowgather", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SLOW_GATHER }, .unit = "flags" },
+
+#define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX
+#define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE
+#define CPU_FLAG_P4 CPU_FLAG_P3 | AV_CPU_FLAG_SSE2
+ { "pentium2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P2 }, .unit = "flags" },
+ { "pentium3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P3 }, .unit = "flags" },
+ { "pentium4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P4 }, .unit = "flags" },
+
+#define CPU_FLAG_K62 AV_CPU_FLAG_MMX | AV_CPU_FLAG_3DNOW
+#define CPU_FLAG_ATHLON CPU_FLAG_K62 | AV_CPU_FLAG_CMOV | AV_CPU_FLAG_3DNOWEXT | AV_CPU_FLAG_MMX2
+#define CPU_FLAG_ATHLONXP CPU_FLAG_ATHLON | AV_CPU_FLAG_SSE
+#define CPU_FLAG_K8 CPU_FLAG_ATHLONXP | AV_CPU_FLAG_SSE2
+ { "k6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" },
+ { "k62", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K62 }, .unit = "flags" },
+ { "athlon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLON }, .unit = "flags" },
+ { "athlonxp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLONXP }, .unit = "flags" },
+ { "k8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K8 }, .unit = "flags" },
+#elif ARCH_ARM
+ { "armv5te", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE }, .unit = "flags" },
+ { "armv6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6 }, .unit = "flags" },
+ { "armv6t2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2 }, .unit = "flags" },
+ { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
+ { "vfp_vm", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM }, .unit = "flags" },
+ { "vfpv3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3 }, .unit = "flags" },
+ { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
+ { "setend", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SETEND }, .unit = "flags" },
+#elif ARCH_AARCH64
+ { "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" },
+ { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
+ { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
+ { "dotprod", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_DOTPROD }, .unit = "flags" },
+ { "i8mm", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_I8MM }, .unit = "flags" },
+#elif ARCH_MIPS
+ { "mmi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMI }, .unit = "flags" },
+ { "msa", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MSA }, .unit = "flags" },
+#elif ARCH_LOONGARCH
+ { "lsx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LSX }, .unit = "flags" },
+ { "lasx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LASX }, .unit = "flags" },
+#elif ARCH_RISCV
+ { "rvi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVI }, .unit = "flags" },
+ { "rvf", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVF }, .unit = "flags" },
+ { "rvd", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVD }, .unit = "flags" },
+ { "zve32x", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I32 }, .unit = "flags" },
+ { "zve32f", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F32 }, .unit = "flags" },
+ { "zve64x", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I64 }, .unit = "flags" },
+ { "zve64d", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F64 }, .unit = "flags" },
+ { "zba", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_ADDR }, .unit = "flags" },
+ { "zbb", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_BASIC }, .unit = "flags" },
+#endif
+ { NULL },
+ };
+ static const AVClass class = {
+ .class_name = "cpuflags",
+ .item_name = av_default_item_name,
+ .option = cpuflags_opts,
+ .version = LIBAVUTIL_VERSION_INT,
+ };
+ const AVClass *pclass = &class;
+
+ return av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, flags);
+}
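+
+/* Usage sketch for handling a -cpuflags style option string: start from the
+ * detected flags, let the string add ("+name") or remove ("-name")
+ * capabilities listed in cpuflags_opts above, then force the result.  The
+ * string "-avx2" is just an example value.
+ *
+ *     unsigned flags = av_get_cpu_flags();
+ *     if (av_parse_cpu_caps(&flags, "-avx2") >= 0)
+ *         av_force_cpu_flags(flags);
+ */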
+
+int av_cpu_count(void)
+{
+ static atomic_int printed = ATOMIC_VAR_INIT(0);
+
+ int nb_cpus = 1;
+ int count = 0;
+#if HAVE_WINRT
+ SYSTEM_INFO sysinfo;
+#endif
+#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+
+ if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
+ nb_cpus = CPU_COUNT(&cpuset);
+#elif HAVE_GETPROCESSAFFINITYMASK
+ DWORD_PTR proc_aff, sys_aff;
+ if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
+ nb_cpus = av_popcount64(proc_aff);
+#elif HAVE_SYSCTL && defined(HW_NCPUONLINE)
+ int mib[2] = { CTL_HW, HW_NCPUONLINE };
+ size_t len = sizeof(nb_cpus);
+
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+ nb_cpus = 0;
+#elif HAVE_SYSCTL && defined(HW_NCPU)
+ int mib[2] = { CTL_HW, HW_NCPU };
+ size_t len = sizeof(nb_cpus);
+
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+ nb_cpus = 0;
+#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
+ nb_cpus = sysconf(_SC_NPROC_ONLN);
+#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
+ nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+#elif HAVE_WINRT
+ GetNativeSystemInfo(&sysinfo);
+ nb_cpus = sysinfo.dwNumberOfProcessors;
+#endif
+
+ if (!atomic_exchange_explicit(&printed, 1, memory_order_relaxed))
+ av_log(NULL, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
+
+ count = atomic_load_explicit(&cpu_count, memory_order_relaxed);
+
+ if (count > 0) {
+ nb_cpus = count;
+ av_log(NULL, AV_LOG_DEBUG, "overriding to %d logical cores\n", nb_cpus);
+ }
+
+ return nb_cpus;
+}
+
+void av_cpu_force_count(int count)
+{
+ atomic_store_explicit(&cpu_count, count, memory_order_relaxed);
+}
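+
+/* Usage sketch: av_cpu_count() is typically used to size a thread pool,
+ * while av_cpu_force_count() lets tests or embedders override the detected
+ * value; any count <= 0 restores detection.
+ *
+ *     av_cpu_force_count(2);          // pretend the machine has 2 cores
+ *     int threads = av_cpu_count();   // now returns 2
+ *     av_cpu_force_count(0);          // back to the detected core count
+ */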
+
+size_t av_cpu_max_align(void)
+{
+#if ARCH_MIPS
+ return ff_get_cpu_max_align_mips();
+#elif ARCH_AARCH64
+ return ff_get_cpu_max_align_aarch64();
+#elif ARCH_ARM
+ return ff_get_cpu_max_align_arm();
+#elif ARCH_PPC
+ return ff_get_cpu_max_align_ppc();
+#elif ARCH_X86
+ return ff_get_cpu_max_align_x86();
+#elif ARCH_LOONGARCH
+ return ff_get_cpu_max_align_loongarch();
+#endif
+
+ return 8;
+}
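+
+/* Usage sketch: the returned value is a byte count suitable for padding
+ * buffer strides so SIMD loads stay aligned; the 4 bytes per pixel below is
+ * an assumed, illustrative pixel size.  FFALIGN comes in via common.h.
+ *
+ *     size_t align    = av_cpu_max_align();        // e.g. 64 when AVX-512 is usable
+ *     int    linesize = FFALIGN(width * 4, align); // pad each row to the alignment
+ */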