From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 .../dpdk/lib/librte_eal/ppc/include/meson.build    |  18 +
 .../dpdk/lib/librte_eal/ppc/include/rte_altivec.h  |  22 ++
 .../dpdk/lib/librte_eal/ppc/include/rte_atomic.h   | 413 +++++++++++++++++++++
 .../lib/librte_eal/ppc/include/rte_byteorder.h     | 120 ++++++
 .../dpdk/lib/librte_eal/ppc/include/rte_cpuflags.h |  61 +++
 .../dpdk/lib/librte_eal/ppc/include/rte_cycles.h   |  46 +++
 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_io.h  |  18 +
 .../dpdk/lib/librte_eal/ppc/include/rte_mcslock.h  |  18 +
 .../dpdk/lib/librte_eal/ppc/include/rte_memcpy.h   | 209 +++++++++++
 .../dpdk/lib/librte_eal/ppc/include/rte_pause.h    |  29 ++
 .../dpdk/lib/librte_eal/ppc/include/rte_prefetch.h |  41 ++
 .../dpdk/lib/librte_eal/ppc/include/rte_rwlock.h   |  40 ++
 .../dpdk/lib/librte_eal/ppc/include/rte_spinlock.h |  88 +++++
 .../lib/librte_eal/ppc/include/rte_ticketlock.h    |  18 +
 .../dpdk/lib/librte_eal/ppc/include/rte_vect.h     |  36 ++
 src/spdk/dpdk/lib/librte_eal/ppc/meson.build       |  10 +
 src/spdk/dpdk/lib/librte_eal/ppc/rte_cpuflags.c    | 110 ++++++
 src/spdk/dpdk/lib/librte_eal/ppc/rte_cycles.c      |  11 +
 src/spdk/dpdk/lib/librte_eal/ppc/rte_hypervisor.c  |  11 +
 19 files changed, 1319 insertions(+)
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/meson.build
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_altivec.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_atomic.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_byteorder.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cpuflags.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cycles.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_io.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_mcslock.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_memcpy.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_pause.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_prefetch.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_rwlock.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_spinlock.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_ticketlock.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/include/rte_vect.h
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/meson.build
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/rte_cpuflags.c
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/rte_cycles.c
 create mode 100644 src/spdk/dpdk/lib/librte_eal/ppc/rte_hypervisor.c
(limited to 'src/spdk/dpdk/lib/librte_eal/ppc')

diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/meson.build b/src/spdk/dpdk/lib/librte_eal/ppc/include/meson.build
new file mode 100644
index 000000000..ab4bd2809
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi
+
+arch_headers = files(
+	'rte_altivec.h',
+	'rte_atomic.h',
+	'rte_byteorder.h',
+	'rte_cpuflags.h',
+	'rte_cycles.h',
+	'rte_io.h',
+	'rte_memcpy.h',
+	'rte_pause.h',
+	'rte_prefetch.h',
+	'rte_rwlock.h',
+	'rte_spinlock.h',
+	'rte_vect.h',
+)
+install_headers(arch_headers, subdir: get_option('include_subdir_arch'))
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_altivec.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_altivec.h
new file mode 100644
index 000000000..1551a9454
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_altivec.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) Mellanox 2020.
+ */
+
+#ifndef _RTE_ALTIVEC_H_
+#define _RTE_ALTIVEC_H_
+
+/* To include altivec.h, GCC version must be >= 4.8 */
+#include <altivec.h>
+
+/*
+ * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
+ * Otherwise there would be a type conflict between stdbool and altivec.
+ */
+#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
+#undef bool
+/* redefine as in stdbool.h */
+#define bool _Bool
+#endif
+
+#endif /* _RTE_ALTIVEC_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_atomic.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_atomic.h
new file mode 100644
index 000000000..7e3e13118
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_atomic.h
@@ -0,0 +1,413 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
+ * Copyright (c) 2008 Marcel Moolenaar
+ * Copyright (c) 2001 Benno Rice
+ * Copyright (c) 2001 David E. O'Brien
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_PPC_64_H_
+#define _RTE_ATOMIC_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_atomic.h"
+
+#define rte_mb() asm volatile("sync" : : : "memory")
+
+#define rte_wmb() asm volatile("sync" : : : "memory")
+
+#define rte_rmb() asm volatile("sync" : : : "memory")
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
+#define rte_cio_wmb() rte_wmb()
+
+#define rte_cio_rmb() rte_rmb()
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+/* To be compatible with Power7, use GCC built-in functions for 16-bit
+ * operations */
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+}
+
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+	unsigned int ret = 0;
+
+	asm volatile(
+			"\tlwsync\n"
+			"1:\tlwarx %[ret], 0, %[dst]\n"
+			"cmplw %[exp], %[ret]\n"
+			"bne 2f\n"
+			"stwcx. %[src], 0, %[dst]\n"
+			"bne- 1b\n"
+			"li %[ret], 1\n"
+			"b 3f\n"
+			"2:\n"
+			"stwcx. %[ret], 0, %[dst]\n"
+			"li %[ret], 0\n"
+			"3:\n"
+			"isync\n"
+			: [ret] "=&r" (ret), "=m" (*dst)
+			: [dst] "r" (dst),
+			  [exp] "r" (exp),
+			  [src] "r" (src),
+			  "m" (*dst)
+			: "cc", "memory");
+
+	return ret;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+	int t;
+
+	asm volatile(
+			"1: lwarx %[t],0,%[cnt]\n"
+			"addic %[t],%[t],1\n"
+			"stwcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "=m" (v->cnt)
+			: [cnt] "r" (&v->cnt), "m" (v->cnt)
+			: "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+	int t;
+
+	asm volatile(
+			"1: lwarx %[t],0,%[cnt]\n"
+			"addic %[t],%[t],-1\n"
+			"stwcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "=m" (v->cnt)
+			: [cnt] "r" (&v->cnt), "m" (v->cnt)
+			: "cc", "xer", "memory");
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+	int ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: lwarx %[ret],0,%[cnt]\n"
+			"addic %[ret],%[ret],1\n"
+			"stwcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [cnt] "r" (&v->cnt)
+			: "cc", "xer", "memory");
+
+	return ret == 0;
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+	int ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: lwarx %[ret],0,%[cnt]\n"
+			"addic %[ret],%[ret],-1\n"
+			"stwcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [cnt] "r" (&v->cnt)
+			: "cc", "xer", "memory");
+
+	return ret == 0;
+}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+	unsigned int ret = 0;
+
+	asm volatile (
+			"\tlwsync\n"
+			"1: ldarx %[ret], 0, %[dst]\n"
+			"cmpld %[exp], %[ret]\n"
+			"bne 2f\n"
+			"stdcx. %[src], 0, %[dst]\n"
+			"bne- 1b\n"
+			"li %[ret], 1\n"
+			"b 3f\n"
+			"2:\n"
+			"stdcx. %[ret], 0, %[dst]\n"
+			"li %[ret], 0\n"
+			"3:\n"
+			"isync\n"
+			: [ret] "=&r" (ret), "=m" (*dst)
+			: [dst] "r" (dst),
+			  [exp] "r" (exp),
+			  [src] "r" (src),
+			  "m" (*dst)
+			: "cc", "memory");
+	return ret;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+	v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+	long ret;
+
+	asm volatile("ld%U1%X1 %[ret],%[cnt]"
+		: [ret] "=r"(ret)
+		: [cnt] "m"(v->cnt));
+
+	return ret;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+	asm volatile("std%U0%X0 %[new_value],%[cnt]"
+		: [cnt] "=m"(v->cnt)
+		: [new_value] "r"(new_value));
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+	long t;
+
+	asm volatile(
+			"1: ldarx %[t],0,%[cnt]\n"
+			"add %[t],%[inc],%[t]\n"
+			"stdcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "=m" (v->cnt)
+			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
+			: "cc", "memory");
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+	long t;
+
+	asm volatile(
+			"1: ldarx %[t],0,%[cnt]\n"
+			"subf %[t],%[dec],%[t]\n"
+			"stdcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "+m" (v->cnt)
+			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
+			: "cc", "memory");
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+	long t;
+
+	asm volatile(
+			"1: ldarx %[t],0,%[cnt]\n"
+			"addic %[t],%[t],1\n"
+			"stdcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "+m" (v->cnt)
+			: [cnt] "r" (&v->cnt), "m" (v->cnt)
+			: "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+	long t;
+
+	asm volatile(
+			"1: ldarx %[t],0,%[cnt]\n"
+			"addic %[t],%[t],-1\n"
+			"stdcx. %[t],0,%[cnt]\n"
+			"bne- 1b\n"
+			: [t] "=&r" (t), "+m" (v->cnt)
+			: [cnt] "r" (&v->cnt), "m" (v->cnt)
+			: "cc", "xer", "memory");
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+	long ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: ldarx %[ret],0,%[cnt]\n"
+			"add %[ret],%[inc],%[ret]\n"
+			"stdcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
+			: "cc", "memory");
+
+	return ret;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+	long ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: ldarx %[ret],0,%[cnt]\n"
+			"subf %[ret],%[dec],%[ret]\n"
+			"stdcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
+			: "cc", "memory");
+
+	return ret;
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+	long ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: ldarx %[ret],0,%[cnt]\n"
+			"addic %[ret],%[ret],1\n"
+			"stdcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [cnt] "r" (&v->cnt)
+			: "cc", "xer", "memory");
+
+	return ret == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+	long ret;
+
+	asm volatile(
+			"\n\tlwsync\n"
+			"1: ldarx %[ret],0,%[cnt]\n"
+			"addic %[ret],%[ret],-1\n"
+			"stdcx. %[ret],0,%[cnt]\n"
+			"bne- 1b\n"
+			"isync\n"
+			: [ret] "=&r" (ret)
+			: [cnt] "r" (&v->cnt)
+			: "cc", "xer", "memory");
+
+	return ret == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+	v->cnt = 0;
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_byteorder.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_byteorder.h
new file mode 100644
index 000000000..bfdded40f
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_byteorder.h
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Inspired from FreeBSD src/sys/powerpc/include/endian.h
+ * Copyright (c) 1987, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ */
+
+#ifndef _RTE_BYTEORDER_PPC_64_H_
+#define _RTE_BYTEORDER_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_byteorder.h"
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+	return (_x >> 8) | ((_x << 8) & 0xff00);
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+	return (_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+		((_x << 24) & 0xff000000);
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+	return (_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+		((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
+		((_x << 24) & (0xffULL << 40)) |
+		((_x << 40) & (0xffULL << 48)) | ((_x << 56));
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?	\
+				   rte_constant_bswap16(x) :	\
+				   rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?	\
+				   rte_constant_bswap32(x) :	\
+				   rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?	\
+				   rte_constant_bswap64(x) :	\
+				   rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in GCC 4.8 and later.
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?	\
+				   rte_constant_bswap16(x) :	\
+				   rte_arch_bswap16(x)))
+#endif
+#endif
+
+/* Power 8 has both little-endian and big-endian modes;
+ * Power 7 supports only big endian.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define rte_cpu_to_le_16(x) rte_bswap16(x)
+#define rte_cpu_to_le_32(x) rte_bswap32(x)
+#define rte_cpu_to_le_64(x) rte_bswap64(x)
+
+#define rte_cpu_to_be_16(x) (x)
+#define rte_cpu_to_be_32(x) (x)
+#define rte_cpu_to_be_64(x) (x)
+
+#define rte_le_to_cpu_16(x) rte_bswap16(x)
+#define rte_le_to_cpu_32(x) rte_bswap32(x)
+#define rte_le_to_cpu_64(x) rte_bswap64(x)
+
+#define rte_be_to_cpu_16(x) (x)
+#define rte_be_to_cpu_32(x) (x)
+#define rte_be_to_cpu_64(x) (x)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cpuflags.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cpuflags.h
new file mode 100644
index 000000000..a88355d17
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cpuflags.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_CPUFLAGS_PPC_64_H_
+#define _RTE_CPUFLAGS_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+	RTE_CPUFLAG_PPC_LE = 0,
+	RTE_CPUFLAG_TRUE_LE,
+	RTE_CPUFLAG_PSERIES_PERFMON_COMPAT,
+	RTE_CPUFLAG_VSX,
+	RTE_CPUFLAG_ARCH_2_06,
+	RTE_CPUFLAG_POWER6_EXT,
+	RTE_CPUFLAG_DFP,
+	RTE_CPUFLAG_PA6T,
+	RTE_CPUFLAG_ARCH_2_05,
+	RTE_CPUFLAG_ICACHE_SNOOP,
+	RTE_CPUFLAG_SMT,
+	RTE_CPUFLAG_BOOKE,
+	RTE_CPUFLAG_CELLBE,
+	RTE_CPUFLAG_POWER5_PLUS,
+	RTE_CPUFLAG_POWER5,
+	RTE_CPUFLAG_POWER4,
+	RTE_CPUFLAG_NOTB,
+	RTE_CPUFLAG_EFP_DOUBLE,
+	RTE_CPUFLAG_EFP_SINGLE,
+	RTE_CPUFLAG_SPE,
+	RTE_CPUFLAG_UNIFIED_CACHE,
+	RTE_CPUFLAG_4xxMAC,
+	RTE_CPUFLAG_MMU,
+	RTE_CPUFLAG_FPU,
+	RTE_CPUFLAG_ALTIVEC,
+	RTE_CPUFLAG_PPC601,
+	RTE_CPUFLAG_PPC64,
+	RTE_CPUFLAG_PPC32,
+	RTE_CPUFLAG_TAR,
+	RTE_CPUFLAG_LSEL,
+	RTE_CPUFLAG_EBB,
+	RTE_CPUFLAG_DSCR,
+	RTE_CPUFLAG_HTM,
+	RTE_CPUFLAG_ARCH_2_07,
+	/* The last item */
+	RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
+};
+
+#include "generic/rte_cpuflags.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cycles.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cycles.h
new file mode 100644
index 000000000..5585f9273
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_cycles.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_CYCLES_PPC_64_H_
+#define _RTE_CYCLES_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/platform/ppc.h>
+
+#include "generic/rte_cycles.h"
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+/**
+ * Read the time base register.
+ *
+ * @return
+ *   The time base for this lcore.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+	return __ppc_get_timebase();
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+	rte_mb();
+	return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_io.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_io.h
new file mode 100644
index 000000000..01455065e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_io.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_PPC_64_H_
+#define _RTE_IO_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_io.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_mcslock.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_mcslock.h
new file mode 100644
index 000000000..c58a6edc1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_mcslock.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_MCSLOCK_PPC_64_H_
+#define _RTE_MCSLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_mcslock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MCSLOCK_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_memcpy.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_memcpy.h
new file mode 100644
index 000000000..c2a1f356d
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_memcpy.h
@@ -0,0 +1,209 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_MEMCPY_PPC_64_H_
+#define _RTE_MEMCPY_PPC_64_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "rte_altivec.h"
+#include "rte_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+#if (GCC_VERSION >= 90000 && GCC_VERSION < 90400)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+	vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
+	vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
+	vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
+	vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+	rte_mov128(dst, src);
+	rte_mov128(dst + 128, src + 128);
+}
+
+#define rte_memcpy(dst, src, n)              \
+	__extension__ ({                     \
+	(__builtin_constant_p(n)) ?          \
+	memcpy((dst), (src), (n)) :          \
+	rte_memcpy_func((dst), (src), (n)); })
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+	void *ret = dst;
+
+	/* We can't copy < 16 bytes using vector registers, so do it manually. */
+	if (n < 16) {
+		if (n & 0x01) {
+			*(uint8_t *)dst = *(const uint8_t *)src;
+			dst = (uint8_t *)dst + 1;
+			src = (const uint8_t *)src + 1;
+		}
+		if (n & 0x02) {
+			*(uint16_t *)dst = *(const uint16_t *)src;
+			dst = (uint16_t *)dst + 1;
+			src = (const uint16_t *)src + 1;
+		}
+		if (n & 0x04) {
+			*(uint32_t *)dst = *(const uint32_t *)src;
+			dst = (uint32_t *)dst + 1;
+			src = (const uint32_t *)src + 1;
+		}
+		if (n & 0x08)
+			*(uint64_t *)dst = *(const uint64_t *)src;
+		return ret;
+	}
+
+	/* Special fast cases for <= 128 bytes */
+	if (n <= 32) {
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov16((uint8_t *)dst - 16 + n,
+			(const uint8_t *)src - 16 + n);
+		return ret;
+	}
+
+	if (n <= 64) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov32((uint8_t *)dst - 32 + n,
+			(const uint8_t *)src - 32 + n);
+		return ret;
+	}
+
+	if (n <= 128) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov64((uint8_t *)dst - 64 + n,
+			(const uint8_t *)src - 64 + n);
+		return ret;
+	}
+
+	/*
+	 * For large copies > 128 bytes, this combination of 256, 64 and 16 byte
+	 * copies was found to be faster than doing 128 and 32 byte copies as
+	 * well.
+	 */
+	for ( ; n >= 256; n -= 256) {
+		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+		dst = (uint8_t *)dst + 256;
+		src = (const uint8_t *)src + 256;
+	}
+
+	/*
+	 * We split the remaining bytes (which will be less than 256) into
+	 * 64-byte (2^6) chunks.
+	 * Using incrementing integers in the case labels of a switch statement
+	 * encourages the compiler to use a jump table. To get incrementing
+	 * integers, we shift the 2 relevant bits to the LSB position to first
+	 * get decrementing integers, and then subtract.
+	 */
+	switch (3 - (n >> 6)) {
+	case 0x00:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	case 0x01:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	case 0x02:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	default:
+		;
+	}
+
+	/*
+	 * We split the remaining bytes (which will be less than 64) into
+	 * 16-byte (2^4) chunks, using the same switch structure as above.
+	 */
+	switch (3 - (n >> 4)) {
+	case 0x00:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	case 0x01:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	case 0x02:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	default:
+		;
+	}
+
+	/* Copy any remaining bytes, without going beyond end of buffers */
+	if (n != 0)
+		rte_mov16((uint8_t *)dst - 16 + n,
+			(const uint8_t *)src - 16 + n);
+	return ret;
+}
+
+#if (GCC_VERSION >= 90000 && GCC_VERSION < 90400)
+#pragma GCC diagnostic pop
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_pause.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_pause.h
new file mode 100644
index 000000000..16e47ce22
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_pause.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_PAUSE_PPC64_H_
+#define _RTE_PAUSE_PPC64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_atomic.h"
+
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+	/* Set hardware multi-threading low priority */
+	asm volatile("or 1,1,1");
+	/* Set hardware multi-threading medium priority */
+	asm volatile("or 2,2,2");
+	rte_compiler_barrier();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_PPC64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_prefetch.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_prefetch.h
new file mode 100644
index 000000000..9ba07c815
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_prefetch.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_PREFETCH_PPC_64_H_
+#define _RTE_PREFETCH_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+	asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+	asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+	asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+	/* non-temporal version not available, fallback to rte_prefetch0 */
+	rte_prefetch0(p);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_rwlock.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_rwlock.h
new file mode 100644
index 000000000..9fadc0407
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_rwlock.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _RTE_RWLOCK_PPC_64_H_
+#define _RTE_RWLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_rwlock.h"
+
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_read_lock(rwl);
+}
+
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_read_unlock(rwl);
+}
+
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_write_unlock(rwl);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_spinlock.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_spinlock.h
new file mode 100644
index 000000000..149ec245c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_spinlock.h
@@ -0,0 +1,88 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_SPINLOCK_PPC_64_H_
+#define _RTE_SPINLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_pause.h>
+#include "generic/rte_spinlock.h"
+
+/* Fixme: Use intrinsics to implement the spinlock on Power architecture */
+
+#ifndef RTE_FORCE_INTRINSICS
+
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+	while (__sync_lock_test_and_set(&sl->locked, 1))
+		while (sl->locked)
+			rte_pause();
+}
+
+static inline void
+rte_spinlock_unlock(rte_spinlock_t *sl)
+{
+	__sync_lock_release(&sl->locked);
+}
+
+static inline int
+rte_spinlock_trylock(rte_spinlock_t *sl)
+{
+	return __sync_lock_test_and_set(&sl->locked, 1) == 0;
+}
+
+#endif
+
+static inline int rte_tm_supported(void)
+{
+	return 0;
+}
+
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+{
+	rte_spinlock_lock(sl); /* fall-back */
+}
+
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+	return rte_spinlock_trylock(sl);
+}
+
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+{
+	rte_spinlock_unlock(sl);
+}
+
+static inline void
+rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_recursive_lock(slr); /* fall-back */
+}
+
+static inline void
+rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_recursive_unlock(slr);
+}
+
+static inline int
+rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+{
+	return rte_spinlock_recursive_trylock(slr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_ticketlock.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_ticketlock.h
new file mode 100644
index 000000000..c175e9eab
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_ticketlock.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_PPC_64_H_
+#define _RTE_TICKETLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_ticketlock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TICKETLOCK_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_vect.h b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_vect.h
new file mode 100644
index 000000000..b0545c878
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/include/rte_vect.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2016.
+ */
+
+#ifndef _RTE_VECT_PPC_64_H_
+#define _RTE_VECT_PPC_64_H_
+
+#include "rte_altivec.h"
+
+#include "generic/rte_vect.h"
+#include "rte_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef vector signed int xmm_t;
+
+#define XMM_SIZE	(sizeof(xmm_t))
+#define XMM_MASK	(XMM_SIZE - 1)
+
+typedef union rte_xmm {
+	xmm_t    x;
+	uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
+	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+	double   pd[XMM_SIZE / sizeof(double)];
+} __rte_aligned(16) rte_xmm_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VECT_PPC_64_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/meson.build b/src/spdk/dpdk/lib/librte_eal/ppc/meson.build
new file mode 100644
index 000000000..f4b6d95c4
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi
+
+subdir('include')
+
+sources += files(
+	'rte_cpuflags.c',
+	'rte_cycles.c',
+	'rte_hypervisor.c',
+)
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/rte_cpuflags.c b/src/spdk/dpdk/lib/librte_eal/ppc/rte_cpuflags.c
new file mode 100644
index 000000000..3bb7563ce
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/rte_cpuflags.c
@@ -0,0 +1,110 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#include "rte_cpuflags.h"
+
+#include <elf.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <unistd.h>
+
+/* Symbolic values for the entries in the auxiliary table */
+#define AT_HWCAP  16
+#define AT_HWCAP2 26
+
+/* software based registers */
+enum cpu_register_t {
+	REG_NONE = 0,
+	REG_HWCAP,
+	REG_HWCAP2,
+	REG_MAX
+};
+
+typedef uint32_t hwcap_registers_t[REG_MAX];
+
+struct feature_entry {
+	uint32_t reg;
+	uint32_t bit;
+#define CPU_FLAG_NAME_MAX_LEN 64
+	char name[CPU_FLAG_NAME_MAX_LEN];
+};
+
+#define FEAT_DEF(name, reg, bit) \
+	[RTE_CPUFLAG_##name] = {reg, bit, #name},
+
+const struct feature_entry rte_cpu_feature_table[] = {
+	FEAT_DEF(PPC_LE, REG_HWCAP, 0)
+	FEAT_DEF(TRUE_LE, REG_HWCAP, 1)
+	FEAT_DEF(PSERIES_PERFMON_COMPAT, REG_HWCAP, 6)
+	FEAT_DEF(VSX, REG_HWCAP, 7)
+	FEAT_DEF(ARCH_2_06, REG_HWCAP, 8)
+	FEAT_DEF(POWER6_EXT, REG_HWCAP, 9)
+	FEAT_DEF(DFP, REG_HWCAP, 10)
+	FEAT_DEF(PA6T, REG_HWCAP, 11)
+	FEAT_DEF(ARCH_2_05, REG_HWCAP, 12)
+	FEAT_DEF(ICACHE_SNOOP, REG_HWCAP, 13)
+	FEAT_DEF(SMT, REG_HWCAP, 14)
+	FEAT_DEF(BOOKE, REG_HWCAP, 15)
+	FEAT_DEF(CELLBE, REG_HWCAP, 16)
+	FEAT_DEF(POWER5_PLUS, REG_HWCAP, 17)
+	FEAT_DEF(POWER5, REG_HWCAP, 18)
+	FEAT_DEF(POWER4, REG_HWCAP, 19)
+	FEAT_DEF(NOTB, REG_HWCAP, 20)
+	FEAT_DEF(EFP_DOUBLE, REG_HWCAP, 21)
+	FEAT_DEF(EFP_SINGLE, REG_HWCAP, 22)
+	FEAT_DEF(SPE, REG_HWCAP, 23)
+	FEAT_DEF(UNIFIED_CACHE, REG_HWCAP, 24)
+	FEAT_DEF(4xxMAC, REG_HWCAP, 25)
+	FEAT_DEF(MMU, REG_HWCAP, 26)
+	FEAT_DEF(FPU, REG_HWCAP, 27)
+	FEAT_DEF(ALTIVEC, REG_HWCAP, 28)
+	FEAT_DEF(PPC601, REG_HWCAP, 29)
+	FEAT_DEF(PPC64, REG_HWCAP, 30)
+	FEAT_DEF(PPC32, REG_HWCAP, 31)
+	FEAT_DEF(TAR, REG_HWCAP2, 26)
+	FEAT_DEF(LSEL, REG_HWCAP2, 27)
+	FEAT_DEF(EBB, REG_HWCAP2, 28)
+	FEAT_DEF(DSCR, REG_HWCAP2, 29)
+	FEAT_DEF(HTM, REG_HWCAP2, 30)
+	FEAT_DEF(ARCH_2_07, REG_HWCAP2, 31)
+};
+
+/*
+ * Read the AUXV software registers and get the CPU features for Power.
+ */
+static void
+rte_cpu_get_features(hwcap_registers_t out)
+{
+	out[REG_HWCAP] = rte_cpu_getauxval(AT_HWCAP);
+	out[REG_HWCAP2] = rte_cpu_getauxval(AT_HWCAP2);
+}
+
+/*
+ * Checks if a particular flag is available on the current machine.
+ */
+int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
+{
+	const struct feature_entry *feat;
+	hwcap_registers_t regs = {0};
+
+	if (feature >= RTE_CPUFLAG_NUMFLAGS)
+		return -ENOENT;
+
+	feat = &rte_cpu_feature_table[feature];
+	if (feat->reg == REG_NONE)
+		return -EFAULT;
+
+	rte_cpu_get_features(regs);
+	return (regs[feat->reg] >> feat->bit) & 1;
+}
+
+const char *
+rte_cpu_get_flag_name(enum rte_cpu_flag_t feature)
+{
+	if (feature >= RTE_CPUFLAG_NUMFLAGS)
+		return NULL;
+	return rte_cpu_feature_table[feature].name;
+}
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/rte_cycles.c b/src/spdk/dpdk/lib/librte_eal/ppc/rte_cycles.c
new file mode 100644
index 000000000..c96a2143b
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/rte_cycles.c
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2019.
+ */
+
+#include "eal_private.h"
+
+uint64_t
+get_tsc_freq_arch(void)
+{
+	return 0;
+}
diff --git a/src/spdk/dpdk/lib/librte_eal/ppc/rte_hypervisor.c b/src/spdk/dpdk/lib/librte_eal/ppc/rte_hypervisor.c
new file mode 100644
index 000000000..08a1c97d1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/ppc/rte_hypervisor.c
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+#include "rte_hypervisor.h"
+
+enum rte_hypervisor
+rte_hypervisor_get(void)
+{
+	return RTE_HYPERVISOR_UNKNOWN;
+}
-- 
cgit v1.2.3
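
Usage sketch (illustrative only, not part of the patch above): the snippet
below shows how the rte_atomic32 counter primitives defined in the PPC
rte_atomic.h are typically driven. It assumes a DPDK build environment where
<rte_atomic.h> resolves to this architecture header; on PPC the increment and
decrement compile down to the lwarx/stwcx. retry loops shown in the diff.

	#include <stdio.h>
	#include <rte_atomic.h>

	int main(void)
	{
		rte_atomic32_t cnt;

		rte_atomic32_init(&cnt);   /* cnt = 0 */
		rte_atomic32_inc(&cnt);    /* cnt = 1 */

		/* cmpset returns non-zero on success: swap 1 -> 2 */
		if (rte_atomic32_cmpset((volatile uint32_t *)&cnt.cnt, 1, 2))
			printf("cnt is now %d\n", rte_atomic32_read(&cnt));

		rte_atomic32_dec(&cnt);               /* 2 -> 1 */
		if (rte_atomic32_dec_and_test(&cnt))  /* 1 -> 0, true at zero */
			printf("counter drained\n");

		return 0;
	}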