Diffstat (limited to 'src/liblzma/common/memcmplen.h')
-rw-r--r--  src/liblzma/common/memcmplen.h | 37
1 file changed, 26 insertions, 11 deletions
diff --git a/src/liblzma/common/memcmplen.h b/src/liblzma/common/memcmplen.h
index 99d9c51..394a485 100644
--- a/src/liblzma/common/memcmplen.h
+++ b/src/liblzma/common/memcmplen.h
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: 0BSD
+
///////////////////////////////////////////////////////////////////////////////
//
/// \file memcmplen.h
@@ -5,9 +7,6 @@
//
// Author: Lasse Collin
//
-// This file has been put into the public domain.
-// You can do whatever you want with this file.
-//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_MEMCMPLEN_H
@@ -24,7 +23,8 @@
// can use the intrinsics without the header file.
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
&& defined(_MSC_VER) \
- && defined(_M_X64) \
+ && (defined(_M_X64) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC)) \
&& !defined(__INTEL_COMPILER)
# include <intrin.h>
#endif
@@ -57,20 +57,35 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
assert(limit <= UINT32_MAX / 2);

#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
- && ((TUKLIB_GNUC_REQ(3, 4) && defined(__x86_64__)) \
+ && (((TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) \
+ && (defined(__x86_64__) \
+ || defined(__aarch64__))) \
|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
- || (defined(_MSC_VER) && defined(_M_X64)))
- // I keep this x86-64 only for now since that's where I know this
- // to be a good method. This may be fine on other 64-bit CPUs too.
- // On big endian one should use xor instead of subtraction and switch
- // to __builtin_clzll().
+ || (defined(_MSC_VER) && (defined(_M_X64) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC))))
+ // This is only for x86-64 and ARM64 for now. This might be fine on
+ // other 64-bit processors too. On big endian one should use xor
+ // instead of subtraction and switch to __builtin_clzll().
+ //
+ // Reasons to use subtraction instead of xor:
+ //
+ // - On some x86-64 processors (Intel Sandy Bridge to Tiger Lake),
+ // sub+jz and sub+jnz can be fused but xor+jz or xor+jnz cannot.
+ // Thus using subtraction has potential to be a tiny amount faster
+ // since the code checks if the difference is non-zero.
+ //
+ // - Some processors (Intel Pentium 4) used to have more ALU
+ // resources for add/sub instructions than and/or/xor.
+ //
+ // The processor info is based on Agner Fog's microarchitecture.pdf
+ // version 2023-05-26. https://www.agner.org/optimize/
#define LZMA_MEMCMPLEN_EXTRA 8
while (len < limit) {
const uint64_t x = read64ne(buf1 + len) - read64ne(buf2 + len);
if (x != 0) {
// MSVC or Intel C compiler on Windows
-# if (defined(_MSC_VER) || defined(__INTEL_COMPILER)) && defined(_M_X64)
+# if defined(_MSC_VER) || defined(__INTEL_COMPILER)
unsigned long tmp;
_BitScanForward64(&tmp, x);
len += (uint32_t)tmp >> 3;
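As a reading aid, here is a minimal, self-contained sketch of the comparison
technique this patch extends to ARM64. It is not the upstream code:
memcmplen_sketch and the memcpy-based read64ne() below are illustrative
stand-ins for the real header's tuklib helpers and its full set of compiler
and architecture guards. The sketch assumes a little-endian target where
reading up to 8 bytes (LZMA_MEMCMPLEN_EXTRA) past the compared region is
safe, as the patched function requires.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#if defined(_MSC_VER)
#   include <intrin.h>
#endif

// Native-endian unaligned 64-bit load; compilers turn this memcpy()
// into a single load on targets with fast unaligned access.
static inline uint64_t
read64ne(const uint8_t *buf)
{
    uint64_t v;
    memcpy(&v, buf, sizeof(v));
    return v;
}

// Return the length of the common prefix of buf1 and buf2. The first
// "len" bytes are already known to be equal and at most "limit" bytes
// are compared. Both buffers must be readable up to 8 bytes past the
// compared region, mirroring LZMA_MEMCMPLEN_EXTRA in the patch.
static uint32_t
memcmplen_sketch(const uint8_t *buf1, const uint8_t *buf2,
        uint32_t len, uint32_t limit)
{
    assert(len <= limit);

    while (len < limit) {
        // On little endian, borrows in the subtraction propagate
        // from the first bytes in memory upward, so when the words
        // differ, the lowest non-zero byte of x is the first
        // mismatching byte.
        const uint64_t x = read64ne(buf1 + len) - read64ne(buf2 + len);
        if (x != 0) {
#if defined(_MSC_VER)
            unsigned long tmp;
            _BitScanForward64(&tmp, x);
            len += (uint32_t)tmp >> 3;
#else
            // Trailing zero bit count / 8 = index of the first
            // mismatching byte within the 64-bit word.
            len += (uint32_t)__builtin_ctzll(x) >> 3;
#endif
            // The mismatch may lie past limit within the last word.
            return len < limit ? len : limit;
        }

        len += 8;
    }

    return limit;
}

On a big-endian target this exact loop would misfire, because subtraction
borrows can propagate into the equal high-order bytes; as the new comment in
the patch notes, one would xor the words instead and locate the highest
differing byte with __builtin_clzll().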