Diffstat (limited to 'src/include/port')
-rw-r--r--  src/include/port/aix.h                      |  14
-rw-r--r--  src/include/port/atomics.h                  | 524
-rw-r--r--  src/include/port/atomics/arch-arm.h         |  26
-rw-r--r--  src/include/port/atomics/arch-hppa.h        |  17
-rw-r--r--  src/include/port/atomics/arch-ia64.h        |  29
-rw-r--r--  src/include/port/atomics/arch-ppc.h         | 254
-rw-r--r--  src/include/port/atomics/arch-x86.h         | 252
-rw-r--r--  src/include/port/atomics/fallback.h         | 170
-rw-r--r--  src/include/port/atomics/generic-acc.h      | 106
-rw-r--r--  src/include/port/atomics/generic-gcc.h      | 286
-rw-r--r--  src/include/port/atomics/generic-msvc.h     | 101
-rw-r--r--  src/include/port/atomics/generic-sunpro.h   | 106
-rw-r--r--  src/include/port/atomics/generic.h          | 401
-rw-r--r--  src/include/port/cygwin.h                   |  17
-rw-r--r--  src/include/port/darwin.h                   |   8
-rw-r--r--  src/include/port/freebsd.h                  |  10
-rw-r--r--  src/include/port/hpux.h                     |   3
-rw-r--r--  src/include/port/linux.h                    |  22
-rw-r--r--  src/include/port/netbsd.h                   |   1
-rw-r--r--  src/include/port/openbsd.h                  |   1
-rw-r--r--  src/include/port/pg_bitutils.h              | 272
-rw-r--r--  src/include/port/pg_bswap.h                 | 161
-rw-r--r--  src/include/port/pg_crc32c.h                | 101
-rw-r--r--  src/include/port/pg_iovec.h                 |  54
-rw-r--r--  src/include/port/pg_pthread.h               |  41
-rw-r--r--  src/include/port/solaris.h                  |  26
-rw-r--r--  src/include/port/win32.h                    |  69
-rw-r--r--  src/include/port/win32/arpa/inet.h          |   3
-rw-r--r--  src/include/port/win32/dlfcn.h              |   1
-rw-r--r--  src/include/port/win32/grp.h                |   1
-rw-r--r--  src/include/port/win32/netdb.h              |   1
-rw-r--r--  src/include/port/win32/netinet/in.h         |   3
-rw-r--r--  src/include/port/win32/pwd.h                |   3
-rw-r--r--  src/include/port/win32/sys/socket.h         |  33
-rw-r--r--  src/include/port/win32/sys/wait.h           |   3
-rw-r--r--  src/include/port/win32_msvc/dirent.h        |  34
-rw-r--r--  src/include/port/win32_msvc/sys/file.h      |   1
-rw-r--r--  src/include/port/win32_msvc/sys/param.h     |   1
-rw-r--r--  src/include/port/win32_msvc/sys/time.h      |   1
-rw-r--r--  src/include/port/win32_msvc/unistd.h        |   1
-rw-r--r--  src/include/port/win32_msvc/utime.h         |   3
-rw-r--r--  src/include/port/win32_port.h               | 546
42 files changed, 3707 insertions, 0 deletions
diff --git a/src/include/port/aix.h b/src/include/port/aix.h
new file mode 100644
index 0000000..5b1159c
--- /dev/null
+++ b/src/include/port/aix.h
@@ -0,0 +1,14 @@
+/*
+ * src/include/port/aix.h
+ */
+#define CLASS_CONFLICT
+#define DISABLE_XOPEN_NLS
+
+/*
+ * "IBM XL C/C++ for AIX, V12.1" miscompiles, for 32-bit, some inline
+ * expansions of ginCompareItemPointers() "long long" arithmetic. To take
+ * advantage of inlining, build a 64-bit PostgreSQL.
+ */
+#if defined(__ILP32__) && defined(__IBMC__)
+#define PG_FORCE_DISABLE_INLINE
+#endif
diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h
new file mode 100644
index 0000000..856338f
--- /dev/null
+++ b/src/include/port/atomics.h
@@ -0,0 +1,524 @@
+/*-------------------------------------------------------------------------
+ *
+ * atomics.h
+ * Atomic operations.
+ *
+ * Hardware and compiler dependent functions for manipulating memory
+ * atomically and dealing with cache coherency. Used to implement locking
+ * facilities and lockless algorithms/data structures.
+ *
+ * To bring up postgres on a platform/compiler, at the very least
+ * implementations for the following operations should be provided:
+ * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
+ * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
+ * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
+ * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
+ *
+ * There exist generic, hardware independent, implementations for several
+ * compilers which might be sufficient, although possibly not optimal, for a
+ * new platform. If no such generic implementation is available, spinlocks (or
+ * even OS provided semaphores) will be used to implement the API.
+ *
+ * Implement _u64 atomics if and only if your platform can use them
+ * efficiently (and obviously correctly).
+ *
+ * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
+ * whenever possible. Writing correct code using these facilities is hard.
+ *
+ * For an introduction to using memory barriers within the PostgreSQL backend,
+ * see src/backend/storage/lmgr/README.barrier
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/atomics.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef ATOMICS_H
+#define ATOMICS_H
+
+#ifdef FRONTEND
+#error "atomics.h may not be included from frontend code"
+#endif
+
+#define INSIDE_ATOMICS_H
+
+#include <limits.h>
+
+/*
+ * First a set of architecture specific files is included.
+ *
+ * These files can provide the full set of atomics or can do pretty much
+ * nothing if all the compilers commonly used on these platforms provide
+ * usable generics.
+ *
+ * Don't add inline assembly for the actual atomic operations if all the
+ * common compilers on your platform provide intrinsics. Intrinsics are
+ * much easier to understand and potentially support more architectures.
+ *
+ * It will often make sense to define memory barrier semantics here, since
+ * e.g. generic compiler intrinsics for x86 memory barriers can't know that
+ * postgres doesn't need x86 read/write barriers to do anything more than a
+ * compiler barrier.
+ */
+#if defined(__arm__) || defined(__arm) || \
+ defined(__aarch64__) || defined(__aarch64)
+#include "port/atomics/arch-arm.h"
+#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
+#include "port/atomics/arch-x86.h"
+#elif defined(__ia64__) || defined(__ia64)
+#include "port/atomics/arch-ia64.h"
+#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
+#include "port/atomics/arch-ppc.h"
+#elif defined(__hppa) || defined(__hppa__)
+#include "port/atomics/arch-hppa.h"
+#endif
+
+/*
+ * Compiler specific, but architecture independent implementations.
+ *
+ * Provide architecture independent implementations of the atomic
+ * facilities. At the very least compiler barriers should be provided, but a
+ * full implementation of
+ * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
+ * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
+ * using compiler intrinsics is a good idea.
+ */
+/*
+ * gcc or compatible, including clang and icc. Exclude xlc. The ppc64le "IBM
+ * XL C/C++ for Linux, V13.1.2" emulates gcc, but __sync_lock_test_and_set()
+ * of one-byte types elicits SIGSEGV. That bug was gone by V13.1.5 (2016-12).
+ */
+#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !(defined(__IBMC__) || defined(__IBMCPP__))
+#include "port/atomics/generic-gcc.h"
+#elif defined(_MSC_VER)
+#include "port/atomics/generic-msvc.h"
+#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
+#include "port/atomics/generic-acc.h"
+#elif defined(__SUNPRO_C) && !defined(__GNUC__)
+#include "port/atomics/generic-sunpro.h"
+#else
+/*
+ * Unsupported compiler, we'll likely use slower fallbacks... At least
+ * compiler barriers should really be provided.
+ */
+#endif
+
+/*
+ * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
+ * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
+ * support. In the case of spinlock-backed atomics the emulation is expected
+ * to be efficient, although less so than native atomics support.
+ */
+#include "port/atomics/fallback.h"
+
+/*
+ * Provide additional operations using supported infrastructure. These are
+ * expected to be efficient if the underlying atomic operations are efficient.
+ */
+#include "port/atomics/generic.h"
+
+
+/*
+ * pg_compiler_barrier - prevent the compiler from moving code across the barrier
+ *
+ * A compiler barrier need not (and preferably should not) emit any actual
+ * machine code, but must act as an optimization fence: the compiler must not
+ * reorder loads or stores to main memory around the barrier. However, the
+ * CPU may still reorder loads or stores at runtime, if the architecture's
+ * memory model permits this.
+ */
+#define pg_compiler_barrier() pg_compiler_barrier_impl()
+
+/*
+ * pg_memory_barrier - prevent the CPU from reordering memory access
+ *
+ * A memory barrier must act as a compiler barrier, and in addition must
+ * guarantee that all loads and stores issued prior to the barrier are
+ * completed before any loads or stores issued after the barrier. Unless
+ * loads and stores are totally ordered (which is not the case on most
+ * architectures) this requires issuing some sort of memory fencing
+ * instruction.
+ */
+#define pg_memory_barrier() pg_memory_barrier_impl()
+
+/*
+ * pg_(read|write)_barrier - prevent the CPU from reordering memory access
+ *
+ * A read barrier must act as a compiler barrier, and in addition must
+ * guarantee that any loads issued prior to the barrier are completed before
+ * any loads issued after the barrier. Similarly, a write barrier acts
+ * as a compiler barrier, and also orders stores. Read and write barriers
+ * are thus weaker than a full memory barrier, but stronger than a compiler
+ * barrier. In practice, on machines with strong memory ordering, read and
+ * write barriers may require nothing more than a compiler barrier.
+ */
+#define pg_read_barrier() pg_read_barrier_impl()
+#define pg_write_barrier() pg_write_barrier_impl()
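To make the pairing concrete, here is a sketch (not part of the patch; the shared variables are invented for illustration) of how a producer/consumer pair would use these two barriers together:

    static int payload;            /* hypothetical shared data */
    static volatile int ready;     /* hypothetical flag */

    static void
    producer(void)
    {
        payload = 42;
        pg_write_barrier();        /* payload store visible before flag store */
        ready = 1;
    }

    static void
    consumer(void)
    {
        if (ready)
        {
            pg_read_barrier();     /* flag load completes before payload load */
            Assert(payload == 42);
        }
    }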
+
+/*
+ * Spinloop delay - Allow CPU to relax in busy loops
+ */
+#define pg_spin_delay() pg_spin_delay_impl()
+
+/*
+ * pg_atomic_init_flag - initialize atomic flag.
+ *
+ * No barrier semantics.
+ */
+static inline void
+pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
+{
+ pg_atomic_init_flag_impl(ptr);
+}
+
+/*
+ * pg_atomic_test_set_flag - TAS()
+ *
+ * Returns true if the flag has successfully been set, false otherwise.
+ *
+ * Acquire (including read barrier) semantics.
+ */
+static inline bool
+pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
+{
+ return pg_atomic_test_set_flag_impl(ptr);
+}
+
+/*
+ * pg_atomic_unlocked_test_flag - Check if the lock is free
+ *
+ * Returns true if the flag currently is not set, false otherwise.
+ *
+ * No barrier semantics.
+ */
+static inline bool
+pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
+{
+ return pg_atomic_unlocked_test_flag_impl(ptr);
+}
+
+/*
+ * pg_atomic_clear_flag - release lock set by TAS()
+ *
+ * Release (including write barrier) semantics.
+ */
+static inline void
+pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
+{
+ pg_atomic_clear_flag_impl(ptr);
+}
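For illustration only (again, not part of the patch), the flag API maps naturally onto a minimal test-and-set lock; real code should prefer the existing spinlock/lwlock layers, as the header advises:

    static void
    sketch_lock(volatile pg_atomic_flag *lock)
    {
        while (!pg_atomic_test_set_flag(lock))  /* acquire semantics */
            pg_spin_delay();                    /* relax while spinning */
    }

    static void
    sketch_unlock(volatile pg_atomic_flag *lock)
    {
        pg_atomic_clear_flag(lock);             /* release semantics */
    }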
+
+
+/*
+ * pg_atomic_init_u32 - initialize atomic variable
+ *
+ * Has to be done before any concurrent usage.
+ *
+ * No barrier semantics.
+ */
+static inline void
+pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ pg_atomic_init_u32_impl(ptr, val);
+}
+
+/*
+ * pg_atomic_read_u32 - unlocked read from atomic variable.
+ *
+ * The read is guaranteed to return a value as it has been written by this or
+ * another process at some point in the past. There is, however, no guarantee
+ * that the value hasn't since been overwritten.
+ *
+ * No barrier semantics.
+ */
+static inline uint32
+pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
+{
+ AssertPointerAlignment(ptr, 4);
+ return pg_atomic_read_u32_impl(ptr);
+}
+
+/*
+ * pg_atomic_write_u32 - write to atomic variable.
+ *
+ * The write is guaranteed to succeed as a whole, i.e. no reader can ever
+ * observe a partial write. Note that this correctly interacts
+ * with pg_atomic_compare_exchange_u32, in contrast to
+ * pg_atomic_unlocked_write_u32().
+ *
+ * No barrier semantics.
+ */
+static inline void
+pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ pg_atomic_write_u32_impl(ptr, val);
+}
+
+/*
+ * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
+ *
+ * The write is guaranteed to succeed as a whole, i.e. no reader can ever
+ * observe a partial write. But note that writing this way is
+ * not guaranteed to correctly interact with read-modify-write operations like
+ * pg_atomic_compare_exchange_u32. This should only be used in cases where
+ * minor performance regressions due to atomics emulation are unacceptable.
+ *
+ * No barrier semantics.
+ */
+static inline void
+pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ pg_atomic_unlocked_write_u32_impl(ptr, val);
+}
+
+/*
+ * pg_atomic_exchange_u32 - exchange newval with current value
+ *
+ * Returns the old value of 'ptr' before the swap.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ return pg_atomic_exchange_u32_impl(ptr, newval);
+}
+
+/*
+ * pg_atomic_compare_exchange_u32 - CAS operation
+ *
+ * Atomically compare the current value of ptr with *expected and store newval
+ * iff ptr and *expected have the same value. The current value of *ptr will
+ * always be stored in *expected.
+ *
+ * Return true if values have been exchanged, false otherwise.
+ *
+ * Full barrier semantics.
+ */
+static inline bool
+pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ AssertPointerAlignment(ptr, 4);
+ AssertPointerAlignment(expected, 4);
+
+ return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
+}
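Because a failed exchange leaves the current value in *expected, retry loops need no separate re-read. A sketch exploiting that contract (illustrative only, not part of the patch):

    static inline void
    sketch_fetch_max_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
    {
        uint32 old = pg_atomic_read_u32(ptr);

        /* on failure "old" already holds the latest value; just re-check */
        while (old < val &&
               !pg_atomic_compare_exchange_u32(ptr, &old, val))
            ;
    }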
+
+/*
+ * pg_atomic_fetch_add_u32 - atomically add to variable
+ *
+ * Returns the value of ptr before the arithmetic operation.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ AssertPointerAlignment(ptr, 4);
+ return pg_atomic_fetch_add_u32_impl(ptr, add_);
+}
+
+/*
+ * pg_atomic_fetch_sub_u32 - atomically subtract from variable
+ *
+ * Returns the value of ptr before the arithmetic operation. Note that sub_
+ * may not be INT_MIN due to platform limitations.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
+{
+ AssertPointerAlignment(ptr, 4);
+ Assert(sub_ != INT_MIN);
+ return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
+}
+
+/*
+ * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
+ *
+ * Returns the value of ptr before the arithmetic operation.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
+{
+ AssertPointerAlignment(ptr, 4);
+ return pg_atomic_fetch_and_u32_impl(ptr, and_);
+}
+
+/*
+ * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
+ *
+ * Returns the value of ptr before the arithmetic operation.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
+{
+ AssertPointerAlignment(ptr, 4);
+ return pg_atomic_fetch_or_u32_impl(ptr, or_);
+}
+
+/*
+ * pg_atomic_add_fetch_u32 - atomically add to variable
+ *
+ * Returns the value of ptr after the arithmetic operation.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ AssertPointerAlignment(ptr, 4);
+ return pg_atomic_add_fetch_u32_impl(ptr, add_);
+}
+
+/*
+ * pg_atomic_sub_fetch_u32 - atomically subtract from variable
+ *
+ * Returns the value of ptr after the arithmetic operation. Note that sub_ may
+ * not be INT_MIN due to platform limitations.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
+{
+ AssertPointerAlignment(ptr, 4);
+ Assert(sub_ != INT_MIN);
+ return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
+}
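Note the split above: the fetch_* variants return the value before the operation, the *_fetch variants the value after. The latter is the natural fit for reference counting; a sketch (illustrative, not part of the patch):

    static inline bool
    sketch_release_ref(volatile pg_atomic_uint32 *refcount)
    {
        /* sub_fetch returns the post-decrement value; 0 means last reference */
        return pg_atomic_sub_fetch_u32(refcount, 1) == 0;
    }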
+
+/* ----
+ * The 64 bit operations have the same semantics as their 32bit counterparts
+ * if they are available. Check the corresponding 32bit function for
+ * documentation.
+ * ----
+ */
+static inline void
+pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
+ /*
+ * Can't necessarily enforce alignment - and don't need it - when using
+ * the spinlock based fallback implementation. Therefore only assert when
+ * not using it.
+ */
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ pg_atomic_init_u64_impl(ptr, val);
+}
+
+static inline uint64
+pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_read_u64_impl(ptr);
+}
+
+static inline void
+pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ pg_atomic_write_u64_impl(ptr, val);
+}
+
+static inline uint64
+pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_exchange_u64_impl(ptr, newval);
+}
+
+static inline bool
+pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+ AssertPointerAlignment(expected, 8);
+#endif
+ return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
+}
+
+static inline uint64
+pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_fetch_add_u64_impl(ptr, add_);
+}
+
+static inline uint64
+pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ Assert(sub_ != PG_INT64_MIN);
+ return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
+}
+
+static inline uint64
+pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_fetch_and_u64_impl(ptr, and_);
+}
+
+static inline uint64
+pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_fetch_or_u64_impl(ptr, or_);
+}
+
+static inline uint64
+pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_add_fetch_u64_impl(ptr, add_);
+}
+
+static inline uint64
+pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ Assert(sub_ != PG_INT64_MIN);
+ return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
+}
+
+#undef INSIDE_ATOMICS_H
+
+#endif /* ATOMICS_H */
diff --git a/src/include/port/atomics/arch-arm.h b/src/include/port/atomics/arch-arm.h
new file mode 100644
index 0000000..efa3853
--- /dev/null
+++ b/src/include/port/atomics/arch-arm.h
@@ -0,0 +1,26 @@
+/*-------------------------------------------------------------------------
+ *
+ * arch-arm.h
+ * Atomic operations considerations specific to ARM
+ *
+ * Portions Copyright (c) 2013-2021, PostgreSQL Global Development Group
+ *
+ * NOTES:
+ *
+ * src/include/port/atomics/arch-arm.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* intentionally no include guards, should only be included by atomics.h */
+#ifndef INSIDE_ATOMICS_H
+#error "should be included via atomics.h"
+#endif
+
+/*
+ * 64 bit atomics on ARM32 are implemented using kernel fallbacks and thus
+ * might be slow, so disable them entirely. On ARM64 that problem doesn't exist.
+ */
+#if !defined(__aarch64__) && !defined(__aarch64)
+#define PG_DISABLE_64_BIT_ATOMICS
+#endif /* !__aarch64__ && !__aarch64 */
diff --git a/src/include/port/atomics/arch-hppa.h b/src/include/port/atomics/arch-hppa.h
new file mode 100644
index 0000000..0661c0e
--- /dev/null
+++ b/src/include/port/atomics/arch-hppa.h
@@ -0,0 +1,17 @@
+/*-------------------------------------------------------------------------
+ *
+ * arch-hppa.h
+ * Atomic operations considerations specific to HPPA
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * src/include/port/atomics/arch-hppa.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* HPPA doesn't do either read or write reordering */
+#define pg_memory_barrier_impl() pg_compiler_barrier_impl()
diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h
new file mode 100644
index 0000000..45f36f2
--- /dev/null
+++ b/src/include/port/atomics/arch-ia64.h
@@ -0,0 +1,29 @@
+/*-------------------------------------------------------------------------
+ *
+ * arch-ia64.h
+ * Atomic operations considerations specific to intel itanium
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * src/include/port/atomics/arch-ia64.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/*
+ * Itanium is weakly ordered, so read and write barriers require a full
+ * fence.
+ */
+#if defined(__INTEL_COMPILER)
+# define pg_memory_barrier_impl() __mf()
+#elif defined(__GNUC__)
+# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+#elif defined(__hpux)
+# define pg_memory_barrier_impl() _Asm_mf()
+#endif
+
+/* per architecture manual doubleword accesses have single copy atomicity */
+#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
diff --git a/src/include/port/atomics/arch-ppc.h b/src/include/port/atomics/arch-ppc.h
new file mode 100644
index 0000000..0814296
--- /dev/null
+++ b/src/include/port/atomics/arch-ppc.h
@@ -0,0 +1,254 @@
+/*-------------------------------------------------------------------------
+ *
+ * arch-ppc.h
+ * Atomic operations considerations specific to PowerPC
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * src/include/port/atomics/arch-ppc.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if defined(__GNUC__)
+
+/*
+ * lwsync orders loads with respect to each other, and similarly with stores.
+ * But a load can still be performed before an earlier store becomes visible,
+ * so sync must be used for a full memory barrier.
+ */
+#define pg_memory_barrier_impl() __asm__ __volatile__ ("sync" : : : "memory")
+#define pg_read_barrier_impl() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define pg_write_barrier_impl() __asm__ __volatile__ ("lwsync" : : : "memory")
+#endif
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+/* 64bit atomics are only supported in 64bit mode */
+#if SIZEOF_VOID_P >= 8
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+ volatile uint64 value pg_attribute_aligned(8);
+} pg_atomic_uint64;
+
+#endif
+
+/*
+ * This mimics gcc __atomic_compare_exchange_n(..., __ATOMIC_SEQ_CST), but
+ * code generation differs at the end. __atomic_compare_exchange_n():
+ * 100: isync
+ * 104: mfcr r3
+ * 108: rlwinm r3,r3,3,31,31
+ * 10c: bne 120 <.eb+0x10>
+ * 110: clrldi r3,r3,63
+ * 114: addi r1,r1,112
+ * 118: blr
+ * 11c: nop
+ * 120: clrldi r3,r3,63
+ * 124: stw r9,0(r4)
+ * 128: addi r1,r1,112
+ * 12c: blr
+ *
+ * This:
+ * f0: isync
+ * f4: mfcr r9
+ * f8: rldicl. r3,r9,35,63
+ * fc: bne 104 <.eb>
+ * 100: stw r10,0(r4)
+ * 104: addi r1,r1,112
+ * 108: blr
+ *
+ * This implementation may or may not have materially different performance.
+ * It's not exploiting the fact that cr0 still holds the relevant comparison
+ * bits, set during the __asm__. One could fix that by moving more code into
+ * the __asm__. (That would remove the freedom to eliminate dead stores when
+ * the caller ignores "expected", but few callers do.)
+ *
+ * Recognizing constant "newval" would be superfluous, because there's no
+ * immediate-operand version of stwcx.
+ */
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ uint32 found;
+ uint32 condition_register;
+ bool ret;
+
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+ if (__builtin_constant_p(*expected) &&
+ (int32) *expected <= PG_INT16_MAX &&
+ (int32) *expected >= PG_INT16_MIN)
+ __asm__ __volatile__(
+ " sync \n"
+ " lwarx %0,0,%5 \n"
+ " cmpwi %0,%3 \n"
+ " bne $+12 \n" /* branch to isync */
+ " stwcx. %4,0,%5 \n"
+ " bne $-16 \n" /* branch to lwarx */
+ " isync \n"
+ " mfcr %1 \n"
+: "=&r"(found), "=r"(condition_register), "+m"(ptr->value)
+: "i"(*expected), "r"(newval), "r"(&ptr->value)
+: "memory", "cc");
+ else
+#endif
+ __asm__ __volatile__(
+ " sync \n"
+ " lwarx %0,0,%5 \n"
+ " cmpw %0,%3 \n"
+ " bne $+12 \n" /* branch to isync */
+ " stwcx. %4,0,%5 \n"
+ " bne $-16 \n" /* branch to lwarx */
+ " isync \n"
+ " mfcr %1 \n"
+: "=&r"(found), "=r"(condition_register), "+m"(ptr->value)
+: "r"(*expected), "r"(newval), "r"(&ptr->value)
+: "memory", "cc");
+
+ ret = (condition_register >> 29) & 1; /* test eq bit of cr0 */
+ if (!ret)
+ *expected = found;
+ return ret;
+}
+
+/*
+ * This mirrors gcc __sync_fetch_and_add().
+ *
+ * Like tas(), use constraint "=&b" to avoid allocating r0.
+ */
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ uint32 _t;
+ uint32 res;
+
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+ if (__builtin_constant_p(add_) &&
+ add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
+ __asm__ __volatile__(
+ " sync \n"
+ " lwarx %1,0,%4 \n"
+ " addi %0,%1,%3 \n"
+ " stwcx. %0,0,%4 \n"
+ " bne $-12 \n" /* branch to lwarx */
+ " isync \n"
+: "=&r"(_t), "=&b"(res), "+m"(ptr->value)
+: "i"(add_), "r"(&ptr->value)
+: "memory", "cc");
+ else
+#endif
+ __asm__ __volatile__(
+ " sync \n"
+ " lwarx %1,0,%4 \n"
+ " add %0,%1,%3 \n"
+ " stwcx. %0,0,%4 \n"
+ " bne $-12 \n" /* branch to lwarx */
+ " isync \n"
+: "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+: "r"(add_), "r"(&ptr->value)
+: "memory", "cc");
+
+ return res;
+}
+
+#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ uint64 found;
+ uint32 condition_register;
+ bool ret;
+
+ /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/; s/cmpw/cmpd/ */
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+ if (__builtin_constant_p(*expected) &&
+ (int64) *expected <= PG_INT16_MAX &&
+ (int64) *expected >= PG_INT16_MIN)
+ __asm__ __volatile__(
+ " sync \n"
+ " ldarx %0,0,%5 \n"
+ " cmpdi %0,%3 \n"
+ " bne $+12 \n" /* branch to isync */
+ " stdcx. %4,0,%5 \n"
+ " bne $-16 \n" /* branch to ldarx */
+ " isync \n"
+ " mfcr %1 \n"
+: "=&r"(found), "=r"(condition_register), "+m"(ptr->value)
+: "i"(*expected), "r"(newval), "r"(&ptr->value)
+: "memory", "cc");
+ else
+#endif
+ __asm__ __volatile__(
+ " sync \n"
+ " ldarx %0,0,%5 \n"
+ " cmpd %0,%3 \n"
+ " bne $+12 \n" /* branch to isync */
+ " stdcx. %4,0,%5 \n"
+ " bne $-16 \n" /* branch to ldarx */
+ " isync \n"
+ " mfcr %1 \n"
+: "=&r"(found), "=r"(condition_register), "+m"(ptr->value)
+: "r"(*expected), "r"(newval), "r"(&ptr->value)
+: "memory", "cc");
+
+ ret = (condition_register >> 29) & 1; /* test eq bit of cr0 */
+ if (!ret)
+ *expected = found;
+ return ret;
+}
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ uint64 _t;
+ uint64 res;
+
+ /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+ if (__builtin_constant_p(add_) &&
+ add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
+ __asm__ __volatile__(
+ " sync \n"
+ " ldarx %1,0,%4 \n"
+ " addi %0,%1,%3 \n"
+ " stdcx. %0,0,%4 \n"
+ " bne $-12 \n" /* branch to ldarx */
+ " isync \n"
+: "=&r"(_t), "=&b"(res), "+m"(ptr->value)
+: "i"(add_), "r"(&ptr->value)
+: "memory", "cc");
+ else
+#endif
+ __asm__ __volatile__(
+ " sync \n"
+ " ldarx %1,0,%4 \n"
+ " add %0,%1,%3 \n"
+ " stdcx. %0,0,%4 \n"
+ " bne $-12 \n" /* branch to ldarx */
+ " isync \n"
+: "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+: "r"(add_), "r"(&ptr->value)
+: "memory", "cc");
+
+ return res;
+}
+
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+
+/* per architecture manual doubleword accesses have single copy atomicity */
+#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
diff --git a/src/include/port/atomics/arch-x86.h b/src/include/port/atomics/arch-x86.h
new file mode 100644
index 0000000..2cab5a3
--- /dev/null
+++ b/src/include/port/atomics/arch-x86.h
@@ -0,0 +1,252 @@
+/*-------------------------------------------------------------------------
+ *
+ * arch-x86.h
+ * Atomic operations considerations specific to intel x86
+ *
+ * Note that we actually require a 486 upwards because the 386 doesn't have
+ * support for xadd and cmpxchg. Given that the 386 isn't supported anywhere
+ * anymore, that's luckily not much of a restriction.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * src/include/port/atomics/arch-x86.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/*
+ * Both 32 and 64 bit x86 do not allow loads to be reordered with other loads,
+ * or stores to be reordered with other stores, but a load can be performed
+ * before an earlier store becomes visible.
+ *
+ * Technically, some x86-ish chips support uncached memory access and/or
+ * special instructions that are weakly ordered. In those cases we'd need
+ * the read and write barriers to be lfence and sfence. But since we don't
+ * do those things, a compiler barrier should be enough.
+ *
+ * "lock; addl" has worked for longer than "mfence". It's also rumored to be
+ * faster in many scenarios.
+ */
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#if defined(__i386__) || defined(__i386)
+#define pg_memory_barrier_impl() \
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory", "cc")
+#elif defined(__x86_64__)
+#define pg_memory_barrier_impl() \
+ __asm__ __volatile__ ("lock; addl $0,0(%%rsp)" : : : "memory", "cc")
+#endif
+#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */
+
+#define pg_read_barrier_impl() pg_compiler_barrier_impl()
+#define pg_write_barrier_impl() pg_compiler_barrier_impl()
+
+/*
+ * Provide an implementation for atomics using inline assembly on x86 gcc.
+ * It's nice to support older gccs, and the compare/exchange implementation
+ * here is actually more efficient than the __sync variant.
+ */
+#if defined(HAVE_ATOMICS)
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+#define PG_HAVE_ATOMIC_FLAG_SUPPORT
+typedef struct pg_atomic_flag
+{
+ volatile char value;
+} pg_atomic_flag;
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+/*
+ * It's too complicated to write inline asm for 64bit types on 32bit and the
+ * 486 can't do it anyway.
+ */
+#ifdef __x86_64__
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+ /* alignment guaranteed due to being on a 64bit platform */
+ volatile uint64 value;
+} pg_atomic_uint64;
+#endif /* __x86_64__ */
+
+#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */
+
+#endif /* defined(HAVE_ATOMICS) */
+
+#if !defined(PG_HAVE_SPIN_DELAY)
+/*
+ * This sequence is equivalent to the PAUSE instruction ("rep" is
+ * ignored by old IA32 processors if the following instruction is
+ * not a string operation); the IA-32 Architecture Software
+ * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
+ * PAUSE in the inner loop of a spin lock is necessary for good
+ * performance:
+ *
+ * The PAUSE instruction improves the performance of IA-32
+ * processors supporting Hyper-Threading Technology when
+ * executing spin-wait loops and other routines where one
+ * thread is accessing a shared lock or semaphore in a tight
+ * polling loop. When executing a spin-wait loop, the
+ * processor can suffer a severe performance penalty when
+ * exiting the loop because it detects a possible memory order
+ * violation and flushes the core processor's pipeline. The
+ * PAUSE instruction provides a hint to the processor that the
+ * code sequence is a spin-wait loop. The processor uses this
+ * hint to avoid the memory order violation and prevent the
+ * pipeline flush. In addition, the PAUSE instruction
+ * de-pipelines the spin-wait loop to prevent it from
+ * consuming execution resources excessively.
+ */
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define PG_HAVE_SPIN_DELAY
+static __inline__ void
+pg_spin_delay_impl(void)
+{
+ __asm__ __volatile__(" rep; nop \n");
+}
+#elif defined(_MSC_VER) && defined(__x86_64__)
+#define PG_HAVE_SPIN_DELAY
+static __forceinline void
+pg_spin_delay_impl(void)
+{
+ _mm_pause();
+}
+#elif defined(_MSC_VER)
+#define PG_HAVE_SPIN_DELAY
+static __forceinline void
+pg_spin_delay_impl(void)
+{
+ /* See comment for gcc code. Same code, MASM syntax */
+ __asm rep nop;
+}
+#endif
+#endif /* !defined(PG_HAVE_SPIN_DELAY) */
+
+
+#if defined(HAVE_ATOMICS)
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+#define PG_HAVE_ATOMIC_TEST_SET_FLAG
+static inline bool
+pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ register char _res = 1;
+
+ __asm__ __volatile__(
+ " lock \n"
+ " xchgb %0,%1 \n"
+: "+q"(_res), "+m"(ptr->value)
+:
+: "memory");
+ return _res == 0;
+}
+
+#define PG_HAVE_ATOMIC_CLEAR_FLAG
+static inline void
+pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ /*
+ * On a TSO architecture like x86 it's sufficient to use a compiler
+ * barrier to achieve release semantics.
+ */
+ __asm__ __volatile__("" ::: "memory");
+ ptr->value = 0;
+}
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ char ret;
+
+ /*
+ * Perform cmpxchg and use the zero flag which it implicitly sets when
+ * equal to measure the success.
+ */
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgl %4,%5 \n"
+ " setz %2 \n"
+: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+: "a" (*expected), "r" (newval), "m"(ptr->value)
+: "memory", "cc");
+ return (bool) ret;
+}
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ uint32 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddl %0,%1 \n"
+: "=q"(res), "=m"(ptr->value)
+: "0" (add_), "m"(ptr->value)
+: "memory", "cc");
+ return res;
+}
+
+#ifdef __x86_64__
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ char ret;
+
+ /*
+ * Perform cmpxchg and use the zero flag which it implicitly sets when
+ * equal to measure the success.
+ */
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgq %4,%5 \n"
+ " setz %2 \n"
+: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+: "a" (*expected), "r" (newval), "m"(ptr->value)
+: "memory", "cc");
+ return (bool) ret;
+}
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ uint64 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddq %0,%1 \n"
+: "=q"(res), "=m"(ptr->value)
+: "0" (add_), "m"(ptr->value)
+: "memory", "cc");
+ return res;
+}
+
+#endif /* __x86_64__ */
+
+#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */
+
+/*
+ * 8 byte reads / writes have single-copy atomicity on 32 bit x86 platforms
+ * since at least the 586. As well as on all x86-64 cpus.
+ */
+#if defined(__i568__) || defined(__i668__) || /* gcc i586+ */ \
+ (defined(_M_IX86) && _M_IX86 >= 500) || /* msvc i586+ */ \
+ defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) /* gcc, sunpro, msvc */
+#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
+#endif /* 8 byte single-copy atomicity */
+
+#endif /* HAVE_ATOMICS */
diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h
new file mode 100644
index 0000000..b344aae
--- /dev/null
+++ b/src/include/port/atomics/fallback.h
@@ -0,0 +1,170 @@
+/*-------------------------------------------------------------------------
+ *
+ * fallback.h
+ * Fallback for platforms without spinlock and/or atomics support. Slower
+ * than native atomics support, but not unusably slow.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/atomics/fallback.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* intentionally no include guards, should only be included by atomics.h */
+#ifndef INSIDE_ATOMICS_H
+# error "should be included via atomics.h"
+#endif
+
+#ifndef pg_memory_barrier_impl
+/*
+ * If we have no memory barrier implementation for this architecture, we
+ * fall back to acquiring and releasing a spinlock. This might, in turn,
+ * fall back to the semaphore-based spinlock implementation, which will be
+ * amazingly slow.
+ *
+ * It's not self-evident that every possible legal implementation of a
+ * spinlock acquire-and-release would be equivalent to a full memory barrier.
+ * For example, I'm not sure that Itanium's acq and rel add up to a full
+ * fence. But all of our actual implementations seem OK in this regard.
+ */
+#define PG_HAVE_MEMORY_BARRIER_EMULATION
+
+extern void pg_spinlock_barrier(void);
+#define pg_memory_barrier_impl pg_spinlock_barrier
+#endif
+
+#ifndef pg_compiler_barrier_impl
+/*
+ * If the compiler/arch combination does not provide compiler barriers,
+ * provide a fallback. The fallback simply consists of a function call into
+ * an externally defined function. That should guarantee compiler barrier
+ * semantics except for compilers that do inter-translation-unit/global
+ * optimization - those had better provide an actual compiler barrier.
+ *
+ * A native compiler barrier for sure is a lot faster than this...
+ */
+#define PG_HAVE_COMPILER_BARRIER_EMULATION
+extern void pg_extern_compiler_barrier(void);
+#define pg_compiler_barrier_impl pg_extern_compiler_barrier
+#endif
+
+
+/*
+ * If we have no atomics implementation for this platform, fall back to
+ * providing the atomics API using a spinlock to protect the internal state.
+ * Possibly the spinlock implementation uses semaphores internally...
+ *
+ * We have to be a bit careful here, as it's not guaranteed that atomic
+ * variables are mapped to the same address in every process (e.g. dynamic
+ * shared memory segments). We can't just hash the address and use that to map
+ * to a spinlock. Instead, assign a spinlock on initialization of the atomic
+ * variable.
+ */
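Roughly what such an emulated compare-exchange looks like — a sketch only: the real _impl definitions declared further down live in a separate .c file that is not part of this excerpt, and SpinLockAcquire/SpinLockRelease are assumed to come from spin.h:

    bool
    sketch_cas_u32(volatile pg_atomic_uint32 *ptr,
                   uint32 *expected, uint32 newval)
    {
        bool ret;

        /* the per-variable spinlock serializes every operation on "value" */
        SpinLockAcquire((slock_t *) &ptr->sema);
        if (ptr->value == *expected)
        {
            ptr->value = newval;
            ret = true;
        }
        else
        {
            *expected = ptr->value;
            ret = false;
        }
        SpinLockRelease((slock_t *) &ptr->sema);
        return ret;
    }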
+#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
+
+#define PG_HAVE_ATOMIC_FLAG_SIMULATION
+#define PG_HAVE_ATOMIC_FLAG_SUPPORT
+
+typedef struct pg_atomic_flag
+{
+ /*
+ * To avoid circular includes we can't use slock_t as a type here. Instead
+ * just reserve enough space for all spinlock types. Some platforms would
+ * be content with just one byte instead of 4, but that's not too much
+ * waste.
+ */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
+ int sema[4];
+#else
+ int sema;
+#endif
+ volatile bool value;
+} pg_atomic_flag;
+
+#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+
+#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
+
+#define PG_HAVE_ATOMIC_U32_SIMULATION
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ /* Check pg_atomic_flag's definition above for an explanation */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
+ int sema[4];
+#else
+ int sema;
+#endif
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
+
+#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT)
+
+#define PG_HAVE_ATOMIC_U64_SIMULATION
+
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+ /* Check pg_atomic_flag's definition above for an explanation */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
+ int sema[4];
+#else
+ int sema;
+#endif
+ volatile uint64 value;
+} pg_atomic_uint64;
+
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+
+#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION
+
+#define PG_HAVE_ATOMIC_INIT_FLAG
+extern void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr);
+
+#define PG_HAVE_ATOMIC_TEST_SET_FLAG
+extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr);
+
+#define PG_HAVE_ATOMIC_CLEAR_FLAG
+extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr);
+
+#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
+extern bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr);
+
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+
+#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
+
+#define PG_HAVE_ATOMIC_INIT_U32
+extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_);
+
+#define PG_HAVE_ATOMIC_WRITE_U32
+extern void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val);
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval);
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
+
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+
+
+#ifdef PG_HAVE_ATOMIC_U64_SIMULATION
+
+#define PG_HAVE_ATOMIC_INIT_U64
+extern void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_);
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+extern bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval);
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+extern uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_);
+
+#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */
diff --git a/src/include/port/atomics/generic-acc.h b/src/include/port/atomics/generic-acc.h
new file mode 100644
index 0000000..3c75b6a
--- /dev/null
+++ b/src/include/port/atomics/generic-acc.h
@@ -0,0 +1,106 @@
+/*-------------------------------------------------------------------------
+ *
+ * generic-acc.h
+ * Atomic operations support when using HPs acc on HPUX
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * Documentation:
+ * * inline assembly for Itanium-based HP-UX:
+ * http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
+ * * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
+ * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
+ *
+ * Itanium only supports a small set of numbers (-16, -8, -4, -1, 1, 4, 8, 16)
+ * for atomic add/sub, so we just implement everything but compare_exchange
+ * via the compare_exchange fallbacks in atomics/generic.h.
+ *
+ * src/include/port/atomics/generic-acc.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include <machine/sys/inline.h>
+
+#define pg_compiler_barrier_impl() _Asm_sched_fence()
+
+#if defined(HAVE_ATOMICS)
+
+/* IA64 always has 32/64 bit atomics */
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+ /*
+ * Alignment is guaranteed to be 64bit. Search for "Well-behaved
+ * application restrictions" => "Data alignment and data sharing" on HP's
+ * website. Unfortunately the URL doesn't seem to be stable enough to
+ * include.
+ */
+ volatile uint64 value;
+} pg_atomic_uint64;
+
+
+#define MINOR_FENCE (_Asm_fence) (_UP_CALL_FENCE | _UP_SYS_FENCE | \
+ _DOWN_CALL_FENCE | _DOWN_SYS_FENCE )
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ bool ret;
+ uint32 current;
+
+ _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
+ /*
+ * We want a barrier, not just release/acquire semantics.
+ */
+ _Asm_mf();
+ /*
+ * Notes:
+ * _DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
+ */
+ current = _Asm_cmpxchg(_SZ_W, /* word */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ bool ret;
+ uint64 current;
+
+ _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
+ _Asm_mf();
+ current = _Asm_cmpxchg(_SZ_D, /* doubleword */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+#undef MINOR_FENCE
+
+#endif /* defined(HAVE_ATOMICS) */
diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h
new file mode 100644
index 0000000..99fda18
--- /dev/null
+++ b/src/include/port/atomics/generic-gcc.h
@@ -0,0 +1,286 @@
+/*-------------------------------------------------------------------------
+ *
+ * generic-gcc.h
+ * Atomic operations, implemented using gcc (or compatible) intrinsics.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * Documentation:
+ * * Legacy __sync Built-in Functions for Atomic Memory Access
+ * https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
+ * * Built-in functions for memory model aware atomic operations
+ * https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
+ *
+ * src/include/port/atomics/generic-gcc.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* intentionally no include guards, should only be included by atomics.h */
+#ifndef INSIDE_ATOMICS_H
+#error "should be included via atomics.h"
+#endif
+
+/*
+ * An empty asm block should be a sufficient compiler barrier.
+ */
+#define pg_compiler_barrier_impl() __asm__ __volatile__("" ::: "memory")
+
+/*
+ * If we're on GCC 4.1.0 or higher, we should be able to get a memory barrier
+ * out of this compiler built-in. But we prefer to rely on platform specific
+ * definitions where possible, and use this only as a fallback.
+ */
+#if !defined(pg_memory_barrier_impl)
+# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+# define pg_memory_barrier_impl() __sync_synchronize()
+# endif
+#endif /* !defined(pg_memory_barrier_impl) */
+
+#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
+/* acquire semantics include read barrier semantics */
+# define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#endif
+
+#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
+/* release semantics include write barrier semantics */
+# define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
+#endif
+
+
+#ifdef HAVE_ATOMICS
+
+/* generic gcc based atomic flag implementation */
+#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
+ && (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))
+
+#define PG_HAVE_ATOMIC_FLAG_SUPPORT
+typedef struct pg_atomic_flag
+{
+ /*
+ * If we have a choice, use int-width TAS, because that is more efficient
+ * and/or more reliably implemented on most non-Intel platforms. (Note
+ * that this code isn't used on x86[_64]; see arch-x86.h for that.)
+ */
+#ifdef HAVE_GCC__SYNC_INT32_TAS
+ volatile int value;
+#else
+ volatile char value;
+#endif
+} pg_atomic_flag;
+
+#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
+
+/* generic gcc based atomic uint32 implementation */
+#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
+ && (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
+
+/* generic gcc based atomic uint64 implementation */
+#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
+ && !defined(PG_DISABLE_64_BIT_ATOMICS) \
+ && (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))
+
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+
+typedef struct pg_atomic_uint64
+{
+ volatile uint64 value pg_attribute_aligned(8);
+} pg_atomic_uint64;
+
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
+
+#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT
+
+#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)
+
+#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
+#define PG_HAVE_ATOMIC_TEST_SET_FLAG
+static inline bool
+pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ /* NB: only an acquire barrier, not a full one */
+ /* some platforms only support storing a 1 here */
+ return __sync_lock_test_and_set(&ptr->value, 1) == 0;
+}
+#endif
+
+#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
+
+#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
+#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
+static inline bool
+pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ return ptr->value == 0;
+}
+#endif
+
+#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
+#define PG_HAVE_ATOMIC_CLEAR_FLAG
+static inline void
+pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ __sync_lock_release(&ptr->value);
+}
+#endif
+
+#ifndef PG_HAVE_ATOMIC_INIT_FLAG
+#define PG_HAVE_ATOMIC_INIT_FLAG
+static inline void
+pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ pg_atomic_clear_flag_impl(ptr);
+}
+#endif
+
+#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
+
+/* prefer __atomic, it has a better API */
+#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ /* FIXME: we can probably use a lower consistency model */
+ return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ bool ret;
+ uint32 current;
+ current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+#endif
+
+/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ return __sync_fetch_and_add(&ptr->value, add_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
+#define PG_HAVE_ATOMIC_FETCH_SUB_U32
+static inline uint32
+pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
+{
+ return __sync_fetch_and_sub(&ptr->value, sub_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
+#define PG_HAVE_ATOMIC_FETCH_AND_U32
+static inline uint32
+pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
+{
+ return __sync_fetch_and_and(&ptr->value, and_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
+#define PG_HAVE_ATOMIC_FETCH_OR_U32
+static inline uint32
+pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
+{
+ return __sync_fetch_and_or(&ptr->value, or_);
+}
+#endif
+
+
+#if !defined(PG_DISABLE_64_BIT_ATOMICS)
+
+#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ bool ret;
+ uint64 current;
+ current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+#endif
+
+/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ return __sync_fetch_and_add(&ptr->value, add_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
+#define PG_HAVE_ATOMIC_FETCH_SUB_U64
+static inline uint64
+pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
+{
+ return __sync_fetch_and_sub(&ptr->value, sub_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
+#define PG_HAVE_ATOMIC_FETCH_AND_U64
+static inline uint64
+pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
+{
+ return __sync_fetch_and_and(&ptr->value, and_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
+#define PG_HAVE_ATOMIC_FETCH_OR_U64
+static inline uint64
+pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
+{
+ return __sync_fetch_and_or(&ptr->value, or_);
+}
+#endif
+
+#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
+
+#endif /* defined(HAVE_ATOMICS) */
diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h
new file mode 100644
index 0000000..f535b45
--- /dev/null
+++ b/src/include/port/atomics/generic-msvc.h
@@ -0,0 +1,101 @@
+/*-------------------------------------------------------------------------
+ *
+ * generic-msvc.h
+ * Atomic operations support when using MSVC
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES:
+ *
+ * Documentation:
+ * * Interlocked Variable Access
+ * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
+ *
+ * src/include/port/atomics/generic-msvc.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <intrin.h>
+
+/* intentionally no include guards, should only be included by atomics.h */
+#ifndef INSIDE_ATOMICS_H
+#error "should be included via atomics.h"
+#endif
+
+#pragma intrinsic(_ReadWriteBarrier)
+#define pg_compiler_barrier_impl() _ReadWriteBarrier()
+
+#ifndef pg_memory_barrier_impl
+#define pg_memory_barrier_impl() MemoryBarrier()
+#endif
+
+#if defined(HAVE_ATOMICS)
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct __declspec(align(8)) pg_atomic_uint64
+{
+ volatile uint64 value;
+} pg_atomic_uint64;
+
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ bool ret;
+ uint32 current;
+ current = InterlockedCompareExchange(&ptr->value, newval, *expected);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ return InterlockedExchangeAdd(&ptr->value, add_);
+}
+
+/*
+ * The non-intrinsic versions are only available from Vista upwards, so use
+ * the intrinsic version. The intrinsic is only supported on processors newer
+ * than the 486, but we require XP as a minimum baseline, and XP doesn't
+ * support the 486, so we don't need a check for that case.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ bool ret;
+ uint64 current;
+ current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+/* Only implemented on Itanium and 64-bit builds */
+#ifdef _WIN64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ return _InterlockedExchangeAdd64(&ptr->value, add_);
+}
+#endif /* _WIN64 */
+
+#endif /* HAVE_ATOMICS */
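
Note the argument order: InterlockedCompareExchange takes (dest, newval, expected), the reverse of GCC's __sync_val_compare_and_swap(ptr, expected, newval), which the wrappers above account for. A hedged self-check of the resulting semantics, assuming Assert from PostgreSQL's c.h (the function is illustrative):

static void
check_cas_semantics(void)
{
	pg_atomic_uint32 v;
	uint32		expected = 7;

	v.value = 7;
	/* succeeds: value was 7, becomes 9; expected is left at 7 */
	Assert(pg_atomic_compare_exchange_u32_impl(&v, &expected, 9));
	/* fails: value is 9, not 7; expected is overwritten with 9 */
	Assert(!pg_atomic_compare_exchange_u32_impl(&v, &expected, 11));
	Assert(expected == 9 && v.value == 9);
}
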
diff --git a/src/include/port/atomics/generic-sunpro.h b/src/include/port/atomics/generic-sunpro.h
new file mode 100644
index 0000000..7196021
--- /dev/null
+++ b/src/include/port/atomics/generic-sunpro.h
@@ -0,0 +1,106 @@
+/*-------------------------------------------------------------------------
+ *
+ * generic-sunpro.h
+ * Atomic operations for Solaris' CC
+ *
+ * Portions Copyright (c) 2013-2021, PostgreSQL Global Development Group
+ *
+ * NOTES:
+ *
+ * Documentation:
+ * * manpage for atomic_cas(3C)
+ * http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
+ * http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
+ *
+ * src/include/port/atomics/generic-sunpro.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#if defined(HAVE_ATOMICS)
+
+#ifdef HAVE_MBARRIER_H
+#include <mbarrier.h>
+
+#define pg_compiler_barrier_impl() __compiler_barrier()
+
+#ifndef pg_memory_barrier_impl
+/*
+ * Despite the name this is actually a full barrier: it expands to mfence on
+ * x86 and to membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on
+ * SPARC.
+ */
+# define pg_memory_barrier_impl() __machine_rw_barrier()
+#endif
+#ifndef pg_read_barrier_impl
+# define pg_read_barrier_impl() __machine_r_barrier()
+#endif
+#ifndef pg_write_barrier_impl
+# define pg_write_barrier_impl() __machine_w_barrier()
+#endif
+
+#endif /* HAVE_MBARRIER_H */
+
+/* Older versions of the compiler don't have atomic.h... */
+#ifdef HAVE_ATOMIC_H
+
+#include <atomic.h>
+
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+ volatile uint32 value;
+} pg_atomic_uint32;
+
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+ /*
+ * Syntax to enforce variable alignment should be supported by versions
+ * supporting atomic.h, but it's hard to find accurate documentation. If
+ * it proves to be a problem, we'll have to add more version checks for 64
+ * bit support.
+ */
+ volatile uint64 value pg_attribute_aligned(8);
+} pg_atomic_uint64;
+
+#endif /* HAVE_ATOMIC_H */
+
+#endif /* defined(HAVE_ATOMICS) */
+
+
+#if defined(HAVE_ATOMICS)
+
+#ifdef HAVE_ATOMIC_H
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
+static inline bool
+pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
+ uint32 *expected, uint32 newval)
+{
+ bool ret;
+ uint32 current;
+
+ current = atomic_cas_32(&ptr->value, *expected, newval);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
+static inline bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+ uint64 *expected, uint64 newval)
+{
+ bool ret;
+ uint64 current;
+
+ current = atomic_cas_64(&ptr->value, *expected, newval);
+ ret = current == *expected;
+ *expected = current;
+ return ret;
+}
+
+#endif /* HAVE_ATOMIC_H */
+
+#endif /* defined(HAVE_ATOMICS) */
diff --git a/src/include/port/atomics/generic.h b/src/include/port/atomics/generic.h
new file mode 100644
index 0000000..6d094dc
--- /dev/null
+++ b/src/include/port/atomics/generic.h
@@ -0,0 +1,401 @@
+/*-------------------------------------------------------------------------
+ *
+ * generic.h
+ * Implement higher level operations based on some lower level atomic
+ * operations.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/atomics/generic.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* intentionally no include guards, should only be included by atomics.h */
+#ifndef INSIDE_ATOMICS_H
+# error "should be included via atomics.h"
+#endif
+
+/*
+ * If read or write barriers are undefined, we upgrade them to full memory
+ * barriers.
+ */
+#if !defined(pg_read_barrier_impl)
+# define pg_read_barrier_impl pg_memory_barrier_impl
+#endif
+#if !defined(pg_write_barrier_impl)
+# define pg_write_barrier_impl pg_memory_barrier_impl
+#endif
+
+#ifndef PG_HAVE_SPIN_DELAY
+#define PG_HAVE_SPIN_DELAY
+#define pg_spin_delay_impl() ((void)0)
+#endif
+
+
+/* provide a fallback pg_atomic_flag based on pg_atomic_uint32 */
+#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && defined(PG_HAVE_ATOMIC_U32_SUPPORT)
+#define PG_HAVE_ATOMIC_FLAG_SUPPORT
+typedef pg_atomic_uint32 pg_atomic_flag;
+#endif
+
+#ifndef PG_HAVE_ATOMIC_READ_U32
+#define PG_HAVE_ATOMIC_READ_U32
+static inline uint32
+pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
+{
+ return ptr->value;
+}
+#endif
+
+#ifndef PG_HAVE_ATOMIC_WRITE_U32
+#define PG_HAVE_ATOMIC_WRITE_U32
+static inline void
+pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ ptr->value = val;
+}
+#endif
+
+#ifndef PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
+#define PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
+static inline void
+pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ ptr->value = val;
+}
+#endif
+
+/*
+ * provide fallback for test_and_set using atomic_exchange if available
+ */
+#if !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)
+
+#define PG_HAVE_ATOMIC_INIT_FLAG
+static inline void
+pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ pg_atomic_write_u32_impl(ptr, 0);
+}
+
+#define PG_HAVE_ATOMIC_TEST_SET_FLAG
+static inline bool
+pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ return pg_atomic_exchange_u32_impl(ptr, 1) == 0;
+}
+
+#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
+static inline bool
+pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ return pg_atomic_read_u32_impl(ptr) == 0;
+}
+
+
+#define PG_HAVE_ATOMIC_CLEAR_FLAG
+static inline void
+pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ /* XXX: release semantics suffice? */
+ pg_memory_barrier_impl();
+ pg_atomic_write_u32_impl(ptr, 0);
+}
+
+/*
+ * provide fallback for test_and_set using atomic_compare_exchange if
+ * available.
+ */
+#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+
+#define PG_HAVE_ATOMIC_INIT_FLAG
+static inline void
+pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ pg_atomic_write_u32_impl(ptr, 0);
+}
+
+#define PG_HAVE_ATOMIC_TEST_SET_FLAG
+static inline bool
+pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ uint32 value = 0;
+ return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
+}
+
+#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
+static inline bool
+pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ return pg_atomic_read_u32_impl(ptr) == 0;
+}
+
+#define PG_HAVE_ATOMIC_CLEAR_FLAG
+static inline void
+pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
+{
+ /*
+ * Use a memory barrier + plain write if we have a native memory
+ * barrier. But don't do so if memory barriers use spinlocks - that'd lead
+ * to circularity if flags are used to implement spinlocks.
+ */
+#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
+ /* XXX: release semantics suffice? */
+ pg_memory_barrier_impl();
+ pg_atomic_write_u32_impl(ptr, 0);
+#else
+ uint32 value = 1;
+ pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
+#endif
+}
+
+#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
+# error "No pg_atomic_test_and_set provided"
+#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
+
+
+#ifndef PG_HAVE_ATOMIC_INIT_U32
+#define PG_HAVE_ATOMIC_INIT_U32
+static inline void
+pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
+{
+ ptr->value = val_;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_EXCHANGE_U32
+static inline uint32
+pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
+{
+ uint32 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ uint32 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_FETCH_SUB_U32
+static inline uint32
+pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
+{
+ return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_FETCH_AND_U32
+static inline uint32
+pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
+{
+ uint32 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_FETCH_OR_U32
+static inline uint32
+pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
+{
+ uint32 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
+#define PG_HAVE_ATOMIC_ADD_FETCH_U32
+static inline uint32
+pg_atomic_add_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+ return pg_atomic_fetch_add_u32_impl(ptr, add_) + add_;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U32)
+#define PG_HAVE_ATOMIC_SUB_FETCH_U32
+static inline uint32
+pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
+{
+ return pg_atomic_fetch_sub_u32_impl(ptr, sub_) - sub_;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_EXCHANGE_U64
+static inline uint64
+pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
+{
+ uint64 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#ifndef PG_HAVE_ATOMIC_WRITE_U64
+#define PG_HAVE_ATOMIC_WRITE_U64
+
+#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
+ !defined(PG_HAVE_ATOMIC_U64_SIMULATION)
+
+static inline void
+pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
+ /*
+ * On this platform aligned 64-bit writes are guaranteed to be atomic,
+ * except when using the fallback implementation, where we can't guarantee
+ * the required alignment.
+ */
+ AssertPointerAlignment(ptr, 8);
+ ptr->value = val;
+}
+
+#else
+
+static inline void
+pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
+ /*
+ * 64-bit writes aren't safe on all platforms, so the generic
+ * implementation performs them as an atomic exchange.
+ */
+ pg_atomic_exchange_u64_impl(ptr, val);
+}
+
+#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_WRITE_U64 */
+
+#ifndef PG_HAVE_ATOMIC_READ_U64
+#define PG_HAVE_ATOMIC_READ_U64
+
+#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
+ !defined(PG_HAVE_ATOMIC_U64_SIMULATION)
+
+static inline uint64
+pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
+{
+ /*
+ * On this platform aligned 64-bit reads are guaranteed to be atomic.
+ */
+ AssertPointerAlignment(ptr, 8);
+ return ptr->value;
+}
+
+#else
+
+static inline uint64
+pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
+{
+ uint64 old = 0;
+
+ /*
+ * 64-bit reads aren't atomic on all platforms, so the generic
+ * implementation performs them as a compare/exchange with 0. That will
+ * fail or succeed, but always returns the old value. It might store a 0,
+ * but only if the previous value was also 0 - i.e. harmless.
+ */
+ pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
+
+ return old;
+}
+#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_READ_U64 */
+
+#ifndef PG_HAVE_ATOMIC_INIT_U64
+#define PG_HAVE_ATOMIC_INIT_U64
+static inline void
+pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
+{
+ ptr->value = val_;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ uint64 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_FETCH_SUB_U64
+static inline uint64
+pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
+{
+ return pg_atomic_fetch_add_u64_impl(ptr, -sub_);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_FETCH_AND_U64
+static inline uint64
+pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
+{
+ uint64 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_FETCH_OR_U64
+static inline uint64
+pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
+{
+ uint64 old;
+ old = ptr->value; /* ok if read is not atomic */
+ while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
+ /* skip */;
+ return old;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
+#define PG_HAVE_ATOMIC_ADD_FETCH_U64
+static inline uint64
+pg_atomic_add_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+ return pg_atomic_fetch_add_u64_impl(ptr, add_) + add_;
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U64)
+#define PG_HAVE_ATOMIC_SUB_FETCH_U64
+static inline uint64
+pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
+{
+ return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
+}
+#endif
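
With the fallbacks above, a platform only needs to supply a 32-bit compare-exchange to obtain the full u32 operation set. A hedged sketch of the kind of lock-free pattern this supports, written against the public wrappers in atomics.h (counter_increment_below is illustrative):

/* Increment a shared counter, but never past 'limit'. */
static bool
counter_increment_below(volatile pg_atomic_uint32 *counter, uint32 limit)
{
	uint32		old = pg_atomic_read_u32(counter);

	while (old < limit)
	{
		if (pg_atomic_compare_exchange_u32(counter, &old, old + 1))
			return true;		/* we won the race */
		/* 'old' was refreshed by the failed CAS; re-check the limit */
	}
	return false;				/* counter already at or above the limit */
}
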
diff --git a/src/include/port/cygwin.h b/src/include/port/cygwin.h
new file mode 100644
index 0000000..64d6993
--- /dev/null
+++ b/src/include/port/cygwin.h
@@ -0,0 +1,17 @@
+/* src/include/port/cygwin.h */
+
+#ifdef BUILDING_DLL
+#define PGDLLIMPORT __declspec (dllexport)
+#else
+#define PGDLLIMPORT __declspec (dllimport)
+#endif
+
+#define PGDLLEXPORT
+
+/*
+ * Cygwin has a strtof() which is literally just (float)strtod(), which means
+ * we get misrounding _and_ silent over/underflow. Using our wrapper doesn't
+ * fix the misrounding but does fix the error checks, which cuts down on the
+ * number of test variant files needed.
+ */
+#define HAVE_BUGGY_STRTOF 1
diff --git a/src/include/port/darwin.h b/src/include/port/darwin.h
new file mode 100644
index 0000000..15fb69d
--- /dev/null
+++ b/src/include/port/darwin.h
@@ -0,0 +1,8 @@
+/* src/include/port/darwin.h */
+
+#define __darwin__ 1
+
+#if HAVE_DECL_F_FULLFSYNC /* not present before macOS 10.3 */
+#define HAVE_FSYNC_WRITETHROUGH
+
+#endif
diff --git a/src/include/port/freebsd.h b/src/include/port/freebsd.h
new file mode 100644
index 0000000..2e2e749
--- /dev/null
+++ b/src/include/port/freebsd.h
@@ -0,0 +1,10 @@
+/* src/include/port/freebsd.h */
+
+/*
+ * Set the default wal_sync_method to fdatasync. xlogdefs.h's normal rules
+ * would prefer open_datasync on FreeBSD 13+, but that is not a good choice on
+ * many systems.
+ */
+#ifdef HAVE_FDATASYNC
+#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+#endif
diff --git a/src/include/port/hpux.h b/src/include/port/hpux.h
new file mode 100644
index 0000000..4d1dcea
--- /dev/null
+++ b/src/include/port/hpux.h
@@ -0,0 +1,3 @@
+/* src/include/port/hpux.h */
+
+/* nothing needed */
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
new file mode 100644
index 0000000..7a6e46c
--- /dev/null
+++ b/src/include/port/linux.h
@@ -0,0 +1,22 @@
+/* src/include/port/linux.h */
+
+/*
+ * As of July 2007, all known versions of the Linux kernel will sometimes
+ * return EIDRM for a shmctl() operation when EINVAL is correct (it happens
+ * when the low-order 15 bits of the supplied shm ID match the slot number
+ * assigned to a newer shmem segment). We deal with this by assuming that
+ * EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
+ * since in fact Linux has no excuse for ever returning EIDRM; it doesn't
+ * track removed segments in a way that would allow distinguishing them from
+ * private ones. But someday that code might get upgraded, and we'd have
+ * to have a kernel version test here.
+ */
+#define HAVE_LINUX_EIDRM_BUG
+
+/*
+ * Set the default wal_sync_method to fdatasync. With recent Linux versions,
+ * xlogdefs.h's normal rules will prefer open_datasync, which (a) doesn't
+ * perform better and (b) causes outright failures on ext4 data=journal
+ * filesystems, because those don't support O_DIRECT.
+ */
+#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
diff --git a/src/include/port/netbsd.h b/src/include/port/netbsd.h
new file mode 100644
index 0000000..590233f
--- /dev/null
+++ b/src/include/port/netbsd.h
@@ -0,0 +1 @@
+/* src/include/port/netbsd.h */
diff --git a/src/include/port/openbsd.h b/src/include/port/openbsd.h
new file mode 100644
index 0000000..395319b
--- /dev/null
+++ b/src/include/port/openbsd.h
@@ -0,0 +1 @@
+/* src/include/port/openbsd.h */
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
new file mode 100644
index 0000000..086bd08
--- /dev/null
+++ b/src/include/port/pg_bitutils.h
@@ -0,0 +1,272 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_bitutils.h
+ * Miscellaneous functions for bit-wise operations.
+ *
+ *
+ * Copyright (c) 2019-2021, PostgreSQL Global Development Group
+ *
+ * src/include/port/pg_bitutils.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_BITUTILS_H
+#define PG_BITUTILS_H
+
+#ifndef FRONTEND
+extern PGDLLIMPORT const uint8 pg_leftmost_one_pos[256];
+extern PGDLLIMPORT const uint8 pg_rightmost_one_pos[256];
+extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
+#else
+extern const uint8 pg_leftmost_one_pos[256];
+extern const uint8 pg_rightmost_one_pos[256];
+extern const uint8 pg_number_of_ones[256];
+#endif
+
+/*
+ * pg_leftmost_one_pos32
+ * Returns the position of the most significant set bit in "word",
+ * measured from the least significant bit. word must not be 0.
+ */
+static inline int
+pg_leftmost_one_pos32(uint32 word)
+{
+#ifdef HAVE__BUILTIN_CLZ
+ Assert(word != 0);
+
+ return 31 - __builtin_clz(word);
+#else
+ int shift = 32 - 8;
+
+ Assert(word != 0);
+
+ while ((word >> shift) == 0)
+ shift -= 8;
+
+ return shift + pg_leftmost_one_pos[(word >> shift) & 255];
+#endif /* HAVE__BUILTIN_CLZ */
+}
+
+/*
+ * pg_leftmost_one_pos64
+ * As above, but for a 64-bit word.
+ */
+static inline int
+pg_leftmost_one_pos64(uint64 word)
+{
+#ifdef HAVE__BUILTIN_CLZ
+ Assert(word != 0);
+
+#if defined(HAVE_LONG_INT_64)
+ return 63 - __builtin_clzl(word);
+#elif defined(HAVE_LONG_LONG_INT_64)
+ return 63 - __builtin_clzll(word);
+#else
+#error must have a working 64-bit integer datatype
+#endif
+#else /* !HAVE__BUILTIN_CLZ */
+ int shift = 64 - 8;
+
+ Assert(word != 0);
+
+ while ((word >> shift) == 0)
+ shift -= 8;
+
+ return shift + pg_leftmost_one_pos[(word >> shift) & 255];
+#endif /* HAVE__BUILTIN_CLZ */
+}
+
+/*
+ * pg_rightmost_one_pos32
+ * Returns the position of the least significant set bit in "word",
+ * measured from the least significant bit. word must not be 0.
+ */
+static inline int
+pg_rightmost_one_pos32(uint32 word)
+{
+#ifdef HAVE__BUILTIN_CTZ
+ Assert(word != 0);
+
+ return __builtin_ctz(word);
+#else
+ int result = 0;
+
+ Assert(word != 0);
+
+ while ((word & 255) == 0)
+ {
+ word >>= 8;
+ result += 8;
+ }
+ result += pg_rightmost_one_pos[word & 255];
+ return result;
+#endif /* HAVE__BUILTIN_CTZ */
+}
+
+/*
+ * pg_rightmost_one_pos64
+ * As above, but for a 64-bit word.
+ */
+static inline int
+pg_rightmost_one_pos64(uint64 word)
+{
+#ifdef HAVE__BUILTIN_CTZ
+ Assert(word != 0);
+
+#if defined(HAVE_LONG_INT_64)
+ return __builtin_ctzl(word);
+#elif defined(HAVE_LONG_LONG_INT_64)
+ return __builtin_ctzll(word);
+#else
+#error must have a working 64-bit integer datatype
+#endif
+#else /* !HAVE__BUILTIN_CTZ */
+ int result = 0;
+
+ Assert(word != 0);
+
+ while ((word & 255) == 0)
+ {
+ word >>= 8;
+ result += 8;
+ }
+ result += pg_rightmost_one_pos[word & 255];
+ return result;
+#endif /* HAVE__BUILTIN_CTZ */
+}
+
+/*
+ * pg_nextpower2_32
+ * Returns the next higher power of 2 above 'num', or 'num' if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT32_MAX / 2 + 1.
+ */
+static inline uint32
+pg_nextpower2_32(uint32 num)
+{
+ Assert(num > 0 && num <= PG_UINT32_MAX / 2 + 1);
+
+ /*
+ * A power-of-2 number has only 1 bit set. Subtracting 1 from such a
+ * number turns on all the lower-order bits, so num and num-1 have no
+ * bits in common.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already a power of 2 */
+
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num) + 1);
+}
+
+/*
+ * pg_nextpower2_64
+ * Returns the next higher power of 2 above 'num', or 'num' if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT64_MAX / 2 + 1.
+ */
+static inline uint64
+pg_nextpower2_64(uint64 num)
+{
+ Assert(num > 0 && num <= PG_UINT64_MAX / 2 + 1);
+
+ /*
+ * A power-of-2 number has only 1 bit set. Subtracting 1 from such a
+ * number turns on all the lower-order bits, so num and num-1 have no
+ * bits in common.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already a power of 2 */
+
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num) + 1);
+}
+
+/*
+ * pg_nextpower2_size_t
+ * Returns the next higher power of 2 above 'num', for a size_t input.
+ */
+#if SIZEOF_SIZE_T == 4
+#define pg_nextpower2_size_t(num) pg_nextpower2_32(num)
+#else
+#define pg_nextpower2_size_t(num) pg_nextpower2_64(num)
+#endif
+
+/*
+ * pg_prevpower2_32
+ * Returns the next lower power of 2 below 'num', or 'num' if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0.
+ */
+static inline uint32
+pg_prevpower2_32(uint32 num)
+{
+ return ((uint32) 1) << pg_leftmost_one_pos32(num);
+}
+
+/*
+ * pg_prevpower2_64
+ * Returns the next lower power of 2 below 'num', or 'num' if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0.
+ */
+static inline uint64
+pg_prevpower2_64(uint64 num)
+{
+ return ((uint64) 1) << pg_leftmost_one_pos64(num);
+}
+
+/*
+ * pg_prevpower2_size_t
+ * Returns the next lower power of 2 below 'num', for a size_t input.
+ */
+#if SIZEOF_SIZE_T == 4
+#define pg_prevpower2_size_t(num) pg_prevpower2_32(num)
+#else
+#define pg_prevpower2_size_t(num) pg_prevpower2_64(num)
+#endif
+
+/*
+ * pg_ceil_log2_32
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint32
+pg_ceil_log2_32(uint32 num)
+{
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos32(num - 1) + 1;
+}
+
+/*
+ * pg_ceil_log2_64
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint64
+pg_ceil_log2_64(uint64 num)
+{
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos64(num - 1) + 1;
+}
+
+/* Count the number of one-bits in a uint32 or uint64 */
+extern int (*pg_popcount32) (uint32 word);
+extern int (*pg_popcount64) (uint64 word);
+
+/* Count the number of one-bits in a byte array */
+extern uint64 pg_popcount(const char *buf, int bytes);
+
+/*
+ * Rotate the bits of "word" to the right by n bits.
+ */
+static inline uint32
+pg_rotate_right32(uint32 word, int n)
+{
+ return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
+}
+
+#endif /* PG_BITUTILS_H */
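
A hedged sketch tying the helpers above together: choosing a power-of-2 size for a requested capacity. choose_table_size() is illustrative, not part of the header; Assert is from PostgreSQL's c.h.

static uint32
choose_table_size(uint32 nitems)
{
	uint32		size;

	/* caller must keep nitems <= PG_UINT32_MAX / 2 + 1 */
	if (nitems < 2)
		return 2;				/* pg_nextpower2_32 requires num > 0 */

	size = pg_nextpower2_32(nitems);	/* e.g. 1000 -> 1024, 512 -> 512 */

	/* equivalently, 2 raised to ceil(log2(nitems)) */
	Assert(size == ((uint32) 1) << pg_ceil_log2_32(nitems));

	return size;
}
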
diff --git a/src/include/port/pg_bswap.h b/src/include/port/pg_bswap.h
new file mode 100644
index 0000000..6dfa775
--- /dev/null
+++ b/src/include/port/pg_bswap.h
@@ -0,0 +1,161 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_bswap.h
+ * Byte swapping.
+ *
+ * Macros for reversing the byte order of 16, 32 and 64-bit unsigned integers.
+ * For example, 0xAABBCCDD becomes 0xDDCCBBAA. These are just wrappers for
+ * built-in functions provided by the compiler where support exists.
+ *
+ * Note that all of these functions accept unsigned integers as arguments and
+ * return the same. Use caution when using these wrapper macros with signed
+ * integers.
+ *
+ * Copyright (c) 2015-2021, PostgreSQL Global Development Group
+ *
+ * src/include/port/pg_bswap.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_BSWAP_H
+#define PG_BSWAP_H
+
+
+/*
+ * In all supported versions, MSVC provides _byteswap_* functions in
+ * stdlib.h, which is already included by c.h.
+ */
+
+
+/* implementation of uint16 pg_bswap16(uint16) */
+#if defined(HAVE__BUILTIN_BSWAP16)
+
+#define pg_bswap16(x) __builtin_bswap16(x)
+
+#elif defined(_MSC_VER)
+
+#define pg_bswap16(x) _byteswap_ushort(x)
+
+#else
+
+static inline uint16
+pg_bswap16(uint16 x)
+{
+ return
+ ((x << 8) & 0xff00) |
+ ((x >> 8) & 0x00ff);
+}
+
+#endif /* HAVE__BUILTIN_BSWAP16 */
+
+
+/* implementation of uint32 pg_bswap32(uint32) */
+#if defined(HAVE__BUILTIN_BSWAP32)
+
+#define pg_bswap32(x) __builtin_bswap32(x)
+
+#elif defined(_MSC_VER)
+
+#define pg_bswap32(x) _byteswap_ulong(x)
+
+#else
+
+static inline uint32
+pg_bswap32(uint32 x)
+{
+ return
+ ((x << 24) & 0xff000000) |
+ ((x << 8) & 0x00ff0000) |
+ ((x >> 8) & 0x0000ff00) |
+ ((x >> 24) & 0x000000ff);
+}
+
+#endif /* HAVE__BUILTIN_BSWAP32 */
+
+
+/* implementation of uint64 pg_bswap64(uint64) */
+#if defined(HAVE__BUILTIN_BSWAP64)
+
+#define pg_bswap64(x) __builtin_bswap64(x)
+
+
+#elif defined(_MSC_VER)
+
+#define pg_bswap64(x) _byteswap_uint64(x)
+
+#else
+
+static inline uint64
+pg_bswap64(uint64 x)
+{
+ return
+ ((x << 56) & UINT64CONST(0xff00000000000000)) |
+ ((x << 40) & UINT64CONST(0x00ff000000000000)) |
+ ((x << 24) & UINT64CONST(0x0000ff0000000000)) |
+ ((x << 8) & UINT64CONST(0x000000ff00000000)) |
+ ((x >> 8) & UINT64CONST(0x00000000ff000000)) |
+ ((x >> 24) & UINT64CONST(0x0000000000ff0000)) |
+ ((x >> 40) & UINT64CONST(0x000000000000ff00)) |
+ ((x >> 56) & UINT64CONST(0x00000000000000ff));
+}
+#endif /* HAVE__BUILTIN_BSWAP64 */
+
+
+/*
+ * Portable and fast equivalents for ntohs, ntohl, htons, htonl,
+ * additionally extended to 64 bits.
+ */
+#ifdef WORDS_BIGENDIAN
+
+#define pg_hton16(x) (x)
+#define pg_hton32(x) (x)
+#define pg_hton64(x) (x)
+
+#define pg_ntoh16(x) (x)
+#define pg_ntoh32(x) (x)
+#define pg_ntoh64(x) (x)
+
+#else
+
+#define pg_hton16(x) pg_bswap16(x)
+#define pg_hton32(x) pg_bswap32(x)
+#define pg_hton64(x) pg_bswap64(x)
+
+#define pg_ntoh16(x) pg_bswap16(x)
+#define pg_ntoh32(x) pg_bswap32(x)
+#define pg_ntoh64(x) pg_bswap64(x)
+
+#endif /* WORDS_BIGENDIAN */
+
+
+/*
+ * Rearrange the bytes of a Datum from big-endian order into the native byte
+ * order. On big-endian machines, this does nothing at all. Note that the C
+ * type Datum is an unsigned integer type on all platforms.
+ *
+ * One possible application of the DatumBigEndianToNative() macro is to make
+ * bitwise comparisons cheaper. A simple 3-way comparison of Datums
+ * transformed by the macro (based on native, unsigned comparisons) will return
+ * the same result as a memcmp() of the corresponding original Datums, but can
+ * be much cheaper. It's generally safe to do this on big-endian systems
+ * without any special transformation occurring first.
+ *
+ * If SIZEOF_DATUM is not defined, then postgres.h wasn't included and these
+ * macros probably shouldn't be used, so we define nothing. Note that
+ * SIZEOF_DATUM == 8 would evaluate as 0 == 8 in that case, potentially
+ * leading to the wrong implementation being selected and confusing errors, so
+ * defining nothing is safest.
+ */
+#ifdef SIZEOF_DATUM
+#ifdef WORDS_BIGENDIAN
+#define DatumBigEndianToNative(x) (x)
+#else /* !WORDS_BIGENDIAN */
+#if SIZEOF_DATUM == 8
+#define DatumBigEndianToNative(x) pg_bswap64(x)
+#else /* SIZEOF_DATUM != 8 */
+#define DatumBigEndianToNative(x) pg_bswap32(x)
+#endif /* SIZEOF_DATUM == 8 */
+#endif /* WORDS_BIGENDIAN */
+#endif /* SIZEOF_DATUM */
+
+#endif /* PG_BSWAP_H */
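
A hedged usage sketch: emitting a 32-bit length word in network byte order, the typical protocol use of these macros (memcpy from <string.h>; put_msg_length is illustrative):

static void
put_msg_length(char *buf, uint32 len)
{
	uint32		n32 = pg_hton32(len);	/* byte-swapped on little-endian hosts */

	memcpy(buf, &n32, sizeof(n32));
}
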
diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h
new file mode 100644
index 0000000..f3c4107
--- /dev/null
+++ b/src/include/port/pg_crc32c.h
@@ -0,0 +1,101 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_crc32c.h
+ * Routines for computing CRC-32C checksums.
+ *
+ * The speed of CRC-32C calculation has a big impact on performance, so we
+ * jump through some hoops to get the best implementation for each
+ * platform. Some CPU architectures have special instructions for speeding
+ * up CRC calculations (e.g. Intel SSE 4.2), on other platforms we use the
+ * Slicing-by-8 algorithm which uses lookup tables.
+ *
+ * The public interface consists of four macros:
+ *
+ * INIT_CRC32C(crc)
+ * Initialize a CRC accumulator
+ *
+ * COMP_CRC32C(crc, data, len)
+ * Accumulate some (more) bytes into a CRC
+ *
+ * FIN_CRC32C(crc)
+ * Finish a CRC calculation
+ *
+ * EQ_CRC32C(c1, c2)
+ * Check for equality of two CRCs.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/pg_crc32c.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_CRC32C_H
+#define PG_CRC32C_H
+
+#include "port/pg_bswap.h"
+
+typedef uint32 pg_crc32c;
+
+/* The INIT and EQ macros are the same for all implementations. */
+#define INIT_CRC32C(crc) ((crc) = 0xFFFFFFFF)
+#define EQ_CRC32C(c1, c2) ((c1) == (c2))
+
+#if defined(USE_SSE42_CRC32C)
+/* Use Intel SSE4.2 instructions. */
+#define COMP_CRC32C(crc, data, len) \
+ ((crc) = pg_comp_crc32c_sse42((crc), (data), (len)))
+#define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF)
+
+extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len);
+
+#elif defined(USE_ARMV8_CRC32C)
+/* Use ARMv8 CRC Extension instructions. */
+
+#define COMP_CRC32C(crc, data, len) \
+ ((crc) = pg_comp_crc32c_armv8((crc), (data), (len)))
+#define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF)
+
+extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len);
+
+#elif defined(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK) || defined(USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK)
+
+/*
+ * Use Intel SSE 4.2 or ARMv8 instructions, but perform a runtime check first
+ * to check that they are available.
+ */
+#define COMP_CRC32C(crc, data, len) \
+ ((crc) = pg_comp_crc32c((crc), (data), (len)))
+#define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF)
+
+extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len);
+extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len);
+
+#ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK
+extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len);
+#endif
+#ifdef USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK
+extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len);
+#endif
+
+#else
+/*
+ * Use slicing-by-8 algorithm.
+ *
+ * On big-endian systems, the intermediate value is kept in reverse byte
+ * order, to avoid byte-swapping during the calculation. FIN_CRC32C reverses
+ * the bytes to the final order.
+ */
+#define COMP_CRC32C(crc, data, len) \
+ ((crc) = pg_comp_crc32c_sb8((crc), (data), (len)))
+#ifdef WORDS_BIGENDIAN
+#define FIN_CRC32C(crc) ((crc) = pg_bswap32(crc) ^ 0xFFFFFFFF)
+#else
+#define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF)
+#endif
+
+extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len);
+
+#endif
+
+#endif /* PG_CRC32C_H */
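
A hedged sketch of the four-macro interface in use; COMP_CRC32C may be called any number of times between INIT and FIN, so data can be checksummed in chunks:

static pg_crc32c
compute_crc(const void *data, size_t len)
{
	pg_crc32c	crc;

	INIT_CRC32C(crc);			/* crc = 0xFFFFFFFF */
	COMP_CRC32C(crc, data, len);	/* repeatable for further chunks */
	FIN_CRC32C(crc);			/* final xor (plus byte swap if needed) */
	return crc;
}
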
diff --git a/src/include/port/pg_iovec.h b/src/include/port/pg_iovec.h
new file mode 100644
index 0000000..88f6615
--- /dev/null
+++ b/src/include/port/pg_iovec.h
@@ -0,0 +1,54 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_iovec.h
+ * Header for vectored I/O functions, to use in place of <sys/uio.h>.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/pg_iovec.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_IOVEC_H
+#define PG_IOVEC_H
+
+#include <limits.h>
+
+#ifdef HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+
+/* If <sys/uio.h> is missing, define our own POSIX-compatible iovec struct. */
+#ifndef HAVE_SYS_UIO_H
+struct iovec
+{
+ void *iov_base;
+ size_t iov_len;
+};
+#endif
+
+/*
+ * If <limits.h> didn't define IOV_MAX, define our own. POSIX requires at
+ * least 16.
+ */
+#ifndef IOV_MAX
+#define IOV_MAX 16
+#endif
+
+/* Define a reasonable maximum that is safe to use on the stack. */
+#define PG_IOV_MAX Min(IOV_MAX, 32)
+
+#if HAVE_DECL_PREADV
+#define pg_preadv preadv
+#else
+extern ssize_t pg_preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset);
+#endif
+
+#if HAVE_DECL_PWRITEV
+#define pg_pwritev pwritev
+#else
+extern ssize_t pg_pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset);
+#endif
+
+#endif /* PG_IOVEC_H */
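
A hedged usage sketch: a gathered positional write of a header and a body in one call, whether pg_pwritev maps to the native pwritev() or to the fallback. The helper name is illustrative.

static ssize_t
pwrite_two_parts(int fd, void *hdr, size_t hdrlen,
				 void *body, size_t bodylen, off_t offset)
{
	struct iovec iov[2];		/* well under PG_IOV_MAX */

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	iov[1].iov_base = body;
	iov[1].iov_len = bodylen;

	return pg_pwritev(fd, iov, 2, offset);
}
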
diff --git a/src/include/port/pg_pthread.h b/src/include/port/pg_pthread.h
new file mode 100644
index 0000000..d102ce9
--- /dev/null
+++ b/src/include/port/pg_pthread.h
@@ -0,0 +1,41 @@
+/*-------------------------------------------------------------------------
+ *
+ * Declarations for missing POSIX thread components.
+ *
+ * Currently this supplies an implementation of pthread_barrier_t for the
+ * benefit of macOS, which lacks it. These declarations are not in port.h,
+ * because that'd require <pthread.h> to be included by every translation
+ * unit.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef PG_PTHREAD_H
+#define PG_PTHREAD_H
+
+#include <pthread.h>
+
+#ifndef HAVE_PTHREAD_BARRIER_WAIT
+
+#ifndef PTHREAD_BARRIER_SERIAL_THREAD
+#define PTHREAD_BARRIER_SERIAL_THREAD (-1)
+#endif
+
+typedef struct pg_pthread_barrier
+{
+ bool sense; /* we only need a one-bit phase */
+ int count; /* number of threads expected */
+ int arrived; /* number of threads that have arrived */
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+} pthread_barrier_t;
+
+extern int pthread_barrier_init(pthread_barrier_t *barrier,
+ const void *attr,
+ int count);
+extern int pthread_barrier_wait(pthread_barrier_t *barrier);
+extern int pthread_barrier_destroy(pthread_barrier_t *barrier);
+
+#endif
+
+#endif
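
A hedged usage sketch of the barrier API, matching POSIX semantics: all 'count' threads block in pthread_barrier_wait(), and exactly one of them is handed PTHREAD_BARRIER_SERIAL_THREAD. The names below are illustrative; the barrier would be set up once with pthread_barrier_init(&start_barrier, NULL, nthreads).

static pthread_barrier_t start_barrier;

static void *
worker(void *arg)
{
	/* ... per-thread setup ... */

	if (pthread_barrier_wait(&start_barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
	{
		/* exactly one thread runs this phase-completion step */
	}

	/* no thread gets here until all 'count' threads have arrived */
	return NULL;
}
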
diff --git a/src/include/port/solaris.h b/src/include/port/solaris.h
new file mode 100644
index 0000000..e63a3bd
--- /dev/null
+++ b/src/include/port/solaris.h
@@ -0,0 +1,26 @@
+/* src/include/port/solaris.h */
+
+/*
+ * Sort this out for all operating systems some time. The __xxx
+ * symbols are defined on both GCC and Solaris CC, although GCC
+ * doesn't document them. The __xxx__ symbols are only on GCC.
+ */
+#if defined(__i386) && !defined(__i386__)
+#define __i386__
+#endif
+
+#if defined(__amd64) && !defined(__amd64__)
+#define __amd64__
+#endif
+
+#if defined(__x86_64) && !defined(__x86_64__)
+#define __x86_64__
+#endif
+
+#if defined(__sparc) && !defined(__sparc__)
+#define __sparc__
+#endif
+
+#if defined(__i386__)
+#include <sys/isa_defs.h>
+#endif
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
new file mode 100644
index 0000000..d8ae49e
--- /dev/null
+++ b/src/include/port/win32.h
@@ -0,0 +1,69 @@
+/* src/include/port/win32.h */
+
+/*
+ * We always rely on the WIN32 macro being set by our build system,
+ * but _WIN32 is the compiler pre-defined macro. So make sure we define
+ * WIN32 whenever _WIN32 is set, to facilitate standalone building.
+ */
+#if defined(_WIN32) && !defined(WIN32)
+#define WIN32
+#endif
+
+/*
+ * Make sure _WIN32_WINNT has the minimum required value.
+ * Leave a higher value in place. When building with at least Visual
+ * Studio 2015 the minimum requirement is Windows Vista (0x0600) to
+ * get support for GetLocaleInfoEx() with locales. For everything else
+ * the minimum version is Windows XP (0x0501).
+ */
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+#define MIN_WINNT 0x0600
+#else
+#define MIN_WINNT 0x0501
+#endif
+
+#if defined(_WIN32_WINNT) && _WIN32_WINNT < MIN_WINNT
+#undef _WIN32_WINNT
+#endif
+
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT MIN_WINNT
+#endif
+
+/*
+ * We need to prevent <crtdefs.h> from defining a symbol conflicting with
+ * our errcode() function. Since it's likely to get included by standard
+ * system headers, pre-emptively include it now.
+ */
+#if defined(_MSC_VER) || defined(HAVE_CRTDEFS_H)
+#define errcode __msvc_errcode
+#include <crtdefs.h>
+#undef errcode
+#endif
+
+/*
+ * defines for dynamic linking on Win32 platform
+ */
+
+#ifdef BUILDING_DLL
+#define PGDLLIMPORT __declspec (dllexport)
+#else
+#define PGDLLIMPORT __declspec (dllimport)
+#endif
+
+#ifdef _MSC_VER
+#define PGDLLEXPORT __declspec (dllexport)
+#else
+#define PGDLLEXPORT
+#endif
+
+/*
+ * Windows headers don't define this structure, but you can define it yourself
+ * to use the functionality.
+ */
+struct sockaddr_un
+{
+ unsigned short sun_family;
+ char sun_path[108];
+};
+#define HAVE_STRUCT_SOCKADDR_UN 1
diff --git a/src/include/port/win32/arpa/inet.h b/src/include/port/win32/arpa/inet.h
new file mode 100644
index 0000000..ad18031
--- /dev/null
+++ b/src/include/port/win32/arpa/inet.h
@@ -0,0 +1,3 @@
+/* src/include/port/win32/arpa/inet.h */
+
+#include <sys/socket.h>
diff --git a/src/include/port/win32/dlfcn.h b/src/include/port/win32/dlfcn.h
new file mode 100644
index 0000000..b6e43c0
--- /dev/null
+++ b/src/include/port/win32/dlfcn.h
@@ -0,0 +1 @@
+/* src/include/port/win32/dlfcn.h */
diff --git a/src/include/port/win32/grp.h b/src/include/port/win32/grp.h
new file mode 100644
index 0000000..8b4f213
--- /dev/null
+++ b/src/include/port/win32/grp.h
@@ -0,0 +1 @@
+/* src/include/port/win32/grp.h */
diff --git a/src/include/port/win32/netdb.h b/src/include/port/win32/netdb.h
new file mode 100644
index 0000000..ad0627e
--- /dev/null
+++ b/src/include/port/win32/netdb.h
@@ -0,0 +1 @@
+/* src/include/port/win32/netdb.h */
diff --git a/src/include/port/win32/netinet/in.h b/src/include/port/win32/netinet/in.h
new file mode 100644
index 0000000..a4e22f8
--- /dev/null
+++ b/src/include/port/win32/netinet/in.h
@@ -0,0 +1,3 @@
+/* src/include/port/win32/netinet/in.h */
+
+#include <sys/socket.h>
diff --git a/src/include/port/win32/pwd.h b/src/include/port/win32/pwd.h
new file mode 100644
index 0000000..b8c7178
--- /dev/null
+++ b/src/include/port/win32/pwd.h
@@ -0,0 +1,3 @@
+/*
+ * src/include/port/win32/pwd.h
+ */
diff --git a/src/include/port/win32/sys/socket.h b/src/include/port/win32/sys/socket.h
new file mode 100644
index 0000000..9b2cdf3
--- /dev/null
+++ b/src/include/port/win32/sys/socket.h
@@ -0,0 +1,33 @@
+/*
+ * src/include/port/win32/sys/socket.h
+ */
+#ifndef WIN32_SYS_SOCKET_H
+#define WIN32_SYS_SOCKET_H
+
+/*
+ * Unfortunately, <wingdi.h> of VC++ also defines ERROR.
+ * To avoid the conflict, we include <windows.h> here and undefine ERROR
+ * immediately.
+ *
+ * Note: Don't include <wingdi.h> directly. It causes compile errors.
+ */
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+
+#undef ERROR
+#undef small
+
+/* Restore old ERROR value */
+#ifdef PGERROR
+#define ERROR PGERROR
+#endif
+
+/*
+ * We can't use the Windows gai_strerror{AW} functions because they are
+ * defined inline in the MS header files, so we use our own.
+ */
+#undef gai_strerror
+
+#endif /* WIN32_SYS_SOCKET_H */
diff --git a/src/include/port/win32/sys/wait.h b/src/include/port/win32/sys/wait.h
new file mode 100644
index 0000000..eaeb566
--- /dev/null
+++ b/src/include/port/win32/sys/wait.h
@@ -0,0 +1,3 @@
+/*
+ * src/include/port/win32/sys/wait.h
+ */
diff --git a/src/include/port/win32_msvc/dirent.h b/src/include/port/win32_msvc/dirent.h
new file mode 100644
index 0000000..62799db
--- /dev/null
+++ b/src/include/port/win32_msvc/dirent.h
@@ -0,0 +1,34 @@
+/*
+ * Headers for port/dirent.c, win32 native implementation of dirent functions
+ *
+ * src/include/port/win32_msvc/dirent.h
+ */
+
+#ifndef _WIN32VC_DIRENT_H
+#define _WIN32VC_DIRENT_H
+struct dirent
+{
+ long d_ino;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ unsigned short d_namlen;
+ char d_name[MAX_PATH];
+};
+
+typedef struct DIR DIR;
+
+DIR *opendir(const char *);
+struct dirent *readdir(DIR *);
+int closedir(DIR *);
+
+/* File types for 'd_type'. */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+#endif
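
A hedged usage sketch of the declarations above; the scan loop is the same as on POSIX systems (strcmp from <string.h>; whether d_type is filled in depends on port/dirent.c):

static void
scan_directory(const char *path)
{
	DIR		   *dir = opendir(path);
	struct dirent *de;

	if (dir == NULL)
		return;					/* errno has been set */

	while ((de = readdir(dir)) != NULL)
	{
		if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
			continue;
		/* ... process de->d_name ... */
	}

	closedir(dir);
}
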
diff --git a/src/include/port/win32_msvc/sys/file.h b/src/include/port/win32_msvc/sys/file.h
new file mode 100644
index 0000000..76be3e7
--- /dev/null
+++ b/src/include/port/win32_msvc/sys/file.h
@@ -0,0 +1 @@
+/* src/include/port/win32_msvc/sys/file.h */
diff --git a/src/include/port/win32_msvc/sys/param.h b/src/include/port/win32_msvc/sys/param.h
new file mode 100644
index 0000000..160df3b
--- /dev/null
+++ b/src/include/port/win32_msvc/sys/param.h
@@ -0,0 +1 @@
+/* src/include/port/win32_msvc/sys/param.h */
diff --git a/src/include/port/win32_msvc/sys/time.h b/src/include/port/win32_msvc/sys/time.h
new file mode 100644
index 0000000..9d943ec
--- /dev/null
+++ b/src/include/port/win32_msvc/sys/time.h
@@ -0,0 +1 @@
+/* src/include/port/win32_msvc/sys/time.h */
diff --git a/src/include/port/win32_msvc/unistd.h b/src/include/port/win32_msvc/unistd.h
new file mode 100644
index 0000000..b63f477
--- /dev/null
+++ b/src/include/port/win32_msvc/unistd.h
@@ -0,0 +1 @@
+/* src/include/port/win32_msvc/unistd.h */
diff --git a/src/include/port/win32_msvc/utime.h b/src/include/port/win32_msvc/utime.h
new file mode 100644
index 0000000..c78e79c
--- /dev/null
+++ b/src/include/port/win32_msvc/utime.h
@@ -0,0 +1,3 @@
+/* src/include/port/win32_msvc/utime.h */
+
+#include <sys/utime.h> /* for non-unicode version */
diff --git a/src/include/port/win32_port.h b/src/include/port/win32_port.h
new file mode 100644
index 0000000..05c5a53
--- /dev/null
+++ b/src/include/port/win32_port.h
@@ -0,0 +1,546 @@
+/*-------------------------------------------------------------------------
+ *
+ * win32_port.h
+ * Windows-specific compatibility stuff.
+ *
+ * Note this is read in MinGW as well as native Windows builds,
+ * but not in Cygwin builds.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/port/win32_port.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_WIN32_PORT_H
+#define PG_WIN32_PORT_H
+
+/*
+ * Always build with SSPI support. Keep it as a #define in case
+ * we want a switch to disable it sometime in the future.
+ */
+#define ENABLE_SSPI 1
+
+/* undefine and redefine after #include */
+#undef mkdir
+
+#undef ERROR
+
+/*
+ * VS2013 and later issue warnings about using the old Winsock API,
+ * which we don't really want to hear about.
+ */
+#ifdef _MSC_VER
+#define _WINSOCK_DEPRECATED_NO_WARNINGS
+#endif
+
+/*
+ * The MinGW64 headers choke if this is already defined - they
+ * define it themselves.
+ */
+#if !defined(__MINGW64_VERSION_MAJOR) || defined(_MSC_VER)
+#define _WINSOCKAPI_
+#endif
+
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#undef small
+#include <process.h>
+#include <signal.h>
+#include <direct.h>
+#undef near
+
+/* needed before sys/stat hacking below: */
+#define fstat microsoft_native_fstat
+#define stat microsoft_native_stat
+#include <sys/stat.h>
+#undef fstat
+#undef stat
+
+/* Must be here to avoid conflicting with prototype in windows.h */
+#define mkdir(a,b) mkdir(a)
+
+#define ftruncate(a,b) chsize(a,b)
+
+/* Windows doesn't have fsync() as such, use _commit() */
+#define fsync(fd) _commit(fd)
+
+/*
+ * For historical reasons, we allow setting wal_sync_method to
+ * fsync_writethrough on Windows, even though it's really identical to fsync
+ * (both code paths wind up at _commit()).
+ */
+#define HAVE_FSYNC_WRITETHROUGH
+#define FSYNC_WRITETHROUGH_IS_FSYNC
+
+#define USES_WINSOCK
+
+/*
+ * IPC defines
+ */
+#undef HAVE_UNION_SEMUN
+#define HAVE_UNION_SEMUN 1
+
+#define IPC_RMID 256
+#define IPC_CREAT 512
+#define IPC_EXCL 1024
+#define IPC_PRIVATE 234564
+#define IPC_NOWAIT 2048
+#define IPC_STAT 4096
+
+#define EACCESS 2048
+#ifndef EIDRM
+#define EIDRM 4096
+#endif
+
+#define SETALL 8192
+#define GETNCNT 16384
+#define GETVAL 65536
+#define SETVAL 131072
+#define GETPID 262144
+
+
+/*
+ * Signal stuff
+ *
+ * For WIN32, there is no wait() call so there are no wait() macros
+ * to interpret the return value of system(). Instead, system()
+ * return values < 0x100 are used for exit() termination, and higher
+ * values are used to indicate non-exit() termination, which is
+ * similar to a unix-style signal exit (think SIGSEGV ==
+ * STATUS_ACCESS_VIOLATION). Return values are broken up into groups:
+ *
+ * https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/using-ntstatus-values
+ *
+ * NT_SUCCESS 0 - 0x3FFFFFFF
+ * NT_INFORMATION 0x40000000 - 0x7FFFFFFF
+ * NT_WARNING 0x80000000 - 0xBFFFFFFF
+ * NT_ERROR 0xC0000000 - 0xFFFFFFFF
+ *
+ * Effectively, we don't care about the severity of the return value from
+ * system(); we just need to know whether it came from exit() or was
+ * generated by the system, and it seems values >= 0x100 are system-generated.
+ * See this URL for a list of WIN32 STATUS_* values:
+ *
+ * Wine (URL used in our error messages) -
+ * http://source.winehq.org/source/include/ntstatus.h
+ * Descriptions -
+ * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55
+ *
+ * The comprehensive exception list is included in ntstatus.h from the
+ * Windows Driver Kit (WDK). A subset of the list is also included in
+ * winnt.h from the Windows SDK. Defining WIN32_NO_STATUS before including
+ * windows.h helps to avoid any conflicts.
+ *
+ * Some day we might want to print descriptions for the most common
+ * exceptions, rather than printing an include file name. We could use
+ * RtlNtStatusToDosError() and pass to FormatMessage(), which can print
+ * the text of error values, but MinGW does not support
+ * RtlNtStatusToDosError().
+ */
+#define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0)
+#define WIFSIGNALED(w) (!WIFEXITED(w))
+#define WEXITSTATUS(w) (w)
+#define WTERMSIG(w) (w)
+
+#define sigmask(sig) ( 1 << ((sig)-1) )
+
+/* Signal function return values */
+#undef SIG_DFL
+#undef SIG_ERR
+#undef SIG_IGN
+#define SIG_DFL ((pqsigfunc)0)
+#define SIG_ERR ((pqsigfunc)-1)
+#define SIG_IGN ((pqsigfunc)1)
+
+/* Some extra signals */
+#define SIGHUP 1
+#define SIGQUIT 3
+#define SIGTRAP 5
+#define SIGABRT 22 /* Set to match W32 value -- not UNIX value */
+#define SIGKILL 9
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGSTOP 17
+#define SIGTSTP 18
+#define SIGCONT 19
+#define SIGCHLD 20
+#define SIGWINCH 28
+#define SIGUSR1 30
+#define SIGUSR2 31
+
+/*
+ * New versions of MinGW have gettimeofday() and also declare
+ * struct timezone to support it.
+ */
+#ifndef HAVE_GETTIMEOFDAY
+struct timezone
+{
+ int tz_minuteswest; /* Minutes west of GMT. */
+ int tz_dsttime; /* Nonzero if DST is ever in effect. */
+};
+#endif
+
+/* for setitimer in backend/port/win32/timer.c */
+#define ITIMER_REAL 0
+struct itimerval
+{
+ struct timeval it_interval;
+ struct timeval it_value;
+};
+
+int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue);
+
+/*
+ * WIN32 does not provide a 64-bit off_t, but it does provide functions
+ * that operate on 64-bit offsets.
+ */
+#define pgoff_t __int64
+
+#ifdef _MSC_VER
+#define fseeko(stream, offset, origin) _fseeki64(stream, offset, origin)
+#define ftello(stream) _ftelli64(stream)
+#else
+#ifndef fseeko
+#define fseeko(stream, offset, origin) fseeko64(stream, offset, origin)
+#endif
+#ifndef ftello
+#define ftello(stream) ftello64(stream)
+#endif
+#endif
+
+/*
+ * Win32 also doesn't have symlinks, but we can emulate them with
+ * junction points on newer Win32 versions.
+ *
+ * Cygwin has its own symlinks which work on Win95/98/ME where
+ * junction points don't, so use those instead. We have no way of
+ * knowing what type of system Cygwin binaries will be run on.
+ * Note: Some CYGWIN includes might #define WIN32.
+ */
+extern int pgsymlink(const char *oldpath, const char *newpath);
+extern int pgreadlink(const char *path, char *buf, size_t size);
+extern bool pgwin32_is_junction(const char *path);
+
+#define symlink(oldpath, newpath) pgsymlink(oldpath, newpath)
+#define readlink(path, buf, size) pgreadlink(path, buf, size)
+
+/*
+ * Supplement to <sys/types.h>.
+ *
+ * Perl already has typedefs for uid_t and gid_t.
+ */
+#ifndef PLPERL_HAVE_UID_GID
+typedef int uid_t;
+typedef int gid_t;
+#endif
+typedef long key_t;
+
+#ifdef _MSC_VER
+typedef int pid_t;
+#endif
+
+/*
+ * Supplement to <sys/stat.h>.
+ *
+ * We must pull in sys/stat.h before this part, else our overrides lose.
+ *
+ * stat() is not guaranteed to set the st_size field on win32, so we
+ * redefine it to our own implementation. See src/port/win32stat.c.
+ *
+ * The struct stat is 32 bit in MSVC, so we redefine it as a copy of
+ * struct __stat64. This also fixes the struct size for MINGW builds.
+ */
+struct stat /* This should match struct __stat64 */
+{
+ _dev_t st_dev;
+ _ino_t st_ino;
+ unsigned short st_mode;
+ short st_nlink;
+ short st_uid;
+ short st_gid;
+ _dev_t st_rdev;
+ __int64 st_size;
+ __time64_t st_atime;
+ __time64_t st_mtime;
+ __time64_t st_ctime;
+};
+
+extern int _pgfstat64(int fileno, struct stat *buf);
+extern int _pgstat64(const char *name, struct stat *buf);
+
+#define fstat(fileno, sb) _pgfstat64(fileno, sb)
+#define stat(path, sb) _pgstat64(path, sb)
+#define lstat(path, sb) _pgstat64(path, sb)
+
+/* These macros are not provided by older MinGW, nor by MSVC */
+#ifndef S_IRUSR
+#define S_IRUSR _S_IREAD
+#endif
+#ifndef S_IWUSR
+#define S_IWUSR _S_IWRITE
+#endif
+#ifndef S_IXUSR
+#define S_IXUSR _S_IEXEC
+#endif
+#ifndef S_IRWXU
+#define S_IRWXU (S_IRUSR | S_IWUSR | S_IXUSR)
+#endif
+#ifndef S_IRGRP
+#define S_IRGRP 0
+#endif
+#ifndef S_IWGRP
+#define S_IWGRP 0
+#endif
+#ifndef S_IXGRP
+#define S_IXGRP 0
+#endif
+#ifndef S_IRWXG
+#define S_IRWXG 0
+#endif
+#ifndef S_IROTH
+#define S_IROTH 0
+#endif
+#ifndef S_IWOTH
+#define S_IWOTH 0
+#endif
+#ifndef S_IXOTH
+#define S_IXOTH 0
+#endif
+#ifndef S_IRWXO
+#define S_IRWXO 0
+#endif
+#ifndef S_ISDIR
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#endif
+#ifndef S_ISREG
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#endif
+
+/*
+ * Supplement to <fcntl.h>.
+ * This is the same value as _O_NOINHERIT in the MS header file. This is
+ * to ensure that we don't collide with a future definition. It means
+ * we cannot use _O_NOINHERIT ourselves.
+ */
+#define O_DSYNC 0x0080
+
+/*
+ * Supplement to <errno.h>.
+ *
+ * We redefine network-related Berkeley error symbols as the corresponding WSA
+ * constants. This allows strerror.c to recognize them as being in the Winsock
+ * error code range and pass them off to win32_socket_strerror(), since
+ * Windows' version of plain strerror() won't cope. Note that this will break
+ * if these names are used for anything else besides Windows Sockets errors.
+ * See TranslateSocketError() when changing this list.
+ */
+#undef EAGAIN
+#define EAGAIN WSAEWOULDBLOCK
+#undef EINTR
+#define EINTR WSAEINTR
+#undef EMSGSIZE
+#define EMSGSIZE WSAEMSGSIZE
+#undef EAFNOSUPPORT
+#define EAFNOSUPPORT WSAEAFNOSUPPORT
+#undef EWOULDBLOCK
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#undef ECONNABORTED
+#define ECONNABORTED WSAECONNABORTED
+#undef ECONNRESET
+#define ECONNRESET WSAECONNRESET
+#undef EINPROGRESS
+#define EINPROGRESS WSAEINPROGRESS
+#undef EISCONN
+#define EISCONN WSAEISCONN
+#undef ENOBUFS
+#define ENOBUFS WSAENOBUFS
+#undef EPROTONOSUPPORT
+#define EPROTONOSUPPORT WSAEPROTONOSUPPORT
+#undef ECONNREFUSED
+#define ECONNREFUSED WSAECONNREFUSED
+#undef ENOTSOCK
+#define ENOTSOCK WSAENOTSOCK
+#undef EOPNOTSUPP
+#define EOPNOTSUPP WSAEOPNOTSUPP
+#undef EADDRINUSE
+#define EADDRINUSE WSAEADDRINUSE
+#undef EADDRNOTAVAIL
+#define EADDRNOTAVAIL WSAEADDRNOTAVAIL
+#undef EHOSTDOWN
+#define EHOSTDOWN WSAEHOSTDOWN
+#undef EHOSTUNREACH
+#define EHOSTUNREACH WSAEHOSTUNREACH
+#undef ENETDOWN
+#define ENETDOWN WSAENETDOWN
+#undef ENETRESET
+#define ENETRESET WSAENETRESET
+#undef ENETUNREACH
+#define ENETUNREACH WSAENETUNREACH
+#undef ENOTCONN
+#define ENOTCONN WSAENOTCONN
+
+/*
+ * Locale stuff.
+ *
+ * Extended locale functions with gratuitous underscore prefixes.
+ * (These APIs are nevertheless fully documented by Microsoft.)
+ */
+#define locale_t _locale_t
+#define tolower_l _tolower_l
+#define toupper_l _toupper_l
+#define towlower_l _towlower_l
+#define towupper_l _towupper_l
+#define isdigit_l _isdigit_l
+#define iswdigit_l _iswdigit_l
+#define isalpha_l _isalpha_l
+#define iswalpha_l _iswalpha_l
+#define isalnum_l _isalnum_l
+#define iswalnum_l _iswalnum_l
+#define isupper_l _isupper_l
+#define iswupper_l _iswupper_l
+#define islower_l _islower_l
+#define iswlower_l _iswlower_l
+#define isgraph_l _isgraph_l
+#define iswgraph_l _iswgraph_l
+#define isprint_l _isprint_l
+#define iswprint_l _iswprint_l
+#define ispunct_l _ispunct_l
+#define iswpunct_l _iswpunct_l
+#define isspace_l _isspace_l
+#define iswspace_l _iswspace_l
+#define strcoll_l _strcoll_l
+#define strxfrm_l _strxfrm_l
+#define wcscoll_l _wcscoll_l
+#define wcstombs_l _wcstombs_l
+#define mbstowcs_l _mbstowcs_l
+
+/*
+ * Versions of libintl (apparently >= 0.18) try to replace setlocale() with
+ * a macro pointing to their own version. Remove the macro, if it exists,
+ * because it ends up calling the wrong version when the backend and libintl
+ * use different versions of msvcrt.
+ */
+#if defined(setlocale)
+#undef setlocale
+#endif
+
+/*
+ * Define our own wrapper macro around setlocale() to work around bugs in
+ * Windows' native setlocale() function.
+ */
+extern char *pgwin32_setlocale(int category, const char *locale);
+
+#define setlocale(a,b) pgwin32_setlocale(a,b)
+
+
+/* In backend/port/win32/signal.c */
+extern PGDLLIMPORT volatile int pg_signal_queue;
+extern PGDLLIMPORT int pg_signal_mask;
+extern HANDLE pgwin32_signal_event;
+extern HANDLE pgwin32_initial_signal_pipe;
+
+#define UNBLOCKED_SIGNAL_QUEUE() (pg_signal_queue & ~pg_signal_mask)
+#define PG_SIGNAL_COUNT 32
+
+void pgwin32_signal_initialize(void);
+HANDLE pgwin32_create_signal_listener(pid_t pid);
+void pgwin32_dispatch_queued_signals(void);
+void pg_queue_signal(int signum);
+
+/* In src/port/kill.c */
+#define kill(pid,sig) pgkill(pid,sig)
+extern int pgkill(int pid, int sig);
+
+/* In backend/port/win32/socket.c */
+#ifndef FRONTEND
+#define socket(af, type, protocol) pgwin32_socket(af, type, protocol)
+#define bind(s, addr, addrlen) pgwin32_bind(s, addr, addrlen)
+#define listen(s, backlog) pgwin32_listen(s, backlog)
+#define accept(s, addr, addrlen) pgwin32_accept(s, addr, addrlen)
+#define connect(s, name, namelen) pgwin32_connect(s, name, namelen)
+#define select(n, r, w, e, timeout) pgwin32_select(n, r, w, e, timeout)
+#define recv(s, buf, len, flags) pgwin32_recv(s, buf, len, flags)
+#define send(s, buf, len, flags) pgwin32_send(s, buf, len, flags)
+
+SOCKET pgwin32_socket(int af, int type, int protocol);
+int pgwin32_bind(SOCKET s, struct sockaddr *addr, int addrlen);
+int pgwin32_listen(SOCKET s, int backlog);
+SOCKET pgwin32_accept(SOCKET s, struct sockaddr *addr, int *addrlen);
+int pgwin32_connect(SOCKET s, const struct sockaddr *name, int namelen);
+int pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval *timeout);
+int pgwin32_recv(SOCKET s, char *buf, int len, int flags);
+int pgwin32_send(SOCKET s, const void *buf, int len, int flags);
+int pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout);
+
+extern int pgwin32_noblock;
+
+#endif /* FRONTEND */
+
+/* in backend/port/win32_shmem.c */
+extern int pgwin32_ReserveSharedMemoryRegion(HANDLE);
+
+/* in backend/port/win32/crashdump.c */
+extern void pgwin32_install_crashdump_handler(void);
+
+/* in port/win32error.c */
+extern void _dosmaperr(unsigned long);
+
+/* in port/win32env.c */
+extern int pgwin32_putenv(const char *);
+extern int pgwin32_setenv(const char *name, const char *value, int overwrite);
+extern int pgwin32_unsetenv(const char *name);
+
+#define putenv(x) pgwin32_putenv(x)
+#define setenv(x,y,z) pgwin32_setenv(x,y,z)
+#define unsetenv(x) pgwin32_unsetenv(x)
+
+/* in port/win32security.c */
+extern int pgwin32_is_service(void);
+extern int pgwin32_is_admin(void);
+
+/* Windows security token manipulation (in src/common/exec.c) */
+extern BOOL AddUserToTokenDacl(HANDLE hToken);
+
+/* Things that exist in MinGW headers, but need to be added to MSVC */
+#ifdef _MSC_VER
+
+#ifndef _WIN64
+typedef long ssize_t;
+#else
+typedef __int64 ssize_t;
+#endif
+
+typedef unsigned short mode_t;
+
+#define F_OK 0
+#define W_OK 2
+#define R_OK 4
+
+/* Pulled from Makefile.port in MinGW */
+#define DLSUFFIX ".dll"
+
+#endif /* _MSC_VER */
+
+#if (defined(_MSC_VER) && (_MSC_VER < 1900)) || \
+ defined(__MINGW32__) || defined(__MINGW64__)
+/*
+ * VS2013 has a strtof() that seems to give correct answers for valid input,
+ * even on the rounding edge cases, but which doesn't handle out-of-range
+ * input correctly. Work around that.
+ *
+ * Mingw claims to have a strtof, and my reading of its source code suggests
+ * that it ought to work (and not need this hack), but the regression test
+ * results disagree with me; whether this is a version issue or not is not
+ * clear. However, using our wrapper (and the misrounded-input variant file,
+ * already required for supporting ancient systems) can't make things any
+ * worse, except for a tiny performance loss when reading zeros.
+ *
+ * See also cygwin.h for another instance of this.
+ */
+#define HAVE_BUGGY_STRTOF 1
+#endif
+
+#endif /* PG_WIN32_PORT_H */
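
A hedged sketch of the wait macros defined earlier in this header, interpreting a system() result on Windows (printf from <stdio.h>; the function is illustrative):

static void
report_system_result(int rc)
{
	if (WIFEXITED(rc))			/* rc < 0x100: ordinary exit() status */
		printf("child exited with status %d\n", WEXITSTATUS(rc));
	else						/* rc >= 0x100: NTSTATUS-style termination */
		printf("child terminated, exception code 0x%X\n",
			   (unsigned int) WTERMSIG(rc));
}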