summaryrefslogtreecommitdiffstats
path: root/src/boost/libs/atomic
diff options
context:
space:
mode:
Diffstat (limited to 'src/boost/libs/atomic')
-rw-r--r--src/boost/libs/atomic/CMakeLists.txt39
-rw-r--r--src/boost/libs/atomic/README.md28
-rw-r--r--src/boost/libs/atomic/build/Jamfile.v238
-rw-r--r--src/boost/libs/atomic/index.html13
-rw-r--r--src/boost/libs/atomic/meta/libraries.json18
-rw-r--r--src/boost/libs/atomic/src/lock_pool.cpp194
-rw-r--r--src/boost/libs/atomic/test/Jamfile.v237
-rw-r--r--src/boost/libs/atomic/test/aligned_object.hpp61
-rw-r--r--src/boost/libs/atomic/test/api_test_helpers.hpp1238
-rw-r--r--src/boost/libs/atomic/test/atomicity.cpp285
-rw-r--r--src/boost/libs/atomic/test/atomicity_ref.cpp293
-rw-r--r--src/boost/libs/atomic/test/c_implicit_ctor.cpp34
-rw-r--r--src/boost/libs/atomic/test/cf_arith_func_ptr.cpp17
-rw-r--r--src/boost/libs/atomic/test/cf_arith_mem_ptr.cpp22
-rw-r--r--src/boost/libs/atomic/test/cf_arith_void_ptr.cpp16
-rw-r--r--src/boost/libs/atomic/test/fallback_api.cpp77
-rw-r--r--src/boost/libs/atomic/test/fallback_ref_api.cpp74
-rw-r--r--src/boost/libs/atomic/test/lockfree.cpp237
-rw-r--r--src/boost/libs/atomic/test/native_api.cpp83
-rw-r--r--src/boost/libs/atomic/test/native_ref_api.cpp74
-rw-r--r--src/boost/libs/atomic/test/ordering.cpp270
-rw-r--r--src/boost/libs/atomic/test/ordering_ref.cpp276
-rw-r--r--src/boost/libs/atomic/test/test_cmake/CMakeLists.txt22
-rw-r--r--src/boost/libs/atomic/test/test_cmake/main.cpp22
-rw-r--r--src/boost/libs/atomic/test/value_with_epsilon.hpp78
25 files changed, 3546 insertions, 0 deletions
diff --git a/src/boost/libs/atomic/CMakeLists.txt b/src/boost/libs/atomic/CMakeLists.txt
new file mode 100644
index 000000000..0511c6470
--- /dev/null
+++ b/src/boost/libs/atomic/CMakeLists.txt
@@ -0,0 +1,39 @@
+# Copyright 2018 Mike Dev
+# Copyright 2019 Peter Dimov
+# Distributed under the Boost Software License, Version 1.0.
+# See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
+
+cmake_minimum_required(VERSION 3.5...3.16)
+project(boost_atomic VERSION "${BOOST_SUPERPROJECT_VERSION}" LANGUAGES CXX)
+
+add_library(boost_atomic src/lock_pool.cpp)
+add_library(Boost::atomic ALIAS boost_atomic)
+
+target_include_directories(boost_atomic PUBLIC include)
+
+target_link_libraries(boost_atomic
+ PUBLIC
+ Boost::assert
+ Boost::config
+ Boost::type_traits
+)
+
+target_compile_definitions(boost_atomic
+ PUBLIC
+ BOOST_ATOMIC_NO_LIB
+ PRIVATE
+ BOOST_ATOMIC_SOURCE
+)
+
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(boost_atomic PUBLIC BOOST_ATOMIC_DYN_LINK)
+else()
+ target_compile_definitions(boost_atomic PUBLIC BOOST_ATOMIC_STATIC_LINK)
+endif()
+
+if(BOOST_SUPERPROJECT_VERSION)
+
+ include(BoostInstall)
+ boost_install(TARGETS boost_atomic HEADER_DIRECTORY include/)
+
+endif()
diff --git a/src/boost/libs/atomic/README.md b/src/boost/libs/atomic/README.md
new file mode 100644
index 000000000..abf1795df
--- /dev/null
+++ b/src/boost/libs/atomic/README.md
@@ -0,0 +1,28 @@
+# ![Boost.Atomic](doc/logo.png)
+
+Boost.Atomic, part of collection of the [Boost C++ Libraries](https://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11 and later.
+
+### Directories
+
+* **build** - Boost.Atomic build scripts
+* **doc** - QuickBook documentation sources
+* **include** - Interface headers of Boost.Atomic
+* **src** - Compilable source code of Boost.Atomic
+* **test** - Boost.Atomic unit tests
+
+### More information
+
+* [Documentation](https://www.boost.org/libs/atomic)
+* [Report bugs](https://github.com/boostorg/atomic/issues/new). Be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well.
+* Submit your patches as [pull requests](https://github.com/boostorg/atomic/compare) against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](https://www.boost.org/LICENSE_1_0.txt).
+
+### Build status
+
+Branch | Travis CI | AppVeyor | Test Matrix | Dependencies |
+:-------------: | --------- | -------- | ----------- | ------------ |
+[`master`](https://github.com/boostorg/atomic/tree/master) | [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=master)](https://travis-ci.org/boostorg/atomic) | [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/master?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/master) | [![Tests](https://img.shields.io/badge/matrix-master-brightgreen.svg)](http://www.boost.org/development/tests/master/developer/atomic.html) | [![Dependencies](https://img.shields.io/badge/deps-master-brightgreen.svg)](https://pdimov.github.io/boostdep-report/master/atomic.html)
+[`develop`](https://github.com/boostorg/atomic/tree/develop) | [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=develop)](https://travis-ci.org/boostorg/atomic) | [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/develop?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) | [![Tests](https://img.shields.io/badge/matrix-develop-brightgreen.svg)](http://www.boost.org/development/tests/develop/developer/atomic.html) | [![Dependencies](https://img.shields.io/badge/deps-develop-brightgreen.svg)](https://pdimov.github.io/boostdep-report/develop/atomic.html)
+
+### License
+
+Distributed under the [Boost Software License, Version 1.0](https://www.boost.org/LICENSE_1_0.txt).
diff --git a/src/boost/libs/atomic/build/Jamfile.v2 b/src/boost/libs/atomic/build/Jamfile.v2
new file mode 100644
index 000000000..371034121
--- /dev/null
+++ b/src/boost/libs/atomic/build/Jamfile.v2
@@ -0,0 +1,38 @@
+# Boost.Atomic Library Jamfile
+#
+# Copyright Helge Bahmann 2011.
+# Copyright Andrey Semashev 2018.
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import common ;
+
+project boost/atomic
+ : requirements
+ <threading>multi
+ <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
+ <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
+ <define>BOOST_ATOMIC_SOURCE
+ <target-os>windows:<define>BOOST_USE_WINDOWS_H
+ <target-os>windows:<define>_WIN32_WINNT=0x0500
+ <toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
+ : usage-requirements
+ <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
+ <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
+ : source-location ../src
+ ;
+
+alias atomic_sources
+ : lock_pool.cpp
+ ;
+
+explicit atomic_sources ;
+
+
+lib boost_atomic
+ : atomic_sources
+ ;
+
+boost-install boost_atomic ;
diff --git a/src/boost/libs/atomic/index.html b/src/boost/libs/atomic/index.html
new file mode 100644
index 000000000..62a6c59f1
--- /dev/null
+++ b/src/boost/libs/atomic/index.html
@@ -0,0 +1,13 @@
+<html>
+<head>
+<meta http-equiv="refresh" content="0; URL=../../doc/html/atomic.html">
+</head>
+<body>
+Automatic redirection failed, please go to
+<a href="../../doc/html/atomic.html">../../doc/html/atomic.html</a> &nbsp;<hr>
+<p>&copy; Copyright Beman Dawes, 2001</p>
+<p>Distributed under the Boost Software License, Version 1.0. (See accompanying
+file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy
+at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
+</body>
+</html>
diff --git a/src/boost/libs/atomic/meta/libraries.json b/src/boost/libs/atomic/meta/libraries.json
new file mode 100644
index 000000000..42e0fe3cc
--- /dev/null
+++ b/src/boost/libs/atomic/meta/libraries.json
@@ -0,0 +1,18 @@
+{
+ "key": "atomic",
+ "name": "Atomic",
+ "authors": [
+ "Helge Bahmann",
+ "Tim Blechmann",
+ "Andrey Semashev"
+ ],
+ "description": "C++11-style atomic<>.",
+ "category": [
+ "Concurrent"
+ ],
+ "maintainers": [
+ "Helge Bahmann <hcb -at- chaoticmind.net>",
+ "Tim Blechmann <tim -at- klingt.org>",
+ "Andrey Semashev <andrey.semashev -at- gmail.com>"
+ ]
+}
diff --git a/src/boost/libs/atomic/src/lock_pool.cpp b/src/boost/libs/atomic/src/lock_pool.cpp
new file mode 100644
index 000000000..8f1d69e77
--- /dev/null
+++ b/src/boost/libs/atomic/src/lock_pool.cpp
@@ -0,0 +1,194 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013-2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file lock_pool.cpp
+ *
+ * This file contains implementation of the lock pool used to emulate atomic ops.
+ */
+
+#include <cstddef>
+#include <boost/config.hpp>
+#include <boost/assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/operations_lockfree.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/pause.hpp>
+
+#if BOOST_ATOMIC_FLAG_LOCK_FREE != 2
+#if defined(BOOST_HAS_PTHREADS)
+#include <pthread.h>
+#define BOOST_ATOMIC_USE_PTHREAD
+#else
+#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
+#endif
+#endif // BOOST_ATOMIC_FLAG_LOCK_FREE != 2
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// 'struct_name' : structure was padded due to __declspec(align())
+#pragma warning(disable: 4324)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+namespace lock_pool {
+
+namespace {
+
+// Cache line size, in bytes
+// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
+#if defined(__s390__) || defined(__s390x__)
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 256
+#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 128
+#else
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
+#endif
+
+#if defined(BOOST_ATOMIC_USE_PTHREAD)
+typedef pthread_mutex_t lock_type;
+#else
+typedef atomics::detail::operations< 1u, false > lock_operations;
+typedef lock_operations::storage_type lock_type;
+#endif
+
+enum
+{
+ padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
+ (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
+ (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
+};
+
+template< unsigned int PaddingSize >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
+{
+ lock_type lock;
+ // The additional padding is needed to avoid false sharing between locks
+ char padding[PaddingSize];
+};
+
+template< >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
+{
+ lock_type lock;
+};
+
+typedef padded_lock< padding_size > padded_lock_t;
+
+//! Lock pool size. Must be a power of two.
+BOOST_CONSTEXPR_OR_CONST std::size_t lock_pool_size = 64u;
+
+static padded_lock_t g_lock_pool[lock_pool_size]
+#if defined(BOOST_ATOMIC_USE_PTHREAD)
+=
+{
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
+ { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }
+}
+#endif
+;
+
+//! Returns index of the lock pool entry for the given pointer value
+BOOST_FORCEINLINE std::size_t get_lock_index(atomics::detail::uintptr_t ptr)
+{
+ // Since many malloc/new implementations return pointers with higher alignment
+ // than indicated by storage_alignment, it makes sense to mix higher bits
+ // into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
+ // on 32-bit - to 8 bytes.
+ BOOST_CONSTEXPR_OR_CONST unsigned int mix_shift = sizeof(void*) >= 8u ? 4u : 3u;
+ ptr ^= ptr >> mix_shift;
+ return ptr & (lock_pool_size - 1u);
+}
+
+} // namespace
+
+
+#if !defined(BOOST_ATOMIC_USE_PTHREAD)
+
+// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes.
+BOOST_ATOMIC_DECL void* lock(atomics::detail::uintptr_t ptr) BOOST_NOEXCEPT
+{
+ lock_type& lock = g_lock_pool[get_lock_index(ptr)].lock;
+ while (lock_operations::test_and_set(lock, memory_order_acquire))
+ {
+ do
+ {
+ atomics::detail::pause();
+ }
+ while (!!lock_operations::load(lock, memory_order_relaxed));
+ }
+
+ return &lock;
+}
+
+BOOST_ATOMIC_DECL void unlock(void* p) BOOST_NOEXCEPT
+{
+ lock_operations::clear(*static_cast< lock_type* >(p), memory_order_release);
+}
+
+#else // !defined(BOOST_ATOMIC_USE_PTHREAD)
+
+BOOST_ATOMIC_DECL void* lock(atomics::detail::uintptr_t ptr) BOOST_NOEXCEPT
+{
+ lock_type& lock = g_lock_pool[get_lock_index(ptr)].lock;
+ BOOST_VERIFY(pthread_mutex_lock(&lock) == 0);
+ return &lock;
+}
+
+BOOST_ATOMIC_DECL void unlock(void* p) BOOST_NOEXCEPT
+{
+ BOOST_VERIFY(pthread_mutex_unlock(static_cast< lock_type* >(p)) == 0);
+}
+
+#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
+
+BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT
+{
+#if BOOST_ATOMIC_THREAD_FENCE > 0
+ atomics::detail::thread_fence(memory_order_seq_cst);
+#else
+ // Emulate full fence by locking/unlocking a mutex
+ lock_pool::unlock(lock_pool::lock(0u));
+#endif
+}
+
+BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT
+{
+ // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
+#if BOOST_ATOMIC_SIGNAL_FENCE > 0
+ atomics::detail::signal_fence(memory_order_seq_cst);
+#endif
+}
+
+} // namespace lock_pool
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
diff --git a/src/boost/libs/atomic/test/Jamfile.v2 b/src/boost/libs/atomic/test/Jamfile.v2
new file mode 100644
index 000000000..42bc16d24
--- /dev/null
+++ b/src/boost/libs/atomic/test/Jamfile.v2
@@ -0,0 +1,37 @@
+# Boost.Atomic Library test Jamfile
+#
+# Copyright (c) 2011 Helge Bahmann
+# Copyright (c) 2012 Tim Blechmann
+# Copyright (c) 2020 Andrey Semashev
+#
+# Distributed under the Boost Software License, Version 1.0. (See
+# accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import testing ;
+
+project boost/atomic/test
+ : requirements
+ <threading>multi
+ <library>/boost/thread//boost_thread
+ <library>/boost/atomic//boost_atomic
+ <target-os>windows:<define>BOOST_USE_WINDOWS_H
+ <target-os>windows:<define>_WIN32_WINNT=0x0500
+ <toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
+ ;
+
+test-suite atomic
+ : [ run native_api.cpp ]
+ [ run native_ref_api.cpp ]
+ [ run fallback_api.cpp ]
+ [ run fallback_ref_api.cpp ]
+ [ run atomicity.cpp ]
+ [ run atomicity_ref.cpp ]
+ [ run ordering.cpp ]
+ [ run ordering_ref.cpp ]
+ [ run lockfree.cpp ]
+ [ compile-fail cf_arith_void_ptr.cpp ]
+ [ compile-fail cf_arith_func_ptr.cpp ]
+ [ compile-fail cf_arith_mem_ptr.cpp ]
+ [ compile c_implicit_ctor.cpp ]
+ ;
diff --git a/src/boost/libs/atomic/test/aligned_object.hpp b/src/boost/libs/atomic/test/aligned_object.hpp
new file mode 100644
index 000000000..9a8098782
--- /dev/null
+++ b/src/boost/libs/atomic/test/aligned_object.hpp
@@ -0,0 +1,61 @@
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_ATOMIC_TESTS_ALIGNED_OBJECT_HPP_INCLUDED_
+#define BOOST_ATOMIC_TESTS_ALIGNED_OBJECT_HPP_INCLUDED_
+
+#include <cstddef>
+#include <new>
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+//! A wrapper that creates an object that has at least the specified alignment
+template< typename T, std::size_t Alignment >
+class aligned_object
+{
+private:
+ T* m_p;
+ unsigned char m_storage[Alignment + sizeof(T)];
+
+public:
+ aligned_object()
+ {
+ m_p = new (get_aligned_storage()) T;
+ }
+
+ explicit aligned_object(T const& value)
+ {
+ m_p = new (get_aligned_storage()) T(value);
+ }
+
+ ~aligned_object() BOOST_NOEXCEPT
+ {
+ m_p->~T();
+ }
+
+ T& get() const BOOST_NOEXCEPT
+ {
+ return *m_p;
+ }
+
+ BOOST_DELETED_FUNCTION(aligned_object(aligned_object const&))
+ BOOST_DELETED_FUNCTION(aligned_object& operator= (aligned_object const&))
+
+private:
+ unsigned char* get_aligned_storage()
+ {
+#if defined(BOOST_HAS_INTPTR_T)
+ typedef boost::uintptr_t uintptr_type;
+#else
+ typedef std::size_t uintptr_type;
+#endif
+ uintptr_type p = (uintptr_type)(unsigned char*)m_storage;
+ p = (p + (uintptr_type)(Alignment - 1u)) & ~(uintptr_type)(Alignment - 1u);
+ return (unsigned char*)p;
+ }
+};
+
+#endif // BOOST_ATOMIC_TESTS_ALIGNED_OBJECT_HPP_INCLUDED_
diff --git a/src/boost/libs/atomic/test/api_test_helpers.hpp b/src/boost/libs/atomic/test/api_test_helpers.hpp
new file mode 100644
index 000000000..3f09700a3
--- /dev/null
+++ b/src/boost/libs/atomic/test/api_test_helpers.hpp
@@ -0,0 +1,1238 @@
+// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2017 - 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_ATOMIC_API_TEST_HELPERS_HPP
+#define BOOST_ATOMIC_API_TEST_HELPERS_HPP
+
+#include <boost/atomic.hpp>
+#include <cstddef>
+#include <cstring>
+#include <cstdlib>
+#include <limits>
+#include <iostream>
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+#include <boost/type.hpp>
+#include <boost/type_traits/integral_constant.hpp>
+#include <boost/type_traits/alignment_of.hpp>
+#include <boost/type_traits/is_pointer.hpp>
+#include <boost/type_traits/is_signed.hpp>
+#include <boost/type_traits/is_unsigned.hpp>
+#include <boost/type_traits/make_signed.hpp>
+#include <boost/type_traits/make_unsigned.hpp>
+#include <boost/type_traits/conditional.hpp>
+#include "aligned_object.hpp"
+
+struct test_stream_type
+{
+ typedef std::ios_base& (*ios_base_manip)(std::ios_base&);
+ typedef std::basic_ios< char, std::char_traits< char > >& (*basic_ios_manip)(std::basic_ios< char, std::char_traits< char > >&);
+ typedef std::ostream& (*stream_manip)(std::ostream&);
+
+ template< typename T >
+ test_stream_type const& operator<< (T const& value) const
+ {
+ std::cerr << value;
+ return *this;
+ }
+
+ test_stream_type const& operator<< (ios_base_manip manip) const
+ {
+ std::cerr << manip;
+ return *this;
+ }
+ test_stream_type const& operator<< (basic_ios_manip manip) const
+ {
+ std::cerr << manip;
+ return *this;
+ }
+ test_stream_type const& operator<< (stream_manip manip) const
+ {
+ std::cerr << manip;
+ return *this;
+ }
+
+ // Make sure characters are printed as numbers if tests fail
+ test_stream_type const& operator<< (char value) const
+ {
+ std::cerr << static_cast< int >(value);
+ return *this;
+ }
+ test_stream_type const& operator<< (signed char value) const
+ {
+ std::cerr << static_cast< int >(value);
+ return *this;
+ }
+ test_stream_type const& operator<< (unsigned char value) const
+ {
+ std::cerr << static_cast< unsigned int >(value);
+ return *this;
+ }
+ test_stream_type const& operator<< (short value) const
+ {
+ std::cerr << static_cast< int >(value);
+ return *this;
+ }
+ test_stream_type const& operator<< (unsigned short value) const
+ {
+ std::cerr << static_cast< unsigned int >(value);
+ return *this;
+ }
+
+#if defined(BOOST_HAS_INT128)
+ // Some GCC versions don't provide output operators for __int128
+ test_stream_type const& operator<< (boost::int128_type const& v) const
+ {
+ std::cerr << static_cast< long long >(v);
+ return *this;
+ }
+ test_stream_type const& operator<< (boost::uint128_type const& v) const
+ {
+ std::cerr << static_cast< unsigned long long >(v);
+ return *this;
+ }
+#endif // defined(BOOST_HAS_INT128)
+#if defined(BOOST_HAS_FLOAT128)
+ // libstdc++ does not provide output operators for __float128
+ test_stream_type const& operator<< (boost::float128_type const& v) const
+ {
+ std::cerr << static_cast< double >(v);
+ return *this;
+ }
+#endif // defined(BOOST_HAS_FLOAT128)
+};
+
+const test_stream_type test_stream = {};
+
+#define BOOST_LIGHTWEIGHT_TEST_OSTREAM test_stream
+
+#include <boost/core/lightweight_test.hpp>
+
+#include "value_with_epsilon.hpp"
+
+const unsigned int max_weak_cas_loops = 1000;
+
+//! Wrapper type for atomic template
+template< typename T >
+struct atomic_wrapper
+{
+ typedef boost::atomic< T > atomic_type;
+
+ atomic_type a;
+
+ BOOST_DEFAULTED_FUNCTION(atomic_wrapper(), {})
+ explicit atomic_wrapper(T const& value) : a(value) {}
+};
+
+//! Wrapper type for atomic_ref template
+template< typename T >
+struct atomic_ref_wrapper
+{
+ typedef boost::atomic_ref< T > atomic_type;
+
+ aligned_object< T, atomic_type::required_alignment > object;
+ atomic_type a;
+
+ atomic_ref_wrapper() : a(object.get()) {}
+ explicit atomic_ref_wrapper(T const& value) : object(value), a(object.get()) {}
+};
+
+/* provide helpers that exercise whether the API
+functions of "boost::atomic" provide the correct
+operational semantic in the case of sequential
+execution */
+
+inline void test_flag_api(void)
+{
+#ifndef BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
+ boost::atomic_flag f = BOOST_ATOMIC_FLAG_INIT;
+#else
+ boost::atomic_flag f;
+#endif
+
+ BOOST_TEST( !f.test() );
+ BOOST_TEST( !f.test_and_set() );
+ BOOST_TEST( f.test() );
+ BOOST_TEST( f.test_and_set() );
+ BOOST_TEST( f.test() );
+ f.clear();
+ BOOST_TEST( !f.test() );
+ BOOST_TEST( !f.test_and_set() );
+}
+
+template< typename T >
+inline void test_atomic_type_traits(boost::type< boost::atomic< T > >)
+{
+ BOOST_TEST_GE(sizeof(boost::atomic< T >), sizeof(T));
+}
+
+template< typename T >
+inline void test_atomic_type_traits(boost::type< boost::atomic_ref< T > >)
+{
+ if (boost::atomic_ref< T >::is_always_lock_free)
+ {
+ BOOST_TEST_GE(boost::atomic_ref< T >::required_alignment, boost::alignment_of< T >::value);
+ }
+ else
+ {
+ // Lock-based implementation should not require alignment higher than alignof(T)
+ BOOST_TEST_EQ(boost::atomic_ref< T >::required_alignment, boost::alignment_of< T >::value);
+ }
+}
+
+template< template< typename > class Wrapper, typename T >
+void test_base_operators(T value1, T value2, T value3)
+{
+ test_atomic_type_traits(boost::type< typename Wrapper<T>::atomic_type >());
+
+ // explicit load/store
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ BOOST_TEST_EQ( a.load(), value1 );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ a.store(value2);
+ BOOST_TEST_EQ( a.load(), value2 );
+ }
+
+ // overloaded assignment/conversion
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ BOOST_TEST( value1 == a );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ a = value2;
+ BOOST_TEST( value2 == a );
+ }
+
+ // exchange-type operators
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ T n = a.exchange(value2);
+ BOOST_TEST_EQ( a.load(), value2 );
+ BOOST_TEST_EQ( n, value1 );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ T expected = value1;
+ bool success = a.compare_exchange_strong(expected, value3);
+ BOOST_TEST( success );
+ BOOST_TEST_EQ( a.load(), value3 );
+ BOOST_TEST_EQ( expected, value1 );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ T expected = value2;
+ bool success = a.compare_exchange_strong(expected, value3);
+ BOOST_TEST( !success );
+ BOOST_TEST_EQ( a.load(), value1 );
+ BOOST_TEST_EQ( expected, value1 );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ T expected;
+ unsigned int loops = 0;
+ bool success = false;
+ do
+ {
+ expected = value1;
+ success = a.compare_exchange_weak(expected, value3);
+ ++loops;
+ }
+ while (!success && loops < max_weak_cas_loops);
+ BOOST_TEST( success );
+ BOOST_TEST_EQ( a.load(), value3 );
+ BOOST_TEST_EQ( expected, value1 );
+ }
+
+ {
+ Wrapper<T> wrapper(value1);
+ typename Wrapper<T>::atomic_type& a = wrapper.a;
+ T expected;
+ unsigned int loops = 0;
+ bool success = false;
+ do
+ {
+ expected = value2;
+ success = a.compare_exchange_weak(expected, value3);
+ if (expected != value2)
+ break;
+ ++loops;
+ }
+ while (!success && loops < max_weak_cas_loops);
+ BOOST_TEST( !success );
+ BOOST_TEST_EQ( a.load(), value1 );
+ BOOST_TEST_EQ( expected, value1 );
+ }
+}
+
+//! Tests whether boost::atomic supports constexpr constructor. Note that boost::atomic_ref (as std::atomic_ref) does not support constexpr constructor.
+template< typename T >
+void test_constexpr_ctor()
+{
+#ifndef BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT
+ constexpr T value(0);
+ constexpr boost::atomic<T> tester(value);
+ BOOST_TEST( tester == value );
+#endif
+}
+
+//! The type traits provides max and min values of type D that can be added/subtracted to T(0) without signed overflow
+template< typename T, typename D, bool IsSigned = boost::is_signed< D >::value >
+struct distance_limits
+{
+ //! Difference type D promoted to the width of type T
+ typedef typename boost::conditional<
+ IsSigned,
+ boost::make_signed< T >,
+ boost::make_unsigned< T >
+ >::type::type promoted_difference_type;
+
+ static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return (std::numeric_limits< D >::min)();
+ }
+ static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return (std::numeric_limits< D >::max)();
+ }
+};
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// 'static_cast': truncation of constant value. There is no actual truncation happening because
+// the cast is only performed if the value fits in the range of the result.
+#pragma warning(disable: 4309)
+#endif
+
+template< typename T, typename D >
+struct distance_limits< T*, D, true >
+{
+ //! Difference type D promoted to the width of type T
+ typedef std::ptrdiff_t promoted_difference_type;
+
+ static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ const std::ptrdiff_t ptrdiff = (std::numeric_limits< std::ptrdiff_t >::min)() / static_cast< std::ptrdiff_t >(sizeof(T));
+ const D diff = (std::numeric_limits< D >::min)();
+ // Both values are negative. Return the closest value to zero.
+ return diff < ptrdiff ? static_cast< D >(ptrdiff) : diff;
+ }
+ static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ const std::ptrdiff_t ptrdiff = (std::numeric_limits< std::ptrdiff_t >::max)() / static_cast< std::ptrdiff_t >(sizeof(T));
+ const D diff = (std::numeric_limits< D >::max)();
+ // Both values are positive. Return the closest value to zero.
+ return diff > ptrdiff ? static_cast< D >(ptrdiff) : diff;
+ }
+};
+
+template< typename T, typename D >
+struct distance_limits< T*, D, false >
+{
+ //! Difference type D promoted to the width of type T
+ typedef std::size_t promoted_difference_type;
+
+ static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return (std::numeric_limits< D >::min)();
+ }
+ static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ const std::size_t ptrdiff = static_cast< std::size_t >((std::numeric_limits< std::ptrdiff_t >::max)()) / sizeof(T);
+ const D diff = (std::numeric_limits< D >::max)();
+ return diff > ptrdiff ? static_cast< D >(ptrdiff) : diff;
+ }
+};
+
+#if defined(BOOST_HAS_INT128)
+
+// At least libstdc++ does not specialize std::numeric_limits for __int128 in strict mode (i.e. with GNU extensions disabled).
+// So we have to specialize the limits ourself. We assume two's complement signed representation.
+template< typename T, bool IsSigned >
+struct distance_limits< T, boost::int128_type, IsSigned >
+{
+ //! Difference type D promoted to the width of type T
+ typedef boost::int128_type promoted_difference_type;
+
+ static boost::int128_type min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return -(max)() - 1;
+ }
+ static boost::int128_type max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return static_cast< boost::int128_type >((~static_cast< boost::uint128_type >(0u)) >> 1);
+ }
+};
+
+template< typename T, bool IsSigned >
+struct distance_limits< T, boost::uint128_type, IsSigned >
+{
+ //! Difference type D promoted to the width of type T
+ typedef boost::uint128_type promoted_difference_type;
+
+ static boost::uint128_type min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return 0u;
+ }
+ static boost::uint128_type max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
+ {
+ return ~static_cast< boost::uint128_type >(0u);
+ }
+};
+
+#endif // defined(BOOST_HAS_INT128)
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// unary minus operator applied to unsigned type, result still unsigned
+#pragma warning(disable: 4146)
+#endif
+
+//! Tests the add_and_test and sub_and_test operations: each modifies the value
+//! and returns whether the result is non-zero. Exercised with zero, one and the
+//! extreme representable distances of the difference type D.
+template< template< typename > class Wrapper, typename T, typename D, typename AddType >
+void test_additive_operators_with_type_and_test()
+{
+#if defined(UBSAN)
+    // clang UBSAN flags this test when AddType is a pointer as it considers subtracting from a null pointer (zero_add) an UB
+    if (boost::is_pointer< AddType >::value)
+        return;
+#endif
+
+    // Note: This set of tests is extracted to a separate function because otherwise MSVC-10 for x64 generates broken code
+    typedef typename distance_limits< T, D >::promoted_difference_type promoted_difference_type;
+    typedef typename boost::make_unsigned< promoted_difference_type >::type unsigned_promoted_difference_type;
+    const T zero_value = 0;
+    const D zero_diff = 0;
+    const D one_diff = 1;
+    const AddType zero_add = 0;
+
+    // add_and_test: adding zero keeps the value zero (result false),
+    // adding one makes it non-zero (result true)
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.add_and_test(zero_diff);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), zero_value );
+
+        f = a.add_and_test(one_diff);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(zero_add + one_diff) );
+    }
+    // add_and_test at the maximum distance of D
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.add_and_test((distance_limits< T, D >::max)());
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(zero_add + (distance_limits< T, D >::max)()) );
+    }
+    // add_and_test at the minimum distance of D; for unsigned D the minimum
+    // is zero, in which case the result is expected to remain zero (false)
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.add_and_test((distance_limits< T, D >::min)());
+        BOOST_TEST_EQ( f, ((distance_limits< T, D >::min)() != 0) );
+        BOOST_TEST_EQ( a.load(), T(zero_add + (distance_limits< T, D >::min)()) );
+    }
+
+    // sub_and_test mirrors the add_and_test cases above
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.sub_and_test(zero_diff);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), zero_value );
+
+        f = a.sub_and_test(one_diff);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(zero_add - one_diff) );
+    }
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.sub_and_test((distance_limits< T, D >::max)());
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(zero_add - (distance_limits< T, D >::max)()) );
+    }
+    {
+        Wrapper<T> wrapper(zero_value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.sub_and_test((distance_limits< T, D >::min)());
+        BOOST_TEST_EQ( f, ((distance_limits< T, D >::min)() != 0) );
+        // Be very careful as to not cause signed overflow on negation:
+        // negate in the unsigned domain (where wraparound is well-defined)
+        // and bit-copy the result back into the signed type via memcpy
+        unsigned_promoted_difference_type umin = static_cast< unsigned_promoted_difference_type >(
+            static_cast< promoted_difference_type >((distance_limits< T, D >::min)()));
+        umin = -umin;
+        promoted_difference_type neg_min;
+        std::memcpy(&neg_min, &umin, sizeof(neg_min));
+        BOOST_TEST_EQ( a.load(), T(zero_add + neg_min) );
+    }
+}
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+//! Tests the full additive interface of the atomic type T with difference type D:
+//! fetch_add/fetch_sub, +=/-=/++/--, value-returning add/sub, opaque_add/opaque_sub
+//! and the op-and-test operations. AddType is the type the arithmetic is carried
+//! out in (e.g. a raw pointer for pointer specializations).
+template< template< typename > class Wrapper, typename T, typename D, typename AddType >
+void test_additive_operators_with_type(T value, D delta)
+{
+    /* note: the tests explicitly cast the result of any addition
+    to the type to be tested to force truncation of the result to
+    the correct range in case of overflow */
+
+    // explicit add/sub: fetch_* returns the pre-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_add(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_sub(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    // overloaded modify/assign: the operators return the post-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a += delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
+        BOOST_TEST_EQ( n, T((AddType)value + delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a -= delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
+        BOOST_TEST_EQ( n, T((AddType)value - delta) );
+    }
+
+    // overloaded increment/decrement: post-forms return the old value,
+    // pre-forms return the new value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a++;
+        BOOST_TEST_EQ( a.load(), T((AddType)value + 1) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = ++a;
+        BOOST_TEST_EQ( a.load(), T((AddType)value + 1) );
+        BOOST_TEST_EQ( n, T((AddType)value + 1) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a--;
+        BOOST_TEST_EQ( a.load(), T((AddType)value - 1) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = --a;
+        BOOST_TEST_EQ( a.load(), T((AddType)value - 1) );
+        BOOST_TEST_EQ( n, T((AddType)value - 1) );
+    }
+
+    // Operations returning the actual resulting value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.add(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
+        BOOST_TEST_EQ( n, T((AddType)value + delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.sub(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
+        BOOST_TEST_EQ( n, T((AddType)value - delta) );
+    }
+
+    // Opaque operations: no return value, only the stored value is checked
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_add(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_sub(delta);
+        BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
+    }
+
+    // Modify and test operations
+    test_additive_operators_with_type_and_test< Wrapper, T, D, AddType >();
+}
+
+//! Convenience wrapper: tests additive operations with the arithmetic carried
+//! out in T itself (AddType == T), as appropriate for integral types.
+template< template< typename > class Wrapper, typename T, typename D >
+void test_additive_operators(T value, D delta)
+{
+    test_additive_operators_with_type< Wrapper, T, D, T >(value, delta);
+}
+
+//! Tests the negation operations of the atomic type T: fetch_negate (returns
+//! the old value), negate (returns the new value), opaque_negate, and
+//! negate_and_test (returns whether the result is non-zero).
+template< template< typename > class Wrapper, typename T >
+void test_negation()
+{
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_negate();
+        BOOST_TEST_EQ( a.load(), (T)-1 );
+        BOOST_TEST_EQ( n, (T)1 );
+
+        // negating twice restores the original value
+        n = a.fetch_negate();
+        BOOST_TEST_EQ( a.load(), (T)1 );
+        BOOST_TEST_EQ( n, (T)-1 );
+    }
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.negate();
+        BOOST_TEST_EQ( a.load(), (T)-1 );
+        BOOST_TEST_EQ( n, (T)-1 );
+
+        n = a.negate();
+        BOOST_TEST_EQ( a.load(), (T)1 );
+        BOOST_TEST_EQ( n, (T)1 );
+    }
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_negate();
+        BOOST_TEST_EQ( a.load(), (T)-1 );
+
+        a.opaque_negate();
+        BOOST_TEST_EQ( a.load(), (T)1 );
+    }
+    // negate_and_test on a non-zero value: result is non-zero -> true
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.negate_and_test();
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), (T)-1 );
+
+        f = a.negate_and_test();
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), (T)1 );
+    }
+    // negate_and_test on zero: result stays zero -> false
+    {
+        Wrapper<T> wrapper((T)0);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.negate_and_test();
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), (T)0 );
+    }
+}
+
+//! Tests that add/sub wrap around correctly at the given boundary value:
+//! the atomic result must equal the result of the same non-atomic expression
+//! (modular arithmetic for unsigned T).
+template< template< typename > class Wrapper, typename T >
+void test_additive_wrap(T value)
+{
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_add(1) + (T)1;
+        BOOST_TEST_EQ( a.load(), n );
+    }
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_sub(1) - (T)1;
+        BOOST_TEST_EQ( a.load(), n );
+    }
+}
+
+//! Tests the bitwise operations of the atomic type T: fetch-style and/or/xor/
+//! complement, the overloaded &=/|=/^= operators, value-returning operations,
+//! opaque operations, op-and-test operations, and single-bit test-and-modify
+//! operations.
+template< template< typename > class Wrapper, typename T >
+void test_bit_operators(T value, T delta)
+{
+    // explicit and/or/xor: fetch_* returns the pre-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_and(delta);
+        BOOST_TEST_EQ( a.load(), T(value & delta) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_or(delta);
+        BOOST_TEST_EQ( a.load(), T(value | delta) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_xor(delta);
+        BOOST_TEST_EQ( a.load(), T(value ^ delta) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_complement();
+        BOOST_TEST_EQ( a.load(), T(~value) );
+        BOOST_TEST_EQ( n, value );
+    }
+
+    // overloaded modify/assign: the operators return the post-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a &= delta);
+        BOOST_TEST_EQ( a.load(), T(value & delta) );
+        BOOST_TEST_EQ( n, T(value & delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a |= delta);
+        BOOST_TEST_EQ( a.load(), T(value | delta) );
+        BOOST_TEST_EQ( n, T(value | delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a ^= delta);
+        BOOST_TEST_EQ( a.load(), T(value ^ delta) );
+        BOOST_TEST_EQ( n, T(value ^ delta) );
+    }
+
+    // Operations returning the actual resulting value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.bitwise_and(delta);
+        BOOST_TEST_EQ( a.load(), T(value & delta) );
+        BOOST_TEST_EQ( n, T(value & delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.bitwise_or(delta);
+        BOOST_TEST_EQ( a.load(), T(value | delta) );
+        BOOST_TEST_EQ( n, T(value | delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.bitwise_xor(delta);
+        BOOST_TEST_EQ( a.load(), T(value ^ delta) );
+        BOOST_TEST_EQ( n, T(value ^ delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.bitwise_complement();
+        BOOST_TEST_EQ( a.load(), T(~value) );
+        BOOST_TEST_EQ( n, T(~value) );
+    }
+
+    // Opaque operations: no return value, only the stored value is checked
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_and(delta);
+        BOOST_TEST_EQ( a.load(), T(value & delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_or(delta);
+        BOOST_TEST_EQ( a.load(), T(value | delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_xor(delta);
+        BOOST_TEST_EQ( a.load(), T(value ^ delta) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_complement();
+        BOOST_TEST_EQ( a.load(), T(~value) );
+    }
+
+    // Modify and test operations: the returned flag indicates whether the
+    // result of the operation is non-zero
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.and_and_test((T)1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(1) );
+
+        f = a.and_and_test((T)0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+
+        f = a.and_and_test((T)0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+    }
+
+    {
+        Wrapper<T> wrapper((T)0);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.or_and_test((T)0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+
+        f = a.or_and_test((T)1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(1) );
+
+        f = a.or_and_test((T)1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(1) );
+    }
+
+    {
+        Wrapper<T> wrapper((T)0);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.xor_and_test((T)0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+
+        f = a.xor_and_test((T)1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(1) );
+
+        f = a.xor_and_test((T)1);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+    }
+
+    {
+        Wrapper<T> wrapper((T)0);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.complement_and_test();
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), static_cast< T >(~static_cast< T >(0)) );
+
+        f = a.complement_and_test();
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(0) );
+    }
+
+    // Bit test and modify operations: the returned flag is the previous
+    // state of the tested bit. 42 is binary 101010.
+    {
+        Wrapper<T> wrapper((T)42);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.bit_test_and_set(0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(43) );
+
+        f = a.bit_test_and_set(1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(43) );
+
+        f = a.bit_test_and_set(2);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(47) );
+    }
+
+    {
+        Wrapper<T> wrapper((T)42);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.bit_test_and_reset(0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(42) );
+
+        f = a.bit_test_and_reset(1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(40) );
+
+        // Fixed: this section exercises bit_test_and_reset; previously this
+        // third operation called bit_test_and_set(2) (copy-paste from the
+        // section above). Bit 2 of 40 (101000) is clear, so the flag is
+        // false and the value is unchanged.
+        f = a.bit_test_and_reset(2);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(40) );
+    }
+
+    {
+        Wrapper<T> wrapper((T)42);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        bool f = a.bit_test_and_complement(0);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(43) );
+
+        f = a.bit_test_and_complement(1);
+        BOOST_TEST_EQ( f, true );
+        BOOST_TEST_EQ( a.load(), T(41) );
+
+        f = a.bit_test_and_complement(2);
+        BOOST_TEST_EQ( f, false );
+        BOOST_TEST_EQ( a.load(), T(45) );
+    }
+
+    // Test that a runtime value works for the bit index. This is important for asm block constraints.
+    {
+        unsigned int runtime_bit_index = std::rand() & 7u;
+        Wrapper<T> wrapper((T)42);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+
+        a.bit_test_and_set(runtime_bit_index);
+        a.bit_test_and_reset(runtime_bit_index);
+        a.bit_test_and_complement(runtime_bit_index);
+    }
+}
+
+//! Integral API tests common to signed and unsigned types: base operations,
+//! additive and bitwise operations, plus additive tests at range boundaries.
+template< template< typename > class Wrapper, typename T >
+void do_test_integral_api(boost::false_type)
+{
+    test_base_operators< Wrapper, T >(42, 43, 44);
+    test_additive_operators< Wrapper, T, T >(42, 17);
+    test_bit_operators< Wrapper, T >((T)0x5f5f5f5f5f5f5f5fULL, (T)0xf5f5f5f5f5f5f5f5ULL);
+
+    /* test for unsigned overflow/underflow */
+    test_additive_operators< Wrapper, T, T >((T)-1, 1);
+    test_additive_operators< Wrapper, T, T >(0, 1);
+    /* test for signed overflow/underflow */
+    // NOTE(review): for signed T, right-shifting (T)-1 is implementation-defined
+    // (arithmetic shift on mainstream targets yields -1, not the signed maximum)
+    // — confirm this produces the intended boundary values on the supported
+    // platforms
+    test_additive_operators< Wrapper, T, T >(((T)-1) >> (sizeof(T) * 8 - 1), 1);
+    test_additive_operators< Wrapper, T, T >(1 + (((T)-1) >> (sizeof(T) * 8 - 1)), 1);
+}
+
+//! Additional integral API tests for unsigned types: run the common tests,
+//! then verify modular wraparound at the interesting boundary values
+//! (0, all-ones, the two's complement signed min/max bit patterns).
+template< template< typename > class Wrapper, typename T >
+void do_test_integral_api(boost::true_type)
+{
+    do_test_integral_api< Wrapper, T >(boost::false_type());
+
+    test_additive_wrap< Wrapper, T >(0u);
+    BOOST_CONSTEXPR_OR_CONST T all_ones = ~(T)0u;
+    test_additive_wrap< Wrapper, T >(all_ones);
+    // all_ones ^ max_signed_twos_compl leaves only the top bit set
+    BOOST_CONSTEXPR_OR_CONST T max_signed_twos_compl = all_ones >> 1;
+    test_additive_wrap< Wrapper, T >(all_ones ^ max_signed_twos_compl);
+    test_additive_wrap< Wrapper, T >(max_signed_twos_compl);
+}
+
+//! Entry point for the integral API tests: dispatches on the signedness of T
+//! (unsigned types additionally get the wraparound tests), and runs the
+//! negation tests for signed types only.
+template< template< typename > class Wrapper, typename T >
+inline void test_integral_api(void)
+{
+    // Tag-dispatch: boost::true_type selects the unsigned overload
+    typedef typename boost::is_unsigned< T >::type signedness_tag;
+    do_test_integral_api< Wrapper, T >(signedness_tag());
+
+    if (boost::is_signed<T>::value)
+        test_negation< Wrapper, T >();
+}
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+//! Tests additive operations of the atomic floating point type T: fetch_add/
+//! fetch_sub, +=/-=, value-returning add/sub and opaque operations. Results
+//! are compared approximately (via approx) to tolerate rounding differences.
+template< template< typename > class Wrapper, typename T, typename D >
+void test_fp_additive_operators(T value, D delta)
+{
+    // explicit add/sub: fetch_* returns the pre-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_add(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value + delta)) );
+        BOOST_TEST_EQ( n, approx(value) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_sub(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value - delta)) );
+        BOOST_TEST_EQ( n, approx(value) );
+    }
+
+    // overloaded modify/assign: the operators return the post-operation value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a += delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value + delta)) );
+        BOOST_TEST_EQ( n, approx(T(value + delta)) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = (a -= delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value - delta)) );
+        BOOST_TEST_EQ( n, approx(T(value - delta)) );
+    }
+
+    // Operations returning the actual resulting value
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.add(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value + delta)) );
+        BOOST_TEST_EQ( n, approx(T(value + delta)) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.sub(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value - delta)) );
+        BOOST_TEST_EQ( n, approx(T(value - delta)) );
+    }
+
+    // Opaque operations: no return value, only the stored value is checked
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_add(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value + delta)) );
+    }
+
+    {
+        Wrapper<T> wrapper(value);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_sub(delta);
+        BOOST_TEST_EQ( a.load(), approx(T(value - delta)) );
+    }
+}
+
+//! Tests the negation operations of the atomic floating point type T:
+//! fetch_negate (returns the old value), negate (returns the new value)
+//! and opaque_negate. Results are compared approximately.
+template< template< typename > class Wrapper, typename T >
+void test_fp_negation()
+{
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.fetch_negate();
+        BOOST_TEST_EQ( a.load(), approx((T)-1) );
+        BOOST_TEST_EQ( n, approx((T)1) );
+
+        // negating twice restores the original value
+        n = a.fetch_negate();
+        BOOST_TEST_EQ( a.load(), approx((T)1) );
+        BOOST_TEST_EQ( n, approx((T)-1) );
+    }
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        T n = a.negate();
+        BOOST_TEST_EQ( a.load(), approx((T)-1) );
+        BOOST_TEST_EQ( n, approx((T)-1) );
+
+        n = a.negate();
+        BOOST_TEST_EQ( a.load(), approx((T)1) );
+        BOOST_TEST_EQ( n, approx((T)1) );
+    }
+    {
+        Wrapper<T> wrapper((T)1);
+        typename Wrapper<T>::atomic_type& a = wrapper.a;
+        a.opaque_negate();
+        BOOST_TEST_EQ( a.load(), approx((T)-1) );
+
+        a.opaque_negate();
+        BOOST_TEST_EQ( a.load(), approx((T)1) );
+    }
+}
+
+#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+//! Entry point for the floating point API tests: base operations, additive
+//! operations with positive and negative operands, and negation.
+template< template< typename > class Wrapper, typename T >
+void test_floating_point_api(void)
+{
+    // Note: When support for floating point is disabled, even the base operation tests may fail because
+    // the generic template specialization does not account for garbage in padding bits that are present in some FP types.
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+    test_base_operators< Wrapper, T >(static_cast<T>(42.1), static_cast<T>(43.2), static_cast<T>(44.3));
+
+    test_fp_additive_operators< Wrapper, T, T >(static_cast<T>(42.5), static_cast<T>(17.7));
+    test_fp_additive_operators< Wrapper, T, T >(static_cast<T>(-42.5), static_cast<T>(-17.7));
+
+    test_fp_negation< Wrapper, T >();
+#endif
+}
+
+
+//! Tests the atomic pointer API for T*: base and additive operations on object
+//! pointers (using addresses of real array elements so pointer arithmetic is
+//! valid), base operations on void*, and lock-freedom consistency with the
+//! matching integral type.
+template< template< typename > class Wrapper, typename T >
+void test_pointer_api(void)
+{
+    T values[3];
+
+    test_base_operators< Wrapper, T* >(&values[0], &values[1], &values[2]);
+    test_additive_operators< Wrapper, T*>(&values[1], 1);
+
+    test_base_operators< Wrapper, void*>(&values[0], &values[1], &values[2]);
+
+#if defined(BOOST_HAS_INTPTR_T)
+    // A pointer and an integer of pointer width should have the same
+    // lock-freedom property
+    Wrapper<void*> wrapper_ptr;
+    typename Wrapper<void*>::atomic_type& ptr = wrapper_ptr.a;
+    Wrapper<boost::intptr_t> wrapper_integral;
+    typename Wrapper<boost::intptr_t>::atomic_type& integral = wrapper_integral.a;
+    BOOST_TEST_EQ( ptr.is_lock_free(), integral.is_lock_free() );
+#endif
+}
+
+//! Plain enum used to verify atomic operations on enumeration types
+enum test_enum
+{
+    foo, bar, baz
+};
+
+//! Tests base atomic operations (load/store/exchange/CAS) on an enum type
+template< template< typename > class Wrapper >
+void test_enum_api(void)
+{
+    test_base_operators< Wrapper >(foo, bar, baz);
+}
+
+//! Trivial single-member struct used to verify atomic operations on small
+//! user-defined types; T controls the size of the struct
+template< typename T >
+struct test_struct
+{
+    typedef T value_type;
+    value_type i;
+    inline bool operator==(test_struct const& c) const { return i == c.i; }
+    inline bool operator!=(test_struct const& c) const { return !operator==(c); }
+};
+
+//! Streaming operator so BOOST_TEST_EQ can print test_struct values on failure.
+// NOTE(review): the output goes to test_stream rather than the passed strm —
+// presumably so that character-typed members are printed as integers; confirm
+// against the definition of test_stream earlier in this header.
+template< typename Char, typename Traits, typename T >
+inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct< T > const& s)
+{
+    test_stream << "{" << s.i << "}";
+    return strm;
+}
+
+//! Tests base atomic operations on a struct type T, and verifies that the
+//! struct specialization has the same lock-freedom property as an atomic of
+//! its (equally sized) member type.
+template< template< typename > class Wrapper, typename T >
+void test_struct_api(void)
+{
+    T a = {1}, b = {2}, c = {3};
+
+    test_base_operators< Wrapper >(a, b, c);
+
+    {
+        Wrapper<T> wrapper_sa;
+        typename Wrapper<T>::atomic_type& sa = wrapper_sa.a;
+        Wrapper<typename T::value_type> wrapper_si;
+        typename Wrapper<typename T::value_type>::atomic_type& si = wrapper_si.a;
+        BOOST_TEST_EQ( sa.is_lock_free(), si.is_lock_free() );
+    }
+}
+
+//! Two-member struct used to verify atomic operations on user-defined types
+//! of double the size of T (e.g. for testing double-width atomics)
+template< typename T >
+struct test_struct_x2
+{
+    typedef T value_type;
+    value_type i, j;
+    inline bool operator==(test_struct_x2 const& c) const { return i == c.i && j == c.j; }
+    inline bool operator!=(test_struct_x2 const& c) const { return !operator==(c); }
+};
+
+//! Streaming operator so BOOST_TEST_EQ can print test_struct_x2 values on
+//! failure.
+// NOTE(review): writes to test_stream rather than strm, same as the
+// test_struct overload above — presumably intentional for char-typed members.
+template< typename Char, typename Traits, typename T >
+inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct_x2< T > const& s)
+{
+    test_stream << "{" << s.i << ", " << s.j << "}";
+    return strm;
+}
+
+//! Tests base atomic operations on a two-member struct type T
+template< template< typename > class Wrapper, typename T >
+void test_struct_x2_api(void)
+{
+    T a = {1, 1}, b = {2, 2}, c = {3, 3};
+
+    test_base_operators< Wrapper >(a, b, c);
+}
+
+//! A 256-byte struct, far too large for lock-free atomics — used to exercise
+//! the lock-based fallback implementation
+struct large_struct
+{
+    unsigned char data[256u];
+
+    // Equality is bytewise over the whole array
+    inline bool operator==(large_struct const& c) const
+    {
+        return std::memcmp(data, &c.data, sizeof(data)) == 0;
+    }
+    inline bool operator!=(large_struct const& c) const
+    {
+        return std::memcmp(data, &c.data, sizeof(data)) != 0;
+    }
+};
+
+//! Streaming operator for diagnostic output; the 256-byte contents are not
+//! printed, only a placeholder tag
+template< typename Char, typename Traits >
+inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, large_struct const&)
+{
+    strm << "[large_struct]";
+    return strm;
+}
+
+//! Tests base atomic operations on a struct too large to be lock-free
+template< template< typename > class Wrapper >
+void test_large_struct_api(void)
+{
+    // Only the first byte of each value differs; the rest is zero-initialized
+    large_struct a = {{1}}, b = {{2}}, c = {{3}};
+    test_base_operators< Wrapper >(a, b, c);
+}
+
+//! Struct with a non-trivial default constructor, used to verify that the
+//! atomic wrapper default-constructs its contained value
+struct test_struct_with_ctor
+{
+    typedef unsigned int value_type;
+    value_type i;
+    // Distinctive initial value so a skipped constructor call is detectable
+    test_struct_with_ctor() : i(0x01234567) {}
+    inline bool operator==(test_struct_with_ctor const& c) const { return i == c.i; }
+    inline bool operator!=(test_struct_with_ctor const& c) const { return !operator==(c); }
+};
+
+//! Streaming operator so BOOST_TEST can print test_struct_with_ctor values
+//! on failure
+template< typename Char, typename Traits >
+inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct_with_ctor const& s)
+{
+    strm << "{" << s.i << "}";
+    return strm;
+}
+
+//! Tests that default construction of the atomic invokes the contained type's
+//! default constructor, then runs the base operations
+template< template< typename > class Wrapper >
+void test_struct_with_ctor_api(void)
+{
+    {
+        test_struct_with_ctor s;
+        Wrapper<test_struct_with_ctor> wrapper_sa;
+        typename Wrapper<test_struct_with_ctor>::atomic_type& sa = wrapper_sa.a;
+        // Check that the default constructor was called
+        BOOST_TEST( sa.load() == s );
+    }
+
+    test_struct_with_ctor a, b, c;
+    a.i = 1;
+    b.i = 2;
+    c.i = 3;
+
+    test_base_operators< Wrapper >(a, b, c);
+}
+
+#endif
diff --git a/src/boost/libs/atomic/test/atomicity.cpp b/src/boost/libs/atomic/test/atomicity.cpp
new file mode 100644
index 000000000..457c97a29
--- /dev/null
+++ b/src/boost/libs/atomic/test/atomicity.cpp
@@ -0,0 +1,285 @@
+// Copyright (c) 2011 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Attempt to determine whether the operations on atomic variables
+// do in fact behave atomically: Let multiple threads race modifying
+// a shared atomic variable and verify that it behaves as expected.
+//
+// We assume that "observable race condition" events are exponentially
+// distributed, with unknown "average time between observable races"
+// (which is just the reciprocal of exp distribution parameter lambda).
+// Use a non-atomic implementation that intentionally exhibits a
+// (hopefully tight) race to compute the maximum-likelihood estimate
+// for this time. From this, compute an estimate that covers the
+// unknown value with 0.995 confidence (using chi square quantile).
+//
+// Use this estimate to pick a timeout for the race tests of the
+// atomic implementations such that under the assumed distribution
+// we get 0.995 probability to detect a race (if there is one).
+//
+// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
+// operations truly behave atomic if this test program does not
+// report an error.
+
+#include <boost/atomic.hpp>
+
+#include <cstddef>
+#include <algorithm>
+#include <boost/config.hpp>
+#include <boost/ref.hpp>
+#include <boost/function.hpp>
+#include <boost/bind/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/thread_time.hpp>
+#include <boost/thread/lock_guard.hpp>
+#include <boost/thread/lock_types.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/thread/condition_variable.hpp>
+#include <boost/core/lightweight_test.hpp>
+
+/* helper class to let two instances of a function race against each
+other, with configurable timeout and early abort on detection of error */
+/* helper class to let two instances of a function race against each
+other, with configurable timeout and early abort on detection of error */
+class concurrent_runner
+{
+public:
+    /* concurrently run the function in two threads, until either timeout
+    or one of the functions returns "false"; returns true if timeout
+    was reached, or false if early abort and updates timeout accordingly */
+    static bool execute(const boost::function<bool(std::size_t)> & fn, boost::posix_time::time_duration & timeout)
+    {
+        concurrent_runner runner(fn);
+        runner.wait_finish(timeout);
+        return !runner.failure();
+    }
+
+    // Launches two threads, each running fn with a distinct instance id (0/1)
+    concurrent_runner(const boost::function<bool(std::size_t)> & fn) :
+        finished_(false), failure_(false)
+    {
+        boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 0)).swap(first_thread_);
+        boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 1)).swap(second_thread_);
+    }
+
+    // Blocks until timeout expires or a thread reports failure, then stops
+    // both threads. On early abort, timeout is reduced to the elapsed time
+    // so the caller learns how long the race took to manifest.
+    void wait_finish(boost::posix_time::time_duration & timeout)
+    {
+        boost::system_time start = boost::get_system_time();
+        boost::system_time end = start + timeout;
+
+        {
+            boost::unique_lock< boost::mutex > guard(m_);
+            while (boost::get_system_time() < end && !finished())
+                c_.timed_wait(guard, end);
+        }
+
+        // Signal both worker threads to leave their loops
+        finished_.store(true, boost::memory_order_relaxed);
+
+        first_thread_.join();
+        second_thread_.join();
+
+        boost::posix_time::time_duration duration = boost::get_system_time() - start;
+        if (duration < timeout)
+            timeout = duration;
+    }
+
+    bool finished(void) const BOOST_NOEXCEPT_OR_NOTHROW
+    {
+        return finished_.load(boost::memory_order_relaxed);
+    }
+
+    // Only meaningful after wait_finish(); failure_ is written under m_ and
+    // read after both threads have been joined
+    bool failure(void) const BOOST_NOEXCEPT_OR_NOTHROW
+    {
+        return failure_;
+    }
+
+private:
+    // Worker loop: repeatedly invoke the test function until it fails or
+    // the runner signals completion; on failure, record it and wake the
+    // waiter in wait_finish()
+    void thread_function(boost::function<bool(std::size_t)> function, std::size_t instance)
+    {
+        while (!finished())
+        {
+            if (!function(instance))
+            {
+                boost::lock_guard< boost::mutex > guard(m_);
+                failure_ = true;
+                finished_.store(true, boost::memory_order_relaxed);
+                c_.notify_all();
+                break;
+            }
+        }
+    }
+
+private:
+    boost::mutex m_;                    // protects failure_ and pairs with c_
+    boost::condition_variable c_;       // signaled on early failure
+
+    boost::atomic<bool> finished_;      // stop flag for both worker threads
+    bool failure_;                      // true if either instance returned false
+
+    boost::thread first_thread_;
+    boost::thread second_thread_;
+};
+
+// Deliberately non-atomic read-modify-write counter used to establish the
+// baseline "time until a race is observable". Each instance (0 or 1) owns its
+// own byte of the shared word (shift = instance * 8); a plain load/store pair
+// provides the race window. Returns false as soon as the instance's byte no
+// longer matches its expected count, i.e. a race was observed.
+bool racy_add(volatile unsigned int & value, std::size_t instance)
+{
+    std::size_t shift = instance * 8;
+    unsigned int mask = 0xff << shift;
+    for (std::size_t n = 0; n < 255; ++n)
+    {
+        // Non-atomic read-modify-write: the other thread may intervene here
+        unsigned int tmp = value;
+        value = tmp + (1 << shift);
+
+        if ((tmp & mask) != (n << shift))
+            return false;
+    }
+
+    // Clear this instance's byte for the next iteration, verifying it
+    // reached the full count of 255 first
+    unsigned int tmp = value;
+    value = tmp & ~mask;
+    if ((tmp & mask) != mask)
+        return false;
+
+    return true;
+}
+
+/* compute estimate for average time between races being observable, in usecs */
+double estimate_avg_race_time(void)
+{
+ double sum = 0.0;
+
+ /* take 10 samples */
+ for (std::size_t n = 0; n < 10; ++n)
+ {
+ boost::posix_time::time_duration timeout(0, 0, 10);
+
+ volatile unsigned int value(0);
+ bool success = concurrent_runner::execute(
+ boost::bind(racy_add, boost::ref(value), boost::placeholders::_1),
+ timeout
+ );
+
+ if (success)
+ {
+ BOOST_ERROR("Failed to establish baseline time for reproducing race condition");
+ }
+
+ sum = sum + timeout.total_microseconds();
+ }
+
+ /* determine maximum likelihood estimate for average time between
+ race observations */
+ double avg_race_time_mle = (sum / 10);
+
+ /* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
+ double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
+
+ return avg_race_time_995;
+}
+
+// Race function verifying atomicity of fetch_add/fetch_sub: each instance
+// (0 or 1) counts up and back down in its own byte of the shared value; a
+// non-atomic implementation would let the threads corrupt each other's byte.
+// Returns false if a race is detected.
+// NOTE(review): the shift_ template parameter is unused — the shift is
+// derived from the instance id at runtime; presumably kept for symmetry or
+// historical reasons.
+template<typename value_type, std::size_t shift_>
+bool test_arithmetic(boost::atomic<value_type> & shared_value, std::size_t instance)
+{
+    std::size_t shift = instance * 8;
+    value_type mask = 0xff << shift;
+    value_type increment = 1 << shift;
+
+    value_type expected = 0;
+
+    for (std::size_t n = 0; n < 255; ++n)
+    {
+        // fetch_add returns the previous value; only this instance's byte
+        // lane must match the expected count
+        value_type tmp = shared_value.fetch_add(increment, boost::memory_order_relaxed);
+        if ( (tmp & mask) != (expected << shift) )
+            return false;
+        ++expected;
+    }
+    for (std::size_t n = 0; n < 255; ++n)
+    {
+        value_type tmp = shared_value.fetch_sub(increment, boost::memory_order_relaxed);
+        if ( (tmp & mask) != (expected << shift) )
+            return false;
+        --expected;
+    }
+
+    return true;
+}
+
+// Race function verifying atomicity of fetch_or/fetch_and/fetch_xor: each
+// instance works on its own byte of the shared value, setting, clearing and
+// toggling bits while checking the returned previous values against a
+// locally tracked expectation. Returns false if a race is detected.
+// NOTE(review): shift_ is unused, same as in test_arithmetic.
+template<typename value_type, std::size_t shift_>
+bool test_bitops(boost::atomic<value_type> & shared_value, std::size_t instance)
+{
+    std::size_t shift = instance * 8;
+    value_type mask = 0xff << shift;
+
+    value_type expected = 0;
+
+    // set bits 0..7 of this instance's byte one at a time
+    for (std::size_t k = 0; k < 8; ++k)
+    {
+        value_type mod = 1u << k;
+        value_type tmp = shared_value.fetch_or(mod << shift, boost::memory_order_relaxed);
+        if ( (tmp & mask) != (expected << shift))
+            return false;
+        expected = expected | mod;
+    }
+    // clear them again one at a time
+    for (std::size_t k = 0; k < 8; ++k)
+    {
+        value_type tmp = shared_value.fetch_and(~(1u << (shift + k)), boost::memory_order_relaxed);
+        if ( (tmp & mask) != (expected << shift))
+            return false;
+        expected = expected & ~(1u << k);
+    }
+    // toggle all bits except one, varying which bit is left out
+    for (std::size_t k = 0; k < 8; ++k)
+    {
+        value_type mod = 255u ^ (1u << k);
+        value_type tmp = shared_value.fetch_xor(mod << shift, boost::memory_order_relaxed);
+        if ( (tmp & mask) != (expected << shift))
+            return false;
+        expected = expected ^ mod;
+    }
+
+    // finally clear this instance's whole byte for the next round
+    value_type tmp = shared_value.fetch_and(~mask, boost::memory_order_relaxed);
+    if ( (tmp & mask) != (expected << shift) )
+        return false;
+
+    return true;
+}
+
+// Establish a race-time baseline with the intentionally racy implementation,
+// then run the atomic implementations for long enough that a race (if any)
+// would be observed with 0.995 probability.
+int main(int, char *[])
+{
+    boost::posix_time::time_duration reciprocal_lambda;
+
+    double avg_race_time = estimate_avg_race_time();
+
+    /* 5.298 = 0.995 quantile of exponential distribution */
+    const boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time));
+
+    {
+        boost::atomic<unsigned int> value(0);
+
+        /* testing two different operations in this loop, therefore
+        enlarge timeout */
+        boost::posix_time::time_duration tmp(timeout * 2);
+
+        bool success = concurrent_runner::execute(
+            boost::bind(test_arithmetic<unsigned int, 0>, boost::ref(value), boost::placeholders::_1),
+            tmp
+        );
+
+        BOOST_TEST(success); // concurrent arithmetic error
+    }
+
+    {
+        boost::atomic<unsigned int> value(0);
+
+        /* testing three different operations in this loop, therefore
+        enlarge timeout */
+        boost::posix_time::time_duration tmp(timeout * 3);
+
+        bool success = concurrent_runner::execute(
+            boost::bind(test_bitops<unsigned int, 0>, boost::ref(value), boost::placeholders::_1),
+            tmp
+        );
+
+        BOOST_TEST(success); // concurrent bit operations error
+    }
+
+    return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/atomicity_ref.cpp b/src/boost/libs/atomic/test/atomicity_ref.cpp
new file mode 100644
index 000000000..5a7b013a9
--- /dev/null
+++ b/src/boost/libs/atomic/test/atomicity_ref.cpp
@@ -0,0 +1,293 @@
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// This test is based on atomicity.cpp by Helge Bahmann. The test
+// was modified to use the atomic_ref template instead of atomic.
+
+// Attempt to determine whether the operations on atomic variables
+// do in fact behave atomically: Let multiple threads race modifying
+// a shared atomic variable and verify that it behaves as expected.
+//
+// We assume that "observable race condition" events are exponentially
+// distributed, with unknown "average time between observable races"
+// (which is just the reciprocal of exp distribution parameter lambda).
+// Use a non-atomic implementation that intentionally exhibits a
+// (hopefully tight) race to compute the maximum-likelihood estimate
+// for this time. From this, compute an estimate that covers the
+// unknown value with 0.995 confidence (using chi square quantile).
+//
+// Use this estimate to pick a timeout for the race tests of the
+// atomic implementations such that under the assumed distribution
+// we get 0.995 probability to detect a race (if there is one).
+//
+// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
+// operations truly behave atomic if this test program does not
+// report an error.
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/atomic.hpp>
+#include <boost/atomic/atomic_ref.hpp>
+
+#include <cstddef>
+#include <algorithm>
+#include <boost/config.hpp>
+#include <boost/ref.hpp>
+#include <boost/function.hpp>
+#include <boost/bind/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/thread_time.hpp>
+#include <boost/thread/lock_guard.hpp>
+#include <boost/thread/lock_types.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/thread/condition_variable.hpp>
+#include <boost/core/lightweight_test.hpp>
+
+/* helper class to let two instances of a function race against each
+other, with configurable timeout and early abort on detection of error */
+/* helper class to let two instances of a function race against each
+other, with configurable timeout and early abort on detection of error */
+class concurrent_runner
+{
+public:
+    /* concurrently run the function in two threads, until either timeout
+    or one of the functions returns "false"; returns true if timeout
+    was reached, or false if early abort and updates timeout accordingly */
+    static bool execute(const boost::function<bool(std::size_t)> & fn, boost::posix_time::time_duration & timeout)
+    {
+        concurrent_runner runner(fn);
+        runner.wait_finish(timeout);
+        return !runner.failure();
+    }
+
+
+    // Starts two worker threads immediately; each repeatedly invokes fn with
+    // its instance index (0 or 1) until failure is detected or time runs out.
+    concurrent_runner(const boost::function<bool(std::size_t)> & fn) :
+        finished_(false), failure_(false)
+    {
+        // boost::thread is non-copyable; swap moves the new thread into the member.
+        boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 0)).swap(first_thread_);
+        boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 1)).swap(second_thread_);
+    }
+
+    // Blocks until the timeout expires or a worker signals failure; on early
+    // abort, shrinks `timeout` to the actual elapsed duration so callers can
+    // use it as a measurement.
+    void wait_finish(boost::posix_time::time_duration & timeout)
+    {
+        boost::system_time start = boost::get_system_time();
+        boost::system_time end = start + timeout;
+
+        {
+            boost::unique_lock< boost::mutex > guard(m_);
+            // timed_wait may wake spuriously; re-check both deadline and flag.
+            while (boost::get_system_time() < end && !finished())
+                c_.timed_wait(guard, end);
+        }
+
+        // Tell both workers to stop (idempotent if a worker already failed).
+        finished_.store(true, boost::memory_order_relaxed);
+
+        first_thread_.join();
+        second_thread_.join();
+
+        boost::posix_time::time_duration duration = boost::get_system_time() - start;
+        if (duration < timeout)
+            timeout = duration;
+    }
+
+    bool finished(void) const BOOST_NOEXCEPT_OR_NOTHROW
+    {
+        return finished_.load(boost::memory_order_relaxed);
+    }
+
+    bool failure(void) const BOOST_NOEXCEPT_OR_NOTHROW
+    {
+        return failure_;
+    }
+
+private:
+    // Worker loop: run the test function until it reports failure or the
+    // waiter asks us to stop via finished_.
+    void thread_function(boost::function<bool(std::size_t)> function, std::size_t instance)
+    {
+        while (!finished())
+        {
+            if (!function(instance))
+            {
+                // Record the failure and wake the waiter in wait_finish().
+                boost::lock_guard< boost::mutex > guard(m_);
+                failure_ = true;
+                finished_.store(true, boost::memory_order_relaxed);
+                c_.notify_all();
+                break;
+            }
+        }
+    }
+
+private:
+    boost::mutex m_;
+    boost::condition_variable c_;
+
+    boost::atomic<bool> finished_;  // stop flag shared by workers and waiter
+    bool failure_;                  // written under m_
+
+    boost::thread first_thread_;
+    boost::thread second_thread_;
+};
+
/* Deliberately non-atomic read-modify-write on one byte of "value".
Returns false as soon as the byte observed before an update differs from the
locally tracked counter, i.e. a concurrent racy update was lost. */
bool racy_add(volatile unsigned int & value, std::size_t instance)
{
    const std::size_t shift = instance * 8;
    const unsigned int mask = 0xff << shift;

    std::size_t counter = 0;
    while (counter < 255)
    {
        // Separate load and store open the window for the race.
        const unsigned int observed = value;
        value = observed + (1 << shift);

        if ((observed & mask) != (counter << shift))
            return false;

        ++counter;
    }

    // Reset our byte; it must have reached 255 if no increment was lost.
    const unsigned int last = value;
    value = last & ~mask;
    return (last & mask) == mask;
}
+
+/* compute estimate for average time between races being observable, in usecs */
+double estimate_avg_race_time(void)
+{
+ double sum = 0.0;
+
+ /* take 10 samples */
+ for (std::size_t n = 0; n < 10; n++)
+ {
+ boost::posix_time::time_duration timeout(0, 0, 10);
+
+ volatile unsigned int value(0);
+ bool success = concurrent_runner::execute(
+ boost::bind(racy_add, boost::ref(value), boost::placeholders::_1),
+ timeout
+ );
+
+ if (success)
+ {
+ BOOST_ERROR("Failed to establish baseline time for reproducing race condition");
+ }
+
+ sum = sum + timeout.total_microseconds();
+ }
+
+ /* determine maximum likelihood estimate for average time between
+ race observations */
+ double avg_race_time_mle = (sum / 10);
+
+ /* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
+ double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
+
+ return avg_race_time_995;
+}
+
+template<typename value_type, std::size_t shift_>
+bool test_arithmetic(value_type& shared_value, std::size_t instance)
+{
+ std::size_t shift = instance * 8;
+ value_type mask = 0xff << shift;
+ value_type increment = 1 << shift;
+
+ value_type expected = 0;
+ boost::atomic_ref<value_type> shared_value_ref(shared_value);
+
+ for (std::size_t n = 0; n < 255; ++n)
+ {
+ value_type tmp = shared_value_ref.fetch_add(increment, boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift) )
+ return false;
+ ++expected;
+ }
+ for (std::size_t n = 0; n < 255; ++n)
+ {
+ value_type tmp = shared_value_ref.fetch_sub(increment, boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift) )
+ return false;
+ --expected;
+ }
+
+ return true;
+}
+
+template<typename value_type, std::size_t shift_>
+bool test_bitops(value_type& shared_value, std::size_t instance)
+{
+ std::size_t shift = instance * 8;
+ value_type mask = 0xff << shift;
+
+ value_type expected = 0;
+ boost::atomic_ref<value_type> shared_value_ref(shared_value);
+
+ for (std::size_t k = 0; k < 8; ++k)
+ {
+ value_type mod = 1u << k;
+ value_type tmp = shared_value_ref.fetch_or(mod << shift, boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift))
+ return false;
+ expected = expected | mod;
+ }
+ for (std::size_t k = 0; k < 8; ++k)
+ {
+ value_type tmp = shared_value_ref.fetch_and(~(1u << (shift + k)), boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift))
+ return false;
+ expected = expected & ~(1u << k);
+ }
+ for (std::size_t k = 0; k < 8; ++k)
+ {
+ value_type mod = 255u ^ (1u << k);
+ value_type tmp = shared_value_ref.fetch_xor(mod << shift, boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift))
+ return false;
+ expected = expected ^ mod;
+ }
+
+ value_type tmp = shared_value_ref.fetch_and(~mask, boost::memory_order_relaxed);
+ if ( (tmp & mask) != (expected << shift) )
+ return false;
+
+ return true;
+}
+
+int main(int, char *[])
+{
+ boost::posix_time::time_duration reciprocal_lambda;
+
+ double avg_race_time = estimate_avg_race_time();
+
+ /* 5.298 = 0.995 quantile of exponential distribution */
+ const boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time));
+
+ {
+ unsigned int value = 0;
+
+ /* testing two different operations in this loop, therefore
+ enlarge timeout */
+ boost::posix_time::time_duration tmp(timeout * 2);
+
+ bool success = concurrent_runner::execute(
+ boost::bind(test_arithmetic<unsigned int, 0>, boost::ref(value), boost::placeholders::_1),
+ tmp
+ );
+
+ BOOST_TEST(success); // concurrent arithmetic error
+ }
+
+ {
+ unsigned int value = 0;
+
+ /* testing three different operations in this loop, therefore
+ enlarge timeout */
+ boost::posix_time::time_duration tmp(timeout * 3);
+
+ bool success = concurrent_runner::execute(
+ boost::bind(test_bitops<unsigned int, 0>, boost::ref(value), boost::placeholders::_1),
+ tmp
+ );
+
+ BOOST_TEST(success); // concurrent bit operations error
+ }
+
+ return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/c_implicit_ctor.cpp b/src/boost/libs/atomic/test/c_implicit_ctor.cpp
new file mode 100644
index 000000000..1a68a7078
--- /dev/null
+++ b/src/boost/libs/atomic/test/c_implicit_ctor.cpp
@@ -0,0 +1,34 @@
+// Copyright (c) 2018 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// The test verifies that atomic<T> has an implicit conversion constructor from T.
+// This can only be tested in C++17 because it has mandated copy elision. Previous C++ versions
+// also require atomic<> to have a copy or move constructor, which it does not.
+#if __cplusplus >= 201703L
+
+#include <boost/atomic.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/config.hpp>
+#include <type_traits>
+
+int main(int, char *[])
+{
+    // Compile-time check: T must be implicitly convertible to boost::atomic<T>.
+    static_assert(std::is_convertible< int, boost::atomic< int > >::value, "boost::atomic<T> does not have an implicit constructor from T");
+
+    // Copy-initialization from a value of a different (convertible) type;
+    // relies on C++17 guaranteed copy elision because atomic<> is non-copyable.
+    // NOTE: the "= 10" form is the point of the test — do not rewrite as {10}.
+    boost::atomic< short > a = 10;
+    (void)a;
+
+    return 0;
+}
+
+#else // __cplusplus >= 201703L
+
+int main(int, char *[])
+{
+    // Pre-C++17 compilers cannot run this test; succeed trivially.
+    return 0;
+}
+
+#endif // __cplusplus >= 201703L
diff --git a/src/boost/libs/atomic/test/cf_arith_func_ptr.cpp b/src/boost/libs/atomic/test/cf_arith_func_ptr.cpp
new file mode 100644
index 000000000..dfb42f619
--- /dev/null
+++ b/src/boost/libs/atomic/test/cf_arith_func_ptr.cpp
@@ -0,0 +1,17 @@
+// Copyright (c) 2017 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic.hpp>
+
+int main(int, char *[])
+{
+    // The test verifies that atomic<> does not provide arithmetic operations on function pointers
+    typedef void (*func_ptr)(int);
+    boost::atomic< func_ptr > a;
+    // Expected to FAIL to compile: this is a compile-fail test, so a
+    // successful build (and the non-zero exit below) means the test failed.
+    a.fetch_add(1);
+
+    return 1;
+}
diff --git a/src/boost/libs/atomic/test/cf_arith_mem_ptr.cpp b/src/boost/libs/atomic/test/cf_arith_mem_ptr.cpp
new file mode 100644
index 000000000..6ccf44aad
--- /dev/null
+++ b/src/boost/libs/atomic/test/cf_arith_mem_ptr.cpp
@@ -0,0 +1,22 @@
+// Copyright (c) 2017 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic.hpp>
+
+// Arbitrary class type used only to form a pointer-to-member below.
+struct foo
+{
+    int n;
+};
+
+int main(int, char *[])
+{
+    // The test verifies that atomic<> does not provide arithmetic operations on member pointers
+    typedef int (foo::*mem_ptr);
+    boost::atomic< mem_ptr > a;
+    // Expected to FAIL to compile: this is a compile-fail test, so a
+    // successful build (and the non-zero exit below) means the test failed.
+    a.fetch_add(1);
+
+    return 1;
+}
diff --git a/src/boost/libs/atomic/test/cf_arith_void_ptr.cpp b/src/boost/libs/atomic/test/cf_arith_void_ptr.cpp
new file mode 100644
index 000000000..7c2d8fc25
--- /dev/null
+++ b/src/boost/libs/atomic/test/cf_arith_void_ptr.cpp
@@ -0,0 +1,16 @@
+// Copyright (c) 2017 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic.hpp>
+
+int main(int, char *[])
+{
+    // The test verifies that atomic<> does not provide arithmetic operations on void pointers
+    boost::atomic< void* > a;
+    // Expected to FAIL to compile: this is a compile-fail test, so a
+    // successful build (and the non-zero exit below) means the test failed.
+    a.fetch_add(1);
+
+    return 1;
+}
diff --git a/src/boost/libs/atomic/test/fallback_api.cpp b/src/boost/libs/atomic/test/fallback_api.cpp
new file mode 100644
index 000000000..815f14ac7
--- /dev/null
+++ b/src/boost/libs/atomic/test/fallback_api.cpp
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/* force fallback implementation using locks */
+#define BOOST_ATOMIC_FORCE_FALLBACK 1
+
+#include <boost/atomic.hpp>
+
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+#include "api_test_helpers.hpp"
+
+// Runs the full atomic<> API test suite against the lock-based fallback
+// backend (BOOST_ATOMIC_FORCE_FALLBACK is defined at the top of this file).
+int main(int, char *[])
+{
+    test_flag_api();
+
+    // Integral types of every width, signed and unsigned.
+    test_integral_api< atomic_wrapper, char >();
+    test_integral_api< atomic_wrapper, signed char >();
+    test_integral_api< atomic_wrapper, unsigned char >();
+    test_integral_api< atomic_wrapper, boost::uint8_t >();
+    test_integral_api< atomic_wrapper, boost::int8_t >();
+    test_integral_api< atomic_wrapper, short >();
+    test_integral_api< atomic_wrapper, unsigned short >();
+    test_integral_api< atomic_wrapper, boost::uint16_t >();
+    test_integral_api< atomic_wrapper, boost::int16_t >();
+    test_integral_api< atomic_wrapper, int >();
+    test_integral_api< atomic_wrapper, unsigned int >();
+    test_integral_api< atomic_wrapper, boost::uint32_t >();
+    test_integral_api< atomic_wrapper, boost::int32_t >();
+    test_integral_api< atomic_wrapper, long >();
+    test_integral_api< atomic_wrapper, unsigned long >();
+    test_integral_api< atomic_wrapper, boost::uint64_t >();
+    test_integral_api< atomic_wrapper, boost::int64_t >();
+    test_integral_api< atomic_wrapper, long long >();
+    test_integral_api< atomic_wrapper, unsigned long long >();
+#if defined(BOOST_HAS_INT128) && !defined(BOOST_ATOMIC_TESTS_NO_INT128)
+    test_integral_api< atomic_wrapper, boost::int128_type >();
+    test_integral_api< atomic_wrapper, boost::uint128_type >();
+#endif
+
+    // Floating point support is optional in Boost.Atomic.
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+    test_floating_point_api< atomic_wrapper, float >();
+    test_floating_point_api< atomic_wrapper, double >();
+    test_floating_point_api< atomic_wrapper, long double >();
+#if defined(BOOST_HAS_FLOAT128) && !defined(BOOST_ATOMIC_TESTS_NO_FLOAT128)
+    test_floating_point_api< atomic_wrapper, boost::float128_type >();
+#endif
+#endif
+
+    test_pointer_api< atomic_wrapper, int >();
+
+    test_enum_api< atomic_wrapper >();
+
+    // Trivially copyable structs of various power-of-two sizes.
+    test_struct_api< atomic_wrapper, test_struct< boost::uint8_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint16_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint32_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint64_t > >();
+
+    // https://svn.boost.org/trac/boost/ticket/10994
+    test_struct_x2_api< atomic_wrapper, test_struct_x2< boost::uint64_t > >();
+
+    // https://svn.boost.org/trac/boost/ticket/9985
+    test_struct_api< atomic_wrapper, test_struct< double > >();
+
+    test_large_struct_api< atomic_wrapper >();
+
+    // Test that boost::atomic<T> only requires T to be trivially copyable.
+    // Other non-trivial constructors are allowed.
+    test_struct_with_ctor_api< atomic_wrapper >();
+
+    return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/fallback_ref_api.cpp b/src/boost/libs/atomic/test/fallback_ref_api.cpp
new file mode 100644
index 000000000..b000eba55
--- /dev/null
+++ b/src/boost/libs/atomic/test/fallback_ref_api.cpp
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/* force fallback implementation using locks */
+#define BOOST_ATOMIC_FORCE_FALLBACK 1
+
+#include <boost/atomic/atomic_ref.hpp>
+
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+#include "api_test_helpers.hpp"
+
+// Runs the atomic_ref<> API test suite against the lock-based fallback
+// backend (BOOST_ATOMIC_FORCE_FALLBACK is defined at the top of this file).
+int main(int, char *[])
+{
+    // Integral types of every width, signed and unsigned.
+    test_integral_api< atomic_ref_wrapper, char >();
+    test_integral_api< atomic_ref_wrapper, signed char >();
+    test_integral_api< atomic_ref_wrapper, unsigned char >();
+    test_integral_api< atomic_ref_wrapper, boost::uint8_t >();
+    test_integral_api< atomic_ref_wrapper, boost::int8_t >();
+    test_integral_api< atomic_ref_wrapper, short >();
+    test_integral_api< atomic_ref_wrapper, unsigned short >();
+    test_integral_api< atomic_ref_wrapper, boost::uint16_t >();
+    test_integral_api< atomic_ref_wrapper, boost::int16_t >();
+    test_integral_api< atomic_ref_wrapper, int >();
+    test_integral_api< atomic_ref_wrapper, unsigned int >();
+    test_integral_api< atomic_ref_wrapper, boost::uint32_t >();
+    test_integral_api< atomic_ref_wrapper, boost::int32_t >();
+    test_integral_api< atomic_ref_wrapper, long >();
+    test_integral_api< atomic_ref_wrapper, unsigned long >();
+    test_integral_api< atomic_ref_wrapper, boost::uint64_t >();
+    test_integral_api< atomic_ref_wrapper, boost::int64_t >();
+    test_integral_api< atomic_ref_wrapper, long long >();
+    test_integral_api< atomic_ref_wrapper, unsigned long long >();
+#if defined(BOOST_HAS_INT128) && !defined(BOOST_ATOMIC_TESTS_NO_INT128)
+    test_integral_api< atomic_ref_wrapper, boost::int128_type >();
+    test_integral_api< atomic_ref_wrapper, boost::uint128_type >();
+#endif
+
+    // Floating point support is optional in Boost.Atomic.
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+    test_floating_point_api< atomic_ref_wrapper, float >();
+    test_floating_point_api< atomic_ref_wrapper, double >();
+    test_floating_point_api< atomic_ref_wrapper, long double >();
+#if defined(BOOST_HAS_FLOAT128) && !defined(BOOST_ATOMIC_TESTS_NO_FLOAT128)
+    test_floating_point_api< atomic_ref_wrapper, boost::float128_type >();
+#endif
+#endif
+
+    test_pointer_api< atomic_ref_wrapper, int >();
+
+    test_enum_api< atomic_ref_wrapper >();
+
+    // Trivially copyable structs of various power-of-two sizes.
+    test_struct_api< atomic_ref_wrapper, test_struct< boost::uint8_t > >();
+    test_struct_api< atomic_ref_wrapper, test_struct< boost::uint16_t > >();
+    test_struct_api< atomic_ref_wrapper, test_struct< boost::uint32_t > >();
+    test_struct_api< atomic_ref_wrapper, test_struct< boost::uint64_t > >();
+
+    // https://svn.boost.org/trac/boost/ticket/10994
+    test_struct_x2_api< atomic_ref_wrapper, test_struct_x2< boost::uint64_t > >();
+
+    // https://svn.boost.org/trac/boost/ticket/9985
+    test_struct_api< atomic_ref_wrapper, test_struct< double > >();
+
+    test_large_struct_api< atomic_ref_wrapper >();
+
+    // Test that boost::atomic_ref<T> only requires T to be trivially copyable.
+    // Other non-trivial constructors are allowed.
+    test_struct_with_ctor_api< atomic_ref_wrapper >();
+
+    return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/lockfree.cpp b/src/boost/libs/atomic/test/lockfree.cpp
new file mode 100644
index 000000000..23557d71e
--- /dev/null
+++ b/src/boost/libs/atomic/test/lockfree.cpp
@@ -0,0 +1,237 @@
+// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Verify that definition of the "LOCK_FREE" macros and the
+// "is_lock_free" members is consistent and matches expectations.
+// Also, if any operation is lock-free, then the platform
+// implementation must provide overridden fence implementations.
+
+#include <boost/atomic.hpp>
+
+#include <iostream>
+#include <boost/config.hpp>
+#include <boost/core/lightweight_test.hpp>
+#include "aligned_object.hpp"
+
+// Human-readable names indexed by a BOOST_ATOMIC_*_LOCK_FREE macro value (0..2).
+static const char* const lock_free_level[] =
+{
+    "never",
+    "sometimes",
+    "always"
+};
+
+// Checks that the BOOST_ATOMIC_*_LOCK_FREE macro value for T (passed as
+// lock_free_macro_val) matches the platform expectation (lock_free_expect)
+// and agrees with the runtime is_lock_free()/is_always_lock_free reports of
+// atomic<T> — and of atomic_ref<T>, where that comparison is meaningful.
+template< typename T >
+void verify_lock_free(const char* type_name, int lock_free_macro_val, int lock_free_expect)
+{
+    BOOST_TEST(lock_free_macro_val >= 0 && lock_free_macro_val <= 2);
+    BOOST_TEST(lock_free_macro_val == lock_free_expect);
+
+    boost::atomic<T> value;
+
+    // 0 = never lock-free, 2 = always; 1 ("sometimes") permits either
+    // runtime answer, so nothing is asserted in that case.
+    if (lock_free_macro_val == 0)
+        BOOST_TEST(!value.is_lock_free());
+    if (lock_free_macro_val == 2)
+        BOOST_TEST(value.is_lock_free());
+
+    BOOST_TEST_EQ(boost::atomic<T>::is_always_lock_free, (lock_free_expect == 2));
+
+    std::cout << "atomic<" << type_name << "> is " << lock_free_level[lock_free_macro_val] << " lock free\n";
+
+    // atomic<T> may use larger storage than sizeof(T) to achieve lock-free property. In this case atomic_ref<T> may not be lock-free.
+    if (sizeof(boost::atomic<T>) == sizeof(T))
+    {
+        // aligned_object guarantees atomic_ref's required_alignment (see aligned_object.hpp).
+        aligned_object<T, boost::atomic_ref<T>::required_alignment> object;
+        boost::atomic_ref<T> ref(object.get());
+
+        BOOST_TEST_EQ(ref.is_lock_free(), value.is_lock_free());
+        BOOST_TEST_EQ(boost::atomic_ref<T>::is_always_lock_free, boost::atomic<T>::is_always_lock_free);
+    }
+}
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC)) && defined(__i386__)
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define EXPECT_LLONG_LOCK_FREE 2
+#else
+#define EXPECT_LLONG_LOCK_FREE 0
+#endif
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && defined(__x86_64__)
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#define EXPECT_LLONG_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#define EXPECT_INT128_LOCK_FREE 2
+#else
+#define EXPECT_INT128_LOCK_FREE 0
+#endif
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_CHAR16_T_LOCK_FREE 2
+#define EXPECT_CHAR32_T_LOCK_FREE 2
+#define EXPECT_WCHAR_T_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#if defined(__powerpc64__)
+#define EXPECT_LLONG_LOCK_FREE 2
+#else
+#define EXPECT_LLONG_LOCK_FREE 0
+#endif
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif defined(__GNUC__) && defined(__alpha__)
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_CHAR16_T_LOCK_FREE 2
+#define EXPECT_CHAR32_T_LOCK_FREE 2
+#define EXPECT_WCHAR_T_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#define EXPECT_LLONG_LOCK_FREE 2
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif defined(__GNUC__) &&\
+ (\
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
+ defined(__ARM_ARCH_6ZK__) ||\
+ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
+ defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\
+ )
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__)\
+ || ((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7M__))
+#define EXPECT_LLONG_LOCK_FREE 2
+#else
+#define EXPECT_LLONG_LOCK_FREE 0
+#endif
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif defined(__linux__) && defined(__arm__)
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#define EXPECT_LLONG_LOCK_FREE 0
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__sparcv8plus) || defined(__sparc_v9__))
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#define EXPECT_LLONG_LOCK_FREE 2
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+
+#define EXPECT_CHAR_LOCK_FREE 2
+#define EXPECT_SHORT_LOCK_FREE 2
+#define EXPECT_INT_LOCK_FREE 2
+#define EXPECT_LONG_LOCK_FREE 2
+#if defined(_WIN64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64) || (_MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64)))
+#define EXPECT_LLONG_LOCK_FREE 2
+#else
+#define EXPECT_LLONG_LOCK_FREE 0
+#endif
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 2
+#define EXPECT_BOOL_LOCK_FREE 2
+
+#else
+
+#define EXPECT_CHAR_LOCK_FREE 0
+#define EXPECT_SHORT_LOCK_FREE 0
+#define EXPECT_INT_LOCK_FREE 0
+#define EXPECT_LONG_LOCK_FREE 0
+#define EXPECT_LLONG_LOCK_FREE 0
+#define EXPECT_INT128_LOCK_FREE 0
+#define EXPECT_POINTER_LOCK_FREE 0
+#define EXPECT_BOOL_LOCK_FREE 0
+
+#endif
+
+int main(int, char *[])
+{
+ verify_lock_free<char>("char", BOOST_ATOMIC_CHAR_LOCK_FREE, EXPECT_CHAR_LOCK_FREE);
+ verify_lock_free<short>("short", BOOST_ATOMIC_SHORT_LOCK_FREE, EXPECT_SHORT_LOCK_FREE);
+ verify_lock_free<int>("int", BOOST_ATOMIC_INT_LOCK_FREE, EXPECT_INT_LOCK_FREE);
+ verify_lock_free<long>("long", BOOST_ATOMIC_LONG_LOCK_FREE, EXPECT_LONG_LOCK_FREE);
+#ifdef BOOST_HAS_LONG_LONG
+ verify_lock_free<long long>("long long", BOOST_ATOMIC_LLONG_LOCK_FREE, EXPECT_LLONG_LOCK_FREE);
+#endif
+#ifdef BOOST_HAS_INT128
+ verify_lock_free<boost::int128_type>("int128", BOOST_ATOMIC_INT128_LOCK_FREE, EXPECT_INT128_LOCK_FREE);
+#endif
+ verify_lock_free<void *>("void *", BOOST_ATOMIC_POINTER_LOCK_FREE, EXPECT_SHORT_LOCK_FREE);
+ verify_lock_free<bool>("bool", BOOST_ATOMIC_BOOL_LOCK_FREE, EXPECT_BOOL_LOCK_FREE);
+
+#ifndef BOOST_ATOMIC_NO_FLOATING_POINT
+
+ verify_lock_free<float>("float", BOOST_ATOMIC_FLOAT_LOCK_FREE,
+ sizeof(float) == 1 ? EXPECT_CHAR_LOCK_FREE : (sizeof(float) == 2 ? EXPECT_SHORT_LOCK_FREE :
+ (sizeof(float) <= 4 ? EXPECT_INT_LOCK_FREE : (sizeof(float) <= 8 ? EXPECT_LLONG_LOCK_FREE : (sizeof(float) <= 16 ? EXPECT_INT128_LOCK_FREE : 0)))));
+
+ verify_lock_free<double>("double", BOOST_ATOMIC_DOUBLE_LOCK_FREE,
+ sizeof(double) == 1 ? EXPECT_CHAR_LOCK_FREE : (sizeof(double) == 2 ? EXPECT_SHORT_LOCK_FREE :
+ (sizeof(double) <= 4 ? EXPECT_INT_LOCK_FREE : (sizeof(double) <= 8 ? EXPECT_LLONG_LOCK_FREE : (sizeof(double) <= 16 ? EXPECT_INT128_LOCK_FREE : 0)))));
+
+ verify_lock_free<long double>("long double", BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE,
+ sizeof(long double) == 1 ? EXPECT_CHAR_LOCK_FREE : (sizeof(long double) == 2 ? EXPECT_SHORT_LOCK_FREE :
+ (sizeof(long double) <= 4 ? EXPECT_INT_LOCK_FREE : (sizeof(long double) <= 8 ? EXPECT_LLONG_LOCK_FREE : (sizeof(long double) <= 16 ? EXPECT_INT128_LOCK_FREE : 0)))));
+
+#if defined(BOOST_HAS_INT128) && defined(BOOST_HAS_FLOAT128)
+ verify_lock_free<boost::float128_type>("float128", BOOST_ATOMIC_INT128_LOCK_FREE, EXPECT_INT128_LOCK_FREE);
+#endif
+
+#endif // BOOST_ATOMIC_NO_FLOATING_POINT
+
+ bool any_lock_free =
+ BOOST_ATOMIC_CHAR_LOCK_FREE > 0 ||
+ BOOST_ATOMIC_SHORT_LOCK_FREE > 0 ||
+ BOOST_ATOMIC_INT_LOCK_FREE > 0 ||
+ BOOST_ATOMIC_LONG_LOCK_FREE > 0 ||
+ BOOST_ATOMIC_LLONG_LOCK_FREE > 0 ||
+ BOOST_ATOMIC_BOOL_LOCK_FREE > 0;
+
+ BOOST_TEST(!any_lock_free || BOOST_ATOMIC_THREAD_FENCE > 0);
+
+ return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/native_api.cpp b/src/boost/libs/atomic/test/native_api.cpp
new file mode 100644
index 000000000..4704e55c9
--- /dev/null
+++ b/src/boost/libs/atomic/test/native_api.cpp
@@ -0,0 +1,83 @@
+// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic.hpp>
+
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+#include "api_test_helpers.hpp"
+
+// Runs the full atomic<> API test suite against the native (lock-free where
+// available) backend.
+int main(int, char *[])
+{
+    test_flag_api();
+
+    // Integral types of every width, signed and unsigned.
+    test_integral_api< atomic_wrapper, char >();
+    test_integral_api< atomic_wrapper, signed char >();
+    test_integral_api< atomic_wrapper, unsigned char >();
+    test_integral_api< atomic_wrapper, boost::uint8_t >();
+    test_integral_api< atomic_wrapper, boost::int8_t >();
+    test_integral_api< atomic_wrapper, short >();
+    test_integral_api< atomic_wrapper, unsigned short >();
+    test_integral_api< atomic_wrapper, boost::uint16_t >();
+    test_integral_api< atomic_wrapper, boost::int16_t >();
+    test_integral_api< atomic_wrapper, int >();
+    test_integral_api< atomic_wrapper, unsigned int >();
+    test_integral_api< atomic_wrapper, boost::uint32_t >();
+    test_integral_api< atomic_wrapper, boost::int32_t >();
+    test_integral_api< atomic_wrapper, long >();
+    test_integral_api< atomic_wrapper, unsigned long >();
+    test_integral_api< atomic_wrapper, boost::uint64_t >();
+    test_integral_api< atomic_wrapper, boost::int64_t >();
+    test_integral_api< atomic_wrapper, long long >();
+    test_integral_api< atomic_wrapper, unsigned long long >();
+#if defined(BOOST_HAS_INT128) && !defined(BOOST_ATOMIC_TESTS_NO_INT128)
+    test_integral_api< atomic_wrapper, boost::int128_type >();
+    test_integral_api< atomic_wrapper, boost::uint128_type >();
+#endif
+
+    // constexpr construction is only offered by the native backend.
+    test_constexpr_ctor< char >();
+    test_constexpr_ctor< short >();
+    test_constexpr_ctor< int >();
+    test_constexpr_ctor< long >();
+    // test_constexpr_ctor< int* >(); // for pointers we're not offering a constexpr constructor because of bitwise_cast
+
+    // Floating point support is optional in Boost.Atomic.
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+    test_floating_point_api< atomic_wrapper, float >();
+    test_floating_point_api< atomic_wrapper, double >();
+    test_floating_point_api< atomic_wrapper, long double >();
+#if defined(BOOST_HAS_FLOAT128) && !defined(BOOST_ATOMIC_TESTS_NO_FLOAT128)
+    test_floating_point_api< atomic_wrapper, boost::float128_type >();
+#endif
+#endif
+
+    test_pointer_api< atomic_wrapper, int >();
+
+    test_enum_api< atomic_wrapper >();
+
+    // Trivially copyable structs of various power-of-two sizes.
+    test_struct_api< atomic_wrapper, test_struct< boost::uint8_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint16_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint32_t > >();
+    test_struct_api< atomic_wrapper, test_struct< boost::uint64_t > >();
+#if defined(BOOST_HAS_INT128)
+    test_struct_api< atomic_wrapper, test_struct< boost::uint128_type > >();
+#endif
+
+    // https://svn.boost.org/trac/boost/ticket/10994
+    test_struct_x2_api< atomic_wrapper, test_struct_x2< boost::uint64_t > >();
+
+    // https://svn.boost.org/trac/boost/ticket/9985
+    test_struct_api< atomic_wrapper, test_struct< double > >();
+
+    test_large_struct_api< atomic_wrapper >();
+
+    // Test that boost::atomic<T> only requires T to be trivially copyable.
+    // Other non-trivial constructors are allowed.
+    test_struct_with_ctor_api< atomic_wrapper >();
+
+    return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/native_ref_api.cpp b/src/boost/libs/atomic/test/native_ref_api.cpp
new file mode 100644
index 000000000..e29ace670
--- /dev/null
+++ b/src/boost/libs/atomic/test/native_ref_api.cpp
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/atomic_ref.hpp>
+
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+#include "api_test_helpers.hpp"
+
+int main(int, char *[])
+{
+ test_integral_api< atomic_ref_wrapper, char >();
+ test_integral_api< atomic_ref_wrapper, signed char >();
+ test_integral_api< atomic_ref_wrapper, unsigned char >();
+ test_integral_api< atomic_ref_wrapper, boost::uint8_t >();
+ test_integral_api< atomic_ref_wrapper, boost::int8_t >();
+ test_integral_api< atomic_ref_wrapper, short >();
+ test_integral_api< atomic_ref_wrapper, unsigned short >();
+ test_integral_api< atomic_ref_wrapper, boost::uint16_t >();
+ test_integral_api< atomic_ref_wrapper, boost::int16_t >();
+ test_integral_api< atomic_ref_wrapper, int >();
+ test_integral_api< atomic_ref_wrapper, unsigned int >();
+ test_integral_api< atomic_ref_wrapper, boost::uint32_t >();
+ test_integral_api< atomic_ref_wrapper, boost::int32_t >();
+ test_integral_api< atomic_ref_wrapper, long >();
+ test_integral_api< atomic_ref_wrapper, unsigned long >();
+ test_integral_api< atomic_ref_wrapper, boost::uint64_t >();
+ test_integral_api< atomic_ref_wrapper, boost::int64_t >();
+ test_integral_api< atomic_ref_wrapper, long long >();
+ test_integral_api< atomic_ref_wrapper, unsigned long long >();
+#if defined(BOOST_HAS_INT128) && !defined(BOOST_ATOMIC_TESTS_NO_INT128)
+ test_integral_api< atomic_ref_wrapper, boost::int128_type >();
+ test_integral_api< atomic_ref_wrapper, boost::uint128_type >();
+#endif
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+ test_floating_point_api< atomic_ref_wrapper, float >();
+ test_floating_point_api< atomic_ref_wrapper, double >();
+ test_floating_point_api< atomic_ref_wrapper, long double >();
+#if defined(BOOST_HAS_FLOAT128) && !defined(BOOST_ATOMIC_TESTS_NO_FLOAT128)
+ test_floating_point_api< atomic_ref_wrapper, boost::float128_type >();
+#endif
+#endif
+
+ test_pointer_api< atomic_ref_wrapper, int >();
+
+ test_enum_api< atomic_ref_wrapper >();
+
+ test_struct_api< atomic_ref_wrapper, test_struct< boost::uint8_t > >();
+ test_struct_api< atomic_ref_wrapper, test_struct< boost::uint16_t > >();
+ test_struct_api< atomic_ref_wrapper, test_struct< boost::uint32_t > >();
+ test_struct_api< atomic_ref_wrapper, test_struct< boost::uint64_t > >();
+#if defined(BOOST_HAS_INT128)
+ test_struct_api< atomic_ref_wrapper, test_struct< boost::uint128_type > >();
+#endif
+
+ // https://svn.boost.org/trac/boost/ticket/10994
+ test_struct_x2_api< atomic_ref_wrapper, test_struct_x2< boost::uint64_t > >();
+
+ // https://svn.boost.org/trac/boost/ticket/9985
+ test_struct_api< atomic_ref_wrapper, test_struct< double > >();
+
+ test_large_struct_api< atomic_ref_wrapper >();
+
+ // Test that boost::atomic_ref<T> only requires T to be trivially copyable.
+ // Other non-trivial constructors are allowed.
+ test_struct_with_ctor_api< atomic_ref_wrapper >();
+
+ return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/ordering.cpp b/src/boost/libs/atomic/test/ordering.cpp
new file mode 100644
index 000000000..a7416cdeb
--- /dev/null
+++ b/src/boost/libs/atomic/test/ordering.cpp
@@ -0,0 +1,270 @@
+// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2012 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Attempt to determine whether the memory ordering/ fence operations
+// work as expected:
+// Let two threads race accessing multiple shared variables and
+// verify that "observable" order of operations matches with the
+// ordering constraints specified.
+//
+// We assume that "memory ordering violation" events are exponentially
+// distributed, with unknown "average time between violations"
+// (which is just the reciprocal of exp distribution parameter lambda).
+// Use a "relaxed ordering" implementation that intentionally exhibits
+// such a (hopefully observable) violation to compute the maximum-likelihood
+// estimate for this time. From this, compute an estimate that covers the
+// unknown value with 0.995 confidence (using chi square quantile).
+//
+// Use this estimate to pick a timeout for the race tests of the
+// atomic implementations such that under the assumed distribution
+// we get 0.995 probability to detect a race (if there is one).
+//
+// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
+// fences work as expected if this test program does not
+// report an error.
+
+#include <boost/atomic.hpp>
+
+#include <cstddef>
+#include <boost/bind/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/thread_time.hpp>
+#include <boost/thread/lock_guard.hpp>
+#include <boost/thread/lock_types.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/thread/condition_variable.hpp>
+#include <boost/thread/barrier.hpp>
+#include <boost/core/lightweight_test.hpp>
+
+// Two threads perform the following operations:
+//
+// thread # 1 thread # 2
+// store(a, 1) store(b, 1)
+// x = read(b) y = read(a)
+//
+// Under relaxed memory ordering, the case (x, y) == (0, 0) is
+// possible. Under sequential consistency, this case is impossible.
+//
+// This "problem" is reproducible on all platforms, even x86.
+template<boost::memory_order store_order, boost::memory_order load_order>
+class total_store_order_test
+{
+public:
+ total_store_order_test(void);
+
+ void run(boost::posix_time::time_duration & timeout);
+ bool detected_conflict(void) const { return detected_conflict_; }
+
+private:
+ void thread1fn(void);
+ void thread2fn(void);
+ void check_conflict(void);
+
+private:
+ boost::atomic<int> a_;
+ /* insert a bit of padding to push the two variables into
+ different cache lines and increase the likelihood of detecting
+ a conflict */
+ char pad1_[512];
+ boost::atomic<int> b_;
+
+ char pad2_[512];
+ boost::barrier barrier_;
+
+ int vrfyb1_, vrfya2_;
+
+ boost::atomic<bool> terminate_threads_;
+ boost::atomic<int> termination_consensus_;
+
+ bool detected_conflict_;
+ boost::mutex m_;
+ boost::condition_variable c_;
+};
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+total_store_order_test<store_order, load_order>::total_store_order_test(void) :
+ a_(0), b_(0), barrier_(2),
+ vrfyb1_(0), vrfya2_(0),
+ terminate_threads_(false), termination_consensus_(0),
+ detected_conflict_(false)
+{
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::run(boost::posix_time::time_duration & timeout)
+{
+ boost::system_time start = boost::get_system_time();
+ boost::system_time end = start + timeout;
+
+ boost::thread t1(boost::bind(&total_store_order_test::thread1fn, this));
+ boost::thread t2(boost::bind(&total_store_order_test::thread2fn, this));
+
+ {
+ boost::unique_lock< boost::mutex > guard(m_);
+ while (boost::get_system_time() < end && !detected_conflict_)
+ c_.timed_wait(guard, end);
+ }
+
+ terminate_threads_.store(true, boost::memory_order_relaxed);
+
+ t2.join();
+ t1.join();
+
+ boost::posix_time::time_duration duration = boost::get_system_time() - start;
+ if (duration < timeout)
+ timeout = duration;
+}
+
+volatile int backoff_dummy;
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::thread1fn(void)
+{
+ while (true)
+ {
+ a_.store(1, store_order);
+ int b = b_.load(load_order);
+
+ barrier_.wait();
+
+ vrfyb1_ = b;
+
+ barrier_.wait();
+
+ check_conflict();
+
+ /* both threads synchronize via barriers, so either
+ both threads must exit here, or they must both do
+ another round, otherwise one of them will wait forever */
+ if (terminate_threads_.load(boost::memory_order_relaxed))
+ {
+ while (true)
+ {
+ int tmp = termination_consensus_.fetch_or(1, boost::memory_order_relaxed);
+
+ if (tmp == 3)
+ return;
+ if (tmp & 4)
+ break;
+ }
+ }
+
+ termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
+
+ unsigned int delay = rand() % 10000;
+ a_.store(0, boost::memory_order_relaxed);
+
+ barrier_.wait();
+
+ while (delay--)
+ backoff_dummy = delay;
+ }
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::thread2fn(void)
+{
+ while (true)
+ {
+ b_.store(1, store_order);
+ int a = a_.load(load_order);
+
+ barrier_.wait();
+
+ vrfya2_ = a;
+
+ barrier_.wait();
+
+ check_conflict();
+
+ /* both threads synchronize via barriers, so either
+ both threads must exit here, or they must both do
+ another round, otherwise one of them will wait forever */
+ if (terminate_threads_.load(boost::memory_order_relaxed))
+ {
+ while (true)
+ {
+ int tmp = termination_consensus_.fetch_or(2, boost::memory_order_relaxed);
+
+ if (tmp == 3)
+ return;
+ if (tmp & 4)
+ break;
+ }
+ }
+
+ termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
+
+ unsigned int delay = rand() % 10000;
+ b_.store(0, boost::memory_order_relaxed);
+
+ barrier_.wait();
+
+ while (delay--)
+ backoff_dummy = delay;
+ }
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::check_conflict(void)
+{
+ if (vrfyb1_ == 0 && vrfya2_ == 0)
+ {
+ boost::lock_guard< boost::mutex > guard(m_);
+ detected_conflict_ = true;
+ terminate_threads_.store(true, boost::memory_order_relaxed);
+ c_.notify_all();
+ }
+}
+
+void test_seq_cst(void)
+{
+ double sum = 0.0;
+
+ /* take 10 samples */
+ for (std::size_t n = 0; n < 10; n++)
+ {
+ boost::posix_time::time_duration timeout(0, 0, 10);
+
+ total_store_order_test<boost::memory_order_relaxed, boost::memory_order_relaxed> test;
+ test.run(timeout);
+ if (!test.detected_conflict())
+ {
+            std::cout << "Failed to detect order=seq_cst violation with order=relaxed -- intrinsic ordering too strong for this test\n";
+ return;
+ }
+
+ std::cout << "seq_cst violation with order=relaxed after " << timeout.total_microseconds() << " us\n";
+
+ sum = sum + timeout.total_microseconds();
+ }
+
+ /* determine maximum likelihood estimate for average time between
+ race observations */
+ double avg_race_time_mle = (sum / 10);
+
+ /* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
+ double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
+
+ /* 5.298 = 0.995 quantile of exponential distribution */
+ boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time_995));
+
+ std::cout << "run seq_cst for " << timeout.total_microseconds() << " us\n";
+
+ total_store_order_test<boost::memory_order_seq_cst, boost::memory_order_seq_cst> test;
+ test.run(timeout);
+
+ BOOST_TEST(!test.detected_conflict()); // sequential consistency error
+}
+
+int main(int, char *[])
+{
+ test_seq_cst();
+
+ return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/ordering_ref.cpp b/src/boost/libs/atomic/test/ordering_ref.cpp
new file mode 100644
index 000000000..8a48277e0
--- /dev/null
+++ b/src/boost/libs/atomic/test/ordering_ref.cpp
@@ -0,0 +1,276 @@
+// Copyright (c) 2020 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// This test is based on ordering.cpp by Helge Bahmann and Tim Blechmann.
+// The test was modified to use the atomic_ref template instead of atomic.
+
+// Attempt to determine whether the memory ordering/ fence operations
+// work as expected:
+// Let two threads race accessing multiple shared variables and
+// verify that "observable" order of operations matches with the
+// ordering constraints specified.
+//
+// We assume that "memory ordering violation" events are exponentially
+// distributed, with unknown "average time between violations"
+// (which is just the reciprocal of exp distribution parameter lambda).
+// Use a "relaxed ordering" implementation that intentionally exhibits
+// such a (hopefully observable) violation to compute the maximum-likelihood
+// estimate for this time. From this, compute an estimate that covers the
+// unknown value with 0.995 confidence (using chi square quantile).
+//
+// Use this estimate to pick a timeout for the race tests of the
+// atomic implementations such that under the assumed distribution
+// we get 0.995 probability to detect a race (if there is one).
+//
+// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
+// fences work as expected if this test program does not
+// report an error.
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/atomic.hpp>
+#include <boost/atomic/atomic_ref.hpp>
+
+#include <cstddef>
+#include <boost/bind/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/thread_time.hpp>
+#include <boost/thread/lock_guard.hpp>
+#include <boost/thread/lock_types.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/thread/condition_variable.hpp>
+#include <boost/thread/barrier.hpp>
+#include <boost/core/lightweight_test.hpp>
+
+// Two threads perform the following operations:
+//
+// thread # 1 thread # 2
+// store(a, 1) store(b, 1)
+// x = read(b) y = read(a)
+//
+// Under relaxed memory ordering, the case (x, y) == (0, 0) is
+// possible. Under sequential consistency, this case is impossible.
+//
+// This "problem" is reproducible on all platforms, even x86.
+template<boost::memory_order store_order, boost::memory_order load_order>
+class total_store_order_test
+{
+public:
+ total_store_order_test(void);
+
+ void run(boost::posix_time::time_duration & timeout);
+ bool detected_conflict(void) const { return detected_conflict_; }
+
+private:
+ void thread1fn(void);
+ void thread2fn(void);
+ void check_conflict(void);
+
+private:
+ int a_value_;
+ boost::atomic_ref<int> a_;
+ /* insert a bit of padding to push the two variables into
+ different cache lines and increase the likelihood of detecting
+ a conflict */
+ char pad1_[512];
+ int b_value_;
+ boost::atomic_ref<int> b_;
+
+ char pad2_[512];
+ boost::barrier barrier_;
+
+ int vrfyb1_, vrfya2_;
+
+ boost::atomic<bool> terminate_threads_;
+ boost::atomic<int> termination_consensus_;
+
+ bool detected_conflict_;
+ boost::mutex m_;
+ boost::condition_variable c_;
+};
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+total_store_order_test<store_order, load_order>::total_store_order_test(void) :
+ a_value_(0), a_(a_value_), b_value_(0), b_(b_value_), barrier_(2),
+ vrfyb1_(0), vrfya2_(0),
+ terminate_threads_(false), termination_consensus_(0),
+ detected_conflict_(false)
+{
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::run(boost::posix_time::time_duration & timeout)
+{
+ boost::system_time start = boost::get_system_time();
+ boost::system_time end = start + timeout;
+
+ boost::thread t1(boost::bind(&total_store_order_test::thread1fn, this));
+ boost::thread t2(boost::bind(&total_store_order_test::thread2fn, this));
+
+ {
+ boost::unique_lock< boost::mutex > guard(m_);
+ while (boost::get_system_time() < end && !detected_conflict_)
+ c_.timed_wait(guard, end);
+ }
+
+ terminate_threads_.store(true, boost::memory_order_relaxed);
+
+ t2.join();
+ t1.join();
+
+ boost::posix_time::time_duration duration = boost::get_system_time() - start;
+ if (duration < timeout)
+ timeout = duration;
+}
+
+volatile int backoff_dummy;
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::thread1fn(void)
+{
+ while (true)
+ {
+ a_.store(1, store_order);
+ int b = b_.load(load_order);
+
+ barrier_.wait();
+
+ vrfyb1_ = b;
+
+ barrier_.wait();
+
+ check_conflict();
+
+ /* both threads synchronize via barriers, so either
+ both threads must exit here, or they must both do
+ another round, otherwise one of them will wait forever */
+ if (terminate_threads_.load(boost::memory_order_relaxed))
+ {
+ while (true)
+ {
+ int tmp = termination_consensus_.fetch_or(1, boost::memory_order_relaxed);
+
+ if (tmp == 3)
+ return;
+ if (tmp & 4)
+ break;
+ }
+ }
+
+ termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
+
+ unsigned int delay = rand() % 10000;
+ a_.store(0, boost::memory_order_relaxed);
+
+ barrier_.wait();
+
+ while (delay--)
+ backoff_dummy = delay;
+ }
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::thread2fn(void)
+{
+ while (true)
+ {
+ b_.store(1, store_order);
+ int a = a_.load(load_order);
+
+ barrier_.wait();
+
+ vrfya2_ = a;
+
+ barrier_.wait();
+
+ check_conflict();
+
+ /* both threads synchronize via barriers, so either
+ both threads must exit here, or they must both do
+ another round, otherwise one of them will wait forever */
+ if (terminate_threads_.load(boost::memory_order_relaxed))
+ {
+ while (true)
+ {
+ int tmp = termination_consensus_.fetch_or(2, boost::memory_order_relaxed);
+
+ if (tmp == 3)
+ return;
+ if (tmp & 4)
+ break;
+ }
+ }
+
+ termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
+
+ unsigned int delay = rand() % 10000;
+ b_.store(0, boost::memory_order_relaxed);
+
+ barrier_.wait();
+
+ while (delay--)
+ backoff_dummy = delay;
+ }
+}
+
+template<boost::memory_order store_order, boost::memory_order load_order>
+void total_store_order_test<store_order, load_order>::check_conflict(void)
+{
+ if (vrfyb1_ == 0 && vrfya2_ == 0)
+ {
+ boost::lock_guard< boost::mutex > guard(m_);
+ detected_conflict_ = true;
+ terminate_threads_.store(true, boost::memory_order_relaxed);
+ c_.notify_all();
+ }
+}
+
+void test_seq_cst(void)
+{
+ double sum = 0.0;
+
+ /* take 10 samples */
+ for (std::size_t n = 0; n < 10; n++)
+ {
+ boost::posix_time::time_duration timeout(0, 0, 10);
+
+ total_store_order_test<boost::memory_order_relaxed, boost::memory_order_relaxed> test;
+ test.run(timeout);
+ if (!test.detected_conflict())
+ {
+            std::cout << "Failed to detect order=seq_cst violation with order=relaxed -- intrinsic ordering too strong for this test\n";
+ return;
+ }
+
+ std::cout << "seq_cst violation with order=relaxed after " << timeout.total_microseconds() << " us\n";
+
+ sum = sum + timeout.total_microseconds();
+ }
+
+ /* determine maximum likelihood estimate for average time between
+ race observations */
+ double avg_race_time_mle = (sum / 10);
+
+ /* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
+ double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
+
+ /* 5.298 = 0.995 quantile of exponential distribution */
+ boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time_995));
+
+ std::cout << "run seq_cst for " << timeout.total_microseconds() << " us\n";
+
+ total_store_order_test<boost::memory_order_seq_cst, boost::memory_order_seq_cst> test;
+ test.run(timeout);
+
+ BOOST_TEST(!test.detected_conflict()); // sequential consistency error
+}
+
+int main(int, char *[])
+{
+ test_seq_cst();
+
+ return boost::report_errors();
+}
diff --git a/src/boost/libs/atomic/test/test_cmake/CMakeLists.txt b/src/boost/libs/atomic/test/test_cmake/CMakeLists.txt
new file mode 100644
index 000000000..31170bdee
--- /dev/null
+++ b/src/boost/libs/atomic/test/test_cmake/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Copyright 2018 Mike Dev
+# Distributed under the Boost Software License, Version 1.0.
+# See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
+#
+# NOTE: This does NOT run the unit tests for Boost.Atomic.
+# It only tests whether the CMakeLists.txt file in its root works as expected
+
+cmake_minimum_required( VERSION 3.5 )
+
+project( BoostAtomicCMakeSelfTest )
+
+add_definitions( -DBOOST_ALL_NO_LIB )
+
+add_subdirectory( ../../../assert ${CMAKE_CURRENT_BINARY_DIR}/libs/assert )
+add_subdirectory( ../../../config ${CMAKE_CURRENT_BINARY_DIR}/libs/config )
+add_subdirectory( ../../../static_assert ${CMAKE_CURRENT_BINARY_DIR}/libs/static_assert )
+add_subdirectory( ../../../type_traits ${CMAKE_CURRENT_BINARY_DIR}/libs/type_traits )
+
+add_subdirectory( ../.. ${CMAKE_CURRENT_BINARY_DIR}/libs/boost_atomic )
+
+add_executable( boost_atomic_cmake_self_test main.cpp )
+target_link_libraries( boost_atomic_cmake_self_test Boost::atomic )
diff --git a/src/boost/libs/atomic/test/test_cmake/main.cpp b/src/boost/libs/atomic/test/test_cmake/main.cpp
new file mode 100644
index 000000000..98d8453ad
--- /dev/null
+++ b/src/boost/libs/atomic/test/test_cmake/main.cpp
@@ -0,0 +1,22 @@
+// Copyright (c) 2018 Mike Dev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// https://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic.hpp>
+
+struct Dummy
+{
+ int x[128];
+};
+
+int main()
+{
+ Dummy d = {};
+ boost::atomic<Dummy> ad;
+
+ // this operation requires functions from
+ // the compiled part of Boost.Atomic
+ ad = d;
+}
diff --git a/src/boost/libs/atomic/test/value_with_epsilon.hpp b/src/boost/libs/atomic/test/value_with_epsilon.hpp
new file mode 100644
index 000000000..32180a7db
--- /dev/null
+++ b/src/boost/libs/atomic/test/value_with_epsilon.hpp
@@ -0,0 +1,78 @@
+// Copyright (c) 2018 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_ATOMIC_TESTS_VALUE_WITH_EPSILON_H_INCLUDED_
+#define BOOST_ATOMIC_TESTS_VALUE_WITH_EPSILON_H_INCLUDED_
+
+#include <limits>
+#include <iosfwd>
+
+template< typename T >
+class value_with_epsilon
+{
+private:
+ T m_value;
+ T m_epsilon;
+
+public:
+ value_with_epsilon(T value, T epsilon) : m_value(value), m_epsilon(epsilon) {}
+
+ T value() const
+ {
+ return m_value;
+ }
+
+ T epsilon() const
+ {
+ return m_epsilon;
+ }
+
+ bool equal(T value) const
+ {
+ return value >= (m_value - m_epsilon) && value <= (m_value + m_epsilon);
+ }
+
+ friend bool operator== (T left, value_with_epsilon< T > const& right)
+ {
+ return right.equal(left);
+ }
+ friend bool operator== (value_with_epsilon< T > const& left, T right)
+ {
+ return left.equal(right);
+ }
+
+ friend bool operator!= (T left, value_with_epsilon< T > const& right)
+ {
+ return !right.equal(left);
+ }
+ friend bool operator!= (value_with_epsilon< T > const& left, T right)
+ {
+ return !left.equal(right);
+ }
+};
+
+template< typename Char, typename Traits, typename T >
+inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, value_with_epsilon< T > const& val)
+{
+ // Note: libstdc++ does not provide output operators for __float128. There may also be no operators for long double.
+ // We don't use such floating point values in our tests where the cast would matter.
+ strm << static_cast< double >(val.value()) << " (+/-" << static_cast< double >(val.epsilon()) << ")";
+ return strm;
+}
+
+template< typename T, typename U >
+inline value_with_epsilon< T > approx(T value, U epsilon)
+{
+ return value_with_epsilon< T >(value, static_cast< T >(epsilon));
+}
+
+template< typename T >
+inline value_with_epsilon< T > approx(T value)
+{
+ return value_with_epsilon< T >(value, static_cast< T >(0.0000001));
+}
+
+#endif // BOOST_ATOMIC_TESTS_VALUE_WITH_EPSILON_H_INCLUDED_