/*****************************************************************************

Copyright (c) 2020, 2022, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

#include "srw_lock.h"
#include "srv0srv.h"
#include "my_cpu.h"
#include "transactional_lock_guard.h"

#ifdef NO_ELISION
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
# include <intrin.h>
bool have_transactional_memory;
bool transactional_lock_enabled()
{
  int regs[4];
  __cpuid(regs, 0);
  if (regs[0] < 7)
    return false;
  __cpuidex(regs, 7, 0);
  /* Restricted Transactional Memory (RTM) */
  have_transactional_memory= regs[1] & 1U << 11;
  return have_transactional_memory;
}
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
# include <cpuid.h>
bool have_transactional_memory;
bool transactional_lock_enabled()
{
  if (__get_cpuid_max(0, nullptr) < 7)
    return false;
  unsigned eax, ebx, ecx, edx;
  __cpuid_count(7, 0, eax, ebx, ecx, edx);
  /* Restricted Transactional Memory (RTM) */
  have_transactional_memory= ebx & 1U << 11;
  return have_transactional_memory;
}

# ifdef UNIV_DEBUG
TRANSACTIONAL_TARGET
bool xtest() { return have_transactional_memory && _xtest(); }
# endif
#elif defined __powerpc64__ || defined __s390__
# include <htmxlintrin.h>
# include <setjmp.h>
# include <signal.h>

__attribute__((target("htm"),hot))
bool xbegin()
{
  return have_transactional_memory &&
    __TM_simple_begin() == _HTM_TBEGIN_STARTED;
}

__attribute__((target("htm"),hot))
void xabort() { __TM_abort(); }

__attribute__((target("htm"),hot))
void xend() { __TM_end(); }

bool have_transactional_memory;
static sigjmp_buf ill_jmp;
static void ill_handler(int sig)
{
  siglongjmp(ill_jmp, sig);
}

/**
  Test that we can execute a transaction without raising SIGILL,
  and that a one-instruction store inside it can succeed.
*/
__attribute__((noinline))
static void test_tm(bool *r)
{
  if (__TM_simple_begin() == _HTM_TBEGIN_STARTED)
  {
    *r= true;
    __TM_end();
  }
}

bool transactional_lock_enabled()
{
  bool r= false;
  sigset_t oset;
  struct sigaction ill_act, oact_ill;

  memset(&ill_act, 0, sizeof(ill_act));
  ill_act.sa_handler = ill_handler;
  sigfillset(&ill_act.sa_mask);
  sigdelset(&ill_act.sa_mask, SIGILL);

  sigprocmask(SIG_SETMASK, &ill_act.sa_mask, &oset);
  sigaction(SIGILL, &ill_act, &oact_ill);
  if (sigsetjmp(ill_jmp, 1) == 0)
  {
    test_tm(&r);
  }
  sigaction(SIGILL, &oact_ill, NULL);
  sigprocmask(SIG_SETMASK, &oset, NULL);
  return r;
}

# ifdef UNIV_DEBUG
__attribute__((target("htm"),hot))
bool xtest()
{
# ifdef __s390x__
  return have_transactional_memory &&
    __builtin_tx_nesting_depth() > 0;
# else
  return have_transactional_memory &&
    _HTM_STATE (__builtin_ttest ()) == _HTM_TRANSACTIONAL;
# endif
}
# endif
#endif
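
/*
  Illustrative sketch (not part of this translation unit): once
  transactional_lock_enabled() has reported support, lock elision via
  xbegin()/xend() follows the usual pattern over some lock "mutex"
  (the names mutex and do_critical_section() are hypothetical):

    if (xbegin())                // speculative execution started
    {
      if (!mutex.is_locked())    // the lock word must remain untouched
      {
        do_critical_section();   // runs transactionally
        xend();                  // commit
        return;
      }
      xabort();                  // holder present: abort, fall back
    }
    mutex.lock();                // non-transactional fallback
    do_critical_section();
    mutex.unlock();

  The actual guard built on these primitives is declared in
  transactional_lock_guard.h, included above.
*/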

/** @return the parameter for srw_pause() */
static inline unsigned srw_pause_delay()
{
  return my_cpu_relax_multiplier / 4 * srv_spin_wait_delay;
}

/** Pause the CPU for some time, with no memory accesses. */
static inline void srw_pause(unsigned delay)
{
  HMT_low();
  while (delay--)
    MY_RELAX_CPU();
  HMT_medium();
}

#ifndef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
template<> void pthread_mutex_wrapper<true>::wr_wait()
{
  const unsigned delay= srw_pause_delay();

  for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
  {
    srw_pause(delay);
    if (wr_lock_try())
      return;
  }

  pthread_mutex_lock(&lock);
}
#endif

#ifdef SUX_LOCK_GENERIC
template void ssux_lock_impl<false>::init();
template void ssux_lock_impl<true>::init();
template void ssux_lock_impl<false>::destroy();
template void ssux_lock_impl<true>::destroy();

template<bool spinloop>
inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
{
  pthread_mutex_lock(&mutex);
  while (lock.load(std::memory_order_relaxed) == lk)
    pthread_cond_wait(&cond, &mutex);
  pthread_mutex_unlock(&mutex);
}

template<bool spinloop>
inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
{
  pthread_mutex_lock(&writer.mutex);
  while (readers.load(std::memory_order_relaxed) == lk)
    pthread_cond_wait(&readers_cond, &writer.mutex);
  pthread_mutex_unlock(&writer.mutex);
}

template<bool spinloop>
void srw_mutex_impl<spinloop>::wake()
{
  pthread_mutex_lock(&mutex);
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&mutex);
}

template<bool spinloop>
void ssux_lock_impl<spinloop>::wake()
{
  pthread_mutex_lock(&writer.mutex);
  pthread_cond_signal(&readers_cond);
  pthread_mutex_unlock(&writer.mutex);
}
#else
static_assert(4 == sizeof(rw_lock), "ABI");
# ifdef _WIN32
# include <synchapi.h>

template<bool spinloop>
inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&lock, &lk, 4, INFINITE); }
template<bool spinloop>
void srw_mutex_impl<spinloop>::wake() { WakeByAddressSingle(&lock); }

template<bool spinloop>
inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&readers, &lk, 4, INFINITE); }
template<bool spinloop>
void ssux_lock_impl<spinloop>::wake() { WakeByAddressSingle(&readers); }
# else
# ifdef __linux__
# include <linux/futex.h>
# include <sys/syscall.h>
# define SRW_FUTEX(a,op,n) \
   syscall(SYS_futex, a, FUTEX_ ## op ## _PRIVATE, n, nullptr, nullptr, 0)
# elif defined __OpenBSD__
# include <sys/time.h>
# include <sys/futex.h>
# define SRW_FUTEX(a,op,n) \
   futex((volatile uint32_t*) a, FUTEX_ ## op, n, nullptr, nullptr)
# elif defined __FreeBSD__
# include <sys/types.h>
# include <sys/umtx.h>
# define FUTEX_WAKE UMTX_OP_WAKE_PRIVATE
# define FUTEX_WAIT UMTX_OP_WAIT_UINT_PRIVATE
# define SRW_FUTEX(a,op,n) _umtx_op(a, FUTEX_ ## op, n, nullptr, nullptr)
# elif defined __DragonFly__
# include <unistd.h>
# define FUTEX_WAKE(a,n) umtx_wakeup(a,n)
# define FUTEX_WAIT(a,n) umtx_sleep(a,n,0)
# define SRW_FUTEX(a,op,n) FUTEX_ ## op((volatile int*) a, int(n))
# else
# error "no futex support"
# endif

template<bool spinloop>
inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
{ SRW_FUTEX(&lock, WAIT, lk); }
template<bool spinloop>
void srw_mutex_impl<spinloop>::wake() { SRW_FUTEX(&lock, WAKE, 1); }

template<bool spinloop>
inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
{ SRW_FUTEX(&readers, WAIT, lk); }
template<bool spinloop>
void ssux_lock_impl<spinloop>::wake() { SRW_FUTEX(&readers, WAKE, 1); }
# endif
#endif

template void srw_mutex_impl<false>::wake();
template void ssux_lock_impl<false>::wake();
template void srw_mutex_impl<true>::wake();
template void ssux_lock_impl<true>::wake();

/*

Unfortunately, compilers targeting IA-32 or AMD64 currently cannot
translate the following single-bit operations into Intel 80386 instructions:

     m.fetch_or(1<<b) & 1<<b       LOCK BTS b, m
     m.fetch_and(~(1U<<b)) & 1<<b  LOCK BTR b, m
     m.fetch_xor(1<<b) & 1<<b      LOCK BTC b, m

Hence, we will manually translate fetch_or() using GCC-style inline
assembler code or a Microsoft intrinsic function.

*/
#if defined __clang_major__ && __clang_major__ < 10
/* Only clang-10 introduced support for asm goto */
#elif defined __APPLE__
/* At least some versions of Apple Xcode do not support asm goto */
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
# define IF_FETCH_OR_GOTO(mem, bit, label)                              \
  __asm__ goto("lock btsl $" #bit ", %0\n\t"                            \
               "jc %l1" : : "m" (mem) : "cc", "memory" : label);
# define IF_NOT_FETCH_OR_GOTO(mem, bit, label)                          \
  __asm__ goto("lock btsl $" #bit ", %0\n\t"                            \
               "jnc %l1" : : "m" (mem) : "cc", "memory" : label);
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
# define IF_FETCH_OR_GOTO(mem, bit, label)                              \
  if (_interlockedbittestandset(reinterpret_cast<volatile long*>(&mem), bit))\
    goto label;
# define IF_NOT_FETCH_OR_GOTO(mem, bit, label)                          \
  if (!_interlockedbittestandset(reinterpret_cast<volatile long*>(&mem), bit))\
    goto label;
#endif
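
/*
  Illustrative comparison (nothing here is compiled): with the portable
  std::atomic interface, the acquisition attempt in wait_and_lock() below
  is written as

    if (!(lock.fetch_or(HOLDER, std::memory_order_relaxed) & HOLDER))
      goto acquired;   // we were the thread that set the HOLDER bit

  for which compilers emit a register-wide atomic operation (typically a
  LOCK CMPXCHG loop) followed by a separate test of the fetched value.
  IF_NOT_FETCH_OR_GOTO(*this, 31, acquired) expresses the same operation
  as a single "lock btsl $31" whose carry flag feeds the conditional
  branch directly.
*/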

template<bool spinloop>
void srw_mutex_impl<spinloop>::wait_and_lock()
{
  uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);

  if (spinloop)
  {
    const unsigned delay= srw_pause_delay();

    for (auto spin= srv_n_spin_wait_rounds;;)
    {
      DBUG_ASSERT(~HOLDER & lk);
      if (lk & HOLDER)
        lk= lock.load(std::memory_order_relaxed);
      else
      {
#ifdef IF_NOT_FETCH_OR_GOTO
        static_assert(HOLDER == (1U << 31), "compatibility");
        IF_NOT_FETCH_OR_GOTO(*this, 31, acquired);
        lk|= HOLDER;
#else
        if (!((lk= lock.fetch_or(HOLDER, std::memory_order_relaxed)) & HOLDER))
          goto acquired;
#endif
        srw_pause(delay);
      }
      if (!--spin)
        break;
    }
  }

  for (;;)
  {
    DBUG_ASSERT(~HOLDER & lk);
    if (lk & HOLDER)
    {
      wait(lk);
#ifdef IF_FETCH_OR_GOTO
reload:
#endif
      lk= lock.load(std::memory_order_relaxed);
    }
    else
    {
#ifdef IF_FETCH_OR_GOTO
      static_assert(HOLDER == (1U << 31), "compatibility");
      IF_FETCH_OR_GOTO(*this, 31, reload);
#else
      if ((lk= lock.fetch_or(HOLDER, std::memory_order_relaxed)) & HOLDER)
        continue;
      DBUG_ASSERT(lk);
#endif
acquired:
      std::atomic_thread_fence(std::memory_order_acquire);
      return;
    }
  }
}

template void srw_mutex_impl<false>::wait_and_lock();
template void srw_mutex_impl<true>::wait_and_lock();

template<bool spinloop>
void ssux_lock_impl<spinloop>::wr_wait(uint32_t lk)
{
  DBUG_ASSERT(writer.is_locked());
  DBUG_ASSERT(lk);
  DBUG_ASSERT(lk < WRITER);

  if (spinloop)
  {
    const unsigned delay= srw_pause_delay();

    for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
    {
      srw_pause(delay);
      lk= readers.load(std::memory_order_acquire);
      if (lk == WRITER)
        return;
      DBUG_ASSERT(lk > WRITER);
    }
  }

  lk|= WRITER;

  do
  {
    DBUG_ASSERT(lk > WRITER);
    wait(lk);
    lk= readers.load(std::memory_order_acquire);
  }
  while (lk != WRITER);
}

template void ssux_lock_impl<true>::wr_wait(uint32_t);
template void ssux_lock_impl<false>::wr_wait(uint32_t);

template<bool spinloop>
void ssux_lock_impl<spinloop>::rd_wait()
{
  for (;;)
  {
    writer.wr_lock();
    bool acquired= rd_lock_try();
    writer.wr_unlock();
    if (acquired)
      break;
  }
}

template void ssux_lock_impl<true>::rd_wait();
template void ssux_lock_impl<false>::rd_wait();

#if defined _WIN32 || defined SUX_LOCK_GENERIC
template<> void srw_lock_<true>::rd_wait()
{
  const unsigned delay= srw_pause_delay();

  for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
  {
    srw_pause(delay);
    if (rd_lock_try())
      return;
  }

  IF_WIN(AcquireSRWLockShared(&lk), rw_rdlock(&lk));
}

template<> void srw_lock_<true>::wr_wait()
{
  const unsigned delay= srw_pause_delay();

  for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
  {
    srw_pause(delay);
    if (wr_lock_try())
      return;
  }

  IF_WIN(AcquireSRWLockExclusive(&lk), rw_wrlock(&lk));
}
#endif
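
/*
  Worked example of the lock word driven by wait_and_lock() above
  (HOLDER is bit 31, per the static_assert; the remaining bits count
  registered contenders, the holder included):

    lock == 0             free
    lock == HOLDER | 1    held, no other contender
    lock == HOLDER | 3    held, two more threads parked in wait(lk)
    lock == 2             just released; two contenders race to set
                          HOLDER, the loser returns to wait()

  The matching release path (clearing HOLDER and the holder's contender
  count in one atomic subtraction, then waking a waiter if any remain)
  lives in srw_lock.h, not in this file.
*/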

#ifdef UNIV_PFS_RWLOCK
template void srw_lock_impl<false>::psi_rd_lock(const char*, unsigned);
template void srw_lock_impl<false>::psi_wr_lock(const char*, unsigned);
template void srw_lock_impl<true>::psi_rd_lock(const char*, unsigned);
template void srw_lock_impl<true>::psi_wr_lock(const char*, unsigned);

template<bool spinloop>
void srw_lock_impl<spinloop>::psi_rd_lock(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  const bool nowait= lock.rd_lock_try();
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)
      (&state, pfs_psi, nowait ? PSI_RWLOCK_TRYREADLOCK : PSI_RWLOCK_READLOCK,
       file, line))
  {
    if (!nowait)
      lock.rd_lock();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else if (!nowait)
    lock.rd_lock();
}

template<bool spinloop>
void srw_lock_impl<spinloop>::psi_wr_lock(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  const bool nowait= lock.wr_lock_try();
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
      (&state, pfs_psi,
       nowait ? PSI_RWLOCK_TRYWRITELOCK : PSI_RWLOCK_WRITELOCK,
       file, line))
  {
    if (!nowait)
      lock.wr_lock();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else if (!nowait)
    lock.wr_lock();
}

void ssux_lock::psi_rd_lock(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  const bool nowait= lock.rd_lock_try();
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)
      (&state, pfs_psi,
       nowait ? PSI_RWLOCK_TRYSHAREDLOCK : PSI_RWLOCK_SHAREDLOCK,
       file, line))
  {
    if (!nowait)
      lock.rd_lock();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else if (!nowait)
    lock.rd_lock();
}

void ssux_lock::psi_u_lock(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
      (&state, pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK, file, line))
  {
    lock.u_lock();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else
    lock.u_lock();
}

void ssux_lock::psi_wr_lock(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  const bool nowait= lock.wr_lock_try();
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
      (&state, pfs_psi,
       nowait ? PSI_RWLOCK_TRYEXCLUSIVELOCK : PSI_RWLOCK_EXCLUSIVELOCK,
       file, line))
  {
    if (!nowait)
      lock.wr_lock();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else if (!nowait)
    lock.wr_lock();
}

void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
{
  PSI_rwlock_locker_state state;
  DBUG_ASSERT(lock.writer.is_locked());
  uint32_t lk= 1;
  const bool nowait=
    lock.readers.compare_exchange_strong(lk, ssux_lock_impl<false>::WRITER,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed);
  if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
      (&state, pfs_psi,
       nowait ? PSI_RWLOCK_TRYEXCLUSIVELOCK : PSI_RWLOCK_EXCLUSIVELOCK,
       file, line))
  {
    if (!nowait)
      lock.u_wr_upgrade();
    PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
  }
  else if (!nowait)
    lock.u_wr_upgrade();
}
#else /* UNIV_PFS_RWLOCK */
template void ssux_lock_impl<false>::rd_lock();
template void ssux_lock_impl<false>::rd_unlock();
template void ssux_lock_impl<false>::u_unlock();
template void ssux_lock_impl<false>::wr_unlock();
#endif /* UNIV_PFS_RWLOCK */
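
/*
  Outline of the instrumentation pattern shared by the psi_ functions
  above (illustrative pseudo-code, not an additional implementation):

    const bool nowait= try_lock();              // uncontended fast path
    if (locker= PSI_RWLOCK_CALL(start_..._wait)(...))
    {                                           // probe enabled
      if (!nowait)
        lock();                                 // timed slow path
      PSI_RWLOCK_CALL(end_..._wait)(locker, 0);
    }
    else if (!nowait)
      lock();                                   // probe disabled: just lock

  A successful try-lock is reported as a PSI_RWLOCK_TRY... operation, so
  performance_schema only times acquisitions that could actually block.
*/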

#ifdef UNIV_DEBUG
void srw_lock_debug::SRW_LOCK_INIT(mysql_pfs_key_t key)
{
  srw_lock::SRW_LOCK_INIT(key);
  readers_lock.init();
  ut_ad(!readers.load(std::memory_order_relaxed));
  ut_ad(!have_any());
}

void srw_lock_debug::destroy()
{
  ut_ad(!writer);
  if (auto r= readers.load(std::memory_order_relaxed))
  {
    readers.store(0, std::memory_order_relaxed);
    ut_ad(r->empty());
    delete r;
  }
  srw_lock::destroy();
}

bool srw_lock_debug::wr_lock_try()
{
  ut_ad(!have_any());
  if (!srw_lock::wr_lock_try())
    return false;
  ut_ad(!writer);
  writer.store(pthread_self(), std::memory_order_relaxed);
  return true;
}

void srw_lock_debug::wr_lock(SRW_LOCK_ARGS(const char *file, unsigned line))
{
  ut_ad(!have_any());
  srw_lock::wr_lock(SRW_LOCK_ARGS(file, line));
  ut_ad(!writer);
  writer.store(pthread_self(), std::memory_order_relaxed);
}

void srw_lock_debug::wr_unlock()
{
  ut_ad(have_wr());
  writer.store(0, std::memory_order_relaxed);
  srw_lock::wr_unlock();
}

void srw_lock_debug::readers_register()
{
  readers_lock.wr_lock();
  auto r= readers.load(std::memory_order_relaxed);
  if (!r)
  {
    r= new std::unordered_multiset<pthread_t>();
    readers.store(r, std::memory_order_relaxed);
  }
  r->emplace(pthread_self());
  readers_lock.wr_unlock();
}

bool srw_lock_debug::rd_lock_try()
{
  ut_ad(!have_any());
  if (!srw_lock::rd_lock_try())
    return false;
  readers_register();
  return true;
}

void srw_lock_debug::rd_lock(SRW_LOCK_ARGS(const char *file, unsigned line))
{
  ut_ad(!have_any());
  srw_lock::rd_lock(SRW_LOCK_ARGS(file, line));
  readers_register();
}

void srw_lock_debug::rd_unlock()
{
  const pthread_t self= pthread_self();
  ut_ad(writer != self);
  readers_lock.wr_lock();
  auto r= readers.load(std::memory_order_relaxed);
  ut_ad(r);
  auto i= r->find(self);
  ut_ad(i != r->end());
  r->erase(i);
  readers_lock.wr_unlock();
  srw_lock::rd_unlock();
}

bool srw_lock_debug::have_rd() const noexcept
{
  if (auto r= readers.load(std::memory_order_relaxed))
  {
    readers_lock.wr_lock();
    bool found= r->find(pthread_self()) != r->end();
    readers_lock.wr_unlock();
# ifndef SUX_LOCK_GENERIC
    ut_ad(!found || is_locked());
# endif
    return found;
  }
  return false;
}

bool srw_lock_debug::have_wr() const noexcept
{
  if (writer != pthread_self())
    return false;
# ifndef SUX_LOCK_GENERIC
  ut_ad(is_write_locked());
# endif
  return true;
}

bool srw_lock_debug::have_any() const noexcept
{
  return have_wr() || have_rd();
}
#endif
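
/*
  Usage sketch for the debug wrapper above (illustrative only; "latch"
  and the performance_schema key are hypothetical):

    srw_lock_debug latch;
    latch.SRW_LOCK_INIT(some_pfs_key);
    latch.wr_lock(SRW_LOCK_CALL);
    ut_ad(latch.have_wr());      // current thread holds it exclusively
    latch.wr_unlock();
    ut_ad(!latch.have_any());
    latch.destroy();

  have_rd()/have_wr() consult state tracked by this wrapper itself (the
  writer thread id and the multiset of reader thread ids), so ownership
  assertions work even where the underlying lock cannot report which
  thread holds it.
*/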