path: root/src/libs/dxvk-native-1.9.2a/src/util/sync/sync_spinlock.h
#pragma once

#include <atomic>

#include "../thread.h"

#include "../util_bit.h"
#include "../util_likely.h"

namespace dxvk::sync {

  /**
   * \brief Generic spin function
   *
   * Blocks the calling thread until a condition becomes
   * \c true, calling \c yield every few iterations.
   * \param [in] spinCount Number of probes between each yield
   * \param [in] fn Condition to test
   */
  template<typename Fn>
  void spin(uint32_t spinCount, const Fn& fn) {
    while (unlikely(!fn())) {
      for (uint32_t i = 1; i < spinCount; i++) {
        _mm_pause();
        if (fn())
          return;
      }

      dxvk::this_thread::yield();
    }
  }
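
  // Illustrative usage sketch (not part of the original header): spinning
  // on a hypothetical atomic flag, probing roughly 200 times between yields.
  //
  //   std::atomic<bool> ready = { false };
  //   spin(200, [&ready] { return ready.load(); });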
  
  /**
   * \brief Spin lock
   * 
   * A low-overhead spin lock which can be used to
   * protect data structures for short durations
   * when the structure is unlikely to be contended.
   */
  class Spinlock {

  public:
    
    Spinlock() { }
    ~Spinlock() { }
    
    Spinlock             (const Spinlock&) = delete;
    Spinlock& operator = (const Spinlock&) = delete;
    
    void lock() {
      spin(200, [this] { return try_lock(); });
    }
    
    void unlock() {
      m_lock.store(0, std::memory_order_release);
    }
    
    bool try_lock() {
      return likely(!m_lock.load())
          && likely(!m_lock.exchange(1, std::memory_order_acquire));
    }
    
  private:
    
    std::atomic<uint32_t> m_lock = { 0 };
    
  };
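
  // Illustrative usage sketch (not part of the original header): Spinlock
  // provides lock(), unlock() and try_lock(), so it can be used with the
  // standard RAII wrappers, e.g.:
  //
  //   Spinlock lock;
  //   {
  //     std::lock_guard<Spinlock> guard(lock);
  //     // short critical section
  //   }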
  
}