// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "db/flush_scheduler.h"
#include <cassert>
#include "db/column_family.h"
namespace ROCKSDB_NAMESPACE {
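
// Pushes `cfd` onto an intrusive, lock-free LIFO list of column families
// that need flushing. The CAS loop below makes concurrent pushers safe with
// respect to one another; a reference is taken on the column family so it
// stays alive until TakeNextColumnFamily() hands it back to a caller.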
void FlushScheduler::ScheduleWork(ColumnFamilyData* cfd) {
#ifndef NDEBUG
  {
    std::lock_guard<std::mutex> lock(checking_mutex_);
    assert(checking_set_.count(cfd) == 0);
    checking_set_.insert(cfd);
  }
#endif  // NDEBUG
  cfd->Ref();
// Suppress false positive clang analyzer warnings.
#ifndef __clang_analyzer__
  Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
  while (!head_.compare_exchange_strong(
      node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
    // failing CAS updates the first param, so we are already set for
    // retry. TakeNextColumnFamily won't happen until after another
    // inter-thread synchronization, so we don't even need release
    // semantics for this CAS
  }
#endif  // __clang_analyzer__
}
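
// Pops the most recently scheduled column family, skipping entries whose
// column family has been dropped in the meantime. The plain load/store pair
// below (no CAS) assumes a single consumer that does not race with
// ScheduleWork(); a push landing between the load and the store would be
// lost. The returned cfd is still Ref()-ed, so the caller must eventually
// call UnrefAndTryDelete() on it.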
ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
  while (true) {
    if (head_.load(std::memory_order_relaxed) == nullptr) {
      return nullptr;
    }

    // dequeue the head
    Node* node = head_.load(std::memory_order_relaxed);
    head_.store(node->next, std::memory_order_relaxed);
    ColumnFamilyData* cfd = node->column_family;
    delete node;

#ifndef NDEBUG
    {
      std::lock_guard<std::mutex> lock(checking_mutex_);
      auto iter = checking_set_.find(cfd);
      assert(iter != checking_set_.end());
      checking_set_.erase(iter);
    }
#endif  // NDEBUG

    if (!cfd->IsDropped()) {
      // success
      return cfd;
    }

    // no longer relevant, retry
    cfd->UnrefAndTryDelete();
  }
}
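
// Best-effort emptiness check: the relaxed load can miss a push that has
// not yet become visible to this thread, so a stale "empty" answer only
// delays work that was scheduled concurrently.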
bool FlushScheduler::Empty() {
  auto rv = head_.load(std::memory_order_relaxed) == nullptr;
#ifndef NDEBUG
  std::lock_guard<std::mutex> lock(checking_mutex_);
  // Empty is allowed to be called concurrently with ScheduleWork. It would
  // only miss the recent schedules.
  assert((rv == checking_set_.empty()) || rv);
#endif  // NDEBUG
  return rv;
}
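
// Drains any remaining entries, dropping the reference that ScheduleWork()
// took on each column family. Like TakeNextColumnFamily(), this assumes no
// concurrent pushers.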
void FlushScheduler::Clear() {
  ColumnFamilyData* cfd;
  while ((cfd = TakeNextColumnFamily()) != nullptr) {
    cfd->UnrefAndTryDelete();
  }
  assert(head_.load(std::memory_order_relaxed) == nullptr);
}
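
// A minimal consumer-side sketch (hypothetical caller; in RocksDB the
// scheduler is driven from the write path, not by user code):
//
//   FlushScheduler scheduler;
//   scheduler.ScheduleWork(cfd);  // writer: this memtable needs a flush
//   ...
//   while (ColumnFamilyData* next = scheduler.TakeNextColumnFamily()) {
//     // switch the memtable / enqueue the flush for `next`, then release
//     // the reference that ScheduleWork() took:
//     next->UnrefAndTryDelete();
//   }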
} // namespace ROCKSDB_NAMESPACE