1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <haomai@xsky.com>
*
* Author: Haomai Wang <haomaiwang@gmail.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <libgen.h>
#include <unistd.h>
#include "BlockDevice.h"
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
#include "KernelDevice.h"
#endif
#if defined(HAVE_SPDK)
#include "NVMEDevice.h"
#endif
#if defined(HAVE_PMEM)
#include "PMEMDevice.h"
#include "libpmem.h"
#endif
#include "common/debug.h"
#include "common/EventTrace.h"
#include "common/errno.h"
#include "include/compat.h"
#define dout_context cct
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev "
void IOContext::aio_wait()
{
std::unique_lock l(lock);
// see _aio_thread for waker logic
while (num_running.load() > 0) {
dout(10) << __func__ << " " << this
<< " waiting for " << num_running.load() << " aios to complete"
<< dendl;
cond.wait(l);
}
dout(20) << __func__ << " " << this << " done" << dendl;
}
// Return the number of "ios" this context represents, used as a simple
// transaction-cost model: callers charge a fixed overhead per io (with
// configurable hdd/ssd defaults) on top of the byte count.
uint64_t IOContext::get_num_ios() const
{
  uint64_t count = 0;
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  // one io per queued (not yet submitted) aio
  count += pending_aios.size();
#endif
#ifdef HAVE_SPDK
  // spdk tracks cost as the total number of segments instead
  count += total_nseg;
#endif
  return count;
}
// Drop the aio structures (and any buffers they pin) once everything has
// completed. Caller must have waited for num_running to reach zero first.
void IOContext::release_running_aios()
{
  ceph_assert(num_running == 0);
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  // destroying the aio objects releases their pinned buffers too
  running_aios.clear();
#endif
}
// Factory: probe `path` and return a heap-allocated backend of the matching
// type ("kernel", "ust-nvme", or "pmem"). Aborts if no compiled-in backend
// matches. Caller owns the returned device.
//
// cb/cbpriv    - aio completion callback and its opaque argument
// d_cb/d_cbpriv - discard completion callback (used by KernelDevice only)
BlockDevice *BlockDevice::create(CephContext* cct, const string& path,
    aio_callback_t cb, void *cbpriv, aio_callback_t d_cb, void *d_cbpriv)
{
  string type = "kernel";
  char buf[PATH_MAX + 1];
  // If path is a symlink whose target's basename starts with SPDK_PREFIX,
  // select the SPDK/NVMe backend. readlink() does not NUL-terminate.
  int r = ::readlink(path.c_str(), buf, sizeof(buf) - 1);
  if (r >= 0) {
    buf[r] = '\0';
    char *bname = ::basename(buf);
    // sizeof(SPDK_PREFIX)-1 excludes the trailing NUL from the compare
    if (strncmp(bname, SPDK_PREFIX, sizeof(SPDK_PREFIX)-1) == 0)
      type = "ust-nvme";
  }

#if defined(HAVE_PMEM)
  // Probe for persistent memory: map the existing file (len=0) read-only
  // and ask libpmem whether the mapping is genuine pmem.
  if (type == "kernel") {
    int is_pmem = 0;
    size_t map_len = 0;
    void *addr = pmem_map_file(path.c_str(), 0, PMEM_FILE_EXCL, O_RDONLY, &map_len, &is_pmem);
    if (addr != NULL) {
      if (is_pmem)
	type = "pmem";
      else
	dout(1) << path.c_str() << " isn't pmem file" << dendl;
      pmem_unmap(addr, map_len);
    } else {
      dout(1) << "pmem_map_file:" << path.c_str() << " failed." << pmem_errormsg() << dendl;
    }
  }
#endif

  dout(1) << __func__ << " path " << path << " type " << type << dendl;

#if defined(HAVE_PMEM)
  if (type == "pmem") {
    return new PMEMDevice(cct, cb, cbpriv);
  }
#endif
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  if (type == "kernel") {
    return new KernelDevice(cct, cb, cbpriv, d_cb, d_cbpriv);
  }
#endif
#if defined(HAVE_SPDK)
  if (type == "ust-nvme") {
    return new NVMEDevice(cct, cb, cbpriv);
  }
#endif

  // Reached only when the detected type's backend was not compiled in.
  derr << __func__ << " unknown backend " << type << dendl;
  ceph_abort();
  return NULL;
}
// Queue an IOContext for deferred deletion by a later reap_ioc() call.
// Takes ownership of `ioc`.
void BlockDevice::queue_reap_ioc(IOContext *ioc)
{
  std::lock_guard l(ioc_reap_lock);
  // ioc_reap_count acts as a 0/1 "queue non-empty" flag rather than a true
  // count: it is raised only on the empty->non-empty transition, and
  // reap_ioc() drops it once after draining the whole queue.
  if (ioc_reap_count.load() == 0)
    ++ioc_reap_count;
  ioc_reap_queue.push_back(ioc);
}
// Delete all IOContexts previously queued via queue_reap_ioc().
void BlockDevice::reap_ioc()
{
  // Unlocked fast-path check on the atomic flag so the common empty case
  // avoids taking ioc_reap_lock; a racing enqueue that we miss here is
  // simply picked up by the next reap_ioc() call.
  if (ioc_reap_count.load()) {
    std::lock_guard l(ioc_reap_lock);
    for (auto p : ioc_reap_queue) {
      dout(20) << __func__ << " reap ioc " << p << dendl;
      delete p;
    }
    ioc_reap_queue.clear();
    // clear the non-empty flag (see queue_reap_ioc: flag, not a counter)
    --ioc_reap_count;
  }
}
|