1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSKY <haomai@xsky.com>
*
* Author: Haomai Wang <haomaiwang@gmail.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_RDMASTACK_H
#define CEPH_MSG_RDMASTACK_H
#include <sys/eventfd.h>
#include <list>
#include <vector>
#include <thread>
#include "common/ceph_context.h"
#include "common/debug.h"
#include "common/errno.h"
#include "msg/async/Stack.h"
#include "Infiniband.h"
class RDMAConnectedSocketImpl;
class RDMAServerSocketImpl;
class RDMAStack;
class RDMAWorker;
class RDMADispatcher {
typedef Infiniband::MemoryManager::Chunk Chunk;
typedef Infiniband::QueuePair QueuePair;
std::thread t;
CephContext *cct;
Infiniband::CompletionQueue* tx_cq = nullptr;
Infiniband::CompletionQueue* rx_cq = nullptr;
Infiniband::CompletionChannel *tx_cc = nullptr, *rx_cc = nullptr;
EventCallbackRef async_handler;
bool done = false;
std::atomic<uint64_t> num_dead_queue_pair = {0};
std::atomic<uint64_t> num_qp_conn = {0};
Mutex lock; // protect `qp_conns`, `dead_queue_pairs`
// qp_num -> InfRcConnection
// The main usage of `qp_conns` is looking up connection by qp_num,
// so the lifecycle of element in `qp_conns` is the lifecycle of qp.
//// make qp queue into dead state
/**
* 1. Connection call mark_down
* 2. Move the Queue Pair into the Error state(QueuePair::to_dead)
* 3. Wait for the affiliated event IBV_EVENT_QP_LAST_WQE_REACHED(handle_async_event)
* 4. Wait for CQ to be empty(handle_tx_event)
* 5. Destroy the QP by calling ibv_destroy_qp()(handle_tx_event)
*
* @param qp The qp needed to dead
*/
ceph::unordered_map<uint32_t, std::pair<QueuePair*, RDMAConnectedSocketImpl*> > qp_conns;
/// if a queue pair is closed when transmit buffers are active
/// on it, the transmit buffers never get returned via tx_cq. To
/// work around this problem, don't delete queue pairs immediately. Instead,
/// save them in this vector and delete them at a safe time, when there are
/// no outstanding transmit buffers to be lost.
std::vector<QueuePair*> dead_queue_pairs;
std::atomic<uint64_t> num_pending_workers = {0};
Mutex w_lock; // protect pending workers
// fixme: lockfree
std::list<RDMAWorker*> pending_workers;
RDMAStack* stack;
class C_handle_cq_async : public EventCallback {
RDMADispatcher *dispatcher;
public:
explicit C_handle_cq_async(RDMADispatcher *w): dispatcher(w) {}
void do_request(uint64_t fd) {
// worker->handle_tx_event();
dispatcher->handle_async_event();
}
};
public:
PerfCounters *perf_logger;
explicit RDMADispatcher(CephContext* c, RDMAStack* s);
virtual ~RDMADispatcher();
void handle_async_event();
void polling_start();
void polling_stop();
void polling();
void register_qp(QueuePair *qp, RDMAConnectedSocketImpl* csi);
void make_pending_worker(RDMAWorker* w) {
Mutex::Locker l(w_lock);
auto it = std::find(pending_workers.begin(), pending_workers.end(), w);
if (it != pending_workers.end())
return;
pending_workers.push_back(w);
++num_pending_workers;
}
RDMAStack* get_stack() { return stack; }
RDMAConnectedSocketImpl* get_conn_lockless(uint32_t qp);
QueuePair* get_qp(uint32_t qp);
void erase_qpn_lockless(uint32_t qpn);
void erase_qpn(uint32_t qpn);
Infiniband::CompletionQueue* get_tx_cq() const { return tx_cq; }
Infiniband::CompletionQueue* get_rx_cq() const { return rx_cq; }
void notify_pending_workers();
void handle_tx_event(ibv_wc *cqe, int n);
void post_tx_buffer(std::vector<Chunk*> &chunks);
std::atomic<uint64_t> inflight = {0};
void post_chunk_to_pool(Chunk* chunk);
int post_chunks_to_rq(int num, ibv_qp *qp=NULL);
};
class RDMAWorker : public Worker {
typedef Infiniband::CompletionQueue CompletionQueue;
typedef Infiniband::CompletionChannel CompletionChannel;
typedef Infiniband::MemoryManager::Chunk Chunk;
typedef Infiniband::MemoryManager MemoryManager;
typedef std::vector<Chunk*>::iterator ChunkIter;
RDMAStack *stack;
EventCallbackRef tx_handler;
std::list<RDMAConnectedSocketImpl*> pending_sent_conns;
RDMADispatcher* dispatcher = nullptr;
Mutex lock;
class C_handle_cq_tx : public EventCallback {
RDMAWorker *worker;
public:
explicit C_handle_cq_tx(RDMAWorker *w): worker(w) {}
void do_request(uint64_t fd) {
worker->handle_pending_message();
}
};
public:
PerfCounters *perf_logger;
explicit RDMAWorker(CephContext *c, unsigned i);
virtual ~RDMAWorker();
virtual int listen(entity_addr_t &addr,
unsigned addr_slot,
const SocketOptions &opts, ServerSocket *) override;
virtual int connect(const entity_addr_t &addr, const SocketOptions &opts, ConnectedSocket *socket) override;
virtual void initialize() override;
RDMAStack *get_stack() { return stack; }
int get_reged_mem(RDMAConnectedSocketImpl *o, std::vector<Chunk*> &c, size_t bytes);
void remove_pending_conn(RDMAConnectedSocketImpl *o) {
ceph_assert(center.in_thread());
pending_sent_conns.remove(o);
}
void handle_pending_message();
void set_stack(RDMAStack *s) { stack = s; }
void notify_worker() {
center.dispatch_event_external(tx_handler);
}
};
// Aggregates an rdma_cm id, its event channel, and the queue-pair number;
// passed to RDMAIWARPConnectedSocketImpl's constructor.
struct RDMACMInfo {
  RDMACMInfo(rdma_cm_id *id, rdma_event_channel *channel, uint32_t qpn)
    : cm_id(id), cm_channel(channel), qp_num(qpn) {}
  rdma_cm_id *cm_id;
  rdma_event_channel *cm_channel;
  uint32_t qp_num;
};
// RDMA implementation of ConnectedSocketImpl, backed by an ibverbs queue
// pair.  Note: fd() returns notify_fd (a wakeup fd, presumably an eventfd
// given the <sys/eventfd.h> include — confirm), not a network fd; the TCP
// fd used for connection setup is returned by socket_fd().
class RDMAConnectedSocketImpl : public ConnectedSocketImpl {
public:
typedef Infiniband::MemoryManager::Chunk Chunk;
typedef Infiniband::CompletionChannel CompletionChannel;
typedef Infiniband::CompletionQueue CompletionQueue;
protected:
CephContext *cct;
Infiniband::QueuePair *qp; // queue pair backing this socket
IBSYNMsg peer_msg; // peer's connection parameters
IBSYNMsg my_msg; // our connection parameters
int connected; // returned as-is by is_connected()
int error;
Infiniband* infiniband;
RDMADispatcher* dispatcher;
RDMAWorker* worker;
std::vector<Chunk*> buffers; // presumably received chunks pending read() — confirm in .cc
int notify_fd = -1; // exposed via fd()
bufferlist pending_bl; // presumably data queued by send() until submit() — confirm in .cc
Mutex lock;
std::vector<ibv_wc> wc; // work completions handed over via pass_wc()/get_wc()
bool is_server;
EventCallbackRef read_handler;
EventCallbackRef established_handler;
int tcp_fd = -1; // exposed via socket_fd()
bool active;// qp is active ?
bool pending;
int post_backlog = 0;
void notify();
ssize_t read_buffers(char* buf, size_t len);
int post_work_request(std::vector<Chunk*>&);
public:
RDMAConnectedSocketImpl(CephContext *cct, Infiniband* ib, RDMADispatcher* s,
RDMAWorker *w);
virtual ~RDMAConnectedSocketImpl();
// Hand a batch of work completions to this socket (move semantics).
void pass_wc(std::vector<ibv_wc> &&v);
void get_wc(std::vector<ibv_wc> &w);
virtual int is_connected() override { return connected; }
virtual ssize_t read(char* buf, size_t len) override;
virtual ssize_t zero_copy_read(bufferptr &data) override;
virtual ssize_t send(bufferlist &bl, bool more) override;
virtual void shutdown() override;
virtual void close() override;
virtual int fd() const override { return notify_fd; }
virtual int socket_fd() const override { return tcp_fd; }
void fault();
// Human-readable name of the current qp state, for logging.
const char* get_qp_state() { return Infiniband::qp_state_string(qp->get_state()); }
ssize_t submit(bool more);
int activate();
void fin();
void handle_connection();
int handle_connection_established(bool need_set_fault = true);
void cleanup();
// Adopt an already-accepted TCP fd (server side of the handshake).
void set_accept_fd(int sd);
virtual int try_connect(const entity_addr_t&, const SocketOptions &opt);
bool is_pending() {return pending;}
void set_pending(bool val) {pending = val;}
void post_chunks_to_rq(int num);
void update_post_backlog();
};
// States of the rdma_cm based (iWARP) connection setup sequence; stored in
// RDMAIWARPConnectedSocketImpl::status.  Values after IDLE follow the usual
// rdma_cm progression: create id -> resolve address -> resolve route ->
// connect.
enum RDMA_CM_STATUS {
IDLE = 1,
RDMA_ID_CREATED,
CHANNEL_FD_CREATED,
RESOURCE_ALLOCATED,
ADDR_RESOLVED,
ROUTE_RESOLVED,
CONNECTED,
DISCONNECTED,
ERROR // NOTE(review): unscoped name may clash with common ERROR macros — verify on all platforms
};
class RDMAIWARPConnectedSocketImpl : public RDMAConnectedSocketImpl {
public:
RDMAIWARPConnectedSocketImpl(CephContext *cct, Infiniband* ib, RDMADispatcher* s,
RDMAWorker *w, RDMACMInfo *info = nullptr);
~RDMAIWARPConnectedSocketImpl();
virtual int try_connect(const entity_addr_t&, const SocketOptions &opt) override;
virtual void close() override;
virtual void shutdown() override;
virtual void handle_cm_connection();
uint32_t get_local_qpn() const { return local_qpn; }
void activate();
int alloc_resource();
void close_notify();
private:
rdma_cm_id *cm_id;
rdma_event_channel *cm_channel;
uint32_t local_qpn;
uint32_t remote_qpn;
EventCallbackRef cm_con_handler;
bool is_server;
std::mutex close_mtx;
std::condition_variable close_condition;
bool closed;
RDMA_CM_STATUS status;
class C_handle_cm_connection : public EventCallback {
RDMAIWARPConnectedSocketImpl *csi;
public:
C_handle_cm_connection(RDMAIWARPConnectedSocketImpl *w): csi(w) {}
void do_request(uint64_t fd) {
csi->handle_cm_connection();
}
};
};
// Listening-socket implementation for the RDMA stack.  Listens on a plain
// TCP socket (server_setup_socket) used for the out-of-band connection
// handshake; accepted connections become RDMAConnectedSocketImpl objects.
class RDMAServerSocketImpl : public ServerSocketImpl {
protected:
CephContext *cct;
NetHandler net;
int server_setup_socket; // TCP listening fd, returned by fd()/get_fd()
Infiniband* infiniband;
RDMADispatcher *dispatcher;
RDMAWorker *worker;
entity_addr_t sa; // address this server socket is bound to
public:
RDMAServerSocketImpl(CephContext *cct, Infiniband* i, RDMADispatcher *s,
RDMAWorker *w, entity_addr_t& a, unsigned slot);
virtual int listen(entity_addr_t &sa, const SocketOptions &opt);
virtual int accept(ConnectedSocket *s, const SocketOptions &opts, entity_addr_t *out, Worker *w) override;
virtual void abort_accept() override;
virtual int fd() const override { return server_setup_socket; }
// NOTE(review): duplicate accessor for server_setup_socket (same as fd());
// kept for existing callers.
int get_fd() { return server_setup_socket; }
};
// iWARP variant of the RDMA server socket: listens and accepts via
// librdmacm (rdma_cm_id / rdma_event_channel) rather than a TCP
// handshake socket.
class RDMAIWARPServerSocketImpl : public RDMAServerSocketImpl {
public:
RDMAIWARPServerSocketImpl(
CephContext *cct, Infiniband *i, RDMADispatcher *s, RDMAWorker *w,
entity_addr_t& addr, unsigned addr_slot);
virtual int listen(entity_addr_t &sa, const SocketOptions &opt) override;
virtual int accept(ConnectedSocket *s, const SocketOptions &opts, entity_addr_t *out, Worker *w) override;
virtual void abort_accept() override;
private:
rdma_cm_id *cm_id; // listening cm id
rdma_event_channel *cm_channel; // event channel for the listening id
};
/**
 * RDMAStack is the NetworkStack implementation that wires RDMAWorkers to a
 * shared Infiniband device and RDMADispatcher.  is_ready() gates use until
 * ready() is called after forking (RDMA resources must not cross a fork).
 */
class RDMAStack : public NetworkStack {
  std::vector<std::thread> threads;   // one thread per worker (spawn/join_worker)
  PerfCounters *perf_counter;
  Infiniband ib;
  RDMADispatcher dispatcher;

  std::atomic<bool> fork_finished = {false};

 public:
  explicit RDMAStack(CephContext *cct, const std::string &t);
  virtual ~RDMAStack();
  virtual bool support_zero_copy_read() const override { return false; }
  virtual bool nonblock_connect_need_writable_event() const override { return false; }

  virtual void spawn_worker(unsigned i, std::function<void ()> &&func) override;
  virtual void join_worker(unsigned i) override;
  RDMADispatcher &get_dispatcher() { return dispatcher; }
  Infiniband &get_infiniband() { return ib; }
  virtual bool is_ready() override { return fork_finished.load(); }
  virtual void ready() override { fork_finished = true; }
};
#endif
|