1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
|
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 20
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
/* Per-open driver context wrapping the IB core ucontext */
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;	/* user doorbell mapping */
	struct list_head cq_reg_mem_list;	/* user-registered CQ memory (irdma_pbl) */
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;	/* user-registered QP memory (irdma_pbl) */
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;		/* user/kernel ABI version in use */
	bool legacy_mode;
};
/* Protection domain: IB core PD plus its HW (sc) counterpart */
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};
/* IPv4/IPv6 socket-address overlay */
union irdma_sockaddr {
	struct sockaddr_in saddr_in;	/* IPv4 */
	struct sockaddr_in6 saddr_in6;	/* IPv6 */
};
/* Address vector: resolved addressing information for an address handle */
struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;	/* source GID as socket address */
	union irdma_sockaddr dgid_addr;	/* destination GID as socket address */
	u8 net_type;	/* presumably an rdma_network_type value — verify against users */
};
/* Address handle: IB core AH plus HW AH state and cache linkage */
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;		/* entry in the cached-AH hash list */
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};
/* PBLE reference: either an index into the PBLE pool or a raw DMA address */
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};
/* Memory layout info for a user-registered CQ */
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;	/* DMA address of the CQ shadow area */
	bool split;
};
/* Memory layout info for a user-registered QP */
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;	/* send queue buffer list */
	struct irdma_hmc_pble rq_pbl;	/* receive queue buffer list */
	dma_addr_t shadow;		/* DMA address of the QP shadow area */
	struct page *sq_page;
};
/* Kernel CQ buffer; presumably an old ring kept around across CQ resize */
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;		/* entry on irdma_cq::resize_list */
	struct work_struct work;	/* deferred-free work item */
};
/* Physical buffer list describing user memory backing a QP or a CQ */
struct irdma_pbl {
	struct list_head list;	/* entry on a ucontext qp/cq_reg_mem_list */
	union {
		struct irdma_qp_mr qp_mr;	/* valid when backing a QP */
		struct irdma_cq_mr cq_mr;	/* valid when backing a CQ */
	};
	bool pbl_allocated:1;	/* PBLE pool backing has been allocated */
	bool on_list:1;		/* currently linked on a registration list */
	u64 user_base;		/* user-space base VA of the region */
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;	/* owning memory region */
};
/* Memory region/window state wrapping the IB core MR or MW */
struct irdma_mr {
	union {
		struct ib_mr ibmr;	/* when used as a memory region */
		struct ib_mw ibmw;	/* when used as a memory window */
	};
	struct ib_umem *region;		/* pinned user memory backing the MR */
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;	/* HW STag for this region */
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR]; /* saved phys page addrs for small MRs */
	struct irdma_pbl iwpbl;
};
/* Completion queue: IB core CQ plus HW CQ state and backing memory */
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;		/* true when the CQ is owned by user space */
	atomic_t armed;		/* completion notification requested */
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;		/* kernel-mode CQE ring */
	struct irdma_dma_mem kmem_shadow;	/* kernel-mode shadow area */
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;	/* user-mode CQ backing memory */
	struct irdma_pbl *iwpbl_shadow;	/* user-mode shadow backing memory */
	struct list_head resize_list;	/* old buffers (irdma_cq_buf) after resize */
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;	/* SW-generated completions (irdma_cmpl_gen) */
};
/* One software-generated completion queued on irdma_cq::cmpl_generated */
struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;	/* completion info to report to the poller */
};
/* Deferred disconnect processing for a QP */
struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;	/* QP being disconnected */
};
struct iw_cm_id;
/* Kernel-mode QP backing memory and work-request ID tracking arrays */
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;	/* queue ring memory */
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;	/* SQ wr_id tracking */
	u64 *rq_wrid_mem;	/* RQ wr_id tracking */
};
/* Queue pair: IB core QP plus HW QP state, connection state and flags */
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;		/* send completion queue */
	struct irdma_cq *iwrcq;		/* receive completion queue */
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;	/* user push-WQE page */
	struct rdma_user_mmap_entry *push_db_mmap_entry;	/* user push-doorbell page */
	struct irdma_qp_host_ctx_info ctx_info;
	/* transport-specific offload info: iWARP or RoCE, never both */
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};
	/* L4 offload info: TCP (iWARP) or UDP (RoCE) */
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};
	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;		/* iWARP connection manager id */
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;	/* delayed flush-completion work */
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;	/* HW modify-QP operation outstanding */
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;	/* presumably last async event code seen — verify */
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;	/* QP owned by a user-space consumer */
	u8 hte_added : 1;
	u8 flush_issued : 1;	/* SQ/RQ flush has been issued */
	u8 sig_all : 1;		/* presumably mirrors IB sq_sig_all — verify */
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;	/* flush code to use on terminate (SQ) */
	u16 term_rq_flush_code;	/* flush code to use on terminate (RQ) */
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;	/* kernel-mode queue backing memory */
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;	/* user-mode QP backing memory */
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;	/* NOTE(review): looks like it signals QP teardown done — confirm */
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;		/* RTS async event received */
};
/* Caching attribute requested for a user mmap of device I/O pages */
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,	/* map non-cached */
	IRDMA_MMAP_IO_WC,	/* map write-combined */
};
/* Driver-private mmap entry: BAR offset plus desired caching attribute */
struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;	/* enum irdma_mmap_flag value */
};
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
/*
 * Map a HW-reported SQ completion type onto the corresponding ib_wc
 * opcode; an unrecognized type is reported as a general error status.
 */
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_SEND:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ:
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}
/*
 * Map an RQ completion onto the corresponding ib_wc receive opcode.
 * @send_imm_support: true when the transport supports Send-with-Immediate.
 */
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	bool imm_from_write;

	if (!send_imm_support) {
		/*
		 * iWARP does not support Send-with-Immediate, so any
		 * immediate data present must have come via RDMA Write.
		 */
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		return;
	}

	/* Only the Write-with-Immediate opcodes report immediate data here */
	imm_from_write =
		cq_poll_info->op_type == IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE ||
		cq_poll_info->op_type == IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE;
	entry->opcode = imm_from_write ? IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
}
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */
|