blob: 5f52ef12788656afef578ed0d6b8edc2d231f446 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
|
# Default nvmf target connection parameters; callers may override these in the
# environment before sourcing this file.
NVMF_PORT=4420 # listening port for the nvmf target
NVMF_IP_PREFIX="192.168.100" # /24 prefix used when assigning RDMA NIC addresses
NVMF_IP_LEAST_ADDR=8 # first host octet handed out by allocate_nic_ips()
NVMF_TCP_IP_ADDRESS="127.0.0.1" # loopback address used for the tcp transport
NVMF_TRANSPORT_OPTS="" # filled in by nvmftestinit() based on $TEST_TRANSPORT
NVMF_SERIAL=SPDK00000000000001 # serial number used for test subsystems
# Finalize the NVMF_APP command line: optionally re-run the app as the
# invoking (non-root) user, then append the shared-memory id and core mask.
# Reads: SPDK_RUN_NON_ROOT, USER, NVMF_APP_SHM_ID. Modifies: NVMF_APP.
function build_nvmf_app_args() {
	if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
		# Drop privileges: prefix the command with sudo -u <user>.
		NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
	fi
	# Common arguments regardless of privilege level.
	NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
}
# Default the shared-memory id (used by the app and by process_shm) to 0
# unless the caller already set one, then bake it into the app command line.
: ${NVMF_APP_SHM_ID="0"}
export NVMF_APP_SHM_ID
build_nvmf_app_args
# Set to 1 by detect_nics_and_probe_drivers() when a physical RDMA NIC is found.
have_pci_nics=0
# Thin wrapper around the soft-RoCE (rxe) configuration helper script;
# forwards all arguments (e.g. start/stop/status) unchanged.
function rxe_cfg() {
	"$rootdir/scripts/rxe_cfg_small.sh" "$@"
}
# Load the kernel InfiniBand/RDMA module stack required by the rdma transport.
# No-op on non-Linux systems.
function load_ib_rdma_modules() {
	# Quote the command substitution so the test is safe under word-splitting.
	if [ "$(uname)" != Linux ]; then
		return 0
	fi
	modprobe ib_cm
	modprobe ib_core
	# Newer kernels do not have the ib_ucm module
	modprobe ib_ucm || true
	modprobe ib_umad
	modprobe ib_uverbs
	modprobe iw_cm
	modprobe rdma_cm
	modprobe rdma_ucm
}
# Configure a soft-RoCE (rxe) device so RDMA tests can run on plain Ethernet
# hardware when no physical RDMA NIC is present.
function detect_soft_roce_nics() {
	rxe_cfg start
}
# args 1 and 2 represent the grep filters for finding our NICS.
# subsequent args are all drivers that should be loaded if we find these NICs.
# Those drivers should be supplied in the correct order.
function detect_nics_and_probe_drivers() {
	NIC_VENDOR="$1"
	NIC_CLASS="$2"
	# Collect the PCI BDFs (prefixed with the 0000 domain) of matching
	# Ethernet devices.
	nvmf_nic_bdfs=$(lspci | grep Ethernet | grep "$NIC_VENDOR" | grep "$NIC_CLASS" | awk -F ' ' '{print "0000:"$1}')
	# No matching hardware - nothing to probe.
	[ -n "$nvmf_nic_bdfs" ] || return 0
	have_pci_nics=1
	if [ $# -ge 2 ]; then
		# Drop the two filter arguments; whatever remains is the ordered
		# list of drivers to load for this NIC family.
		shift 2
		local driver
		for driver in "$@"; do
			modprobe "$driver"
		done
	fi
}
# Probe for known RDMA-capable PCI NIC families and load their drivers.
# Sets have_pci_nics=1 (via detect_nics_and_probe_drivers) when any is found.
function detect_pci_nics() {
	# Without lspci we cannot enumerate PCI devices - silently skip.
	hash lspci || return 0
	detect_nics_and_probe_drivers "Mellanox" "ConnectX-4" "mlx4_core" "mlx4_ib" "mlx4_en"
	detect_nics_and_probe_drivers "Mellanox" "ConnectX-5" "mlx5_core" "mlx5_ib"
	detect_nics_and_probe_drivers "Intel" "X722" "i40e" "i40iw"
	detect_nics_and_probe_drivers "Chelsio" "Unified Wire" "cxgb4" "iw_cxgb4"
	[ "$have_pci_nics" -ne "0" ] || return 0
	# Provide time for drivers to properly load.
	sleep 5
}
# Look for physical RDMA NICs first; only if none are found, fall back to a
# soft-RoCE (rxe) device on an existing Ethernet interface.
function detect_rdma_nics() {
	detect_pci_nics
	if [ "$have_pci_nics" -eq "0" ]; then
		detect_soft_roce_nics
	fi
}
# Assign an address from $NVMF_IP_PREFIX.0/24 to every RDMA interface that
# does not already have an IPv4 address, starting at host octet
# $NVMF_IP_LEAST_ADDR, and bring those interfaces up.
function allocate_nic_ips() {
	# Keep the counter and scratch vars out of the caller's namespace.
	local count ip nic_name
	((count = NVMF_IP_LEAST_ADDR))
	for nic_name in $(get_rdma_if_list); do
		ip="$(get_ip_address "$nic_name")"
		# Quoted -z test: the unquoted form collapses when $ip is empty.
		if [ -z "$ip" ]; then
			ip addr add "$NVMF_IP_PREFIX.$count/24" dev "$nic_name"
			ip link set "$nic_name" up
			((count = count + 1))
		fi
		# dump configuration for debug log
		ip addr show "$nic_name"
	done
}
# Print the IPv4 address of each RDMA-capable interface, one per line
# (interfaces without an address contribute an empty line from get_ip_address).
function get_available_rdma_ips() {
	local nic_name
	for nic_name in $(get_rdma_if_list); do
		# Quote the interface name when passing it along.
		get_ip_address "$nic_name"
	done
}
# Print the network interface name backing every InfiniBand device listed
# under /sys/class/infiniband, one per line; prints nothing when no RDMA
# devices exist.
function get_rdma_if_list() {
	local ib_dev net_dev
	for ib_dev in /sys/class/infiniband/*; do
		# An unmatched glob leaves the literal pattern behind - stop then.
		[[ -e "$ib_dev" ]] || break
		for net_dev in "$ib_dev"/device/net/*; do
			[[ -e "$net_dev" ]] || break
			basename "$net_dev"
		done
	done
}
# Print the IPv4 address(es) assigned to the given interface, without the
# prefix length; prints nothing if the interface has no IPv4 address.
# $1 - interface name
function get_ip_address() {
	# local (was global) - avoid leaking scratch state to the caller.
	local interface=$1
	ip -o -4 addr show "$interface" | awk '{print $4}' | cut -d"/" -f1
}
# Unload the kernel nvme initiator modules used during the test. Removal can
# race with in-flight I/O or device teardown, so retry for up to 20 seconds
# (with -e temporarily disabled) before making one final, possibly fatal,
# attempt.
function nvmfcleanup() {
	sync
	set +e
	for i in {1..20}; do
		modprobe -v -r nvme-$TEST_TRANSPORT
		# nvme-fabrics can only unload once its transport users are gone;
		# success here means everything came down cleanly.
		if modprobe -v -r nvme-fabrics; then
			set -e
			return 0
		fi
		sleep 1
	done
	set -e
	# So far unable to remove the kernel modules. Try
	# one more time and let it fail.
	# Allow the transport module to fail for now. See Jim's comment
	# about the nvme-tcp module below.
	modprobe -v -r nvme-$TEST_TRANSPORT || true
	modprobe -v -r nvme-fabrics
}
# Prepare the environment for an nvmf test run: validate $TEST_TRANSPORT,
# perform per-transport setup, and export the target IP(s) and transport
# options used by the tests.
# Returns 1 when no transport was specified; exits 0 when rdma was requested
# but no RDMA NIC (physical or soft-RoCE) is available.
function nvmftestinit() {
	# Quoted test: the unquoted form only worked by accident when unset.
	if [ -z "$TEST_TRANSPORT" ]; then
		echo "transport not specified - use --transport= to specify"
		return 1
	fi
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh
		if [ "$TEST_TRANSPORT" == "rdma" ]; then
			rdma_device_init
		fi
	fi
	NVMF_TRANSPORT_OPTS="-t $TEST_TRANSPORT"
	if [ "$TEST_TRANSPORT" == "rdma" ]; then
		# First two RDMA interface addresses become the target IPs.
		RDMA_IP_LIST=$(get_available_rdma_ips)
		NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
		NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | tail -n +2 | head -n 1)
		if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
			echo "no NIC for nvmf test"
			exit 0
		fi
	elif [ "$TEST_TRANSPORT" == "tcp" ]; then
		NVMF_FIRST_TARGET_IP=127.0.0.1
		# tcp gets an extra -o transport option appended.
		NVMF_TRANSPORT_OPTS="$NVMF_TRANSPORT_OPTS -o"
	fi
	# currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
	# support; that's fine since the host/perf test uses the SPDK initiator
	# maybe later we will enforce modprobe to succeed once we have systems in the test pool
	# with nvme-tcp kernel support - but until then let this pass so we can still run the
	# host/perf test with the tcp transport
	modprobe "nvme-$TEST_TRANSPORT" || true
}
# Launch the nvmf target application in the background, forwarding any extra
# arguments, and wait (via waitforlisten) until it is ready. Installs a trap
# that dumps shared memory and tears everything down on interrupt/exit.
# Sets the global $nvmfpid to the target's PID.
function nvmfappstart() {
	timing_enter start_nvmf_tgt
	"${NVMF_APP[@]}" "$@" &
	nvmfpid=$!
	trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $nvmfpid
	timing_exit start_nvmf_tgt
}
# Tear down after an nvmf test: unload initiator modules (best effort, hence
# the "|| :"), kill the target app if one was started, and in "iso" mode
# reset the system setup (re-running rdma_device_init so RDMA NICs stay
# usable afterwards).
function nvmftestfini() {
	nvmfcleanup || :
	if [ -n "$nvmfpid" ]; then
		killprocess $nvmfpid
	fi
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
		if [ "$TEST_TRANSPORT" == "rdma" ]; then
			rdma_device_init
		fi
	fi
}
# Full RDMA bring-up: load kernel modules, detect (or emulate via soft-RoCE)
# RDMA NICs, then give each interface an IP address.
function rdma_device_init() {
	load_ib_rdma_modules
	detect_rdma_nics
	allocate_nic_ips
}
# Remove any soft-RoCE (rxe) devices configured by detect_soft_roce_nics().
function revert_soft_roce() {
	rxe_cfg stop
}
# Succeed (exit 0) if the given IP address ($1) appears in the rxe status
# output, i.e. it belongs to a soft-RoCE device rather than a physical NIC.
function check_ip_is_soft_roce() {
	rxe_cfg status rxe | grep -wq "$1"
}
# Connect to an nvmf subsystem with the kernel initiator (all arguments are
# forwarded to 'nvme connect') and wait up to 10 seconds for the new
# controller to show up in 'nvme list'.
# Returns: nvme connect's status on immediate failure, 1 on timeout, 0 on success.
function nvme_connect() {
	local init_count i
	init_count=$(nvme list | wc -l)
	if ! nvme connect "$@"; then return $?; fi
	# Device creation is asynchronous; poll until the device count grows.
	# Brace expansion instead of spawning seq(1).
	for i in {1..10}; do
		if [ "$(nvme list | wc -l)" -gt "$init_count" ]; then
			return 0
		else
			sleep 1s
		fi
	done
	return 1
}
# Collect all /dev/nvme* device nodes reported by 'nvme list' into the global
# nvmes array. With "print" as $1, echo every 'nvme list' line to stdout as
# it is read. Writes the device count to stderr.
# Returns 1 when no nvme devices are present.
function get_nvme_devs() {
	local dev rest
	nvmes=()
	while read -r dev rest; do
		# First column is the device node on data rows.
		case "$dev" in
			/dev/nvme*) nvmes+=("$dev") ;;
		esac
		if [[ $1 == print ]]; then
			echo "$dev $rest"
		fi
	done < <(nvme list)
	((${#nvmes[@]})) || return 1
	echo "${#nvmes[@]}" >&2
}
# Emit (via jq, on stdout) a bdev-subsystem JSON configuration containing one
# bdev_nvme_attach_controller entry per subsystem id passed as an argument;
# with no arguments a single subsystem "1" is emitted. Relies on
# $TEST_TRANSPORT, $NVMF_FIRST_TARGET_IP and $NVMF_PORT set by nvmftestinit().
# NOTE: the <<- heredocs strip leading *tabs* only - keep tab indentation.
function gen_nvmf_target_json() {
	local subsystem config=()
	for subsystem in "${@:-1}"; do
		# One attach-controller snippet per requested subsystem id.
		config+=(
			"$(
				cat <<- EOF
					{
					  "params": {
					    "name": "Nvme$subsystem",
					    "trtype": "$TEST_TRANSPORT",
					    "traddr": "$NVMF_FIRST_TARGET_IP",
					    "adrfam": "ipv4",
					    "trsvcid": "$NVMF_PORT",
					    "subnqn": "nqn.2016-06.io.spdk:cnode$subsystem"
					  },
					  "method": "bdev_nvme_attach_controller"
					}
				EOF
			)"
		)
	done
	# Join the snippets with commas (IFS applies to "${config[*]}") and let
	# jq validate and pretty-print the final document.
	jq . <<- JSON
		{
		  "subsystems": [
		    {
		      "subsystem": "bdev",
		      "config": [
		        $(
					IFS=","
					printf '%s\n' "${config[*]}"
				)
		      ]
		    }
		  ]
		}
	JSON
}
|