#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh

dry_run=false
no_shutdown=false
fio_bin=""
remote_fio_bin=""
fio_jobs=""
test_type=spdk_vhost_scsi
reuse_vms=false
vms=()
used_vms=""
x=""
readonly=""

function usage() {
	[[ -n $2 ]] && (
		echo "$2"
		echo ""
	)
	echo "Shortcut script for doing automated test"
	echo "Usage: $(basename $1) [OPTIONS]"
	echo
	echo "-h, --help                print help and exit"
	echo "    --test-type=TYPE      Perform specified test:"
	echo "                          virtio - test host virtio-scsi-pci using file as disk image"
	echo "                          kernel_vhost - use kernel driver vhost-scsi"
	echo "                          spdk_vhost_scsi - use spdk vhost scsi"
	echo "                          spdk_vhost_blk - use spdk vhost block"
	echo "-x                        set -x for script debug"
	echo "    --fio-bin=FIO         Use specific fio binary (will be uploaded to VM)"
	echo "    --fio-job=            Fio config to use for test."
	echo "                          All VMs will run the same fio job when FIO executes."
	echo "                          (no unique jobs for specific VMs)"
	echo "    --dry-run             Don't perform any tests, run only and wait for enter to terminate"
	echo "    --no-shutdown         Don't shutdown at the end but leave envirionment working"
	echo "    --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
	echo "                          NUM - VM number (mandatory)"
	echo "                          OS - VM os disk path (optional)"
	echo "                          DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
	echo "    --readonly            Use readonly for fio"
	exit 0
}
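
# Example invocation (illustrative only; the fio binary, OS image and job file
# paths below are placeholders for whatever the local test environment uses):
#
#   ./fio.sh -x --test-type=spdk_vhost_scsi \
#       --fio-bin=/usr/src/fio/fio \
#       --fio-job=/path/to/default_integrity.job \
#       --vm=0,$HOME/spdk_test_image.qcow2,Nvme0n1p0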

# Default raw file is an NVMe drive

while getopts 'xh-:' optchar; do
	case "$optchar" in
		-)
			case "$OPTARG" in
				help) usage $0 ;;
				fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
				fio-job=*) fio_job="${OPTARG#*=}" ;;
				dry-run) dry_run=true ;;
				no-shutdown) no_shutdown=true ;;
				test-type=*) test_type="${OPTARG#*=}" ;;
				vm=*) vms+=("${OPTARG#*=}") ;;
				readonly) readonly="--readonly" ;;
				*) usage $0 "Invalid argument '$OPTARG'" ;;
			esac
			;;
		h) usage $0 ;;
		x)
			set -x
			x="-x"
			;;
		*) usage $0 "Invalid argument '$OPTARG'" ;;
	esac
done
shift $((OPTIND - 1))

if [[ ! -r "$fio_job" ]]; then
	fail "no fio job file specified"
fi
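
# The file passed via --fio-job is an ordinary fio job file. A minimal sketch of
# what such a file may look like (illustrative only; not the job file used by
# the automated tests):
#
#   [global]
#   ioengine=libaio
#   direct=1
#   blocksize=4k
#   iodepth=128
#   rw=randwrite
#   verify=md5
#
#   [job0]
#   # target filenames for each VM disk are typically appended by run_fio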

vhosttestinit

trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

vm_kill_all

if [[ $test_type =~ "spdk_vhost" ]]; then
	notice "==============="
	notice ""
	notice "running SPDK"
	notice ""
	vhost_run 0
	rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	$rpc_py bdev_split_create Nvme0n1 4
	$rpc_py bdev_malloc_create -b Malloc0 128 4096
	$rpc_py bdev_malloc_create -b Malloc1 128 4096
	$rpc_py bdev_malloc_create -b Malloc2 64 512
	$rpc_py bdev_malloc_create -b Malloc3 64 512
	$rpc_py bdev_malloc_create -b Malloc4 64 512
	$rpc_py bdev_malloc_create -b Malloc5 64 512
	$rpc_py bdev_malloc_create -b Malloc6 64 512
	$rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
	$rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
	$rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
	$rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
	$rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
	$rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
	notice ""
fi

notice "==============="
notice ""
notice "Setting up VM"
notice ""

rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"

for vm_conf in "${vms[@]}"; do
	IFS=',' read -ra conf <<< "$vm_conf"
	if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
		fail "invalid VM configuration syntax $vm_conf"
	fi

	# Sanity check if VM is not defined twice
	for vm_num in $used_vms; do
		if [[ $vm_num -eq ${conf[0]} ]]; then
			fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
		fi
	done

	used_vms+=" ${conf[0]}"

	if [[ $test_type =~ "spdk_vhost" ]]; then

		notice "Adding device via RPC ..."

		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				notice "Create a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
				if [[ $disk == "RaidBdev2" ]]; then
					ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
					free_mb=$(get_lvs_free_mb "$ls_guid")
					based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
				else
					based_disk="$disk"
				fi

				if [[ "$test_type" == "spdk_vhost_blk" ]]; then
					disk=${disk%%_*}
					notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
					$rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
				else
					notice "Creating controller naa.$disk.${conf[0]}"
					$rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}

					notice "Adding device (0) to naa.$disk.${conf[0]}"
					$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
				fi
			done
		done <<< "${conf[2]}"
		unset IFS
		$rpc_py vhost_get_controllers
	fi

	setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
	[[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
	[[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"

	$setup_cmd
done

# Run everything
vm_run $used_vms
vm_wait_for_boot 300 $used_vms

if [[ $test_type == "spdk_vhost_scsi" ]]; then
	for vm_conf in "${vms[@]}"; do
		IFS=',' read -ra conf <<< "$vm_conf"
		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				# For RaidBdev2, the lvol bdev on RaidBdev2 is being used.
				if [[ $disk == "RaidBdev2" ]]; then
					based_disk="lvs_0/lbd_0"
				else
					based_disk="$disk"
				fi
				notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0

				sleep 0.1

				notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
			done
		done <<< "${conf[2]}"
		unset IFS
	done
fi

sleep 0.1

notice "==============="
notice ""
notice "Testing..."

notice "Running fio jobs ..."

# Check if all VMs have their disks in the same location
DISK=""

fio_disks=""
for vm_num in $used_vms; do
	qemu_mask_param="VM_${vm_num}_qemu_mask"

	host_name="VM-$vm_num"
	notice "Setting up hostname: $host_name"
	vm_exec $vm_num "hostname $host_name"
	vm_start_fio_server $fio_bin $readonly $vm_num

	if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
		vm_check_scsi_location $vm_num
		#vm_reset_scsi_devices $vm_num $SCSI_DISK
	elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
		vm_check_blk_location $vm_num
	fi

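	# Each entry below has the form --vm=<num>:/dev/<disk>[:/dev/<disk>...];
	# run_fio uses it to pair every VM with the disks it should exercise.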
	fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done

if $dry_run; then
	read -r -p "Enter to kill evething" xx
	sleep 3
	at_app_exit
	exit 0
fi

run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks

if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
	for vm_num in $used_vms; do
		vm_reset_scsi_devices $vm_num $SCSI_DISK
	done
fi

if ! $no_shutdown; then
	notice "==============="
	notice "APP EXITING"
	notice "killing all VMs"
	vm_shutdown_all
	notice "waiting 2 seconds to let all VMs die"
	sleep 2
	if [[ $test_type =~ "spdk_vhost" ]]; then
		notice "Removing vhost devices & controllers via RPC ..."
		for vm_conf in "${vms[@]}"; do
			IFS=',' read -ra conf <<< "$vm_conf"

			while IFS=':' read -ra disks; do
				for disk in "${disks[@]}"; do
					disk=${disk%%_*}
					notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
					if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
						$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
					fi

					$rpc_py vhost_delete_controller naa.$disk.${conf[0]}
					if [[ $disk == "RaidBdev2" ]]; then
						notice "Removing lvol bdev and lvol store"
						$rpc_py bdev_lvol_delete lvs_0/lbd_0
						$rpc_py bdev_lvol_delete_lvstore -l lvs_0
					fi
				done
			done <<< "${conf[2]}"
		done
	fi
	notice "Testing done -> shutting down"
	notice "killing vhost app"
	vhost_kill 0

	notice "EXIT DONE"
	notice "==============="
else
	notice "==============="
	notice ""
	notice "Leaving environment working!"
	notice ""
	notice "==============="
fi

vhosttestfini