summaryrefslogtreecommitdiffstats
path: root/src/spdk/test/vhost/lvol/lvol_test.sh
blob: 5190b5f280747b3365c1fc09aa93df1ee3226d00 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
#!/usr/bin/env bash
# Automated SPDK vhost lvol test: builds lvol stores/bdevs on NVMe drives,
# exposes them to QEMU VMs via vhost-scsi or vhost-blk, runs FIO integrity
# traffic inside the guests, then tears everything down.
set -e

# Repo root is three levels up from this script's directory
rootdir=$(readlink -f $(dirname $0))/../../..
source "$rootdir/scripts/common.sh"

LVOL_TEST_DIR=$(readlink -f $(dirname $0))
# Allow callers (CI) to pre-set TEST_DIR/COMMON_DIR; otherwise derive them
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $LVOL_TEST_DIR/../../../../ && pwd)"
[[ -z "$COMMON_DIR" ]] && COMMON_DIR="$(cd $LVOL_TEST_DIR/../common && pwd)"

# Shared vhost test helpers: notice, fail, vm_setup, vm_run, spdk_vhost_*, ...
. $COMMON_DIR/common.sh
# All RPCs go through the vhost app's unix socket
rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"

# Defaults; overridden by the option parser below
vm_count=1
max_disks=""
ctrl_type="spdk_vhost_scsi"
use_fs=false  # NOTE(review): not referenced anywhere in this script — possibly vestigial
nested_lvol=false
distribute_cores=false

# Print usage information and terminate the script.
#   $1 - path to this script (used in the "Usage:" line)
#   $2 - optional error message; when present it is printed first and the
#        script exits non-zero so callers/CI can detect a bad invocation.
#        (Previously every path exited 0, masking argument errors.)
function usage()
{
    if [[ -n $2 ]]; then
        echo "$2"
        echo ""
    fi
    echo "Shortcut script for doing automated test"
    echo "Usage: $(basename $1) [OPTIONS]"
    echo
    echo "-h, --help                Print help and exit"
    echo "    --fio-bin=PATH        Path to FIO binary.;"
    echo "    --vm-count=INT        Virtual machines to use in test;"
    echo "                          Each VM will get one lvol bdev on each NVMe."
    echo "                          Default: 1"
    echo "    --max-disks=INT       Maximum number of NVMe drives to use in test."
    echo "                          Default: will use all available NVMes."
    echo "    --ctrl-type=TYPE      Controller type to use for test:"
    echo "                          spdk_vhost_scsi - use spdk vhost scsi"
    echo "                          spdk_vhost_blk - use spdk vhost block"
    echo "    --nested-lvol         If enabled will create additional lvol bdev"
    echo "                          on each NVMe for use as base device for next"
    echo "                          lvol store and lvol bdevs."
    echo "                          (NVMe->lvol_store->lvol_bdev->lvol_store->lvol_bdev)"
    echo "                          Default: False"
    echo "    --thin-provisioning   Create lvol bdevs thin provisioned instead of"
    echo "                          allocating space up front"
    echo "    --distribute-cores    Use custom config file and run vhost controllers"
    echo "                          on different CPU cores instead of single core."
    echo "                          Default: False"
    echo "-x                        set -x for script debug"
    echo "    --multi-os            Run tests on different os types in VMs"
    echo "                          Default: False"
    # Exit 1 when called with an error message, 0 for plain --help
    if [[ -n $2 ]]; then
        exit 1
    fi
    exit 0
}

# Delete every lvol object created by this test, innermost objects first:
# nested lvol bdevs -> nested lvol stores -> base lvol bdevs -> base stores.
# Reads the global bookkeeping arrays populated during set-up; also invoked
# from the ERR trap so a partially-built configuration is torn down too.
function clean_lvol_cfg()
{
    local i

    notice "Removing nested lvol bdevs"
    for (( i = 0; i < ${#nest_lvol_bdevs[@]}; i++ )); do
        $rpc_py destroy_lvol_bdev ${nest_lvol_bdevs[$i]}
        notice "nested lvol bdev ${nest_lvol_bdevs[$i]} removed"
    done

    notice "Removing nested lvol stores"
    for (( i = 0; i < ${#nest_lvol_stores[@]}; i++ )); do
        $rpc_py destroy_lvol_store -u ${nest_lvol_stores[$i]}
        notice "nested lvol store ${nest_lvol_stores[$i]} removed"
    done

    notice "Removing lvol bdevs"
    for (( i = 0; i < ${#lvol_bdevs[@]}; i++ )); do
        $rpc_py destroy_lvol_bdev ${lvol_bdevs[$i]}
        notice "lvol bdev ${lvol_bdevs[$i]} removed"
    done

    notice "Removing lvol stores"
    for (( i = 0; i < ${#lvol_stores[@]}; i++ )); do
        $rpc_py destroy_lvol_store -u ${lvol_stores[$i]}
        notice "lvol store ${lvol_stores[$i]} removed"
    done
}

# Parse the command line. Long options are smuggled through getopts via the
# '-:' optchar trick: "--foo=bar" arrives as optchar='-' with OPTARG="foo=bar".
while getopts 'xh-:' optchar; do
    case "$optchar" in
        h)
            usage $0
            ;;
        x)
            set -x
            x="-x"
            ;;
        -)
            # Long-option dispatch; value options strip everything up to '='
            case "$OPTARG" in
                help)
                    usage $0
                    ;;
                fio-bin=*)
                    fio_bin="--fio-bin=${OPTARG#*=}"
                    ;;
                vm-count=*)
                    vm_count="${OPTARG#*=}"
                    ;;
                max-disks=*)
                    max_disks="${OPTARG#*=}"
                    ;;
                ctrl-type=*)
                    ctrl_type="${OPTARG#*=}"
                    ;;
                nested-lvol)
                    nested_lvol=true
                    ;;
                distribute-cores)
                    distribute_cores=true
                    ;;
                thin-provisioning)
                    thin=" -t "
                    ;;
                multi-os)
                    multi_os=true
                    ;;
                *)
                    usage $0 "Invalid argument '$OPTARG'"
                    ;;
            esac
            ;;
        *)
            usage $0 "Invalid argument '$OPTARG'"
            ;;
    esac
done

notice "Get NVMe disks:"
# PCI class/subclass/prog-if 01/08/02 selects NVMe controllers
nvmes=($(iter_pci_class_code 01 08 02))

# Default to using every NVMe found when --max-disks was not given
if [[ -z $max_disks ]]; then
    max_disks=${#nvmes[@]}
fi

if [[ ${#nvmes[@]} -lt max_disks ]]; then
    fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
fi

if $distribute_cores; then
    # FIXME: this need to be handled entirely in common.sh
    source $LVOL_TEST_DIR/autotest.config
fi

# Nothing created yet, so the error path only needs error_exit for now
trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR

vm_kill_all

notice "running SPDK vhost"
spdk_vhost_run
notice "..."

# From here on, also tear down any lvol objects on error
trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR

# Bookkeeping consumed by clean_lvol_cfg during cleanup
lvol_stores=()
lvol_bdevs=()
nest_lvol_stores=()
nest_lvol_bdevs=()
used_vms=""

# On each NVMe create one lvol store, then one lvol bdev per VM (and,
# with --nested-lvol, a second layer: lvol bdev -> nested store -> bdevs)
for (( i=0; i<$max_disks; i++ ));do

    # Create base lvol store on NVMe
    notice "Creating lvol store on device Nvme${i}n1"
    ls_guid=$($rpc_py construct_lvol_store Nvme${i}n1 lvs_$i -c 4194304)
    lvol_stores+=("$ls_guid")

    if $nested_lvol; then
        # Size the nesting bdev as one of (vm_count+1) equal shares so the
        # base store still has room for one bdev per VM afterwards
        free_mb=$(get_lvs_free_mb "$ls_guid")
        size=$((free_mb / (vm_count+1) ))

        notice "Creating lvol bdev on lvol store: $ls_guid"
        lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_nest $size $thin)

        notice "Creating nested lvol store on lvol bdev: $lb_name"
        nest_ls_guid=$($rpc_py construct_lvol_store $lb_name lvs_n_$i -c 4194304)
        nest_lvol_stores+=("$nest_ls_guid")

        for (( j=0; j<$vm_count; j++)); do
            notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
            # Re-read free space each pass and split what remains evenly
            # among the (vm_count-j) VMs that still need a bdev
            free_mb=$(get_lvs_free_mb "$nest_ls_guid")
            nest_size=$((free_mb / (vm_count-j) ))
            lb_name=$($rpc_py construct_lvol_bdev -u $nest_ls_guid lbd_vm_$j $nest_size $thin)
            nest_lvol_bdevs+=("$lb_name")
        done
    fi

    # Create base lvol bdevs
    for (( j=0; j<$vm_count; j++)); do
        notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
        free_mb=$(get_lvs_free_mb "$ls_guid")
        size=$((free_mb / (vm_count-j) ))
        lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_vm_$j $size $thin)
        lvol_bdevs+=("$lb_name")
    done
done

# Snapshot the bdev list once; the VM set-up loop filters it per VM with jq
bdev_info=$($rpc_py get_bdevs)
notice "Configuration after initial set-up:"
$rpc_py get_lvol_stores
echo "$bdev_info"

# Set up VMs: pick this VM's lvol bdevs out of bdev_info, build the vm_setup
# command line, and create the matching vhost controller(s)
for (( i=0; i<$vm_count; i++)); do
    vm="vm_$i"

    # Get all lvol bdevs associated with this VM number.
    # Match on the alias suffix ("..._vm_$i") instead of a plain substring:
    # contains("vm_1") would also match "vm_10", "vm_11", ... as soon as
    # more than ten VMs are requested, handing VM 1 other VMs' bdevs.
    bdevs=$(jq -r "map(select(.aliases[] | endswith(\"_$vm\")) | \
            .aliases[]) | join(\" \")" <<< "$bdev_info")
    bdevs=($bdevs)

    setup_cmd="vm_setup --disk-type=$ctrl_type --force=$i"
    # With --multi-os, odd-numbered VMs boot the CentOS image
    if [[ $i%2 -ne 0 ]] && [[ $multi_os ]]; then
        setup_cmd+=" --os=/home/sys_sgsw/spdk_vhost_CentOS_vm_image.qcow2"
    else
        setup_cmd+=" --os=/home/sys_sgsw/vhost_vm_image.qcow2"
    fi

    # Create single SCSI controller or multiple BLK controllers for this VM.
    # When distributing cores, pin the controller to this VM's CPU mask
    # (VM_<i>_qemu_mask comes from autotest.config).
    if $distribute_cores; then
        mask="VM_${i}_qemu_mask"
        mask_arg="--cpumask ${!mask}"
    fi

    if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
        # One controller, one LUN per bdev
        $rpc_py construct_vhost_scsi_controller naa.0.$i $mask_arg
        for (( j=0; j<${#bdevs[@]}; j++)); do
            $rpc_py add_vhost_scsi_lun naa.0.$i $j ${bdevs[$j]}
        done
        setup_cmd+=" --disks=0"
    elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
        # One controller per bdev; collect indexes into a ':'-separated list
        disk=""
        for (( j=0; j<${#bdevs[@]}; j++)); do
            $rpc_py construct_vhost_blk_controller naa.$j.$i ${bdevs[$j]} $mask_arg
            disk+="${j}:"
        done
        disk="${disk::-1}"   # drop trailing ':'
        setup_cmd+=" --disks=$disk"
    fi

    $setup_cmd
    used_vms+=" $i"
done

$rpc_py get_vhost_controllers

# Run VMs
vm_run $used_vms
vm_wait_for_boot 600 $used_vms

# Get disk names from VMs and run FIO traffic

fio_disks=""
for vm_num in $used_vms; do
    vm_dir=$VM_BASE_DIR/$vm_num
    qemu_mask_param="VM_${vm_num}_qemu_mask"

    # Hostname encodes the VM number and CPU mask for easier log correlation
    host_name="VM-$vm_num-${!qemu_mask_param}"
    vm_ssh $vm_num "hostname $host_name"
    vm_start_fio_server $fio_bin $vm_num

    # Discover guest-side device names.
    # NOTE(review): $SCSI_DISK is read below for both controller types —
    # assumes vm_check_blk_location also populates it; confirm in common.sh
    if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
        vm_check_scsi_location $vm_num
    elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
        vm_check_blk_location $vm_num
    fi

    # printf repeats its format once per word in $SCSI_DISK,
    # yielding one ":/dev/<name>" per discovered disk
    fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done

# Nightly CI runs the longer integrity job
if [[ $RUN_NIGHTLY -eq 1 ]]; then
    job_file="default_integrity_nightly.job"
else
    job_file="default_integrity.job"
fi
# Run FIO traffic
run_fio $fio_bin --job-file=$COMMON_DIR/fio_jobs/$job_file --out="$TEST_DIR/fio_results" $fio_disks

notice "Shutting down virtual machines..."
vm_shutdown_all
sleep 2

notice "Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
# NOTE(review): ${#bdevs[@]} below is left over from the last iteration of the
# VM set-up loop — this assumes every VM received the same number of bdevs
# (true here: one per NVMe, doubled when --nested-lvol is used); verify if the
# per-VM allocation scheme ever changes
if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
    for (( i=0; i<$vm_count; i++)); do
        notice "Removing devices from vhost SCSI controller naa.0.$i"
        for (( j=0; j<${#bdevs[@]}; j++)); do
            $rpc_py remove_vhost_scsi_target naa.0.$i $j
            notice "Removed device $j"
        done
        notice "Removing vhost SCSI controller naa.0.$i"
        $rpc_py remove_vhost_controller naa.0.$i
    done
elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
    for (( i=0; i<$vm_count; i++)); do
        for (( j=0; j<${#bdevs[@]}; j++)); do
            notice "Removing vhost BLK controller naa.$j.$i"
            $rpc_py remove_vhost_controller naa.$j.$i
            notice "Removed naa.$j.$i"
        done
    done
fi

clean_lvol_cfg

# Dump final state — all three listings should now be empty
$rpc_py get_lvol_stores
$rpc_py get_bdevs
$rpc_py get_vhost_controllers

notice "Shutting down SPDK vhost app..."
spdk_vhost_kill