1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
|
import contextlib
import logging
import json
from io import StringIO
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
    """
    Expose each remote's scratch devices as NVMe-over-loop devices.

    For every non-container remote in the cluster this task:

    1. Loads the ``nvme_loop`` kernel module (if not already loaded) and
       creates the nvmet configfs host/port entries with a ``loop``
       transport type.
    2. For each scratch device, creates an nvmet subsystem backed by that
       device (allowing any host), links it into the loop port, and runs
       ``nvme connect`` against it.
    3. Polls ``nvme list -o json`` until one loop-backed NVMe device has
       appeared per scratch device, then overwrites ``/scratch_devs`` on
       the remote with the new ``/dev/nvme*`` paths so later tasks use
       the loop devices instead of the raw disks.

    On teardown (after the yield) it disconnects every loop device and
    restores the original ``/scratch_devs`` contents.

    :param ctx: teuthology run context; ``ctx.cluster.remotes`` is iterated.
    :param config: task configuration (unused by this task).
    """
    log.info('Setting up nvme_loop on scratch devices...')
    # Host NQN passed to `nvme connect -q`; the subsystems are configured
    # with attr_allow_any_host=1 below, so the value only needs to be a
    # well-formed name, not a registered one.
    host = 'hostnqn'
    port = '1'
    devs_by_remote = {}
    # Saved pre-existing /scratch_devs contents, restored on teardown.
    old_scratch_by_remote = {}
    for remote, roles in ctx.cluster.remotes.items():
        if remote.is_container:
            # Containers don't get scratch devices / kernel modules.
            continue
        devs = teuthology.get_scratch_devices(remote)
        devs_by_remote[remote] = devs
        base = '/sys/kernel/config/nvmet'
        # Load nvme_loop only if missing (the grep short-circuits the
        # modprobe), then create the nvmet host and loop-transport port.
        remote.run(
            args=[
                'grep', '^nvme_loop', '/proc/modules', run.Raw('||'),
                'sudo', 'modprobe', 'nvme_loop',
                run.Raw('&&'),
                'sudo', 'mkdir', '-p', f'{base}/hosts/{host}',
                run.Raw('&&'),
                'sudo', 'mkdir', '-p', f'{base}/ports/{port}',
                run.Raw('&&'),
                'echo', 'loop', run.Raw('|'),
                'sudo', 'tee', f'{base}/ports/{port}/addr_trtype',
            ]
        )
        for dev in devs:
            # Subsystem NQN is just the device basename, e.g. 'sdb'.
            short = dev.split('/')[-1]
            log.info(f'Connecting nvme_loop {remote.shortname}:{dev}...')
            # Create a subsystem for this device, back namespace 1 with
            # the raw device path, enable it, expose it through the loop
            # port, and connect to it from the same host.
            remote.run(
                args=[
                    'sudo', 'mkdir', '-p', f'{base}/subsystems/{short}',
                    run.Raw('&&'),
                    'echo', '1', run.Raw('|'),
                    'sudo', 'tee', f'{base}/subsystems/{short}/attr_allow_any_host',
                    run.Raw('&&'),
                    'sudo', 'mkdir', '-p', f'{base}/subsystems/{short}/namespaces/1',
                    run.Raw('&&'),
                    # -n suppresses the trailing newline so device_path
                    # receives exactly the path string.
                    'echo', '-n', dev, run.Raw('|'),
                    'sudo', 'tee', f'{base}/subsystems/{short}/namespaces/1/device_path',
                    run.Raw('&&'),
                    'echo', '1', run.Raw('|'),
                    'sudo', 'tee', f'{base}/subsystems/{short}/namespaces/1/enable',
                    run.Raw('&&'),
                    'sudo', 'ln', '-s', f'{base}/subsystems/{short}',
                    f'{base}/ports/{port}/subsystems/{short}',
                    run.Raw('&&'),
                    'sudo', 'nvme', 'connect', '-t', 'loop', '-n', short, '-q', host,
                ]
            )

        # identify nvme_loops devices
        old_scratch_by_remote[remote] = remote.read_file('/scratch_devs')

        # Poll until every connected loop device shows up in `nvme list`
        # (up to 15 tries, 1s apart); safe_while raises if it never does.
        with contextutil.safe_while(sleep=1, tries=15) as proceed:
            while proceed():
                p = remote.run(args=['sudo', 'nvme', 'list', '-o', 'json'], stdout=StringIO())
                new_devs = []
                # `nvme list -o json` will return the following output:
                '''{
            "Devices" : [
              {
                "DevicePath" : "/dev/nvme0n1",
                "Firmware" : "8DV101H0",
                "Index" : 0,
                "ModelNumber" : "INTEL SSDPEDMD400G4",
                "ProductName" : "Unknown Device",
                "SerialNumber" : "PHFT620400WB400BGN"
              },
              {
                "DevicePath" : "/dev/nvme1n1",
                "Firmware" : "5.15.0-1",
                "Index" : 1,
                "ModelNumber" : "Linux",
                "ProductName" : "Unknown Device",
                "SerialNumber" : "7672ce414766ba44a8e5"
              }
            ]
            }'''
                nvme_list = json.loads(p.stdout.getvalue())
                for device in nvme_list['Devices']:
                    dev = device['DevicePath']
                    vendor = device['ModelNumber']
                    # Loop-backed devices report ModelNumber "Linux" (see
                    # sample output above); physical NVMe drives report
                    # the vendor's model string and are skipped.
                    if dev.startswith('/dev/') and vendor == 'Linux':
                        new_devs.append(dev)
                log.info(f'new_devs {new_devs}')
                assert len(new_devs) <= len(devs)
                if len(new_devs) == len(devs):
                    break

        # Publish the loop device paths; new_devs holds the result of the
        # final (successful) poll iteration.
        remote.write_file(
            path='/scratch_devs',
            data='\n'.join(new_devs) + '\n',
            sudo=True
        )

    try:
        yield

    finally:
        # Teardown: disconnect every loop device (best-effort; failures
        # are ignored via check_status=False) and restore the original
        # /scratch_devs contents.
        for remote, devs in devs_by_remote.items():
            if remote.is_container:
                continue
            for dev in devs:
                short = dev.split('/')[-1]
                log.info(f'Disconnecting nvme_loop {remote.shortname}:{dev}...')
                remote.run(
                    args=[
                        'sudo', 'nvme', 'disconnect', '-n', short
                    ],
                    check_status=False,
                )
            remote.write_file(
                path='/scratch_devs',
                data=old_scratch_by_remote[remote],
                sudo=True
            )
|