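# Verify that ceph-mon memory consumption stays within the configured
# "mon memory target" (128 MiB here) under a sustained radosbench write
# load with concurrent OSD thrashing. One mon, one mgr, fifteen OSDs and
# one client share a single role group; mon RSS is sampled every 5 minutes
# and checked against the target once the load finishes.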
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - osd.8
  - osd.9
  - osd.10
  - osd.11
  - osd.12
  - osd.13
  - osd.14
  - client.0
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 1 # GB
overrides:
  ceph:
    conf:
      mon:
        mon memory target: 134217728 # reduced to 128 MiB
        rocksdb cache size: 67108864 # reduced to 64 MiB
        mon osd cache size: 100000
        mon osd cache size min: 134217728
      osd:
        osd memory target: 1610612736 # reduced to 1.5 GiB
        osd objectstore: bluestore
        debug bluestore: 20
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 9
tasks:
- install:
    branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate
- ceph:
    create_rbd_pool: false
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    log-ignorelist:
      - overall HEALTH_
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(PG_
      - \(POOL_
      - \(CACHE_POOL_
      - \(OBJECT_
      - \(SLOW_OPS\)
      - \(REQUEST_SLOW\)
      - \(TOO_FEW_PGS\)
      - slow request
- interactive:
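# Run the RSS logger, the OSD thrasher and the benchmark load concurrently;
# the named sub-task lists are defined below.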
- parallel:
    - log-mon-rss
    - stress-tasks
    - benchload
- exec:
    client.0:
      - "ceph_test_mon_memory_target 134217728" # mon memory target
      - "ceph_test_mon_rss_usage 134217728"
log-mon-rss:
- background_exec:
    client.0:
      - while true
      - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log
      - sleep 300 # log rss usage every 5 mins; adjust as needed
      - done
- exec:
    client.0:
      - sleep 37860 # total of the 21 radosbench runs below (21 x 1800 s) plus 60 secs
benchload: # the 21 radosbench runs below total 10.5 hrs
- full_sequential:
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
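# Thrash OSDs while the benchmark load runs: take OSDs down/out, inject
# bluestore bdev crashes, grow pg_num and mutate pg-upmap mappings.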
stress-tasks:
- thrashosds:
    op_delay: 1
    bdev_inject_crash: 1
    bdev_inject_crash_probability: .8
    chance_down: 80
    chance_pgnum_grow: 3
    chance_pgpnum_fix: 1
    chance_thrash_cluster_full: 0
    chance_thrash_pg_upmap: 3
    chance_thrash_pg_upmap_items: 3
    min_in: 2