#################################################################################################################
# Define the settings for a rook-ceph cluster running on a single-node minikube cluster

# This example expects a single-node minikube cluster with four extra disks: vdb, vdc, vdd and vde.
# Please modify it according to your environment. See the documentation for more details on the
# available storage settings.

# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster-on-pvc-minikube.yaml
#################################################################################################################
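
# A minimal sketch of preparing such a node (an assumption, not part of this spec): with the
# kvm2 or qemu2 driver, minikube's --extra-disks flag attaches extra virtual disks to the VM:
#   minikube start --driver=kvm2 --extra-disks=4
# Inside the node the disks typically appear as /dev/vdb through /dev/vde; adjust the device
# paths below if your driver names them differently.
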
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-0
spec:
  storageClassName: local-storage
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for mon must be a filesystem volume.
  volumeMode: Filesystem
  local:
    # To use a dm device such as a logical volume, replace `/dev/vdb` with its device name, e.g. `/dev/vg-name/lv-name`.
    path: /dev/vdb
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-1
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  volumeMode: Block
  local:
    # To use a dm device such as a logical volume, replace `/dev/vdc` with its device name, e.g. `/dev/vg-name/lv-name`.
    path: /dev/vdc
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-2
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  volumeMode: Block
  local:
    # To use a dm device such as a logical volume, replace `/dev/vdd` with its device name, e.g. `/dev/vg-name/lv-name`.
    path: /dev/vdd
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-3
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  volumeMode: Block
  local:
    # To use a dm device such as a logical volume, replace `/dev/vde` with its device name, e.g. `/dev/vg-name/lv-name`.
    path: /dev/vde
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - minikube
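
# Once these manifests are applied, the four PVs above should show as Available until the mon and
# OSD PVCs claim them; with volumeBindingMode WaitForFirstConsumer, binding is deferred until the
# consuming pods are scheduled. A quick sanity check:
#   kubectl get sc local-storage
#   kubectl get pv -o wide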
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: my-cluster
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook
  mon:
    count: 1
    allowMultiplePerNode: true
    volumeClaimTemplate:
      spec:
        storageClassName: local-storage
        resources:
          requests:
            storage: 10Gi
  mgr:
    count: 1
    modules:
      - name: pg_autoscaler
        enabled: true
  dashboard:
    enabled: true
    ssl: false
  crashCollector:
    disable: false
  cephVersion:
    image: quay.io/ceph/daemon-base:latest-main
    allowUnsupported: true
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  storage:
    storageClassDeviceSets:
      - name: set1
        count: 3
        portable: false
        tuneDeviceClass: true
        tuneFastDeviceClass: false
        encrypted: false
        placement:
        preparePlacement:
        volumeClaimTemplates:
          - metadata:
              name: data
              # Uncomment to give your OSD a CRUSH device class other than the one detected by Ceph:
              # annotations:
              #   crushDeviceClass: hybrid
            spec:
              resources:
                requests:
                  storage: 20Gi
              # IMPORTANT: Change the storage class depending on your environment
              storageClassName: local-storage
              volumeMode: Block
              accessModes:
                - ReadWriteOnce
    # When onlyApplyOSDPlacement is false, Rook merges placement.All() with the storageClassDeviceSets placement.
    onlyApplyOSDPlacement: false
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  cephConfig:
    global:
      mon_warn_on_pool_no_redundancy: "false"
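
# The rollout of this cluster (one mon, one mgr, three OSDs) can be followed with standard kubectl,
# shown here as a convenience rather than as part of the spec:
#   kubectl -n rook-ceph get pods -w
#   kubectl -n rook-ceph get cephcluster my-cluster
# The CephCluster resource should eventually report PHASE Ready and HEALTH HEALTH_OK.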
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: builtin-mgr
  namespace: rook-ceph # namespace:cluster
spec:
  name: .mgr
  failureDomain: osd
  replicated:
    size: 1
    requireSafeReplicaSize: false
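
# A replica size of 1 leaves the built-in .mgr pool with no redundancy, which is acceptable only for
# a single-node test cluster; mon_warn_on_pool_no_redundancy is silenced above for the same reason.
# Assuming the Rook toolbox (toolbox.yaml) has also been deployed, overall health can be verified with:
#   kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status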