From 389020e14594e4894e28d1eb9103c210b142509e Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 23 May 2024 18:45:13 +0200 Subject: Adding upstream version 18.2.3. Signed-off-by: Daniel Baumann --- .../ci/cluster-specs/cluster-on-pvc-minikube.yaml | 198 +++++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 src/pybind/mgr/rook/ci/cluster-specs/cluster-on-pvc-minikube.yaml (limited to 'src/pybind/mgr/rook/ci/cluster-specs/cluster-on-pvc-minikube.yaml') diff --git a/src/pybind/mgr/rook/ci/cluster-specs/cluster-on-pvc-minikube.yaml b/src/pybind/mgr/rook/ci/cluster-specs/cluster-on-pvc-minikube.yaml new file mode 100644 index 000000000..2732286ab --- /dev/null +++ b/src/pybind/mgr/rook/ci/cluster-specs/cluster-on-pvc-minikube.yaml @@ -0,0 +1,198 @@ +################################################################################################################# +# Define the settings for the rook-ceph cluster with settings for a minikube cluster with a single node + +# This example expects a single node minikube cluster with three extra disks: vdb, vdc and vdd. Please modify +# it according to your environment. See the documentation for more details on storage settings available. + +# For example, to create the cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster-on-pvc-minikube.yaml +################################################################################################################# +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +--- +kind: PersistentVolume +apiVersion: v1 +metadata: + name: local0-0 +spec: + storageClassName: local-storage + capacity: + storage: 10Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + # PV for mon must be a filesystem volume. 
+ volumeMode: Filesystem
  local:
    # To use dm devices like logical volume, please replace `/dev/vdb` with their device names like `/dev/vg-name/lv-name`.
    path: /dev/vdb
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-1
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a raw block volume.
  volumeMode: Block
  local:
    # To use dm devices like logical volume, please replace `/dev/vdc` with their device names like `/dev/vg-name/lv-name`.
    path: /dev/vdc
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-2
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a raw block volume.
  volumeMode: Block
  local:
    # To use dm devices like logical volume, please replace `/dev/vdd` with their device names like `/dev/vg-name/lv-name`.
    path: /dev/vdd
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - minikube
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: local0-3
spec:
  storageClassName: local-storage
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a raw block volume.
  volumeMode: Block
  local:
    # To use dm devices like logical volume, please replace `/dev/vde` with their device names like `/dev/vg-name/lv-name`. 
+ path: /dev/vde + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - minikube +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: my-cluster + namespace: rook-ceph # namespace:cluster +spec: + dataDirHostPath: /var/lib/rook + mon: + count: 1 + allowMultiplePerNode: true + volumeClaimTemplate: + spec: + storageClassName: local-storage + resources: + requests: + storage: 10Gi + mgr: + count: 1 + modules: + - name: pg_autoscaler + enabled: true + dashboard: + enabled: true + ssl: false + crashCollector: + disable: false + cephVersion: + image: quay.io/ceph/daemon-base:latest-main + allowUnsupported: true + skipUpgradeChecks: false + continueUpgradeAfterChecksEvenIfNotHealthy: false + storage: + storageClassDeviceSets: + - name: set1 + count: 3 + portable: false + tuneDeviceClass: true + tuneFastDeviceClass: false + encrypted: false + placement: + preparePlacement: + volumeClaimTemplates: + - metadata: + name: data + # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph + # annotations: + # crushDeviceClass: hybrid + spec: + resources: + requests: + storage: 20Gi + # IMPORTANT: Change the storage class depending on your environment + storageClassName: local-storage + volumeMode: Block + accessModes: + - ReadWriteOnce + # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement + onlyApplyOSDPlacement: false + priorityClassNames: + mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 + pgHealthCheckTimeout: 0 + cephConfig: + global: + mon_warn_on_pool_no_redundancy: "false" +--- +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: builtin-mgr + namespace: rook-ceph # namespace:cluster +spec: + name: .mgr + failureDomain: osd + replicated: + size: 1 + 
requireSafeReplicaSize: false -- cgit v1.2.3