diff --git a/apps-kustomized/rook-cluster/blockpool-ssd.yaml b/apps-kustomized/rook-cluster/blockpool-ssd.yaml
new file mode 100644
index 0000000..e218f32
--- /dev/null
+++ b/apps-kustomized/rook-cluster/blockpool-ssd.yaml
@@ -0,0 +1,9 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: replicapool-ssd
+  namespace: rook-ceph
+spec:
+  failureDomain: host
+  replicated:
+    size: 1
diff --git a/apps-kustomized/rook-cluster/cluster-ssd.yaml b/apps-kustomized/rook-cluster/cluster-ssd.yaml
new file mode 100644
index 0000000..8cd96f0
--- /dev/null
+++ b/apps-kustomized/rook-cluster/cluster-ssd.yaml
@@ -0,0 +1,67 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-config-override
+  namespace: rook-ceph # namespace:cluster
+data:
+  config: |
+    [global]
+    osd_pool_default_size = 1
+    mon_warn_on_pool_no_redundancy = false
+    bdev_flock_retry = 20
+    bluefs_buffered_io = false
+    mon_data_avail_warn = 10
+---
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: ssd-cluster
+  namespace: rook-ceph # namespace:cluster
+spec:
+  dataDirHostPath: /var/lib/rook
+  cephVersion:
+    image: quay.io/ceph/ceph:v18
+    allowUnsupported: true
+  mon:
+    count: 1
+    allowMultiplePerNode: true
+  mgr:
+    count: 1
+    allowMultiplePerNode: true
+  dashboard:
+    enabled: true
+  crashCollector:
+    disable: true
+  storage:
+    # useAllNodes must be false when nodes are listed explicitly below
+    useAllNodes: false
+    useAllDevices: false
+    #deviceFilter:
+    nodes:
+      - name: "talos-iqd-ysy"
+        devices:
+          - name: "sda"
+            config:
+              osdsPerDevice: "1"
+  monitoring:
+    enabled: false
+  healthCheck:
+    daemonHealth:
+      mon:
+        interval: 45s
+        timeout: 600s
+  priorityClassNames:
+    all: system-node-critical
+    mgr: system-cluster-critical
+  disruptionManagement:
+    managePodBudgets: true
+---
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: builtin-mgr
+  namespace: rook-ceph # namespace:cluster
+spec:
+  name: .mgr
+  replicated:
+    size: 1
+    requireSafeReplicaSize: false
diff --git a/apps-kustomized/rook-cluster/storageclass-ssd-fs.yaml b/apps-kustomized/rook-cluster/storageclass-ssd-fs.yaml
new file mode 100644
index 0000000..6c3c7ca
--- /dev/null
+++ b/apps-kustomized/rook-cluster/storageclass-ssd-fs.yaml
@@ -0,0 +1,22 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-cephfs-ssd
+# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+provisioner: rook-ceph.cephfs.csi.ceph.com
+parameters:
+  # clusterID is the namespace where the rook cluster is running
+  clusterID: ssd-cluster
+  fsName: ssd-cephfs
+
+  # Ceph pool into which the volume shall be created
+  pool: replicapool-ssd
+
+  # The secrets contain Ceph admin credentials.
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-data
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-data
+
+# Delete the volume when a PVC is deleted
+reclaimPolicy: Delete
diff --git a/apps-kustomized/rook-cluster/storageclass-ssd.yaml b/apps-kustomized/rook-cluster/storageclass-ssd.yaml
new file mode 100644
index 0000000..6379691
--- /dev/null
+++ b/apps-kustomized/rook-cluster/storageclass-ssd.yaml
@@ -0,0 +1,30 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-block-ssd
+# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  # clusterID is the namespace where the rook cluster is running
+  clusterID: ssd-cluster
+  # Ceph pool into which the RBD image shall be created
+  pool: replicapool-ssd
+
+  # RBD image format. Defaults to "2".
+  imageFormat: "2"
+
+  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+  imageFeatures: layering
+
+  # The secrets contain Ceph admin credentials.
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-data
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-data
+
+  # Specify the filesystem type of the volume. If not specified, csi-provisioner
+  # will set default as `ext4`.
+  csi.storage.k8s.io/fstype: xfs
+
+# Delete the rbd volume when a PVC is deleted
+reclaimPolicy: Delete
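
For a quick smoke test of the new block class, a minimal PVC sketch like the one below should bind once the OSD on sda is up; the claim name, namespace, and size are illustrative placeholders and not part of this change, only the storageClassName must match the class added above.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ssd-block-test        # hypothetical name, not part of this change
  namespace: default          # placeholder namespace
spec:
  accessModes:
    - ReadWriteOnce           # RBD block volumes are single-node writers
  resources:
    requests:
      storage: 1Gi            # placeholder size
  storageClassName: rook-ceph-block-ssd

With size: 1 replication and a single OSD, `kubectl get pvc ssd-block-test` should report Bound shortly after the claim is created.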