kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-config-override
  namespace: rook-ceph # namespace:cluster
data:
  config: |
    [global]
    osd_pool_default_size = 1
    mon_warn_on_pool_no_redundancy = false
    bdev_flock_retry = 20
    bluefs_buffered_io = false
    mon_data_avail_warn = 10
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: ssd-cluster
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook
  cephVersion:
    image: quay.io/ceph/ceph:v18
    allowUnsupported: true
  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true
  dashboard:
    enabled: true
  crashCollector:
    disable: true
  storage:
    # The node list below is only honored when useAllNodes is false.
    useAllNodes: false
    useAllDevices: false
    #deviceFilter:
    nodes:
      - name: "talos-iqd-ysy"
        devices:
          - name: "sda"
            config:
              osdsPerDevice: "1"
  monitoring:
    enabled: false
  healthCheck:
    daemonHealth:
      mon:
        interval: 45s
        timeout: 600s
  priorityClassNames:
    all: system-node-critical
    mgr: system-cluster-critical
  disruptionManagement:
    managePodBudgets: true
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: builtin-mgr
  namespace: rook-ceph # namespace:cluster
spec:
  name: .mgr
  replicated:
    size: 1
    requireSafeReplicaSize: false
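---
# Optional: a minimal sketch of an application-facing pool plus StorageClass,
# assuming the operator runs in the default rook-ceph namespace and that the
# CSI provisioner/node secrets referenced below were created by the Rook
# operator. The names "replicapool" and "rook-ceph-block" are illustrative and
# not part of the cluster above. With a single OSD, size 1 and an osd failure
# domain mirror the no-redundancy settings used for the built-in .mgr pool.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph # namespace:cluster
spec:
  failureDomain: osd
  replicated:
    size: 1
    requireSafeReplicaSize: false
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
# The CSI driver name is <operator-namespace>.rbd.csi.ceph.com
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph # namespace:cluster
  pool: replicapool
  imageFormat: "2"
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/fstype: ext4
allowVolumeExpansion: true
reclaimPolicy: Delete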