From 2101d69f0d25d9fbd1a25685554b8b20b51f0327 Mon Sep 17 00:00:00 2001
From: Martyn Ranyard
Date: Mon, 27 Nov 2023 22:12:18 +0100
Subject: [PATCH] mirror what is real

Signed-off-by: Martyn Ranyard
---
 .../rook-cluster-ssd/deploy-toolbox.yaml      | 130 ++++++++++++++++++
 apps-kustomized/rook-cluster-ssd/fs.yaml      |  17 +++
 .../rook-cluster-ssd/storageclass-ssd-fs.yaml |   4 +-
 3 files changed, 150 insertions(+), 1 deletion(-)
 create mode 100644 apps-kustomized/rook-cluster-ssd/deploy-toolbox.yaml
 create mode 100644 apps-kustomized/rook-cluster-ssd/fs.yaml

diff --git a/apps-kustomized/rook-cluster-ssd/deploy-toolbox.yaml b/apps-kustomized/rook-cluster-ssd/deploy-toolbox.yaml
new file mode 100644
index 0000000..3a08325
--- /dev/null
+++ b/apps-kustomized/rook-cluster-ssd/deploy-toolbox.yaml
@@ -0,0 +1,130 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: rook-ceph # namespace:cluster
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+        - name: rook-ceph-tools
+          image: quay.io/ceph/ceph:v17.2.6
+          command:
+            - /bin/bash
+            - -c
+            - |
+              # Replicate the script from toolbox.sh inline so the ceph image
+              # can be run directly, instead of requiring the rook toolbox
+              CEPH_CONFIG="/etc/ceph/ceph.conf"
+              MON_CONFIG="/etc/rook/mon-endpoints"
+              KEYRING_FILE="/etc/ceph/keyring"
+
+              # create a ceph config file in its default location so ceph/rados tools can be used
+              # without specifying any arguments
+              write_endpoints() {
+                endpoints=$(cat ${MON_CONFIG})
+
+                # filter out the mon names
+                # external cluster can have numbers or hyphens in mon names, handling them in regex
+                # shellcheck disable=SC2001
+                mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g')
+
+                DATE=$(date)
+                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
+                cat <<EOF > ${CEPH_CONFIG}
+              [global]
+              mon_host = ${mon_endpoints}
+
+              [client.admin]
+              keyring = ${KEYRING_FILE}
+              EOF
+              }
+
+              # watch the endpoints config file and update if the mon endpoints ever change
+              watch_endpoints() {
+                # get the timestamp for the target of the soft link
+                real_path=$(realpath ${MON_CONFIG})
+                initial_time=$(stat -c %Z "${real_path}")
+                while true; do
+                  real_path=$(realpath ${MON_CONFIG})
+                  latest_time=$(stat -c %Z "${real_path}")
+
+                  if [[ "${latest_time}" != "${initial_time}" ]]; then
+                    write_endpoints
+                    initial_time=${latest_time}
+                  fi
+
+                  sleep 10
+                done
+              }
+
+              # read the secret from an env var (for backward compatibility), or from the secret file
+              ceph_secret=${ROOK_CEPH_SECRET}
+              if [[ "$ceph_secret" == "" ]]; then
+                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
+              fi
+
+              # create the keyring file
+              cat <<EOF > ${KEYRING_FILE}
+              [${ROOK_CEPH_USERNAME}]
+              key = ${ceph_secret}
+              EOF
+
+              # write the initial config file
+              write_endpoints
+
+              # continuously update the mon endpoints if they fail over
+              watch_endpoints
+          imagePullPolicy: IfNotPresent
+          tty: true
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 2016
+            runAsGroup: 2016
+            capabilities:
+              drop: ["ALL"]
+          env:
+            - name: ROOK_CEPH_USERNAME
+              valueFrom:
+                secretKeyRef:
+                  name: rook-ceph-mon
+                  key: ceph-username
+          volumeMounts:
+            - mountPath: /etc/ceph
+              name: ceph-config
+            - name: mon-endpoint-volume
+              mountPath: /etc/rook
+            - name: ceph-admin-secret
+              mountPath: /var/lib/rook-ceph-mon
+              readOnly: true
+      volumes:
+        - name: ceph-admin-secret
+          secret:
+            secretName: rook-ceph-mon
+            optional: false
+            items:
+              - key: ceph-secret
+                path: secret.keyring
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+              - key: data
+                path: mon-endpoints
+        - name: ceph-config
+          emptyDir: {}
+      tolerations:
+        - key: "node.kubernetes.io/unreachable"
+          operator: "Exists"
+          effect: "NoExecute"
+          tolerationSeconds: 5
diff --git a/apps-kustomized/rook-cluster-ssd/fs.yaml b/apps-kustomized/rook-cluster-ssd/fs.yaml
new file mode 100644
index 0000000..4190a34
--- /dev/null
+++ b/apps-kustomized/rook-cluster-ssd/fs.yaml
@@ -0,0 +1,17 @@
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: ssdfs
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    replicated:
+      size: 1
+  dataPools:
+    - name: replicated
+      replicated:
+        size: 1
+  preserveFilesystemOnDelete: true
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
diff --git a/apps-kustomized/rook-cluster-ssd/storageclass-ssd-fs.yaml b/apps-kustomized/rook-cluster-ssd/storageclass-ssd-fs.yaml
index aa19518..e493c7e 100644
--- a/apps-kustomized/rook-cluster-ssd/storageclass-ssd-fs.yaml
+++ b/apps-kustomized/rook-cluster-ssd/storageclass-ssd-fs.yaml
@@ -7,7 +7,7 @@ provisioner: rook-ceph.cephfs.csi.ceph.com
 parameters:
   # clusterID is the namespace where the rook cluster is running
   clusterID: ssd-cluster
-  fsName: ssd-cephfs
+  fsName: ssdfs
 
   # Ceph pool into which the image shall be created
   pool: replicapool-ssd
@@ -15,6 +15,8 @@ parameters:
   # The secrets contain Ceph admin credentials.
   csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
   csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
   csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
   csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
 
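
Not part of the patch, but a quick smoke test once it is applied: exec into the
toolbox Deployment added above and confirm that the mons answer and that the
new ssdfs filesystem exists. This assumes kubectl already points at the cluster
that hosts the rook-ceph namespace.

# wait for the toolbox pod to become ready
kubectl -n rook-ceph rollout status deploy/rook-ceph-tools
# mon quorum and overall health, via the ceph.conf the inline script writes
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
# the filesystem from fs.yaml should be listed with its metadata and data pools
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph fs ls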
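
The fsName fix only pays off when a claim actually provisions through the
class, so here is a minimal consumer, sketched as a heredoc in the same style
as the toolbox script. The storageClassName is an assumption: the class's
metadata.name sits outside the hunks shown for storageclass-ssd-fs.yaml, so
substitute the real name before applying.

# hypothetical PVC; ReadWriteMany is the usual access mode for CephFS volumes
kubectl apply -f - <<PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ssdfs-smoke-test
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs-ssd # assumed name, not visible in this diff
PVC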
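
The two controller-expand-secret entries are what the CSI external-resizer
reads when a mounted volume grows; they only take effect if the class also sets
allowVolumeExpansion: true, which is not visible in these hunks. Assuming it
does, growing the claim above is a one-line patch:

# request 2Gi on the hypothetical claim; the resizer handles the rest online
kubectl patch pvc ssdfs-smoke-test --type merge -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'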