Remove these manifests; they no longer get deployed anyway.
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
This commit is contained in:
parent c7d82f4502
commit a82d5b8d23
7 changed files with 0 additions and 272 deletions
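
None of the objects below are expected to exist on the cluster any more. A quick sanity check to run alongside this commit, assuming kubectl access and the Rook CRDs still installed (a sketch, not part of the commit itself):

# list any survivors of the resources deleted below
kubectl -n rook-ceph get cephcluster,cephblockpool,cephfilesystem
kubectl get storageclass rook-ceph-block-ssd rook-cephfs-ssd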
@@ -1,9 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool-ssd
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 2
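
Dropping the manifest from the repo does not remove the pool from a live cluster. If the CR is still present there, a hedged cleanup sketch:

# delete the pool CR directly (destructive; assumes nothing still maps RBD images from it)
kubectl -n rook-ceph delete cephblockpool replicapool-ssd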
@@ -1,47 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: ssd-cluster
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook-cluster-ssd
  cephVersion:
    image: quay.io/ceph/ceph:v18
    allowUnsupported: true
  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true
  dashboard:
    enabled: true
  crashCollector:
    disable: true
  storage:
    useAllNodes: false
    useAllDevices: false
    #deviceFilter:
    nodes:
      - name: "talos-7oq-vur"
        devices:
          - name: "sda"
            config:
              osdsPerDevice: "1"
      - name: "talos-iqd-ysy"
        devices:
          - name: "sda"
            config:
              osdsPerDevice: "1"
  monitoring:
    enabled: false
  healthCheck:
    daemonHealth:
      mon:
        interval: 45s
        timeout: 600s
  priorityClassNames:
    all: system-node-critical
    mgr: system-cluster-critical
  disruptionManagement:
    managePodBudgets: true
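
Deleting a CephCluster CR does not wipe its dataDirHostPath; Rook leaves mon and OSD state on disk. A sketch of the manual cleanup, assuming shell access to each node (non-trivial on Talos, where a privileged pod or a node wipe is the usual route):

# on each node that ran this cluster
rm -rf /var/lib/rook-cluster-ssd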
@@ -1,13 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-config-override
  namespace: rook-ceph # namespace:cluster
data:
  config: |
    [global]
    osd_pool_default_size = 1
    mon_warn_on_pool_no_redundancy = false
    bdev_flock_retry = 20
    bluefs_buffered_io = false
    mon_data_avail_warn = 10
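
This override relaxed the warnings a single-OSD, size-1 cluster would otherwise emit constantly. To see what a live cluster is actually carrying in the override, a sketch:

# print the raw override as Rook sees it
kubectl -n rook-ceph get configmap rook-config-override -o jsonpath='{.data.config}'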
@@ -1,130 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v17.2.6
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
              cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")

                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi

                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF

              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
            capabilities:
              drop: ["ALL"]
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
              readOnly: true
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
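
For reference, this toolbox was the entry point for ad-hoc Ceph commands; typical usage while it was deployed looked like this (sketch):

# open an interactive Ceph shell, or run one-off commands
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status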
@@ -1,17 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: ssdfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 1
  dataPools:
    - name: replicated
      replicated:
        size: 1
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true
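
Because preserveFilesystemOnDelete is true, deleting this CR leaves the ssdfs filesystem and its pools behind in Ceph. A sketch for removing it for good, run from a toolbox pod (destructive; assumes no client still mounts ssdfs):

ceph fs fail ssdfs
ceph fs rm ssdfs --yes-i-really-mean-it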
@@ -1,24 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs-ssd
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  clusterID: rook-ceph
  fsName: ssdfs

  # Ceph pool into which the volume shall be created
  pool: ssdfs-replicated

  # The secrets contain Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph

# Delete the cephfs volume when a PVC is deleted
reclaimPolicy: Delete
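
Before a StorageClass goes away it is worth confirming no PersistentVolumes were provisioned from it; existing PVs keep working, but they can no longer be expanded or cleanly deleted once the class and its backing pools are gone. A sketch:

# list PVs still pointing at the class
kubectl get pv -o custom-columns=NAME:.metadata.name,CLASS:.spec.storageClassName | grep rook-cephfs-ssd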
@@ -1,32 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block-ssd
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  clusterID: rook-ceph
  # Ceph pool into which the RBD image shall be created
  pool: replicapool-ssd

  # RBD image format. Defaults to "2".
  imageFormat: "2"

  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
  imageFeatures: layering

  # The secrets contain Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph

  # Specify the filesystem type of the volume. If not specified, csi-provisioner
  # will set default as `ext4`.
  csi.storage.k8s.io/fstype: xfs

# Delete the rbd volume when a PVC is deleted
reclaimPolicy: Delete
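
This class was annotated as the cluster default, so once it is removed another class has to take over that role, or PVCs created without an explicit storageClassName will stay Pending. A sketch, where local-path is a stand-in for whichever class should become the new default:

kubectl patch storageclass local-path \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'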