Compare commits


30 commits

Author SHA1 Message Date
0835746e29 Upgrading for "reasons"
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-27 19:01:33 +00:00
0ed9afcf96 Oops, wrong samba :-)
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-20 11:31:40 +00:00
1f682b195b Revert "Move to the storage node when wanted"
This reverts commit 622b8a36f4.

Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-20 11:31:03 +00:00
622b8a36f4 Move to the storage node when wanted
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-20 11:27:32 +00:00
6307bc1cf3 Move LMS to where the storages are
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-20 10:05:20 +00:00
993eccffb2 Revert "move back for now"
This reverts commit ed320ab060.

Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-19 20:50:29 +00:00
63810d655d Env vars to use the db
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:55:42 +00:00
71fa939bdc Let's have a real database
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:49:31 +00:00
dd18f226f2 And on 80 please
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:47:01 +00:00
ab822b67d5 YAML was a mistake
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:44:13 +00:00
ae8acc29ee Manual start
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:42:34 +00:00
ef09aaedeb Let's start from scratch on this
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:31:50 +00:00
b0d715badf Only released 23hrs ago, perhaps bug?
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:12:47 +00:00
e0458bbb9f copypasta error
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:04:05 +00:00
19fe703297 SQLite I guess
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:02:47 +00:00
dd728b9829 SQLite I guess
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 20:01:03 +00:00
3dd6a68634 Allow another repo
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 19:42:12 +00:00
6f97d323fb Add spoolman, why not
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-17 19:41:02 +00:00
a82d5b8d23 Remove these that no longer get deployed anyway.
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
2025-03-13 10:51:10 +00:00
c7d82f4502 Merge pull request 'Update Helm release nvidia-device-plugin to v0.17.1' (#30) from renovate/nvidia-device-plugin-0.x into main
Reviewed-on: #30
2025-03-13 10:45:24 +00:00
a5221d8389 Merge pull request 'Update 1password/connect-api Docker tag to v1.7.3' (#17) from renovate/1password-connect-api-1.x into main
Reviewed-on: #17
2025-03-13 10:43:46 +00:00
f9e1f2fef8 Merge pull request 'Update 1password/connect-sync Docker tag to v1.7.3' (#18) from renovate/1password-connect-sync-1.x into main
Reviewed-on: #18
2025-03-13 10:42:55 +00:00
832299c24a Merge branch 'main' into renovate/1password-connect-api-1.x
2025-03-13 10:41:47 +00:00
ced8b183d3 Merge pull request 'Update Helm release secrets-store-csi-driver to v1.4.8' (#31) from renovate/secrets-store-csi-driver-1.x into main
Reviewed-on: #31
2025-03-13 10:38:06 +00:00
8c9baf2e97 Merge pull request 'Update Helm release external-dns to v1.15.2' (#28) from renovate/external-dns-1.x into main
Reviewed-on: #28
2025-03-13 10:32:38 +00:00
Renovate bot
c1990ed111 Update Helm release secrets-store-csi-driver to v1.4.8
2025-03-13 00:01:32 +00:00
Renovate bot
012a592fc5 Update Helm release nvidia-device-plugin to v0.17.1
2025-03-13 00:01:29 +00:00
Renovate bot
71da7530d7 Update Helm release external-dns to v1.15.2
2025-03-12 00:01:30 +00:00
Renovate bot
e5ac982346 Update 1password/connect-sync Docker tag to v1.7.3
2025-03-02 00:01:43 +00:00
Renovate bot
75d0c94c40 Update 1password/connect-api Docker tag to v1.7.3
2025-03-01 00:01:30 +00:00
20 changed files with 131 additions and 279 deletions

View file

@@ -46,7 +46,7 @@ spec:
value: "8080"
- name: OP_LOG_LEVEL
value: info
image: 1password/connect-api:1.7.2
image: 1password/connect-api:1.7.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -92,7 +92,7 @@ spec:
value: localhost:11220
- name: OP_LOG_LEVEL
value: info
image: 1password/connect-sync:1.7.2
image: 1password/connect-sync:1.7.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3

View file

@@ -4,7 +4,7 @@ kind: Kustomization
helmCharts:
- name: external-dns
repo: https://kubernetes-sigs.github.io/external-dns
version: 1.15.0
version: 1.15.2
releaseName: external-dns
namespace: external-dns
valuesInline:

View file

@@ -66,7 +66,7 @@ spec:
dnsPolicy: ClusterFirst
nodeSelector:
intel.feature.node.kubernetes.io/gpu: "true"
kubernetes.io/hostname: talos-e48-wv7
kubernetes.io/hostname: talos-llu-kx3
terminationGracePeriodSeconds: 30
volumes:
- name: jellyfin-config

View file

@@ -71,6 +71,16 @@ spec:
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- talos-llu-kx3
terminationGracePeriodSeconds: 30
volumes:
- name: config

View file

@@ -10,7 +10,7 @@ helmCharts:
includeCRDs: true
namespace: nvidia-device-plugin
releaseName: nvidia-device-plugin
version: 0.17.0
version: 0.17.1
repo: https://nvidia.github.io/k8s-device-plugin
valuesInline:
nodeSelector: "feature.node.kubernetes.io/pci-0300_10de_13c0_1569_13c0.present=true"

View file

@@ -1,9 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: replicapool-ssd
namespace: rook-ceph
spec:
failureDomain: host
replicated:
size: 2

View file

@@ -1,47 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: ssd-cluster
namespace: rook-ceph # namespace:cluster
spec:
dataDirHostPath: /var/lib/rook-cluster-ssd
cephVersion:
image: quay.io/ceph/ceph:v18
allowUnsupported: true
mon:
count: 1
allowMultiplePerNode: true
mgr:
count: 1
allowMultiplePerNode: true
dashboard:
enabled: true
crashCollector:
disable: true
storage:
useAllNodes: false
useAllDevices: false
#deviceFilter:
nodes:
- name: "talos-7oq-vur"
devices:
- name: "sda"
config:
osdsPerDevice: "1"
- name: "talos-iqd-ysy"
devices:
- name: "sda"
config:
osdsPerDevice: "1"
monitoring:
enabled: false
healthCheck:
daemonHealth:
mon:
interval: 45s
timeout: 600s
priorityClassNames:
all: system-node-critical
mgr: system-cluster-critical
disruptionManagement:
managePodBudgets: true

View file

@@ -1,13 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-config-override
namespace: rook-ceph # namespace:cluster
data:
config: |
[global]
osd_pool_default_size = 1
mon_warn_on_pool_no_redundancy = false
bdev_flock_retry = 20
bluefs_buffered_io = false
mon_data_avail_warn = 10

View file

@@ -1,130 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-tools
namespace: rook-ceph # namespace:cluster
labels:
app: rook-ceph-tools
spec:
replicas: 1
selector:
matchLabels:
app: rook-ceph-tools
template:
metadata:
labels:
app: rook-ceph-tools
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: rook-ceph-tools
image: quay.io/ceph/ceph:v17.2.6
command:
- /bin/bash
- -c
- |
# Replicate the script from toolbox.sh inline so the ceph image
# can be run directly, instead of requiring the rook toolbox
CEPH_CONFIG="/etc/ceph/ceph.conf"
MON_CONFIG="/etc/rook/mon-endpoints"
KEYRING_FILE="/etc/ceph/keyring"
# create a ceph config file in its default location so ceph/rados tools can be used
# without specifying any arguments
write_endpoints() {
endpoints=$(cat ${MON_CONFIG})
# filter out the mon names
# external cluster can have numbers or hyphens in mon names, handling them in regex
# shellcheck disable=SC2001
mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g')
DATE=$(date)
echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
cat <<EOF > ${CEPH_CONFIG}
[global]
mon_host = ${mon_endpoints}
[client.admin]
keyring = ${KEYRING_FILE}
EOF
}
# watch the endpoints config file and update if the mon endpoints ever change
watch_endpoints() {
# get the timestamp for the target of the soft link
real_path=$(realpath ${MON_CONFIG})
initial_time=$(stat -c %Z "${real_path}")
while true; do
real_path=$(realpath ${MON_CONFIG})
latest_time=$(stat -c %Z "${real_path}")
if [[ "${latest_time}" != "${initial_time}" ]]; then
write_endpoints
initial_time=${latest_time}
fi
sleep 10
done
}
# read the secret from an env var (for backward compatibility), or from the secret file
ceph_secret=${ROOK_CEPH_SECRET}
if [[ "$ceph_secret" == "" ]]; then
ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
fi
# create the keyring file
cat <<EOF > ${KEYRING_FILE}
[${ROOK_CEPH_USERNAME}]
key = ${ceph_secret}
EOF
# write the initial config file
write_endpoints
# continuously update the mon endpoints if they fail over
watch_endpoints
imagePullPolicy: IfNotPresent
tty: true
securityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
env:
- name: ROOK_CEPH_USERNAME
valueFrom:
secretKeyRef:
name: rook-ceph-mon
key: ceph-username
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: mon-endpoint-volume
mountPath: /etc/rook
- name: ceph-admin-secret
mountPath: /var/lib/rook-ceph-mon
readOnly: true
volumes:
- name: ceph-admin-secret
secret:
secretName: rook-ceph-mon
optional: false
items:
- key: ceph-secret
path: secret.keyring
- name: mon-endpoint-volume
configMap:
name: rook-ceph-mon-endpoints
items:
- key: data
path: mon-endpoints
- name: ceph-config
emptyDir: {}
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 5

View file

@@ -1,17 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: ssdfs
namespace: rook-ceph
spec:
metadataPool:
replicated:
size: 1
dataPools:
- name: replicated
replicated:
size: 1
preserveFilesystemOnDelete: true
metadataServer:
activeCount: 1
activeStandby: true

View file

@@ -1,24 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-cephfs-ssd
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
clusterID: rook-ceph
fsName: ssdfs
# Ceph pool into which the image shall be created
pool: ssdfs-replicated
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# Delete the rbd volume when a PVC is deleted
reclaimPolicy: Delete

View file

@@ -1,32 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-ceph-block-ssd
annotations:
storageclass.kubernetes.io/is-default-class: true
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
clusterID: rook-ceph
# Ceph pool into which the RBD image shall be created
pool: replicapool-ssd
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`.
csi.storage.k8s.io/fstype: xfs
# Delete the rbd volume when a PVC is deleted
reclaimPolicy: Delete

View file

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: spoolman
name: spoolman
spec:
replicas: 1
selector:
matchLabels:
app: spoolman
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: spoolman
spec:
containers:
- image: ghcr.io/donkie/spoolman:0.22
env:
- name: SPOOLMAN_DB_TYPE
value: postgres
- name: SPOOLMAN_DB_HOST
value: postgres-spoolman
- name: SPOOLMAN_DB_USERNAME
valueFrom:
secretKeyRef:
name: spoolman.postgres-spoolman.credentials.postgresql.acid.zalan.do
key: username
- name: SPOOLMAN_DB_PASSWORD
valueFrom:
secretKeyRef:
name: spoolman.postgres-spoolman.credentials.postgresql.acid.zalan.do
key: password
imagePullPolicy: IfNotPresent
name: spoolman
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}

View file

@@ -0,0 +1,22 @@
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: postgres-spoolman
spec:
teamId: spoolman
volume:
size: 8Gi
storageClass: longhorn-fast
numberOfInstances: 1
users:
spoolman:
- superuser
- createdb
databases:
spoolman: spoolman # dbname: owner
postgresql:
version: "15"
resources:
requests:
cpu: 10m
memory: 1Mi

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: spoolman
name: k8s-spoolman
annotations:
external-dns.alpha.kubernetes.io/hostname: spoolman.martyn.berlin
spec:
ports:
- port: 80
targetPort: 8000
selector:
app: spoolman
type: LoadBalancer

View file

@@ -11,7 +11,7 @@ spec:
source:
chart: ingress-nginx
repoURL: https://kubernetes.github.io/ingress-nginx
targetRevision: 4.12.0
targetRevision: 4.12.1
helm:
parameters:
- name: controller.ingressClassResource.default

View file

@@ -11,7 +11,7 @@ spec:
source:
chart: secrets-store-csi-driver
repoURL: https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts
targetRevision: 1.3.4
targetRevision: 1.4.8
syncPolicy:
automated:
selfHeal: true

View file

@@ -16,6 +16,7 @@ spec:
- https://k8s-at-home.com/charts/
- https://charts.bitnami.com/bitnami
- https://charts.gabe565.com
- https://ideaplexus.gitlab.io/charts
destinations:
- name: '*'
namespace: '*'

View file

@@ -24,6 +24,16 @@ spec:
users:
- username: martyn
password: "564628"
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- talos-llu-kx3
persistence:
extraPVCShares:
- name: oldspace

View file

@@ -0,0 +1,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: spoolman
namespace: argocd
spec:
destination:
namespace: spoolman
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/spoolman
repoURL: https://git.martyn.berlin/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true