Compare commits

..

1 commit

Author | SHA1 | Message | Date
| f69f073917 | Update physicals-mermaid.md | 2025-03-03 10:33:42 +00:00
384 changed files with 69951 additions and 4483 deletions

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: actual
namespace: argocd
spec:
destination:
namespace: actual
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/actual
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true
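
Every Application manifest in this change follows the same shape: spec.destination names the target cluster and namespace, spec.source points at a kustomize path or Helm chart (usually in the in-cluster Forgejo repo), and the automated/selfHeal sync policy keeps the cluster converged on whatever is in Git. For orientation, applying one of these by hand would look like the following (the file name is illustrative, since the per-file paths were lost from this page):

  kubectl apply -n argocd -f apps/actual.yaml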

@@ -1,63 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx-internal
namespace: argocd
spec:
destination:
namespace: cluster-ingress
server: https://kubernetes.default.svc
project: infra
source:
chart: ingress-nginx
repoURL: https://kubernetes.github.io/ingress-nginx
targetRevision: 4.12.1
helm:
parameters:
- name: controller.ingressClassResource.default
value: 'false'
- name: controller.ingressClass
value: 'internal-nginx'
- name: controller.ingressClassResource.className
value: 'internal-nginx'
- name: controller.ingressClassResource.name
value: 'internal-nginx'
- name: controller.ingressClassResource.controllerValue
value: "k8s.io/internal-ingress-nginx"
- name: controller.config.annotations-risk-level
value: Critical
- name: controller.service.type
value: LoadBalancer
- name: controller.allowSnippetAnnotations
value: 'true'
- name: controller.resources.requests.cpu
value: 35m
- name: controller.resources.requests.memory
value: '351198544'
- name: controller.extraArgs.default-ssl-certificate
value: cluster-ingress/cluster-ingress-wildcard
- name: controller.config.http-snippet
value: 'more_set_headers -a "X-Robots-Tag: noai";
more_set_headers -a "X-Robots-Tag: Google-Extended: none";
more_set_headers -a "X-Robots-Tag: GPTBot: none";
more_set_headers -a "X-Robots-Tag: ChatGPT-User: none";
more_set_headers -a "X-Robots-Tag: anthropic-ai: none";
more_set_headers -a "X-Robots-Tag: CCBot: none";
more_set_headers -a "X-Robots-Tag: semrushbot: none";
more_set_headers -a "X-Robots-Tag: Amazonbot: none";
more_set_headers -a "X-Robots-Tag: dotbot: none";
more_set_headers -a "X-Robots-Tag: AhrefsBot: none";'
- name: controller.config.block-user-agents
value: ~*Amazonbot,~*SemrushBot,~*DotBot,~*Ahrefsbot,~*GPT
syncPolicy:
automated:
selfHeal: true
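
Both ingress-nginx Applications (this internal one and the default one below) carry the same anti-crawler configuration: each more_set_headers -a directive in the http-snippet appends one more X-Robots-Tag header to every response, and block-user-agents entries prefixed with ~* are case-insensitive regex matches against the User-Agent, so ~*GPT should also catch GPTBot and ChatGPT-User. A quick way to confirm the headers, against any host served by this controller (hostname illustrative):

  curl -sI https://example.martyn.berlin/ | grep -i x-robots-tag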

@@ -1,66 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx
namespace: argocd
spec:
destination:
namespace: cluster-ingress
server: https://kubernetes.default.svc
project: infra
source:
chart: ingress-nginx
repoURL: https://kubernetes.github.io/ingress-nginx
targetRevision: 4.12.1
helm:
valuesObject:
controller:
nodeSelector:
ingresshost: 'true'
hostNetwork: 'true'
parameters:
- name: controller.ingressClassResource.default
value: 'true'
- name: controller.ingressClass
value: 'nginx'
- name: controller.ingressClassResource.className
value: 'nginx'
- name: controller.ingressClassResource.controllerValue
value: "k8s.io/ingress-nginx"
- name: controller.config.annotations-risk-level
value: Critical
- name: controller.service.type
value: LoadBalancer
- name: controller.allowSnippetAnnotations
value: 'true'
- name: controller.resources.requests.cpu
value: 35m
- name: controller.resources.requests.memory
value: '351198544'
- name: controller.extraArgs.default-ssl-certificate
value: cluster-ingress/cluster-ingress-wildcard
- name: controller.config.http-snippet
value: 'more_set_headers -a "X-Robots-Tag: noai";
more_set_headers -a "X-Robots-Tag: Google-Extended: none";
more_set_headers -a "X-Robots-Tag: GPTBot: none";
more_set_headers -a "X-Robots-Tag: ChatGPT-User: none";
more_set_headers -a "X-Robots-Tag: anthropic-ai: none";
more_set_headers -a "X-Robots-Tag: CCBot: none";
more_set_headers -a "X-Robots-Tag: semrushbot: none";
more_set_headers -a "X-Robots-Tag: Amazonbot: none";
more_set_headers -a "X-Robots-Tag: dotbot: none";
more_set_headers -a "X-Robots-Tag: AhrefsBot: none";'
- name: controller.config.block-user-agents
value: ~*Amazonbot,~*SemrushBot,~*DotBot,~*Ahrefsbot,~*GPT
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: faircampbuilder
namespace: argocd
spec:
destination:
namespace: faircampbuilder
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/faircampbuilder
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: files-web
namespace: argocd
spec:
destination:
namespace: files-web
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/files-web
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: filestash
namespace: argocd
spec:
destination:
namespace: filestash
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/filestash
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,36 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: immich
namespace: argocd
spec:
destination:
namespace: 'immich'
server: 'https://kubernetes.default.svc'
sources:
- repoURL: ghcr.io/immich-app/immich-charts
path: immich
targetRevision: 0.9.3
chart: immich
helm:
valuesObject:
env:
DB_HOSTNAME: "immich-postgres"
DB_USERNAME: "immich"
DB_DATABASE_NAME: "immich"
immich:
persistence:
library:
existingClaim: "immich-photos"
redis:
enabled: true
postgresql:
global:
postgresql:
auth:
existingSecret:
project: apps
syncPolicy:
automated:
prune: true
selfHeal: true

@@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: k8up
namespace: argocd
spec:
destination:
namespace: k8up
server: https://kubernetes.default.svc
project: default
source:
chart: k8up
helm:
parameters:
- name: metrics.prometheusRule.createDefaultRules
value: "false"
- name: k8up.skipWithoutAnnotation
value: "true"
values: |-
k8up:
envVars:
- name: PROM_URL
value: ""
repoURL: https://k8up-io.github.io/k8up
targetRevision: 4.8.4
syncPolicy:
automated:
prune: true
selfHeal: true

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mailhog4masto
namespace: argocd
spec:
destination:
namespace: mastodon
server: https://kubernetes.default.svc
project: infra
source:
chart: mailhog
repoURL: https://codecentric.github.io/helm-charts
targetRevision: 5.8.0
helm:
valuesObject:
service:
port:
http: 80
smtp: 25
type: LoadBalancer
syncPolicy:
automated:
selfHeal: true

@@ -1,33 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mysql4blog
namespace: argocd
spec:
destination:
namespace: blog
server: https://kubernetes.default.svc
project: apps
source:
chart: mariadb
helm:
parameters:
- name: auth.database
value: writefreely
- name: auth.forcePassword
value: 'true'
- name: auth.username
value: writefreely
- name: auth.existingSecret
value: db-creds
- name: primary.service.type
value: ClusterIP
- name: image.repository
value: bitnamilegacy/mariadb
- name: image.tag
value: 11.8.3-debian-12-r0
repoURL: https://charts.bitnami.com/bitnami
targetRevision: 14.1.4
syncPolicy:
automated:
selfHeal: true

@@ -1,59 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ntfy
namespace: argocd
spec:
destination:
namespace: ntfy
server: https://kubernetes.default.svc
project: default
source:
helm:
parameters:
- name: service.type
value: LoadBalancer
- name: persistence.cache.enabled
value: 'true'
- name: persistence.cache.storageClass
value: longhorn-fast
- name: persistence.data.enabled
value: 'true'
- name: persistence.data.storageClass
value: longhorn-fast
- name: persistence.data.size
value: 10Gi
valuesObject:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: nginx
external-dns.alpha.kubernetes.io/target: armnleg.martyn.berlin
nginx.ingress.kubernetes.io/server-snippets: "location / {\n proxy_set_header\
\ Upgrade $http_upgrade;\n proxy_http_version 1.1;\n proxy_set_header X-Forwarded-Host\
\ $http_host;\n proxy_set_header X-Forwarded-Proto $scheme;\n proxy_set_header\
\ X-Forwarded-For $remote_addr;\n proxy_set_header Host $host;\n proxy_set_header\
\ Connection \"upgrade\";\n proxy_cache_bypass $http_upgrade;\n}\n"
hosts:
- host: ntfy.martyn.berlin
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- ntfy.martyn.berlin
ntfy:
config:
base-url: https://ntfy.martyn.berlin
cache-file: /var/cache/ntfy/cache.db
attachment-cache-dir: /var/cache/ntfy/attachments
auth-file: /var/lib/ntfy/user.db
auth-default-access: deny-all
path: apps-helm/ntfy
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true
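
For readability, the escaped server-snippets annotation above unfolds to this plain nginx block, which enables WebSocket upgrades and forwards the usual client headers:

  location / {
    proxy_set_header Upgrade $http_upgrade;
    proxy_http_version 1.1;
    proxy_set_header X-Forwarded-Host $http_host;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host $host;
    proxy_set_header Connection "upgrade";
    proxy_cache_bypass $http_upgrade;
  }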

@@ -1,47 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: oauth2proxy
namespace: argocd
spec:
destination:
namespace: auth
server: https://kubernetes.default.svc
project: default
source:
chart: oauth2-proxy
helm:
parameters:
- name: config.clientID
value: f7f67ba843f06f244857b01d96cbe7bc
- name: config.clientSecret
value: 2774dc53beb0570e5ea7bc342d2a8f4e47d2d16986ad542d4ce776505043238f
- name: config.cookieSecret
value: iRNHJrMvm3Lv3UUe3j0ZBWTic3y5XuZQ-mdLVAdmNV4=
values: |-
ingress:
enabled: true
className: nginx
path: /oauth2
pathType: Prefix
hosts:
- homeauth.martyn.berlin
tls:
- hosts:
- homeauth.martyn.berlin
extraArgs:
- --provider=oidc
- --oidc-issuer-url=https://auth.martyn.berlin/dex
- "--scope=openid profile email"
- "--email-domain=*"
- "--upstream=static://200"
- "--http-address=0.0.0.0:4180"
- "--whitelist-domain=.martyn.berlin"
- "--cookie-domain=.martyn.berlin"
- "--cookie-name=_home_oauth2_proxy"
repoURL: https://oauth2-proxy.github.io/manifests
targetRevision: 7.12.18
syncPolicy:
automated:
prune: true
selfHeal: true
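
Other ingresses in this repo hand authentication off to this proxy through nginx's auth-request annotations; the ingress for the actual app further down uses exactly this pair:

  nginx.ingress.kubernetes.io/auth-url: https://homeauth.martyn.berlin/oauth2/auth
  nginx.ingress.kubernetes.io/auth-signin: https://homeauth.martyn.berlin/oauth2/start?rd=https://$host$escaped_request_uri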

@@ -1,35 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: paperless-ngx
namespace: argocd
spec:
destination:
namespace: paperless-ngx
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/paperless-ngx
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
ignoreDifferences:
- kind: Secret
namespace: paperless-ngx
name: paperless-ngx-redis
jsonPointers:
- /data
- kind: StatefulSet
namespace: paperless-ngx
name: paperless-ngx-redis-master
jsonPointers:
- /spec/template/metadata/annotations
- kind: StatefulSet
namespace: paperless-ngx
name: paperless-ngx-redis-replicas
jsonPointers:
- /spec/template/metadata/annotations
syncPolicy:
syncOptions:
- RespectIgnoreDifferences=true
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prowlarr
namespace: argocd
spec:
destination:
namespace: prowlarr
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/prowlarr
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,23 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: redis4masto
namespace: argocd
spec:
destination:
namespace: mastodon
server: https://kubernetes.default.svc
project: infra
source:
chart: redis
repoURL: https://charts.pascaliske.dev
targetRevision: 2.1.0
helm:
valuesObject:
persistentVolumeClaim:
storageClassName: longhorn-fast
securityContext:
fsGroup: 999
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: s3onhp40l
namespace: argocd
spec:
destination:
namespace: s3
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/s3onhp40l
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,36 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: samba-fast
namespace: argocd
spec:
destination:
namespace: sambas
server: https://kubernetes.default.svc
project: infra
source:
helm:
parameters:
- name: image.tag
value: v1.0.2
- name: service.type
value: LoadBalancer
- name: persistence.pvc.storageClass
value: local-path-fast
- name: persistence.pvc.size
value: 3Ti
valuesObject:
samba:
users:
- username: martyn
password: '564628'
tolerations:
- key: "justdisks"
operator: Exists
effect: "NoSchedule"
path: apps-helm/samba4
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,57 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: samba-fast-longhorn
namespace: argocd
spec:
destination:
namespace: sambas
server: https://kubernetes.default.svc
project: infra
source:
helm:
parameters:
- name: image.tag
value: v1.0.2
- name: service.type
value: LoadBalancer
- name: persistence.pvc.storageClass
value: longhorn-fast
- name: persistence.pvc.size
value: 100Mi
- name: livenessProbe.enabled
value: 'false'
valuesObject:
samba:
users:
- username: martyn
password: '564628'
persistence:
extraPVCShares:
- name: oldspace
size: 300Gi
storageClass: longhorn-fast
- name: flacstore
size: 600Gi
storageClass: longhorn-fast
- name: backups
size: 2Ti
storageClass: longhorn-fast
- name: backups-overflow
size: 1Ti
storageClass: longhorn-fast
- name: usenet
size: 512Gi
storageClass: longhorn-fast
- name: scans
size: 1Gi
storageClass: longhorn-fast
- name: s3
size: 20Gi
storageClass: longhorn-fast
path: apps-helm/samba4
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,129 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: samba-slow
namespace: argocd
spec:
destination:
namespace: sambas
server: https://kubernetes.default.svc
project: infra
source:
helm:
parameters:
- name: image.tag
value: v1.0.2
- name: service.type
value: LoadBalancer
- name: persistence.pvc.storageClass
value: longhorn-spinny
- name: persistence.pvc.size
value: 1Ti
- name: livenessProbe.enabled
value: 'false'
valuesObject:
samba:
users:
- username: martyn
password: '564628'
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- n150-nas
persistence:
combineShares: true
sharesToCombine:
- destName: films
shares:
- films1
- films2
- films3
- films4
- destName: series
shares:
- moreoldseries
- evenmoreoldseries
- runningseries-b
- runningseries
- justmoreseries
- justmoreseries-2
- justmoreseries-3
- justmoreseries-4
- justmoreseries-5
- justmoreseries-6
- justmoreseries-7
- destName: roms
shares:
- roms-a
- roms-b
extraPVCShares:
- name: films1
size: 1Ti
storageClass: longhorn-spinny
- name: films2
size: 1Ti
storageClass: longhorn-spinny
- name: films3
size: 1Ti
storageClass: longhorn-spinny
- name: films4
size: 1Ti
storageClass: longhorn-spinny
- name: runningseries
size: 2Ti
storageClass: longhorn-spinny
- name: runningseries-b
size: 1Ti
storageClass: longhorn-spinny
- name: moreoldseries
size: 1Ti
storageClass: longhorn-spinny
- name: youtube
size: 1Ti
storageClass: longhorn-spinny
- name: evenmoreoldseries
size: 1Ti
storageClass: longhorn-spinny
- name: justmoreseries
size: 1Ti
storageClass: longhorn-spinny
- name: justmoreseries-2
size: 1Ti
storageClass: longhorn-spinny
- name: justmoreseries-3
size: 900Gi
storageClass: longhorn-spinny
- name: justmoreseries-4
size: 900Gi
storageClass: longhorn-spinny
- name: justmoreseries-5
size: 900Gi
storageClass: longhorn-spinny
- name: justmoreseries-6
size: 900Gi
storageClass: longhorn-spinny
- name: justmoreseries-7
size: 900Gi
storageClass: longhorn-spinny
- name: roms-a
size: 1Ti
storageClass: longhorn-spinny
- name: roms-b
size: 1Ti
storageClass: longhorn-spinny
tolerations:
- key: "justdisks"
operator: Exists
effect: "NoSchedule"
path: apps-helm/samba4
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: siyuan
namespace: argocd
spec:
destination:
namespace: siyuan
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/siyuan
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: tailscale-proxy
namespace: argocd
spec:
destination:
namespace: tailscale-proxy
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/tailscale-proxy
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: translate
namespace: argocd
spec:
destination:
namespace: libretranslate
server: https://kubernetes.default.svc
project: apps
source:
path: apps-kustomized/libretranslate
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: uptime-kuma
namespace: argocd
spec:
destination:
namespace: uptime
server: https://kubernetes.default.svc
project: apps
source:
helm:
parameters:
- name: service.type
value: LoadBalancer
- name: persistence.enabled
value: 'true'
- name: persistence.storageClass
value: longhorn-fast
path: apps-helm/uptime-kuma
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,34 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: valheim
namespace: argocd
spec:
syncPolicy:
destination:
namespace: valheim
server: https://kubernetes.default.svc
project: apps
source:
chart: valheim-k8s
repoURL: https://addyvan.github.io/valheim-k8s/
targetRevision: 2.0.1
helm:
parameters:
- name: worldName
value: campuzzi-fire
- name: serverName
value: campuzzi
- name: password
value: soupsoup
- name: storage.kind
value: persistentVolumeClaim
- name: storage.pvc.storageClassName
value: longhorn-fast
- name: serverStorage.kind
value: persistentVolumeClaim
- name: serverStorage.pvc.storageClassName
value: longhorn-fast
valuesObject:
extraEnvironmentVars:
CROSSPLAY: 'true'

@@ -1,41 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: wg-access-server
namespace: argocd
spec:
destination:
namespace: wg-access
server: https://kubernetes.default.svc
project: infra
source:
helm:
parameters:
- name: web.service.type
value: LoadBalancer
- name: wireguard.service.type
value: LoadBalancer
valuesObject:
config:
csiSecretsStore:
providerName: 1password
resourceName: vaults/3oh5jxmxvqvpuimu2lbuajtizi/allitems/idkjj6oyua2fq6df4fkjzmh4ne/config.yaml
persistence:
enabled: true
storageClass: longhorn-fast
ingress:
enabled: true
ingressClassName: internal-nginx
annotations:
external-dns.alpha.kubernetes.io/target: internal-ingress.martyn.berlin
hosts:
- wg-access.martyn.berlin
tls:
- hosts:
- wg-access.martyn.berlin
path: apps-helm/wg-access-server
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: wol-rest
namespace: argocd
spec:
destination:
namespace: wol-rest
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/wol-rest
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: work-siyuan
namespace: argocd
spec:
destination:
namespace: work-siyuan
server: https://kubernetes.default.svc
project: default
source:
path: apps-kustomized/work-siyuan
repoURL: http://forgejo.git.svc.cluster.local/martyn/infra4talos.git
targetRevision: HEAD
syncPolicy:
automated:
selfHeal: true

@@ -32,7 +32,6 @@ spec:
       {{- if .Values.securityContext.enabled }}
       securityContext:
         fsGroup: {{ .Values.securityContext.fsGroup }}
-        fsGroupChangePolicy: "OnRootMismatch" # There's a chmod already, and no other setup is using this volume!
       {{- end }}
       {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
       initContainers:

@@ -3,9 +3,6 @@
   server string = {{ .Values.samba.global.server_string | default "%h server (Samba, Alpine)" }}
   security = {{ .Values.samba.global.security | default "user" }}
   map to guest = {{ .Values.samba.global.map_to_guest | default "Bad User" }}
-  {{- if .Values.samba.global.include_echo_map_script }}
-  username map script = /bin/echo
-  {{ end -}}
   encrypt passwords = {{ .Values.samba.global.encrypt_passwords | default "yes" }}
   load printers = no
   printing = bsd

@@ -86,7 +86,7 @@ spec:
             - name: cifs
               containerPort: 445
               protocol: TCP
-          {{- if .Values.livenessProbe.enabled }}
+          {{- if .Values.livenessProbe.enabled | default true }}
           livenessProbe:
             exec:
               command:

@@ -21,12 +21,10 @@ fullnameOverride: ""
 podSecurityContext:
   fsGroup: 1000
-  fsGroupChangePolicy: "OnRootMismatch"
 samba:
   global:
     workgroup: WORKGROUP
-    include_echo_map_script: true
   share: {}
   users: []

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,24 @@
apiVersion: v2
name: sshtunnel
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "2.0.1"

@@ -0,0 +1,178 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "13"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"autossh"},"name":"autossh","namespace":"cluster-ingress"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"autossh"}},"template":{"metadata":{"labels":{"app":"autossh"}},"spec":{"containers":[{"command":["/bin/sh","-c","cp /keyfile/autossh /tmp/; chmod 600 /tmp/autossh; autossh -M0 v4tov6@31.7.180.171 -R0.0.0.0:8080:192.168.1.11:80 -R0.0.0.0:8443:192.168.1.11:443 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -i /tmp/autossh -T -N"],"image":"jnovack/autossh","imagePullPolicy":"IfNotPresent","name":"autossh","volumeMounts":[{"mountPath":"/keyfile","name":"keyfile"}]}],"restartPolicy":"Always","volumes":[{"name":"keyfile","secret":{"defaultMode":256,"secretName":"autossh-keyfile"}}]}}}}
creationTimestamp: "2020-07-26T16:27:16Z"
generation: 61
labels:
app: autossh
managedFields:
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:kubectl.kubernetes.io/last-applied-configuration: {}
f:labels:
.: {}
f:app: {}
f:spec:
f:progressDeadlineSeconds: {}
f:replicas: {}
f:revisionHistoryLimit: {}
f:selector:
f:matchLabels:
.: {}
f:app: {}
f:strategy:
f:rollingUpdate:
.: {}
f:maxSurge: {}
f:maxUnavailable: {}
f:type: {}
f:template:
f:metadata:
f:labels:
.: {}
f:app: {}
f:spec:
f:containers:
k:{"name":"autossh"}:
.: {}
f:command: {}
f:image: {}
f:imagePullPolicy: {}
f:name: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/keyfile"}:
.: {}
f:mountPath: {}
f:name: {}
f:dnsPolicy: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext: {}
f:terminationGracePeriodSeconds: {}
f:volumes:
.: {}
k:{"name":"keyfile"}:
.: {}
f:name: {}
f:secret:
.: {}
f:defaultMode: {}
f:secretName: {}
manager: kubectl
operation: Update
time: "2021-07-04T17:59:23Z"
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:deployment.kubernetes.io/revision: {}
f:status:
f:availableReplicas: {}
f:conditions:
.: {}
k:{"type":"Available"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"Progressing"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updatedReplicas: {}
manager: k3s
operation: Update
time: "2021-09-30T18:41:41Z"
name: autossh
namespace: cluster-ingress
resourceVersion: "235469131"
selfLink: /apis/apps/v1/namespaces/cluster-ingress/deployments/autossh
uid: d9661f02-0b6c-4d1f-9c34-3a5ff586af2b
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: autossh
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: autossh
spec:
containers:
- command:
- /bin/sh
- -c
- cp /keyfile/autossh /tmp/; chmod 600 /tmp/autossh; autossh -M0 v4tov6@31.7.180.171
-R0.0.0.0:8080:10.43.14.171:80 -R0.0.0.0:8443:10.43.14.171:443 -R0.0.0.0:2222:192.168.1.52:2222
-R0.0.0.0:4422:192.168.1.111:22 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
-o ServerAliveInterval=30 -o ExitOnForwardFailure=yes -o ServerAliveCountMax=3
-i /tmp/autossh -T -N
image: jnovack/autossh
imagePullPolicy: IfNotPresent
name: autossh
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /keyfile
name: keyfile
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- name: keyfile
secret:
defaultMode: 256
secretName: autossh-keyfile
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2020-07-26T16:27:16Z"
lastUpdateTime: "2021-07-04T17:59:25Z"
message: ReplicaSet "autossh-7969f78dd8" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
- lastTransitionTime: "2021-09-30T18:41:41Z"
lastUpdateTime: "2021-09-30T18:41:41Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 61
readyReplicas: 1
replicas: 1
updatedReplicas: 1
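
A note on the autossh invocation above: -M0 disables autossh's monitor port in favour of the ServerAliveInterval keepalives, -T -N opens no terminal and runs no remote command, and each -R makes the remote host (31.7.180.171) listen on a port and tunnel connections back into the home network, for example:

  -R0.0.0.0:8080:10.43.14.171:80    # remote :8080 -> in-cluster IP 10.43.14.171:80
  -R0.0.0.0:4422:192.168.1.111:22   # remote :4422 -> LAN host 192.168.1.111:22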

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "sshtunnel.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sshtunnel.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "sshtunnel.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "sshtunnel.labels" -}}
helm.sh/chart: {{ include "sshtunnel.chart" . }}
{{ include "sshtunnel.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "sshtunnel.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sshtunnel.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "sshtunnel.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "sshtunnel.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "sshtunnel.fullname" . }}
labels:
{{- include "sshtunnel.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "sshtunnel.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "sshtunnel.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
command:
- /bin/sh
- -c
- cp /keyfile/keyfile /tmp/autossh; chmod 600 /tmp/autossh; autossh -M0 {{.Values.tunnel.user}}@{{.Values.tunnel.sshHost}} -i /tmp/autossh
{{- range .Values.tunnel.sshOptions}} {{.}} {{end}}
{{- range .Values.tunnel.ports}} -R{{.bindIP}}:{{.source}}:{{.destsvc}}.{{.destns}}.svc.cluster.local.:{{.dest}} {{end}}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- mountPath: "/keyfile"
name: keyfile
volumes:
- name: keyfile
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: app-secrets
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
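
Rendered with the chart's default values.yaml (added later in this commit), and assuming both port mappings bind to 0.0.0.0, the container command comes out roughly as:

  cp /keyfile/keyfile /tmp/autossh; chmod 600 /tmp/autossh; \
  autossh -M0 proxyuser@armnleg.martyn.berlin -i /tmp/autossh \
    -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
    -o ServerAliveInterval=30 -o ExitOnForwardFailure=yes -o ServerAliveCountMax=3 -T -N \
    -R0.0.0.0:8080:ingress-nginx-controller.cluster-ingress.svc.cluster.local.:80 \
    -R0.0.0.0:8443:ingress-nginx-controller.cluster-ingress.svc.cluster.local.:443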

@@ -0,0 +1,10 @@
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
name: app-secrets
spec:
provider: 1password
parameters:
secrets: |
- resourceName: "vaults/3oh5jxmxvqvpuimu2lbuajtizi/allitems/gd24dzcpub5vmuscsvavnyu6cm/private key"
path: "keyfile"

@@ -0,0 +1,87 @@
# Default values for sshtunnel.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
tunnel:
user: proxyuser
sshHost: armnleg.martyn.berlin
sshOptions:
- -o UserKnownHostsFile=/dev/null
- -o StrictHostKeyChecking=no
- -o ServerAliveInterval=30
- -o ExitOnForwardFailure=yes
- -o ServerAliveCountMax=3
- -T
- -N
ports:
- name: http
source: 8080
dest: 80
bindIP: 0.0.0.0
destsvc: ingress-nginx-controller
destns: cluster-ingress
- name: https
source: 8443
dest: 443
bindIP: 0.0.0.0
destsvc: ingress-nginx-controller
destns: cluster-ingress
image:
repository: jnovack/autossh
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

@@ -1,6 +1,6 @@
 {{- if .Values.ingress.enabled -}}
 {{- $fullName := include "wg-access-server.fullname" . -}}
-apiVersion: networking.k8s.io/v1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
   name: {{ $fullName }}
@@ -11,9 +6,6 @@ metadata:
     {{- toYaml . | nindent 4 }}
   {{- end }}
 spec:
-  {{- if .Values.ingress.ingressClassName }}
-  ingressClassName: {{ .Values.ingress.ingressClassName }}
-  {{- end }}
   {{- if .Values.ingress.tls }}
   tls:
   {{- range .Values.ingress.tls }}
@@ -21,22 +18,17 @@ spec:
     {{- range .hosts }}
       - {{ . | quote }}
     {{- end }}
-    {{- if .secretName }}
     secretName: {{ .secretName }}
     {{- end }}
   {{- end }}
-  {{- end }}
   rules:
   {{- range .Values.ingress.hosts }}
     - host: {{ . | quote }}
      http:
        paths:
          - path: /
-            pathType: Prefix
            backend:
-              service:
-                name: {{ $fullName }}-web
-                port:
-                  number: 80
+              serviceName: {{ $fullName }}-web
+              servicePort: 80
 {{- end }}
 {{- end }}

@@ -1,6 +1,8 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "1"
   labels:
     app.kubernetes.io/component: connect
     app.kubernetes.io/instance: 1password-connect
@@ -44,7 +46,7 @@ spec:
           value: "8080"
         - name: OP_LOG_LEVEL
           value: info
-        image: 1password/connect-api:1.7.3
+        image: 1password/connect-api:1.7.2
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 3
@@ -90,7 +92,7 @@ spec:
           value: localhost:11220
         - name: OP_LOG_LEVEL
           value: info
-        image: 1password/connect-sync:1.7.3
+        image: 1password/connect-sync:1.7.2
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 3

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: backup-creds
data:
accesskey: UUtJNkVONFJQNjhJWTJKQko1R1U=
secretkey: bzVSN2QxK2pxNkJScHF5Ri9GUGdhRm5XZXdKU3YxNjE1SWYzMHl0Vw==
type: Opaque

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: backup-repo
data:
password: MTkyNzY5MTJlNGJiOTQyZmU1MGE3MGEyNjlhNjA1NmE=
type: Opaque

@@ -1,27 +0,0 @@
apiVersion: k8up.io/v1
kind: Schedule
metadata:
name: backup-actual
spec:
backend:
repoPasswordSecretRef:
key: password
name: backup-repo
s3:
accessKeyIDSecretRef:
key: accesskey
name: backup-creds
bucket: k3sup-backups-armnleg
endpoint: http://s3.s3:8333
secretAccessKeySecretRef:
key: secretkey
name: backup-creds
backup:
failedJobsHistoryLimit: 2
schedule: 3 5 * * *
successfulJobsHistoryLimit: 2
prune:
retention:
keepDaily: 14
keepLast: 5
schedule: 0 1 * * 0
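
Decoded, the two cron expressions are:

  3 5 * * *   -> backup every day at 05:03
  0 1 * * 0   -> prune every Sunday at 01:00 (keeping the last 5 snapshots and 14 daily ones)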

@@ -1,60 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: actual
name: actual
spec:
replicas: 1
selector:
matchLabels:
app: actual
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
app: actual
spec:
enableServiceLinks: false
containers:
- env:
- name: ACTUAL_OPENID_CLIENT_ID
value: "92c919af43ed3d82a9cfd281f7e16355"
- name: ACTUAL_OPENID_CLIENT_SECRET
value: "6a98e994d036d7c3fe9fecfe918aac89b072be8747cd6952dc1328c63aee9925"
- name: ACTUAL_OPENID_SERVER_HOSTNAME
value: "https://auth.martyn.berlin"
- name: ACTUAL_OPENID_AUTHORIZATION_ENDPOINT
value: "https://auth.martyn.berlin/oauth2/outhorize"
- name: ACTUAL_OPENID_TOKEN_ENDPOINT
value: "https://auth.martyn.berlin/oauth2/token"
- name: ACTUAL_OPENID_USERINFO_ENDPOINT
value: "https://auth.martyn.berlin/oauth2/userinfo"
- name: ACTUAL_OPENID_AUTH_METHOD
value: "oauth2"
image: docker.io/actualbudget/actual-server:25.9.0
imagePullPolicy: IfNotPresent
name: actual
ports:
- containerPort: 5006
name: http
protocol: TCP
resources:
requests:
cpu: 25m
memory: "920733364"
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data
name: data
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: data
persistentVolumeClaim:
claimName: actual-data

@@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/target: armnleg.martyn.berlin
nginx.ingress.kubernetes.io/auth-signin: https://homeauth.martyn.berlin/oauth2/start?rd=https://$host$escaped_request_uri
nginx.ingress.kubernetes.io/auth-url: https://homeauth.martyn.berlin/oauth2/auth
labels:
app.kubernetes.io/name: actual
name: actual
spec:
ingressClassName: nginx
rules:
- host: actual.martyn.berlin
http:
paths:
- backend:
service:
name: actual
port:
number: 5006
path: /
pathType: Prefix
tls:
- hosts:
- actual.martyn.berlin

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: actual
name: actual
spec:
ports:
- port: 80
targetPort: 5006
selector:
app: actual
type: LoadBalancer

@@ -1,19 +0,0 @@
apiVersion: v1
data:
APPSMITH_DB_URL: |
mongodb+srv://root:password@appsmith-mongodb.appsmith.svc.cluster.local/appsmith?retryWrites=true&authSource=admin&ssl=false
APPSMITH_DISABLE_IFRAME_WIDGET_SANDBOX: "false"
APPSMITH_KEYCLOAK_DB_DRIVER: postgresql
APPSMITH_KEYCLOAK_DB_PASSWORD: password
APPSMITH_KEYCLOAK_DB_URL: appsmith-postgresql.appsmith.svc.cluster.local:5432/keycloak
APPSMITH_KEYCLOAK_DB_USERNAME: root
APPSMITH_REDIS_URL: redis://appsmith-redis-master.appsmith.svc.cluster.local:6379
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: appsmith
appsmith.sh/chart: appsmith-3.6.4
name: appsmith-appsmith
namespace: appsmith

@@ -1,31 +0,0 @@
apiVersion: v1
data:
ping-mongodb.sh: |
#!/bin/bash
mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval "db.adminCommand('ping')"
readiness-probe.sh: |
#!/bin/bash
# Run the proper check depending on the version
[[ $(mongod -version | grep "db version") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]}
. /opt/bitnami/scripts/libversion.sh
VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)"
VERSION_MINOR="$(get_sematic_version "$VERSION" 2)"
VERSION_PATCH="$(get_sematic_version "$VERSION" 3)"
if [[ "$VERSION_MAJOR" -ge 5 ]] || [[ "$VERSION_MAJOR" -ge 4 ]] && [[ "$VERSION_MINOR" -ge 4 ]] && [[ "$VERSION_PATCH" -ge 2 ]]; then
mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
else
mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.isMaster().ismaster || db.isMaster().secondary' | grep -q 'true'
fi
startup-probe.sh: |
#!/bin/bash
mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb-common-scripts
namespace: appsmith

@@ -1,93 +0,0 @@
apiVersion: v1
data:
setup-hidden.sh: |-
#!/bin/bash
. /opt/bitnami/scripts/mongodb-env.sh
echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
echo "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER"
echo "Configuring node as a hidden node"
export MONGODB_REPLICA_SET_MODE="hidden"
export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
export MONGODB_ROOT_PASSWORD=""
export MONGODB_EXTRA_USERNAMES=""
export MONGODB_EXTRA_DATABASES=""
export MONGODB_EXTRA_PASSWORDS=""
export MONGODB_ROOT_PASSWORD_FILE=""
export MONGODB_EXTRA_USERNAMES_FILE=""
export MONGODB_EXTRA_DATABASES_FILE=""
export MONGODB_EXTRA_PASSWORDS_FILE=""
exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
setup.sh: |-
#!/bin/bash
. /opt/bitnami/scripts/mongodb-env.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libvalidations.sh
if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then
export MONGODB_ADVERTISED_PORT_NUMBER="$MONGODB_PORT_NUMBER"
fi
info "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
info "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER"
# Check for existing replica set in case there is no data in the PVC
# This is for cases where the PVC is lost or for MongoDB caches without
# persistence
current_primary=""
if is_dir_empty "${MONGODB_DATA_DIR}/db"; then
info "Data dir empty, checking if the replica set already exists"
current_primary=$(mongosh admin --host "appsmith-mongodb-0.appsmith-mongodb-headless.appsmith.svc.cluster.local:27017,appsmith-mongodb-1.appsmith-mongodb-headless.appsmith.svc.cluster.local:27017" --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD --eval 'db.runCommand("ismaster")' | awk -F\' '/primary/ {print $2}')
if ! is_empty_value "$current_primary"; then
info "Detected existing primary: ${current_primary}"
fi
fi
if ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" == "$current_primary" ]]; then
info "Advertised name matches current primary, configuring node as a primary"
export MONGODB_REPLICA_SET_MODE="primary"
elif ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" != "$current_primary" ]]; then
info "Current primary is different from this node. Configuring the node as replica of ${current_primary}"
export MONGODB_REPLICA_SET_MODE="secondary"
export MONGODB_INITIAL_PRIMARY_HOST="${current_primary%:*}"
export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="${current_primary#*:}"
export MONGODB_SET_SECONDARY_OK="yes"
elif [[ "$MY_POD_NAME" = "appsmith-mongodb-0" ]]; then
info "Pod name matches initial primary pod name, configuring node as a primary"
export MONGODB_REPLICA_SET_MODE="primary"
else
info "Pod name doesn't match initial primary pod name, configuring node as a secondary"
export MONGODB_REPLICA_SET_MODE="secondary"
export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
fi
if [[ "$MONGODB_REPLICA_SET_MODE" == "secondary" ]]; then
export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
export MONGODB_ROOT_PASSWORD=""
export MONGODB_EXTRA_USERNAMES=""
export MONGODB_EXTRA_DATABASES=""
export MONGODB_EXTRA_PASSWORDS=""
export MONGODB_ROOT_PASSWORD_FILE=""
export MONGODB_EXTRA_USERNAMES_FILE=""
export MONGODB_EXTRA_DATABASES_FILE=""
export MONGODB_EXTRA_PASSWORDS_FILE=""
fi
exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb-scripts
namespace: appsmith

@@ -1,27 +0,0 @@
apiVersion: v1
data:
master.conf: |-
dir /data
# User-supplied master configuration:
# End of master configuration
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
# End of replica configuration
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-configuration
namespace: appsmith

@@ -1,75 +0,0 @@
apiVersion: v1
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--protected-mode" "no")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--protected-mode" "no")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-scripts
namespace: appsmith

@@ -1,23 +0,0 @@
apiVersion: v1
data:
ca.crt: |
-----BEGIN CERTIFICATE-----
MIIBijCCAS+gAwIBAgIQJGb95Oq4oJVojUzKHTaK2jAKBggqhkjOPQQDAjAVMRMw
EQYDVQQKEwprdWJlcm5ldGVzMB4XDTIzMTExMDEyMzQwMloXDTMzMTEwNzEyMzQw
MlowFTETMBEGA1UEChMKa3ViZXJuZXRlczBZMBMGByqGSM49AgEGCCqGSM49AwEH
A0IABGDaf5TlkdHoEjnsi1QnUtmw+eRDyQyYBTkDYnmHUg5z6uj5DqTEAxw5oXtn
yhQNGHLzFHBU87NKnRYMS3lpOsujYTBfMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUE
FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQUSLS2yUT1+1+sJtLJX79TDA5Pm48wCgYIKoZIzj0EAwIDSQAwRgIhAMeG6Nph
Sm/EN00cMgACe8OGH19nwPXu17iq7krizxYoAiEA4D6hkPQ1L6kOijW9wWS5G164
Ks7z8KPjG6LXEfACTZk=
-----END CERTIFICATE-----
kind: ConfigMap
metadata:
annotations:
kubernetes.io/description: Contains a CA bundle that can be used to verify the
kube-apiserver when using internal endpoints such as the internal service IP
or kubernetes.default.svc. No other usage is guaranteed across distributions
of Kubernetes clusters.
name: kube-root-ca.crt
namespace: appsmith

@@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/target: armnleg.martyn.berlin
nginx.ingress.kubernetes.io/app-root: /app/karaoke/home-68b9fa05cb66516b642152fa
nginx.ingress.kubernetes.io/auth-signin: https://homeauth.martyn.berlin/oauth2/start?rd=https://$host$escaped_request_uri
nginx.ingress.kubernetes.io/auth-url: https://homeauth.martyn.berlin/oauth2/auth
name: appsmith-karaokelist
namespace: appsmith
spec:
ingressClassName: nginx
rules:
- host: karaokelist.martyn.berlin
http:
paths:
- backend:
service:
name: appsmith-appsmith
port:
number: 80
path: /
pathType: Prefix
tls:
- hosts:
- karaokelist.martyn.berlin

@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: appsmith
appsmith.sh/chart: appsmith-3.6.4
name: appsmith-appsmith-headless
namespace: appsmith
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 8080
selector:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: appsmith

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Service
metadata:
annotations:
metallb.io/ip-allocated-from-pool: arm
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: appsmith
appsmith.sh/chart: appsmith-3.6.4
name: appsmith-appsmith
namespace: appsmith
spec:
clusterIP: 10.97.168.186
clusterIPs:
- 10.97.168.186
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: appsmith
nodePort: 31541
port: 80
targetPort: http
selector:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: appsmith
type: LoadBalancer

@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb-arbiter-headless
namespace: appsmith
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-mongodb
port: 27017
targetPort: mongodb
selector:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb

@@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb
namespace: appsmith
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: mongodb
port: 27017
targetPort: mongodb
publishNotReadyAddresses: true
selector:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-11.9.5
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: appsmith-postgresql-hl
namespace: appsmith
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
publishNotReadyAddresses: true
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: postgresql

@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-11.9.5
name: appsmith-postgresql
namespace: appsmith
spec:
clusterIP: 10.100.117.209
clusterIPs:
- 10.100.117.209
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: postgresql

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-headless
namespace: appsmith
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis

@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: master
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-master
namespace: appsmith
spec:
clusterIP: 10.96.82.177
clusterIPs:
- 10.96.82.177
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/component: master
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis

@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-replicas
namespace: appsmith
spec:
clusterIP: 10.96.107.137
clusterIPs:
- 10.96.107.137
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: appsmith
appsmith.sh/chart: appsmith-3.6.4
name: appsmith-appsmith
namespace: appsmith
secrets:
- name: appsmith-appsmith

@@ -1,13 +0,0 @@
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb
namespace: appsmith
secrets:
- name: appsmith-mongodb

@@ -1,11 +0,0 @@
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis
namespace: appsmith

@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: default
namespace: appsmith

@@ -1,146 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: appsmith
appsmith.sh/chart: appsmith-3.6.4
name: appsmith-appsmith
namespace: appsmith
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: appsmith
serviceName: appsmith-appsmith
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: appsmith
spec:
containers:
- env:
- name: APPSMITH_ENABLE_EMBEDDED_DB
value: "0"
- name: JGROUPS_DISCOVERY_PROTOCOL
value: kubernetes.KUBE_PING
- name: APPSMITH_HEADLESS_SVC
value: appsmith-appsmith-headless
envFrom:
- configMapRef:
name: appsmith-appsmith
image: index.docker.io/appsmith/appsmith-ee:latest
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/v1/health
port: 80
scheme: HTTP
periodSeconds: 60
successThreshold: 1
timeoutSeconds: 1
name: appsmith
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 2019
name: metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/v1/health
port: 80
scheme: HTTP
periodSeconds: 60
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 500m
memory: 3000Mi
startupProbe:
failureThreshold: 3
httpGet:
path: /api/v1/health
port: 80
scheme: HTTP
periodSeconds: 60
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /appsmith-stacks
name: data
dnsPolicy: ClusterFirst
initContainers:
- command:
- sh
- -c
- until redis-cli -h appsmith-redis-master.appsmith.svc.cluster.local ping
; do echo waiting for redis; sleep 2; done
image: docker.io/redis:7.0.15
imagePullPolicy: IfNotPresent
name: redis-init-container
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
- command:
- sh
- -c
- until mongosh --host appsmith-mongodb.appsmith.svc.cluster.local --eval
'db.runCommand({ping:1})' ; do echo waiting for mongo; sleep 2; done
image: docker.io/bitnamilegacy/mongodb:6.0.13
imagePullPolicy: IfNotPresent
name: mongo-init-container
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
- command:
- sh
- -c
- until pg_isready -U $postgresuser -d $postgresdb -h appsmith-postgresql.appsmith.svc.cluster.local;
do echo waiting for postgresql; sleep 2; done
image: docker.io/bitnamilegacy/postgresql:14.5.0-debian-11-r21
imagePullPolicy: IfNotPresent
name: psql-init-container
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
restartPolicy: Always
schedulerName: default-scheduler
serviceAccount: appsmith-appsmith
serviceAccountName: appsmith-appsmith
terminationGracePeriodSeconds: 30
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn-fast
volumeMode: Filesystem
status:
phase: Pending
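
A side note on the psql-init-container above: it references $postgresuser and $postgresdb, but the init container defines no env or envFrom, so both expand empty at runtime and the unquoted expansions vanish from the command line (-U then swallows the next token). A minimal sketch of a fix, assuming the appsmith-appsmith ConfigMap carries those keys:

      - command:
        - sh
        - -c
        - until pg_isready -U "$postgresuser" -d "$postgresdb" -h appsmith-postgresql.appsmith.svc.cluster.local;
          do echo waiting for postgresql; sleep 2; done
        envFrom:
        - configMapRef:
            name: appsmith-appsmith   # assumption: provides postgresuser/postgresdb
        image: docker.io/bitnamilegacy/postgresql:14.5.0-debian-11-r21
        name: psql-init-container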

View file

@ -1,127 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb-arbiter
namespace: appsmith
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb
serviceName: appsmith-mongodb-arbiter-headless
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/component: arbiter
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb
namespaces:
- appsmith
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: appsmith-mongodb-arbiter-headless
- name: MONGODB_REPLICA_SET_MODE
value: arbiter
- name: MONGODB_INITIAL_PRIMARY_HOST
value: appsmith-mongodb-0.appsmith-mongodb.$(MY_POD_NAMESPACE).svc.cluster.local
- name: MONGODB_REPLICA_SET_NAME
value: rs0
- name: MONGODB_ADVERTISED_HOSTNAME
value: $(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
- name: MONGODB_PORT_NUMBER
value: "27017"
- name: MONGODB_INITIAL_PRIMARY_ROOT_USER
value: root
- name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: mongodb-root-password
name: appsmith-mongodb
- name: MONGODB_REPLICA_SET_KEY
valueFrom:
secretKeyRef:
key: mongodb-replica-set-key
name: appsmith-mongodb
- name: ALLOW_EMPTY_PASSWORD
value: "no"
image: docker.io/bitnamilegacy/mongodb:6.0.13
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 20
successThreshold: 1
tcpSocket:
port: mongodb
timeoutSeconds: 10
name: mongodb-arbiter
ports:
- containerPort: 27017
name: mongodb
protocol: TCP
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 20
successThreshold: 1
tcpSocket:
port: mongodb
timeoutSeconds: 10
securityContext:
runAsNonRoot: true
runAsUser: 1001
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
nodeSelector:
feature.node.kubernetes.io/cpu-cpuid.AVX: "true"
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 1001
serviceAccount: appsmith-mongodb
serviceAccountName: appsmith-mongodb
terminationGracePeriodSeconds: 30
updateStrategy:
type: RollingUpdate

View file

@ -1,185 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
name: appsmith-mongodb
namespace: appsmith
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb
serviceName: appsmith-mongodb
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.16
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: feature.node.kubernetes.io/cpu-cpuid.AVX
operator: In
values:
- "true"
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: mongodb
namespaces:
- appsmith
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- command:
- /scripts/setup.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: MY_POD_HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: K8S_SERVICE_NAME
value: appsmith-mongodb
- name: MONGODB_INITIAL_PRIMARY_HOST
value: appsmith-mongodb-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
- name: MONGODB_REPLICA_SET_NAME
value: rs0
- name: MONGODB_ADVERTISED_HOSTNAME
value: $(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
- name: MONGODB_ROOT_USER
value: root
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: mongodb-root-password
name: appsmith-mongodb
- name: MONGODB_REPLICA_SET_KEY
valueFrom:
secretKeyRef:
key: mongodb-replica-set-key
name: appsmith-mongodb
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: "0"
- name: MONGODB_DISABLE_SYSTEM_LOG
value: "no"
- name: MONGODB_DISABLE_JAVASCRIPT
value: "no"
- name: MONGODB_ENABLE_JOURNAL
value: "yes"
- name: MONGODB_PORT_NUMBER
value: "27017"
- name: MONGODB_ENABLE_IPV6
value: "no"
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: "no"
image: docker.io/bitnamilegacy/mongodb:6.0.13
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /bitnami/scripts/ping-mongodb.sh
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 20
successThreshold: 1
timeoutSeconds: 10
name: mongodb
ports:
- containerPort: 27017
name: mongodb
protocol: TCP
readinessProbe:
exec:
command:
- /bitnami/scripts/readiness-probe.sh
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
runAsNonRoot: true
runAsUser: 1001
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /bitnami/mongodb
name: datadir
- mountPath: /bitnami/scripts
name: common-scripts
- mountPath: /scripts/setup.sh
name: scripts
subPath: setup.sh
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 1001
serviceAccount: appsmith-mongodb
serviceAccountName: appsmith-mongodb
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 360
name: appsmith-mongodb-common-scripts
name: common-scripts
- configMap:
defaultMode: 493
name: appsmith-mongodb-scripts
name: scripts
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
name: datadir
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
storageClassName: longhorn-fast
volumeMode: Filesystem
status:
phase: Pending

View file

@ -1,171 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
name: appsmith-redis-replicas
namespace: appsmith
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis
serviceName: appsmith-redis-headless
template:
metadata:
annotations:
checksum/configmap: 200ea504d6238919d0d307d6a9532731dc31f75b892c7c3aa5c0ac0e6d33989f
checksum/health: 773ea337f77df0444ffc318b00d3e78d6dab5a70ff1a5ba67bf18d98ad459fa9
checksum/scripts: c8705e5003141308d45cbfb90346393c427657644ab7aea26bcb88fff444a244
checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
creationTimestamp: null
labels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-16.11.2
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis
namespaces:
- appsmith
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
command:
- /bin/bash
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: appsmith-redis-master-0.appsmith-redis-headless.appsmith.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "yes"
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
image: docker.io/redis:7.0.15
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 6
name: redis
ports:
- containerPort: 6379
name: redis
protocol: TCP
readinessProbe:
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
securityContext:
runAsUser: 1001
startupProbe:
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: redis
timeoutSeconds: 5
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /opt/bitnami/scripts/start-scripts
name: start-scripts
- mountPath: /health
name: health
- mountPath: /data
name: redis-data
- mountPath: /opt/bitnami/redis/mounted-etc
name: config
- mountPath: /opt/bitnami/redis/etc
name: redis-tmp-conf
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 1001
serviceAccount: appsmith-redis
serviceAccountName: appsmith-redis
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 493
name: appsmith-redis-scripts
name: start-scripts
- configMap:
defaultMode: 493
name: appsmith-redis-health
name: health
- configMap:
defaultMode: 420
name: appsmith-redis-configuration
name: config
- name: redis-tmp-conf
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: replica
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: redis
name: redis-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
storageClassName: longhorn-fast
volumeMode: Filesystem
status:
phase: Pending

View file

@ -24,11 +24,3 @@ patches:
     version: v1
     kind: ConfigMap
     name: argocd-cm
-- patch: |-
-    - op: add
-      path: /spec/template/spec/containers/0/args/-
-      value: --insecure=true
-  target:
-    kind: Deployment
-    name: argocd-server

View file

@ -18,7 +18,7 @@ spec:
         app: bazarr
     spec:
       containers:
-      - image: ghcr.io/hotio/bazarr:release
+      - image: hotio/bazarr:release
         name: bazarr
         ports:
         - name: http
@ -33,6 +33,8 @@ spec:
           mountPath: /config
         - name: series
           mountPath: /series
+        - name: oldseries
+          mountPath: /oldseries
         - name: films
           mountPath: /films
       volumes:
@ -42,6 +44,9 @@ spec:
       - name: series
         persistentVolumeClaim:
           claimName: smb-series
+      - name: oldseries
+        persistentVolumeClaim:
+          claimName: smb-oldseries
       - name: films
         persistentVolumeClaim:
           claimName: smb-films

View file

@ -12,6 +12,18 @@ spec:
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: smb-oldseries
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: smb-oldseries
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: smb-films
 spec:

View file

@ -1,27 +0,0 @@
apiVersion: k8up.io/v1
kind: Schedule
metadata:
name: backup-blog
spec:
backend:
repoPasswordSecretRef:
key: password
name: backup-repo
s3:
accessKeyIDSecretRef:
key: accesskey
name: backup-credentials
bucket: armnlegback
endpoint: https://hel1.your-objectstorage.com
secretAccessKeySecretRef:
key: secretkey
name: backup-credentials
backup:
schedule: '3 5 * * *'
failedJobsHistoryLimit: 2
successfulJobsHistoryLimit: 2
prune:
schedule: '0 1 * * 0'
retention:
keepLast: 5
keepDaily: 14
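
For quick reference, the two cron expressions above decode as follows (standard five-field cron: minute, hour, day-of-month, month, day-of-week):

    '3 5 * * *'   # backup: every day at 05:03
    '0 1 * * 0'   # prune:  every Sunday at 01:00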

View file

@ -0,0 +1,10 @@
apiVersion: mysql.oracle.com/v2
kind: InnoDBCluster
metadata:
name: writefreelydb
spec:
secretName: blogdbrootpass
tlsUseSelfSigned: true
instances: 1
router:
instances: 0
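
The secretName above points at a Secret the MySQL operator reads root credentials from. A minimal sketch, assuming the conventional rootUser/rootHost/rootPassword keys the operator documents (values are placeholders):

    apiVersion: v1
    kind: Secret
    metadata:
      name: blogdbrootpass
    stringData:
      rootUser: root
      rootHost: "%"                     # allow connections from any host
      rootPassword: "<strong-password>"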

View file

@ -1,12 +0,0 @@
apiVersion: v1
data:
LOCAL_CALENDAR_NAME: calendar
LOCAL_PASSWORD: none
LOCAL_URL: http://xandikos.xandikos.svc.cluster.local./dav/
LOCAL_USERNAME: martyn
SYNC_EVERY: "2 minutes"
KEEP_LOCAL: "true"
kind: ConfigMap
metadata:
creationTimestamp: null
name: env-configmap

View file

@ -1,29 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: calsync
name: calsync
spec:
replicas: 1
selector:
matchLabels:
app: calsync
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: calsync
spec:
containers:
- envFrom:
- configMapRef:
name: env-configmap
- secretRef:
name: env-secrets
image: przemub/ics_caldav_sync:v1.1
name: ics-caldav-sync-9b9hc
resources: {}
status: {}

View file

@ -1,7 +0,0 @@
apiVersion: v1
data:
REMOTE_URL: aHR0cHM6Ly9jYWxlbmRhci5nb29nbGUuY29tL2NhbGVuZGFyL2ljYWwvcmFueWFyZG0lNDBnbWFpbC5jb20vcHJpdmF0ZS05Mjc1ODIwMzg4ZTEzYjNhZTk2YjgwYWE2NThlOGU2NC9iYXNpYy5pY3M=
kind: Secret
metadata:
creationTimestamp: null
name: env-secrets

View file

@ -0,0 +1,24 @@
apiVersion: v1
data:
cloudtube.config.js: |
module.exports = {
/*
Copy this file to `config.js`, and add options here.
They'll override the options from `utils/constants.js`.
For example, the next block changes the default instance.
*/
user_settings: {
instance: {
default: "http://newleaf.martyn.berlin"
}
},
/*
If cloudtube and Newleaf are not in the same docker network, you also need to set local_instance_origin.
*/
server_setup: {
local_instance_origin: "http://newleaf.cloudtube.svc.cluster.local:3000"
}
}
kind: ConfigMap
metadata:
name: cloudtube

View file

@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: cloudtube
name: cloudtube
spec:
replicas: 1
selector:
matchLabels:
app: cloudtube
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: cloudtube
spec:
containers:
- image: abeltramo/cloudtube:be33a66
imagePullPolicy: Always
name: cloudtube
resources: {}
volumeMounts:
- mountPath: /workdir/db
name: db
- mountPath: /workdir/config/config.js
name: config
subPath: cloudtube.config.js
volumes:
- configMap:
defaultMode: 420
items:
- key: cloudtube.config.js
path: cloudtube.config.js
name: cloudtube
name: config
- name: db
persistentVolumeClaim:
claimName: cloudtube-db-data
status: {}

View file

@ -1,7 +1,7 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: siyuan-data
+  name: cloudtube-db-data
 spec:
   accessModes:
   - ReadWriteOnce

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: cloudtube
app.kubernetes.io/instance: cloudtube
name: cloudtube
annotations:
external-dns.alpha.kubernetes.io/hostname: cloudtube.martyn.berlin
spec:
ports:
- targetPort: 10412
port: 80
selector:
app: cloudtube
type: LoadBalancer

View file

@ -17,7 +17,7 @@ spec:
       - env:
         - name: ESPHOME_DASHBOARD_USE_PING
           value: "true"
-        image: esphome/esphome:2025.11.0
+        image: esphome/esphome:2022.12.8
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 3

View file

@ -1,12 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
template:
spec:
containers:
- name: external-dns
envFrom:
- secretRef:
name: dnsimple-auth

View file

@ -3,16 +3,12 @@ kind: Kustomization
 helmCharts:
 - name: external-dns
-  repo: https://kubernetes-sigs.github.io/external-dns
-  version: 1.15.2
+  repo: https://charts.bitnami.com/bitnami
+  version: 6.28.4
   releaseName: external-dns
   namespace: external-dns
   valuesInline:
-    provider:
-      name: dnsimple
+    provider: dnsimple
     txtPrefix: armnleg
-patches:
-- path: deploy.yaml
-  target:
-    kind: Deployment
-    name: external-dns
+    sources: [service,ingress]
+    extraEnvVarsSecret: dnsimple-auth
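
Both chart variants feed DNSimple credentials in via the dnsimple-auth Secret (extraEnvVarsSecret on the bitnami side, the envFrom patch in the deleted deploy.yaml above on the kubernetes-sigs side). A minimal sketch of that Secret, assuming the DNSIMPLE_OAUTH variable external-dns's dnsimple provider reads (the token is a placeholder):

    apiVersion: v1
    kind: Secret
    metadata:
      name: dnsimple-auth
      namespace: external-dns
    stringData:
      DNSIMPLE_OAUTH: "<api-token>"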

View file

@ -0,0 +1,24 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mosquitto-config
data:
mosquitto.conf: |
persistence true
persistence_location /mosquitto/data/
log_dest stdout
password_file /mosquitto/data/auth
# MQTTS listener
listener 8883
protocol mqtt
cafile /etc/ssl/certs/ca-certificates.crt
keyfile /mosquitto/certs/tls.key
certfile /mosquitto/certs/tls.crt
# WS Listener
listener 9001
protocol websockets
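
One operational note: password_file /mosquitto/data/auth must exist before the broker starts, or mosquitto refuses to come up. A hypothetical init container for the Deployment below that seeds the file on first run (user and password are placeholders):

    initContainers:
    - name: seed-auth
      image: eclipse-mosquitto
      command:
      - sh
      - -c
      - '[ -f /mosquitto/data/auth ] || mosquitto_passwd -c -b /mosquitto/data/auth iot changeme'
      volumeMounts:
      - mountPath: /mosquitto/data/
        name: data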

View file

@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mosquitto
spec:
replicas: 1
selector:
matchLabels:
app: mosquitto
template:
metadata:
labels:
app: mosquitto
spec:
containers:
- name: mosquitto
image: eclipse-mosquitto
ports:
- containerPort: 8883
- containerPort: 9001
volumeMounts:
- mountPath: /mosquitto/config/
name: config
- mountPath: /mosquitto/certs/
name: certs
- mountPath: /mosquitto/data/
name: data
volumes:
- name: config
configMap:
name: mosquitto-config
- name: certs
secret:
secretName: mosquitto-certs
- name: data
persistentVolumeClaim:
claimName: mosquitto-data

View file

@ -0,0 +1,23 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mosquitto
annotations:
cert-manager.io/cluster-issuer: "letsencrypt"
external-dns.alpha.kubernetes.io/target: home.martyn.berlin
spec:
ingressClassName: nginx
tls:
- hosts:
- iot.martyn.berlin
rules:
- host: iot.martyn.berlin
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mosquitto-ws
port:
number: 9001
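
Note that the tls block lists a host but no secretName; cert-manager's ingress-shim needs one to know where to store the issued certificate, and without it nginx falls back to its default certificate. A minimal sketch of the likely intent (the secret name is hypothetical):

    tls:
    - hosts:
      - iot.martyn.berlin
      secretName: iot-martyn-berlin-tls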

View file

@ -1,11 +1,11 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: siyuan-data
+  name: mosquitto-data
 spec:
   accessModes:
   - ReadWriteOnce
   resources:
     requests:
-      storage: 10Gi
+      storage: 5Gi
   storageClassName: longhorn-fast

View file

@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
name: mosquitto-mqtts
spec:
type: LoadBalancer
selector:
app: mosquitto
ports:
- port: 8883
---
apiVersion: v1
kind: Service
metadata:
name: mosquitto-ws
spec:
type: ClusterIP
selector:
app: mosquitto
ports:
- port: 9001

View file

@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: faircamp-sites
data:
# property-like keys; each key maps to a simple value
faircamp_sites: "word-garage lycansong"

View file

@ -1,43 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: faircampbuilder
spec:
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
concurrencyPolicy: Forbid
schedule: "*/15 * * * *"
jobTemplate:
metadata:
labels:
app: faircampbuilder
spec:
template:
metadata:
labels:
app: faircampbuilder
spec:
containers:
- command:
- /faircampbuilder.bash
env:
- name: FAIRCAMP_SITES
valueFrom:
configMapKeyRef:
name: faircamp-sites
key: faircamp_sites
image: git.martyn.berlin/martyn/rclone-faircamp:latest
name: shell
volumeMounts:
- mountPath: /rclone-output
name: faircamp-data
- mountPath: /root/.config/rclone
name: rclone-config
volumes:
- name: faircamp-data
persistentVolumeClaim:
claimName: faircamp-data
- name: rclone-config
secret:
secretName: rclone-config
restartPolicy: Never

View file

@ -1,38 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: tester
spec:
replicas: 0
selector:
matchLabels:
app: tester
template:
metadata:
labels:
app: tester
spec:
containers:
- command:
- sleep
- "1000000"
env:
- name: FAIRCAMP_SITES
valueFrom:
configMapKeyRef:
name: faircamp-sites
key: faircamp_sites
image: git.martyn.berlin/martyn/rclone-faircamp:latest
name: shell
volumeMounts:
- mountPath: /rclone-output
name: faircamp-data
- mountPath: /root/.config/rclone
name: rclone-config
volumes:
- name: faircamp-data
persistentVolumeClaim:
claimName: faircamp-data
- name: rclone-config
secret:
secretName: rclone-config

View file

@ -1,36 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/target: armnleg.martyn.berlin
nginx.ingress.kubernetes.io/rewrite-target: "/lycansong/.faircamp_build/$1"
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/configuration-snippet: |
if ($uri ~ "^/(.*)/$") {
rewrite ^(.+)/$ $1 last;
}
if ($uri ~ "^\/$") {
rewrite ^ /lycansong/.faircamp_build/index.html break;
}
if ($uri !~ "^(.*)\.(.*)$") {
rewrite ^ /lycansong/.faircamp_build$uri/index.html break;
}
name: lycansong
spec:
ingressClassName: nginx
rules:
- host: lycansong.martyn.berlin
http:
paths:
- backend:
service:
name: s3
port:
number: 8333
path: /(.*)
pathType: ImplementationSpecific
tls:
- hosts:
- lycansong.martyn.berlin
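
Taken together, the configuration-snippet and rewrite-target above (repeated for word-garage below) map pretty URLs onto the faircamp build tree inside the bucket; roughly, assuming the rules fire as written:

    # /           -> /lycansong/.faircamp_build/index.html
    # /album/     -> /album   (trailing slash stripped, then re-matched)
    # /album      -> /lycansong/.faircamp_build/album/index.html
    # /style.css  -> /lycansong/.faircamp_build/style.css   (via rewrite-target)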

View file

@ -1,36 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/target: armnleg.martyn.berlin
nginx.ingress.kubernetes.io/rewrite-target: "/word-garage/.faircamp_build/$1"
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/configuration-snippet: |
if ($uri ~ "^/(.*)/$") {
rewrite ^(.+)/$ $1 last;
}
if ($uri ~ "^\/$") {
rewrite ^ /word-garage/.faircamp_build/index.html break;
}
if ($uri !~ "^(.*)\.(.*)$") {
rewrite ^ /word-garage/.faircamp_build$uri/index.html break;
}
name: word-garage
spec:
ingressClassName: nginx
rules:
- host: word-garage.martyn.berlin
http:
paths:
- backend:
service:
name: s3
port:
number: 8333
path: /(.*)
pathType: ImplementationSpecific
tls:
- hosts:
- word-garage.martyn.berlin

View file

@ -1,17 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: seaweedfs-buckets
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 1Gi
csi:
driver: seaweedfs-csi-driver
volumeAttributes:
collection: default
diskType: spinny
path: /buckets/
replication: "000"
volumeHandle: dfs-test

View file

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: seaweedfs-buckets
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
volumeName: seaweedfs-buckets

View file

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: faircamp-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn-fast

View file

@ -1,15 +0,0 @@
[word-garage]
type = s3
provider = SeaweedFS
access_key_id = T0UXLIN7NA775DO2B01O
secret_access_key = 2AW/8N3Wdsf3iIebdZKZTsGKHHiBWdM9kJyM+czB
endpoint = http://hp40l.lan:8333
acl = public-read
[lycansong]
type = s3
provider = SeaweedFS
access_key_id = T0UXLIN7NA775DO2B01O
secret_access_key = 2AW/8N3Wdsf3iIebdZKZTsGKHHiBWdM9kJyM+czB
endpoint = http://hp40l.lan:8333
acl = public-read

View file

@ -1,7 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: s3
spec:
externalName: hp40l.lan
type: ExternalName

Some files were not shown because too many files have changed in this diff.