more cleanup
Signed-off-by: Martyn Ranyard <m@rtyn.berlin>
parent 5243a834c2
commit cd23865169
22 changed files with 0 additions and 841 deletions
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,24 +0,0 @@
apiVersion: v2
name: sshtunnel
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "2.0.1"
@@ -1,178 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "13"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"autossh"},"name":"autossh","namespace":"cluster-ingress"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"autossh"}},"template":{"metadata":{"labels":{"app":"autossh"}},"spec":{"containers":[{"command":["/bin/sh","-c","cp /keyfile/autossh /tmp/; chmod 600 /tmp/autossh; autossh -M0 v4tov6@31.7.180.171 -R0.0.0.0:8080:192.168.1.11:80 -R0.0.0.0:8443:192.168.1.11:443 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -i /tmp/autossh -T -N"],"image":"jnovack/autossh","imagePullPolicy":"IfNotPresent","name":"autossh","volumeMounts":[{"mountPath":"/keyfile","name":"keyfile"}]}],"restartPolicy":"Always","volumes":[{"name":"keyfile","secret":{"defaultMode":256,"secretName":"autossh-keyfile"}}]}}}}
  creationTimestamp: "2020-07-26T16:27:16Z"
  generation: 61
  labels:
    app: autossh
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
        f:labels:
          .: {}
          f:app: {}
      f:spec:
        f:progressDeadlineSeconds: {}
        f:replicas: {}
        f:revisionHistoryLimit: {}
        f:selector:
          f:matchLabels:
            .: {}
            f:app: {}
        f:strategy:
          f:rollingUpdate:
            .: {}
            f:maxSurge: {}
            f:maxUnavailable: {}
          f:type: {}
        f:template:
          f:metadata:
            f:labels:
              .: {}
              f:app: {}
          f:spec:
            f:containers:
              k:{"name":"autossh"}:
                .: {}
                f:command: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:resources: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
                f:volumeMounts:
                  .: {}
                  k:{"mountPath":"/keyfile"}:
                    .: {}
                    f:mountPath: {}
                    f:name: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:terminationGracePeriodSeconds: {}
            f:volumes:
              .: {}
              k:{"name":"keyfile"}:
                .: {}
                f:name: {}
                f:secret:
                  .: {}
                  f:defaultMode: {}
                  f:secretName: {}
    manager: kubectl
    operation: Update
    time: "2021-07-04T17:59:23Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:deployment.kubernetes.io/revision: {}
      f:status:
        f:availableReplicas: {}
        f:conditions:
          .: {}
          k:{"type":"Available"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
          k:{"type":"Progressing"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
        f:observedGeneration: {}
        f:readyReplicas: {}
        f:replicas: {}
        f:updatedReplicas: {}
    manager: k3s
    operation: Update
    time: "2021-09-30T18:41:41Z"
  name: autossh
  namespace: cluster-ingress
  resourceVersion: "235469131"
  selfLink: /apis/apps/v1/namespaces/cluster-ingress/deployments/autossh
  uid: d9661f02-0b6c-4d1f-9c34-3a5ff586af2b
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: autossh
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: autossh
    spec:
      containers:
      - command:
        - /bin/sh
        - -c
        - cp /keyfile/autossh /tmp/; chmod 600 /tmp/autossh; autossh -M0 v4tov6@31.7.180.171
          -R0.0.0.0:8080:10.43.14.171:80 -R0.0.0.0:8443:10.43.14.171:443 -R0.0.0.0:2222:192.168.1.52:2222
          -R0.0.0.0:4422:192.168.1.111:22 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
          -o ServerAliveInterval=30 -o ExitOnForwardFailure=yes -o ServerAliveCountMax=3
          -i /tmp/autossh -T -N
        image: jnovack/autossh
        imagePullPolicy: IfNotPresent
        name: autossh
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /keyfile
          name: keyfile
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: keyfile
        secret:
          defaultMode: 256
          secretName: autossh-keyfile
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2020-07-26T16:27:16Z"
    lastUpdateTime: "2021-07-04T17:59:25Z"
    message: ReplicaSet "autossh-7969f78dd8" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  - lastTransitionTime: "2021-09-30T18:41:41Z"
    lastUpdateTime: "2021-09-30T18:41:41Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  observedGeneration: 61
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "sshtunnel.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sshtunnel.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "sshtunnel.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "sshtunnel.labels" -}}
helm.sh/chart: {{ include "sshtunnel.chart" . }}
{{ include "sshtunnel.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "sshtunnel.selectorLabels" -}}
app.kubernetes.io/name: {{ include "sshtunnel.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "sshtunnel.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "sshtunnel.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
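For reference, a minimal sketch (not part of the commit) of what the sshtunnel.labels helper above renders to, assuming a Helm release named "sshtunnel" and the chart version / appVersion from the Chart.yaml above:

helm.sh/chart: sshtunnel-0.1.0
app.kubernetes.io/name: sshtunnel
app.kubernetes.io/instance: sshtunnel
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/managed-by: Helm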
@@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sshtunnel.fullname" . }}
  labels:
    {{- include "sshtunnel.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "sshtunnel.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
      {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "sshtunnel.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          command:
            - /bin/sh
            - -c
            - cp /keyfile/keyfile /tmp/autossh; chmod 600 /tmp/autossh; autossh -M0 {{.Values.tunnel.user}}@{{.Values.tunnel.sshHost}} -i /tmp/autossh
              {{- range .Values.tunnel.sshOptions}} {{.}} {{end}}
              {{- range .Values.tunnel.ports}} -R{{.bindIP}}:{{.source}}:{{.destsvc}}.{{.destns}}.svc.cluster.local.:{{.dest}} {{end}}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - mountPath: "/keyfile"
              name: keyfile
      volumes:
        - name: keyfile
          csi:
            driver: secrets-store.csi.k8s.io
            readOnly: true
            volumeAttributes:
              secretProviderClass: app-secrets
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,10 +0,0 @@
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: app-secrets
spec:
  provider: 1password
  parameters:
    secrets: |
      - resourceName: "vaults/3oh5jxmxvqvpuimu2lbuajtizi/allitems/gd24dzcpub5vmuscsvavnyu6cm/private key"
        path: "keyfile"
@@ -1,87 +0,0 @@
# Default values for sshtunnel.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

tunnel:
  user: proxyuser
  sshHost: armnleg.martyn.berlin
  sshOptions:
    - -o UserKnownHostsFile=/dev/null
    - -o StrictHostKeyChecking=no
    - -o ServerAliveInterval=30
    - -o ExitOnForwardFailure=yes
    - -o ServerAliveCountMax=3
    - -T
    - -N
  ports:
    - name: http
      source: 8080
      dest: 80
      bindIP: 0.0.0.0
      destsvc: ingress-nginx-controller
      destns: cluster-ingress
    - name: https
      source: 8443
      dest: 443
      bindIP: 8080
      destsvc: ingress-nginx-controller
      destns: cluster-ingress

image:
  repository: jnovack/autossh
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}
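For reference, a minimal sketch (not part of the commit) of the container command that the templates/deployment.yaml above produces with these default values; whitespace is approximate, since the template joins the ranged sshOptions and ports onto one shell string. Note that the second port entry's bindIP is 8080 in the values above, so it renders literally here:

command:
  - /bin/sh
  - -c
  - cp /keyfile/keyfile /tmp/autossh; chmod 600 /tmp/autossh; autossh -M0 proxyuser@armnleg.martyn.berlin -i /tmp/autossh
    -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ServerAliveInterval=30
    -o ExitOnForwardFailure=yes -o ServerAliveCountMax=3 -T -N
    -R0.0.0.0:8080:ingress-nginx-controller.cluster-ingress.svc.cluster.local.:80
    -R8080:8443:ingress-nginx-controller.cluster-ingress.svc.cluster.local.:443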
@@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: qbittorrent
  labels:
    app: qbittorrent
spec:
  replicas: 1
  selector:
    matchLabels:
      app: qbittorrent
  template:
    metadata:
      labels:
        app: qbittorrent
    spec:
      initContainers:
      containers:
      - image: qbittorrentofficial/qbittorrent-nox:latest
        name: qbittorrent
        volumeMounts:
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          name: kube-api-access-t4rzn
          readOnly: true
        - mountPath: /config
          name: config
        - mountPath: /downloads
          name: downloads
        env:
        - name: QBT_EULA
          value: "accept"
        - name: QBT_WEBUI_PORT
          value: "8080"
        - name: QBT_CONFIG_PATH
          value: "/config"
        - name: QBT_DOWNLOADS
          value: "/downloads"
      preemptionPolicy: PreemptLowerPriority
      priority: 0
      serviceAccountName: tailscale
      tolerations:
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
        tolerationSeconds: 300
      - effect: NoExecute
        key: node.kubernetes.io/unreachable
        operator: Exists
        tolerationSeconds: 300
      volumes:
      - name: downloads
        persistentVolumeClaim:
          claimName: smb-usenet
      - name: config
        persistentVolumeClaim:
          claimName: q-config
      - name: kube-api-access-t4rzn
        projected:
          sources:
          - serviceAccountToken:
              expirationSeconds: 3607
              path: token
          - configMap:
              items:
              - key: ca.crt
                path: ca.crt
              name: kube-root-ca.crt
          - downwardAPI:
              items:
              - fieldRef:
                  fieldPath: metadata.namespace
                path: namespace
@@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app: gluetun
  name: q-config
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: longhorn-fast
@@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: smb-usenet
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: smb-usenet
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    external-dns.alpha.kubernetes.io/hostname: qbittorrent.martyn.berlin
  labels:
    app: qbittorrent
    app.kubernetes.io/instance: torrents
  name: qbittorrent
spec:
  ports:
  - port: 8080
  selector:
    app: qbittorrent
  type: LoadBalancer
@@ -1 +0,0 @@
1.33.2
@@ -1,6 +0,0 @@
#!/bin/bash
filename=$(date '+%Y%m%d%H%M%S').tgz
for i in $(talosctl -n 192.168.1.26 list /var/mnt/storage/zigbee2mqtt | grep 192.168 | tail -n 4 | awk '{print $2}'); do talosctl -n 192.168.1.26 read /var/mnt/storage/zigbee2mqtt/$i >$i; done
tar czf ${filename} $(talosctl -n 192.168.1.26 list /var/mnt/storage/zigbee2mqtt | grep 192.168 | tail -n 4 | awk '{print $2}' | xargs echo)
rm $(talosctl -n 192.168.1.26 list /var/mnt/storage/zigbee2mqtt | grep 192.168 | tail -n 4 | awk '{print $2}' | xargs echo)
echo copy to https://minio.service.consul/browser/backups/emlnYmVlMm1xdHQv
@@ -1,80 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/instance: zigbee2mqtt
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: zigbee2mqtt
    app.kubernetes.io/version: 1.19.1
    helm.sh/chart: zigbee2mqtt-9.4.2
  name: zigbee2mqtt-settings
data:
  configuration.yaml: |
    advanced:
      homeassistant_discovery_topic: homeassistant
      homeassistant_status_topic: homeassistant/status
      last_seen: ISO_8601
      log_level: debug
      log_output:
        - console
      channel: 25
      network_key:
        - 140
        - 18
        - 129
        - 36
        - 87
        - 2
        - 242
        - 222
        - 178
        - 205
        - 177
        - 160
        - 9
        - 212
        - 115
        - 122
    experimental:
      new_api: true
    frontend:
      port: 8080
    homeassistant: true
    serial:
      port: 'tcp://ser2net.ser2net.svc.cluster.local:3001'
    mqtt:
      base_topic: zigbee2mqtt
      include_device_information: true
      server: 'mqtt://mosquitto.martyn.berlin'
    permit_join: false
    external_converters:
      - ledvanceA60S.js
  ledvanceA60S.js: |
    const fz = require('zigbee-herdsman-converters/converters/fromZigbee');
    const tz = require('zigbee-herdsman-converters/converters/toZigbee');
    const exposes = require('zigbee-herdsman-converters/lib/exposes');
    const reporting = require('zigbee-herdsman-converters/lib/reporting');
    const extend = require('zigbee-herdsman-converters/lib/extend');
    const ota = require('zigbee-herdsman-converters/lib/ota');
    const tuya = require('zigbee-herdsman-converters/lib/tuya');
    const e = exposes.presets;
    const ea = exposes.access;

    const definition = {
        zigbeeModel: ['A60S RGBW'],
        model: 'A60S RGBW',
        vendor: 'Ledvance',
        description: 'Ledvance Classic E27 Multicolor',
        // Note that fromZigbee, toZigbee and exposes are missing here since we use extend here.
        // Extend contains a default set of fromZigbee/toZigbee converters and expose for common device types.
        // The following extends are available:
        // - extend.switch
        // - extend.light_onoff_brightness
        // - extend.light_onoff_brightness_colortemp
        // - extend.light_onoff_brightness_color
        // - extend.light_onoff_brightness_colortemp_color
        extend: extend.light_onoff_brightness_colortemp_color(),
    };

    module.exports = definition;
@@ -1,95 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/instance: zigbee2mqtt
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: zigbee2mqtt
    app.kubernetes.io/version: 1.19.1
    helm.sh/chart: zigbee2mqtt-9.4.2
  annotations:
    configmap.reloader.stakater.com/reload: "zigbee2mqtt-settings"
  name: zigbee2mqtt
spec:
  replicas: 0
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app.kubernetes.io/instance: zigbee2mqtt
      app.kubernetes.io/name: zigbee2mqtt
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: zigbee2mqtt
        app.kubernetes.io/name: zigbee2mqtt
    spec:
      automountServiceAccountToken: true
      initContainers:
      - command:
        - /bin/sh
        - -c
        - cp /configs/ledvanceA60S.js /data/;
        image: alpine:latest
        imagePullPolicy: IfNotPresent
        name: copy
        volumeMounts:
        - mountPath: /data
          name: data
        - mountPath: /configs/
          name: zigbee2mqtt-settings
      containers:
      - env:
        - name: ZIGBEE2MQTT_DATA
          value: /data
        image: koenkk/zigbee2mqtt:1.35.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          initialDelaySeconds: 0
          periodSeconds: 10
          tcpSocket:
            port: 8080
          timeoutSeconds: 1
        name: zigbee2mqtt
        ports:
        - containerPort: 8080
          name: http
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          initialDelaySeconds: 0
          periodSeconds: 10
          tcpSocket:
            port: 8080
          timeoutSeconds: 1
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
            - SYS_ADMIN
          privileged: true
        startupProbe:
          failureThreshold: 30
          initialDelaySeconds: 0
          periodSeconds: 5
          tcpSocket:
            port: 8080
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /data
          name: data
      dnsPolicy: ClusterFirstWithHostNet
      enableServiceLinks: true
      hostNetwork: true
      serviceAccountName: default
      volumes:
      - configMap:
          name: zigbee2mqtt-settings
        name: zigbee2mqtt-settings
      - name: data
        persistentVolumeClaim:
          claimName: zigbee2mqtt
@@ -1,10 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: zigbee2mqtt

resources:
  - namespace.yaml
  - configmap.yaml
  - deploy.yaml
  - pvc.yaml
  - svc.yaml
@@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    name: zigbee2mqtt
  name: zigbee2mqtt
@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zigbee2mqtt
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-fast
  volumeName: pvc-ce3db1cb-42ef-4eb4-90e1-4bd212dfbf86
  resources:
    requests:
      storage: 128Mi
@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    external-dns.alpha.kubernetes.io/hostname: zigbee.martyn.berlin
  labels:
    app.kubernetes.io/instance: zigbee2mqtt
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: zigbee2mqtt
    app.kubernetes.io/version: 1.19.1
    helm.sh/chart: zigbee2mqtt-9.4.2
  name: zigbee2mqtt
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app.kubernetes.io/instance: zigbee2mqtt
    app.kubernetes.io/name: zigbee2mqtt
  type: LoadBalancer
@@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: sshtunnel
  namespace: argocd
spec:
  destination:
    namespace: cluster-ingress
    server: https://kubernetes.default.svc
  project: infra
  source:
    helm:
      parameters:
      - name: connect.server
        value: http://onepassword-connect.1password.svc.cluster.local:8080/
      - name: image.pullPolicy
        value: Always
    path: apps-helm/sshtunnel
    repoURL: https://git.martyn.berlin/martyn/infra4talos
    targetRevision: HEAD
@@ -1,14 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: torrents
  namespace: argocd
spec:
  destination:
    namespace: torr
    server: https://kubernetes.default.svc
  project: infra
  source:
    path: apps-kustomized/torrents
    repoURL: https://git.martyn.berlin/martyn/infra4talos
    targetRevision: HEAD
@@ -1,14 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: zigbee2mqtt
  namespace: argocd
spec:
  destination:
    namespace: zigbee2mqtt
    server: https://kubernetes.default.svc
  project: infra
  source:
    path: apps-kustomized/zigbee2mqtt
    repoURL: https://git.martyn.berlin/martyn/infra4talos
    targetRevision: HEAD