update to gotmpl

This commit is contained in:
Grant Hunter
2025-12-07 13:14:48 -07:00
parent 6aa777f880
commit 332f776c4f
13 changed files with 400 additions and 32 deletions

View File

@@ -0,0 +1,199 @@
alloy:
clustering:
enabled: true
configMap:
content: |-
logging {
level = "info"
format = "logfmt"
}
discovery.kubernetes "pods" {
role = "pod"
}
discovery.kubernetes "nodes" {
role = "node"
}
discovery.relabel "pods" {
targets = discovery.kubernetes.pods.targets
rule {
source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__meta_kubernetes_pod_container_name"]
separator = "/"
target_label = "deployment_name"
action = "replace"
}
}
loki.source.kubernetes "pods" {
targets = discovery.relabel.pods.output
forward_to = [loki.process.process.receiver]
}
loki.process "process" {
forward_to = [loki.write.loki.receiver]
stage.drop {
older_than = "1h"
drop_counter_reason = "too old"
}
stage.match {
selector = "{instance=~\".*\"}"
stage.json {
expressions = {
level = "\"level\"",
}
}
stage.labels {
values = {
level = "level",
}
}
}
stage.label_drop {
values = [ "job", "service_name" ]
}
}
loki.write "loki" {
endpoint {
url = "http://grafana-loki-distributor:3100/loki/api/v1/push"
}
}
discovery.relabel "metrics" {
targets = discovery.kubernetes.pods.targets
rule {
source_labels = ["__meta_kubernetes_pod_annotation_prometheus_io_port"]
target_label = "__meta_kubernetes_pod_container_port_number"
action = "keepequal"
}
rule {
source_labels = ["__meta_kubernetes_pod_container_port_number"]
regex = ""
action = "drop"
}
rule {
source_labels = ["__meta_kubernetes_pod_annotation_prometheus_io_path",]
target_label = "__metrics_path__"
separator = ""
action = "replace"
}
}
prometheus.scrape "metrics" {
clustering {
enabled = true
}
targets = discovery.relabel.metrics.output
forward_to = [prometheus.remote_write.metrics.receiver]
scrape_interval = "30s"
}
discovery.relabel "pods_metrics" {
targets = discovery.kubernetes.nodes.targets
rule {
replacement = "kubernetes.default.svc:443"
target_label = "__address__"
}
rule {
regex = "(.+)"
replacement = "/api/v1/nodes/$1/proxy/metrics/cadvisor"
source_labels = ["__meta_kubernetes_node_name"]
target_label = "__metrics_path__"
}
}
prometheus.scrape "pods_metrics" {
clustering {
enabled = true
}
targets = discovery.relabel.pods_metrics.output
job_name = "integrations/kubernetes/kubelet"
scheme = "https"
honor_labels = true
forward_to = [prometheus.remote_write.metrics.receiver]
bearer_token_file = "/run/secrets/kubernetes.io/serviceaccount/token"
tls_config {
insecure_skip_verify = true
server_name = "kubernetes"
}
scrape_interval = "30s"
}
prometheus.exporter.unix "os_metrics" { }
prometheus.scrape "os_metrics" {
clustering {
enabled = true
}
targets = prometheus.exporter.unix.os_metrics.targets
forward_to = [prometheus.remote_write.metrics.receiver]
scrape_interval = "30s"
}
discovery.kubernetes "kube_state_metrics" {
role = "endpoints"
selectors {
role = "endpoints"
label = "app.kubernetes.io/name=kube-state-metrics"
}
namespaces {
names = ["grafana"]
}
}
discovery.relabel "kube_state_metrics" {
targets = discovery.kubernetes.kube_state_metrics.targets
// only keep targets with a matching port name
rule {
source_labels = ["__meta_kubernetes_endpoint_port_name"]
regex = "http"
action = "keep"
}
rule {
action = "replace"
replacement = "kubernetes"
target_label = "source"
}
}
prometheus.scrape "kube_state_metrics" {
targets = discovery.relabel.kube_state_metrics.output
job_name = "integrations/kubernetes/kube-state-metrics"
scrape_interval = "30s"
scheme = "http"
bearer_token_file = ""
tls_config {
insecure_skip_verify = true
}
clustering {
enabled = true
}
forward_to = [prometheus.relabel.kube_state_metrics.receiver]
}
prometheus.relabel "kube_state_metrics" {
max_cache_size = 100000
rule {
source_labels = ["__name__"]
regex = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*"
action = "keep"
}
forward_to = [prometheus.remote_write.metrics.receiver]
}
prometheus.remote_write "metrics" {
endpoint {
url = "http://grafana-mimir-nginx/api/v1/push"
}
}
resources:
requests:
cpu: 1m
memory: 5Mi
limits:
cpu: 1
memory: 400Mi

View File

@@ -0,0 +1,19 @@
grafana:
ingress:
enabled: true
hosts:
- watcher.incngrnt.ca
annotations:
"traefik.ingress.kubernetes.io/router.tls.certresolver": "letsencrypt"
persistence:
enabled: true
mimir:
mimir:
structuredConfig:
limits:
compactor_blocks_retention_period: 2h
ingester:
persistentVolume:
size: 5Gi

View File

@@ -17,23 +17,13 @@ releases:
createNamespace: true
chart: traefik/traefik
values:
- ../traefik/values.yaml
setString:
- name: certificatesResolvers.letsencrypt.acme.email
value: {{ requiredEnv "ACME_EMAIL" }}
- name: extraObjects[0].stringData.password
value: {{ requiredEnv "TRAEFIK_ADMIN_PASSWORD" }}
- ../traefik/values.yaml.gotmpl
- name: tailscale-operator
namespace: tailscale
createNamespace: true
chart: tailscale/tailscale-operator
values:
- ../tailscale/values.yaml
setString:
- name: oauth.clientId
value: {{ requiredEnv "TAILSCALE_OAUTH_CLIENT_ID" }}
- name: oauth.clientSecret
value: {{ requiredEnv "TAILSCALE_OAUTH_CLIENT_SECRET" }}
- ../tailscale/values.yaml.gotmpl
# storage infrastructure
- name: rook-ceph
@@ -41,13 +31,13 @@ releases:
createNamespace: true
chart: rook-release/rook-ceph
values:
- ../rook-ceph/values.yaml
- ../rook-ceph/values.yaml.gotmpl
- name: rook-ceph-cluster
namespace: rook-ceph
createNamespace: true
chart: rook-release/rook-ceph-cluster
values:
- ../rook-ceph-cluster/values.yaml
- ../rook-ceph-cluster/values.yaml.gotmpl
set:
- name: operatorNamespace
value: rook-ceph
value: rook-ceph

View File

@@ -13,29 +13,19 @@ releases:
createNamespace: true
chart: crunchydata/pgo
values:
- ../postgres/operator-values.yaml
- ../postgres/operator-values.yaml.gotmpl
- name: postgres
namespace: datastore
createNamespace: true
chart: crunchydata/postgrescluster
values:
- ../postgres/values.yaml
setString:
- name: pgBackRestConfig.global.repo1-s3-key
value: '{{ requiredEnv "HETZNER_S3_ACCESS_KEY" }}'
- name: pgBackRestConfig.global.repo1-s3-key-secret
value: '{{ requiredEnv "HETZNER_S3_ACCESS_SECRET" }}'
- name: pgBackRestConfig.global.repo1-cipher-pass
value: '{{ requiredEnv "PG_BACKREST_PASSWORD" }}'
- ../postgres/values.yaml.gotmpl
- name: mariadb
namespace: datastore
createNamespace: true
chart: bitnami/mariadb
values:
- ../mariadb/values.yaml
setString:
- name: auth.rootPassword
value: {{ requiredEnv "MARIADB_ROOT_PASSWORD" }}
- ../mariadb/values.yaml.gotmpl
# backup infrastructure
- name: k8up
@@ -43,4 +33,4 @@ releases:
createNamespace: true
chart: k8up-io/k8up
values:
- ../k8up/values.yaml
- ../k8up/values.yaml.gotmpl

View File

@@ -40,7 +40,7 @@ releases:
createNamespace: true
chart: static-site/static-site
values:
- ../incngrnt-web/values.yaml
- ../incngrnt-web/values.yaml.gotmpl
# ghost blogs
- name: kgnot-ghost
namespace: ghost

View File

@@ -12,14 +12,14 @@ releases:
createNamespace: true
chart: grafana/lgtm-distributed
values:
- ../grafana/values.yaml
- ../grafana/values.yaml.gotmpl
- name: alloy
namespace: grafana
installed: false
createNamespace: true
chart: grafana/alloy
values:
- ../grafana/alloy_values.yaml
- ../grafana/alloy_values.yaml.gotmpl
- name: kube-state-metrics
namespace: grafana
installed: false

View File

@@ -0,0 +1,14 @@
init:
method: wget
wget:
url: https://git.incngrnt.ca/grant/incngrnt/releases/download/v0.0.8/v0.0.8.tar
ingress:
enabled: true
annotations:
"traefik.ingress.kubernetes.io/router.tls.certresolver": "letsencrypt"
hosts:
- host: incngrnt.ca
paths:
- path: /
pathType: ImplementationSpecific

4
k8up/values.yaml.gotmpl Normal file
View File

@@ -0,0 +1,4 @@
k8up:
envVars:
- name: BACKUP_GLOBAL_CONCURRENT_BACKUP_JOBS_LIMIT
value: "1"

View File

@@ -0,0 +1,16 @@
auth:
rootPassword: {{ requiredEnv "MARIADB_ROOT_PASSWORD" }}
persistence:
size: 5Gi
primary:
resources:
limits:
cpu: 375m
ephemeral-storage: 2Gi
memory: 384Mi
requests:
cpu: 50m
ephemeral-storage: 50Mi
memory: 256Mi

View File

@@ -0,0 +1,2 @@
pgoControllerLeaseName: ''
replicas: 1

View File

@@ -0,0 +1,48 @@
instanceSize: 50Gi
patroni:
dynamicConfiguration:
postgresql:
pg_hba:
- "host all all all scram-sha-256"
shared_preload_libraries: pgvector
users:
- name: grant
databases:
- postgres
- synapse
- gitea
- immich
options: "SUPERUSER LOGIN"
- name: synapse
databases:
- synapse
options: "LOGIN"
- name: gitea
databases:
- gitea
options: "LOGIN"
- name: immich
databases:
- immich
options: "LOGIN"
pgBackRestConfig:
global:
repo1-path: /pgbackrest/datastore/postgres/repo1
repo1-retention-full: "10"
repo1-retention-full-type: count
repo1-s3-key: {{ requiredEnv "HETZNER_S3_ACCESS_KEY" }}
repo1-s3-key-secret: {{ requiredEnv "HETZNER_S3_ACCESS_SECRET" }}
repo1-cipher-pass: {{ requiredEnv "PG_BACKREST_PASSWORD" }}
repos:
- name: repo1
s3:
bucket: fog
endpoint: hel1.your-objectstorage.com
region: hel1
schedules:
full: "0 1 * * 0"
differential: "0 1 * * 1-6"

View File

@@ -0,0 +1,82 @@
cephClusterSpec:
dashboard:
ssl: false
storage:
useAllNodes: true
useAllDevices: false
deviceFilter: "^sda"
resources:
mgr:
requests:
cpu: 50m
memory: 256Mi
limits:
cpu: "1"
mon:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: "1"
osd:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: "1"
ingress:
dashboard:
host:
name: fog.incngrnt.ca
path: /fog/ceph
pathType: Prefix
annotations:
"traefik.ingress.kubernetes.io/router.tls.certresolver": "letsencrypt"
"traefik.ingress.kubernetes.io/router.middlewares": "rook-ceph-ceph-stripprefix@kubernetescrd"
cephFileSystems:
- name: ceph-filesystem
# see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
spec:
metadataPool:
replicated:
size: 2
dataPools:
- failureDomain: host
replicated:
size: 2
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
activeCount: 1
activeStandby: true
resources:
requests:
cpu: 50m
memory: 256Mi
limits:
cpu: "1"
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
# (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: []
# see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
parameters:
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4

View File

@@ -0,0 +1,4 @@
resources:
requests:
cpu: 100m
memory: 128Mi