Add SDS deployment

Paul-Henry PERRIN 2026-01-19 15:41:15 +01:00
commit 094eae5e5f
11 changed files with 1301 additions and 0 deletions

Binary file not shown.

@@ -0,0 +1,8 @@
namespace: reports-sds
resources:
- ../base
- namespace.yaml
- sds-puppeteer.yaml
- sds-api.yaml
- sds-ui-admin.yaml
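# The namespace plus the three SDS workloads are layered on top of the shared
# services pulled in from ../base. A minimal usage sketch, assuming this
# kustomization sits in sds/app next to the manifests it references:
#   kubectl apply -k sds/app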

sds/app/namespace.yaml Normal file
@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: reports-sds
name: reports-sds

sds/app/sds-api.yaml Normal file
@@ -0,0 +1,435 @@
#
# api:
# image: registry.dev.k8s.transcity/sds/api:latest
# ports:
# - "13000:3000"
# depends_on:
# puppeteer:
# condition: service_started
# postgres:
# condition: service_healthy
# environment:
# SERVER_PORT: 3000
# EVENTS_TURNIT_ENABLED: "false"
# EVENTS_AZURE_ENABLED: "false"
# SDS_PUPPETEER_API_URL: "http://puppeteer:3000"
# PRINT_REPORT_URL: "http://admin:3000"
# REDIS_HOST: "redis"
# ELASTICSEARCH_URL: "https://elastic:fkBqGedfBM@elasticsearch:9200"
# DATABASE_URL: "postgres://postgres:postgres@postgres:5432/postgres"
# KEYCLOAK_URL: "https://keycloak.alpha.k8s.transcity"
# KEYCLOAK_AGENTS_REALM: sds-agents
# KEYCLOAK_THIRD_PARTY_REALM: sds-third-parties
# KEYCLOAK_CUSTOMERS_REALM: sds-customers
# KEYCLOAK_RETAILERS_REALM: sds-retailers
# AZ_STORAGE_CONNECTION_STRING: "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite:10000/devstoreaccount1;"
# NODE_EXTRA_CA_CERTS: /etc/ssl/certs/ca-certificates.crt
# healthcheck:
# test: ["CMD", "wget", "--spider", "http://127.0.0.1:3000/health"]
# interval: 5s
# timeout: 2s
# retries: 10
# volumes:
# - '/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt'
#
---
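# Database provisioning is delegated to the db-caretaker operator (custom CRDs,
# behaviour assumed): the Database object creates the "sds" database on the
# shared pg-common server in the commons namespace, and the DatabaseUser below
# writes the generated credentials into the sds-api-db-credentials Secret, whose
# connectionString key feeds DATABASE_URL in the sds-api Deployment further down
# this file.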
apiVersion: db-caretaker.dev/v1alpha1
kind: Database
metadata:
name: sds
spec:
dropOnDelete: true
name: sds
serverRef:
name: pg-common
namespace: commons
---
apiVersion: db-caretaker.dev/v1alpha1
kind: DatabaseUser
metadata:
name: sds-api
spec:
databaseRef:
name: sds
login: sdsapi
role: dbowner
secret: sds-api-db-credentials
secretConnectionStringKey: connectionString
secretConnectionStringFormat: postgres://{login}:{password_urlsafe}@{server}:{port}/{database}
---
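# Keycloak configuration is declared through the keycloak-configurator CRDs
# (reconciliation behaviour assumed): one realm per audience (agents, third
# parties, customers, retailers, plus docs). The first four each carry a
# confidential sds-api client whose generated secret lands in the matching
# *-client-credentials Secret consumed by the Deployment environment below.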
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcRealm
metadata:
name: sds-agents
spec:
realm: sds-agents
displayName: "SDS Agents"
loginWithEmailAllowed: true
bruteForceDetection:
enabled: true
excludeDefaultRoles:
- client: account
name: manage-account
- client: account
name: view-profile
roles:
- name: "sds:admin"
- name: "sds:agent"
- name: "sds:helpdesk"
- name: "sds:reporting"
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcClient
metadata:
name: sds-agents-api
spec:
realm: sds-agents
clientId: sds-api
name: SDS Api
publicClient: false
standardFlowEnabled: true
serviceAccountsEnabled: true
clientSecretName: sds-agents-api-client-credentials
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcGroup
metadata:
name: sds-agents-api
spec:
realm: sds-agents
name: sds-api
clientRoles:
realm-management:
- manage-realm
- manage-users
- manage-clients
- view-users
realmRoles:
- sds:admin
users:
- service-account-sds-api
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcClient
metadata:
name: sds-agents-ui
spec:
realm: sds-agents
clientId: sds-ui-admin
name: SDS UI
publicClient: true
standardFlowEnabled: true
serviceAccountsEnabled: true
clientSecretName: sds-agents-ui-client-credentials
redirectUris:
- "*"
webOrigins:
- "*"
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcRealm
metadata:
name: sds-third-parties
spec:
realm: sds-third-parties
displayName: "SDS Third Parties"
loginWithEmailAllowed: true
bruteForceDetection:
enabled: true
excludeDefaultRoles:
- client: account
name: manage-account
- client: account
name: view-profile
roles:
- name: "sds:administration"
- name: "sds:standard"
- name: "sds:third-parties"
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcClient
metadata:
name: sds-third-parties-api
spec:
realm: sds-third-parties
clientId: sds-api
name: SDS Api
publicClient: false
standardFlowEnabled: true
serviceAccountsEnabled: true
clientSecretName: sds-third-parties-api-client-credentials
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcGroup
metadata:
name: sds-third-parties-api
spec:
realm: sds-third-parties
name: sds-api
clientRoles:
realm-management:
- manage-realm
- manage-users
- manage-clients
- view-users
users:
- service-account-sds-api
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcRealm
metadata:
name: sds-customers
spec:
realm: sds-customers
displayName: "SDS Customers"
loginWithEmailAllowed: true
bruteForceDetection:
enabled: true
excludeDefaultRoles:
- client: account
name: manage-account
- client: account
name: view-profile
roles:
- name: "sds:admin"
- name: "sds:customer"
- name: "sds:ui"
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcClient
metadata:
name: sds-customers-api
spec:
realm: sds-customers
clientId: sds-api
name: SDS Api
publicClient: false
standardFlowEnabled: true
serviceAccountsEnabled: true
clientSecretName: sds-customers-api-client-credentials
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcGroup
metadata:
name: sds-customers-api
spec:
realm: sds-customers
name: sds-api
clientRoles:
realm-management:
- manage-realm
- manage-users
- manage-clients
- view-users
users:
- service-account-sds-api
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcRealm
metadata:
name: sds-retailers
spec:
realm: sds-retailers
displayName: "SDS Retailers"
loginWithEmailAllowed: true
bruteForceDetection:
enabled: true
excludeDefaultRoles:
- client: account
name: manage-account
- client: account
name: view-profile
roles:
- name: "sds:retailer"
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcClient
metadata:
name: sds-retailers-api
spec:
realm: sds-retailers
clientId: sds-api
name: SDS Api
publicClient: false
standardFlowEnabled: true
serviceAccountsEnabled: true
clientSecretName: sds-retailers-api-client-credentials
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcGroup
metadata:
name: sds-retailers-api
spec:
realm: sds-retailers
name: sds-api
clientRoles:
realm-management:
- manage-realm
- manage-users
- manage-clients
- view-users
users:
- service-account-sds-api
---
apiVersion: keycloak-configurator.rcs/v1alpha1
kind: KcRealm
metadata:
name: sds-docs
spec:
realm: sds-docs
displayName: "SDS Docs"
loginWithEmailAllowed: true
bruteForceDetection:
enabled: true
excludeDefaultRoles:
- client: account
name: manage-account
- client: account
name: view-profile
---
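# The api container mounts the root-ca-bundle Secret over
# /etc/ssl/certs/ca-certificates.crt and points NODE_EXTRA_CA_CERTS at it,
# presumably so Node.js trusts the internal CA when calling the HTTPS Keycloak
# URL. Readiness is a plain wget against /health, mirroring the compose
# healthcheck kept as a comment at the top of this file.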
apiVersion: apps/v1
kind: Deployment
metadata:
name: sds-api
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: sds-api
app.kubernetes.io/name: sds-api
template:
metadata:
labels:
app.kubernetes.io/instance: sds-api
app.kubernetes.io/name: sds-api
spec:
containers:
- name: api
image: registry.dev.k8s.transcity/sds/api:latest
imagePullPolicy: Always
env:
- name: SERVER_PORT
value: "3000"
- name: EVENTS_TURNIT_ENABLED
value: "false"
- name: EVENTS_AZURE_ENABLED
value: "false"
- name: SDS_PUPPETEER_API_URL
value: "http://sds-puppeteer:3000"
- name: PRINT_REPORT_URL
value: "http://sds-ui-admin:3000"
- name: REDIS_HOST
value: "valkey"
- name: ELASTICSEARCH_URL
value: "http://elastic:GTRNZHCJTiGJ2CfY@elasticsearch-master:9200"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: sds-api-db-credentials
key: connectionString
- name: KEYCLOAK_URL
value: "https://keycloak.alpha.k8s.transcity"
- name: KEYCLOAK_AGENTS_REALM
value: sds-agents
- name: KEYCLOAK_AGENTS_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-agents-api-client-credentials
key: clientId
- name: KEYCLOAK_AGENTS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-agents-api-client-credentials
key: clientSecret
- name: PRINTER_REALM
value: sds-agents
- name: PRINTER_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-agents-api-client-credentials
key: clientId
- name: PRINTER_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-agents-api-client-credentials
key: clientSecret
- name: KEYCLOAK_THIRD_PARTY_REALM
value: sds-third-parties
- name: KEYCLOAK_THIRD_PARTY_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-third-parties-api-client-credentials
key: clientId
- name: KEYCLOAK_THIRD_PARTY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-third-parties-api-client-credentials
key: clientSecret
- name: KEYCLOAK_CUSTOMERS_REALM
value: sds-customers
- name: KEYCLOAK_CUSTOMERS_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-customers-api-client-credentials
key: clientId
- name: KEYCLOAK_CUSTOMERS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-customers-api-client-credentials
key: clientSecret
- name: KEYCLOAK_RETAILERS_REALM
value: sds-retailers
- name: KEYCLOAK_RETAILERS_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-retailers-api-client-credentials
key: clientId
- name: KEYCLOAK_RETAILERS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-retailers-api-client-credentials
key: clientSecret
- name: AZ_STORAGE_CONNECTION_STRING
value: "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite:10000/devstoreaccount1;"
- name: NODE_EXTRA_CA_CERTS
value: /etc/ssl/certs/ca-certificates.crt
ports:
- name: http
containerPort: 3000
readinessProbe:
exec:
command: [ "wget", "-O", "/dev/null", "-q", "http://127.0.0.1:3000/health" ]
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
volumeMounts:
- name: root-ca-bundle
mountPath: /etc/ssl/certs/ca-certificates.crt
subPath: ca.crt
readOnly: true
volumes:
- name: root-ca-bundle
secret:
secretName: root-ca-bundle
---
apiVersion: v1
kind: Service
metadata:
name: sds-api
spec:
ports:
- name: http
port: 3000
targetPort: http
selector:
app.kubernetes.io/instance: sds-api
app.kubernetes.io/name: sds-api
type: ClusterIP

@@ -0,0 +1,67 @@
# puppeteer:
# platform: linux/amd64
# image: registry.dev.k8s.transcity/sds/puppeteer:latest
# ports:
# - "13004:3000"
# extra_hosts:
# - "host.docker.internal:host-gateway"
# environment:
# LOG_FORMAT: "text"
# SERVER_PORT: 3000
# healthcheck:
# test: ["CMD", "wget", "--spider", "http://127.0.0.1:3000/health"]
# interval: 5s
# timeout: 2s
# retries: 10
---
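# Rendering helper for sds-api (an assumption based on SDS_PUPPETEER_API_URL in
# sds-api.yaml): it is only reachable inside the cluster through the ClusterIP
# Service below, so no Ingress is declared for it.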
apiVersion: apps/v1
kind: Deployment
metadata:
name: sds-puppeteer
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: puppeteer
app.kubernetes.io/name: puppeteer
template:
metadata:
labels:
app.kubernetes.io/instance: puppeteer
app.kubernetes.io/name: puppeteer
spec:
containers:
- name: puppeteer
image: registry.dev.k8s.transcity/sds/puppeteer:latest
imagePullPolicy: Always
env:
- name: LOG_FORMAT
value: text
- name: SERVER_PORT
value: "3000"
ports:
- name: http
containerPort: 3000
readinessProbe:
exec:
command: [ "wget", "-O", "/dev/null", "-q", "http://127.0.0.1:3000/health" ]
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
---
apiVersion: v1
kind: Service
metadata:
name: sds-puppeteer
spec:
ports:
- name: http
port: 3000
targetPort: http
selector:
app.kubernetes.io/instance: puppeteer
app.kubernetes.io/name: puppeteer
type: ClusterIP

sds/app/sds-ui-admin.yaml Normal file
@@ -0,0 +1,159 @@
# admin:
# image: registry.dev.k8s.transcity/sds/ui-admin:latest
# ports:
# - "13001:3000"
# depends_on:
# api:
# condition: service_healthy
# environment:
# SERVER_PORT: 3000
# SDS_API_URL: "http://api:3000"
# KEYCLOAK_URL: "https://keycloak.alpha.k8s.transcity"
# KEYCLOAK_AGENTS_REALM: sds-agents
# KEYCLOAK_THIRD_PARTY_REALM: sds-third-parties
# KEYCLOAK_CUSTOMERS_REALM: sds-customers
# KEYCLOAK_RETAILERS_REALM: sds-retailers
# NODE_EXTRA_CA_CERTS: /etc/ssl/certs/ca-certificates.crt
# healthcheck:
# test: ["CMD", "wget", "--spider", "http://127.0.0.1:3000/health"]
# interval: 5s
# timeout: 2s
# retries: 10
#
# volumes:
# - '/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt'
---
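# Admin front-end: it calls sds-api over the in-cluster Service (SDS_API_URL),
# signs agents in through the public sds-ui-admin client declared by the
# sds-agents-ui KcClient, and is published externally by the Ingress at the end
# of this file on admin.sds.alpha.k8s.transcity.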
apiVersion: apps/v1
kind: Deployment
metadata:
name: sds-ui-admin
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: sds-ui-admin
app.kubernetes.io/name: sds-ui-admin
template:
metadata:
labels:
app.kubernetes.io/instance: sds-ui-admin
app.kubernetes.io/name: sds-ui-admin
spec:
containers:
- name: api
image: registry.dev.k8s.transcity/sds/ui-admin:latest
imagePullPolicy: Always
env:
- name: SERVER_PORT
value: "3000"
- name: SDS_API_URL
value: "http://sds-api:3000"
- name: KEYCLOAK_URL
value: "https://keycloak.alpha.k8s.transcity"
- name: KEYCLOAK_AGENTS_REALM
value: sds-agents
- name: KEYCLOAK_AGENTS_CLIENT_ID
value: sds-ui-admin
- name: KEYCLOAK_THIRD_PARTY_REALM
value: sds-third-parties
- name: KEYCLOAK_THIRD_PARTY_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-third-parties-api-client-credentials
key: clientId
- name: KEYCLOAK_THIRD_PARTY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-third-parties-api-client-credentials
key: clientSecret
- name: KEYCLOAK_CUSTOMERS_REALM
value: sds-customers
- name: KEYCLOAK_CUSTOMERS_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-customers-api-client-credentials
key: clientId
- name: KEYCLOAK_CUSTOMERS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-customers-api-client-credentials
key: clientSecret
- name: KEYCLOAK_RETAILERS_REALM
value: sds-retailers
- name: KEYCLOAK_RETAILERS_CLIENT_ID
valueFrom:
secretKeyRef:
name: sds-retailers-api-client-credentials
key: clientId
- name: KEYCLOAK_RETAILERS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: sds-retailers-api-client-credentials
key: clientSecret
- name: NODE_EXTRA_CA_CERTS
value: /etc/ssl/certs/ca-certificates.crt
ports:
- name: http
containerPort: 3000
readinessProbe:
exec:
command: [ "wget", "-O", "/dev/null", "-q", "http://127.0.0.1:3000/health" ]
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
volumeMounts:
- name: root-ca-bundle
mountPath: /etc/ssl/certs/ca-certificates.crt
subPath: ca.crt
readOnly: true
volumes:
- name: root-ca-bundle
secret:
secretName: root-ca-bundle
---
apiVersion: v1
kind: Service
metadata:
name: sds-ui-admin
spec:
ports:
- name: http
port: 3000
targetPort: http
selector:
app.kubernetes.io/instance: sds-ui-admin
app.kubernetes.io/name: sds-ui-admin
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sds-ui-admin
annotations:
cert-manager.io/cluster-issuer: admin
cert-manager.io/common-name: admin.sds.alpha.k8s.transcity
cert-manager.io/private-key-algorithm: ECDSA
forecastle.stakater.com/appName: SDS
forecastle.stakater.com/expose: 'true'
forecastle.stakater.com/group: Reports
forecastle.stakater.com/instance: admin
spec:
ingressClassName: admin
rules:
- host: admin.sds.alpha.k8s.transcity
http:
paths:
- backend:
service:
name: sds-ui-admin
port:
name: http
path: /
pathType: Prefix
tls:
- hosts:
- admin.sds.alpha.k8s.transcity
secretName: sds-ui-tls

sds/base/azurite.yaml Normal file
@@ -0,0 +1,108 @@
---
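# Azurite emulates Azure Blob/Queue/Table storage for this environment; sds-api
# targets it through AZ_STORAGE_CONNECTION_STRING, which uses the well-known
# devstoreaccount1 development account and points BlobEndpoint at this Service
# on port 10000.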
# Source: azurite/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: azurite
labels:
app: azurite
helm.sh/chart: azurite-2.0.0
app.kubernetes.io/name: azurite
app.kubernetes.io/instance: azurite
app.kubernetes.io/version: "3.29.0"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
ports:
- port: 10000
targetPort: blobs
protocol: TCP
name: blobs
- port: 10001
targetPort: queues
protocol: TCP
name: queues
- port: 10002
targetPort: tables
protocol: TCP
name: tables
selector:
app: azurite
---
# Source: azurite/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: azurite
labels:
helm.sh/chart: azurite-2.0.0
app.kubernetes.io/name: azurite
app.kubernetes.io/instance: azurite
app.kubernetes.io/version: "3.29.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
serviceName: azurite
selector:
matchLabels:
app: azurite
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
template:
metadata:
labels:
app: azurite
app.kubernetes.io/name: azurite
app.kubernetes.io/instance: azurite
spec:
volumes:
- name: storage
emptyDir: {}
securityContext:
fsGroup: 10000
runAsGroup: 10000
runAsNonRoot: true
runAsUser: 10000
containers:
- name: azurite
image: "mcr.microsoft.com/azure-storage/azurite:3.35.0"
imagePullPolicy: Always
command:
- "azurite"
- "-l"
- "/data"
- "--disableProductStyleUrl"
- "--blobHost"
- "0.0.0.0"
- "--queueHost"
- "0.0.0.0"
- "--tableHost"
- "0.0.0.0"
ports:
- containerPort: 10000
name: blobs
- containerPort: 10001
name: queues
- containerPort: 10002
name: tables
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
volumeMounts:
- name: storage
mountPath: /data
resources:
{}

sds/base/elasticsearch.yaml Normal file
@@ -0,0 +1,266 @@
---
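# Rendered output of the Elasticsearch Helm chart, trimmed to a single-node
# cluster (replicas: 1, one initial master, 30Gi data volume). The password in
# elasticsearch-master-credentials is the same one embedded in ELASTICSEARCH_URL
# on the sds-api Deployment, even though xpack.security.enabled is set to
# "false" further down.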
# Source: elasticsearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: "elasticsearch-master-pdb"
spec:
maxUnavailable: 1
selector:
matchLabels:
app: "elasticsearch-master"
---
# Source: elasticsearch/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: elasticsearch-master-credentials
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
type: Opaque
data:
username: ZWxhc3RpYw==
password: R1RSTlpIQ0pUaUdKMkNmWQ==
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
{}
spec:
type: ClusterIP
selector:
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
publishNotReadyAddresses: false
ports:
- name: http
protocol: TCP
port: 9200
- name: transport
protocol: TCP
port: 9300
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master-headless
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
# Also create endpoints if the related pod isn't ready
publishNotReadyAddresses: true
selector:
app: "elasticsearch-master"
ports:
- name: http
port: 9200
- name: transport
port: 9300
---
# Source: elasticsearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
esMajorVersion: "8"
spec:
serviceName: elasticsearch-master-headless
selector:
matchLabels:
app: "elasticsearch-master"
replicas: 1
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elasticsearch-master
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 30Gi
template:
metadata:
name: "elasticsearch-master"
labels:
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
spec:
securityContext:
fsGroup: 1000
runAsUser: 1000
automountServiceAccountToken: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-master"
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 120
volumes:
enableServiceLinks: true
initContainers:
- name: configure-sysctl
securityContext:
runAsUser: 0
privileged: true
image: "docker.io/library/elasticsearch:9.1.3"
imagePullPolicy: "IfNotPresent"
command: ["sysctl", "-w", "vm.max_map_count=262144"]
resources:
{}
containers:
- name: "elasticsearch"
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
image: "docker.io/library/elasticsearch:9.1.3"
imagePullPolicy: "IfNotPresent"
readinessProbe:
exec:
command:
- bash
- -c
- |
set -e
# Exit if ELASTIC_PASSWORD is unset
if [ -z "${ELASTIC_PASSWORD}" ]; then
echo "ELASTIC_PASSWORD variable is missing, exiting"
exit 1
fi
# If the node is starting up wait for the cluster to be ready (request params: "wait_for_status=green&timeout=1s" )
# Once it has started only check that the node itself is responding
START_FILE=/tmp/.es_start_file
# Disable nss cache to avoid filling dentry cache when calling curl
# This is required with Elasticsearch Docker using nss < 3.52
export NSS_SDB_USE_CACHE=no
http () {
local path="${1}"
local args="${2}"
set -- -XGET -s
if [ "$args" != "" ]; then
set -- "$@" $args
fi
set -- "$@" -u "elastic:${ELASTIC_PASSWORD}"
curl --output /dev/null -k "$@" "http://127.0.0.1:9200${path}"
}
if [ -f "${START_FILE}" ]; then
echo 'Elasticsearch is already running, lets check the node is healthy'
HTTP_CODE=$(http "/" "-w %{http_code}")
RC=$?
if [[ ${RC} -ne 0 ]]; then
echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} http://127.0.0.1:9200/ failed with RC ${RC}"
exit ${RC}
fi
# ready if HTTP code 200, 503 is tolerable if ES version is 6.x
if [[ ${HTTP_CODE} == "200" ]]; then
exit 0
elif [[ ${HTTP_CODE} == "503" && "8" == "6" ]]; then
exit 0
else
echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} http://127.0.0.1:9200/ failed with HTTP code ${HTTP_CODE}"
exit 1
fi
else
echo 'Waiting for elasticsearch cluster to become ready (request params: "wait_for_status=green&timeout=1s" )'
if http "/_cluster/health?wait_for_status=green&timeout=1s" "--fail" ; then
touch ${START_FILE}
exit 0
else
echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
exit 1
fi
fi
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
resources:
limits:
cpu: 1000m
memory: 3Gi
requests:
cpu: 100m
memory: 2Gi
env:
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: cluster.initial_master_nodes
value: "elasticsearch-master-0,"
- name: node.roles
value: "master,data,data_content,data_hot,data_warm,data_cold,ingest,ml,remote_cluster_client,transform,"
- name: discovery.seed_hosts
value: "elasticsearch-master-headless"
- name: cluster.name
value: "elasticsearch"
- name: network.host
value: "0.0.0.0"
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
name: elasticsearch-master-credentials
key: password
- name: ES_JAVA_OPTS
value: "-Xms2g -Xmx2g"
- name: xpack.security.enabled
value: "false"
volumeMounts:
- name: "elasticsearch-master"
mountPath: /usr/share/elasticsearch/data

@@ -0,0 +1,4 @@
resources:
- valkey.yaml
- azurite.yaml
- elasticsearch.yaml
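# Shared backing services (cache, storage emulator, search) consumed by the SDS
# application layer through its ../base reference.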

sds/base/valkey.yaml Normal file
@@ -0,0 +1,191 @@
---
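# Valkey provides the Redis-compatible cache that sds-api addresses through
# REDIS_HOST=valkey; the init ConfigMap below generates /data/conf/valkey.conf
# at pod start, and the main container launches valkey-server with that file.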
# Source: valkey/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
app.kubernetes.io/version: "9.0.1"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false
---
# Source: valkey/templates/init_config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: valkey-init-scripts
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
app.kubernetes.io/version: "9.0.1"
app.kubernetes.io/managed-by: Helm
data:
init.sh: |-
#!/bin/sh
set -eu
# Default config paths
VALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}
LOGFILE="/data/init.log"
DATA_DIR="/data/conf"
# Logging function (outputs to stderr and file)
log() {
echo "$(date) $1" | tee -a "$LOGFILE" >&2
}
# Clean old log if requested
if [ "${KEEP_OLD_LOGS:-false}" != "true" ]; then
rm -f "$LOGFILE"
fi
if [ -f "$LOGFILE" ]; then
log "Detected restart of this instance ($HOSTNAME)"
fi
log "Creating configuration in $DATA_DIR..."
mkdir -p "$DATA_DIR"
rm -f "$VALKEY_CONFIG"
# Base valkey.conf
log "Generating base valkey.conf"
{
echo "port 6379"
echo "protected-mode no"
echo "bind * -::*"
echo "dir /data"
} >>"$VALKEY_CONFIG"
# Append extra configs if present
if [ -f /usr/local/etc/valkey/valkey.conf ]; then
log "Appending /usr/local/etc/valkey/valkey.conf"
cat /usr/local/etc/valkey/valkey.conf >>"$VALKEY_CONFIG"
fi
if [ -d /extravalkeyconfigs ]; then
log "Appending files in /extravalkeyconfigs/"
cat /extravalkeyconfigs/* >>"$VALKEY_CONFIG"
fi
---
# Source: valkey/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
app.kubernetes.io/version: "9.0.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: primary
spec:
type: ClusterIP
ports:
- port: 6379
targetPort: tcp
protocol: TCP
name: tcp
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
---
# Source: valkey/templates/deploy_valkey.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
app.kubernetes.io/version: "9.0.1"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
template:
metadata:
labels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: valkey
annotations:
checksum/initconfig: 085c7380f8b46ec02c949176200b2290
spec:
automountServiceAccountToken: false
serviceAccountName: valkey
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 1000
initContainers:
- name: valkey-init
image: docker.io/valkey/valkey:9.0.1
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
command: [ "/scripts/init.sh" ]
volumeMounts:
- name: valkey-data
mountPath: /data
- name: scripts
mountPath: /scripts
containers:
- name: valkey
image: docker.io/valkey/valkey:9.0.1
imagePullPolicy: IfNotPresent
command: [ "valkey-server" ]
args: [ "/data/conf/valkey.conf" ]
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: VALKEY_LOGLEVEL
value: "notice"
ports:
- name: tcp
containerPort: 6379
protocol: TCP
startupProbe:
exec:
command: [ "sh", "-c", "valkey-cli ping" ]
livenessProbe:
exec:
command: [ "sh", "-c", "valkey-cli ping" ]
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- name: valkey-data
mountPath: /data
volumes:
- name: scripts
configMap:
name: valkey-init-scripts
defaultMode: 0555
- name: valkey-data
emptyDir: {}

@@ -0,0 +1,56 @@
resources:
- ../app
patches:
- patch: |-
apiVersion: apps/v1
kind: Deployment
metadata:
name: sds-api
spec:
template:
spec:
containers:
- name: api
env:
- name: KEYCLOAK_URL
value: "https://keycloak.demo.aws.myseamlesstravel.com"
- patch: |-
- op: replace
path: /metadata/annotations/cert-manager.io~1common-name
value: admin.sds.demo.aws.myseamlesstravel.com
- op: replace
path: /spec/rules/0/host
value: admin.sds.demo.aws.myseamlesstravel.com
- op: replace
path: /spec/tls/0/hosts/0
value: admin.sds.demo.aws.myseamlesstravel.com
target:
group: networking.k8s.io
version: v1
kind: Ingress
name: sds-ui-admin
- target:
kind: StatefulSet
patch: |-
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: all
spec:
template:
spec:
nodeSelector:
"workload": "transcity-pg"
- target:
kind: Deployment
patch: |-
apiVersion: apps/v1
kind: Deployment
metadata:
name: all
spec:
template:
spec:
nodeSelector:
"workload": "transcity-pg"