Add ClickHouse

Paul-Henry PERRIN 2026-01-27 17:12:30 +01:00
parent 63e152f59c
commit d0e3918950
10 changed files with 6786 additions and 0 deletions


@ -15,3 +15,5 @@ helm template superset https://github.com/apache/superset/releases/download/supe
docker buildx build --push -t registry.dev.k8s.transcity/reports/superset:5.0.0 superset-image
helm template ch-operator https://github.com/Altinity/helm-charts/releases/download/release-0.25.6/altinity-clickhouse-operator-0.25.6.tgz --namespace reports-clickhouse --values clickhouse-operator-values.yaml > clickhouse/base/clickhouse-operator.yaml


@ -0,0 +1,931 @@
namespaceOverride: ""
# commonLabels -- set of labels that will be applied to all the resources for the operator
commonLabels: {}
# commonAnnotations -- set of annotations that will be applied to all the resources for the operator
commonAnnotations: {}
deployment:
# look details in `kubectl explain deployment.spec.strategy`
strategy:
type: Recreate
crdHook:
# crdHook.enabled -- enable automatic CRD installation/update via pre-install/pre-upgrade hooks
# when disabled, CRDs must be installed manually using kubectl apply
enabled: true
image:
# crdHook.image.repository -- image repository for CRD installation job
repository: bitnami/kubectl
# crdHook.image.tag -- image tag for CRD installation job
tag: "latest"
# crdHook.image.pullPolicy -- image pull policy for CRD installation job
pullPolicy: IfNotPresent
# crdHook.resources -- resource limits and requests for CRD installation job
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# crdHook.nodeSelector -- node selector for CRD installation job
nodeSelector: {}
# crdHook.tolerations -- tolerations for CRD installation job
tolerations: []
# crdHook.affinity -- affinity for CRD installation job
affinity: {}
operator:
image:
# operator.image.repository -- image repository
repository: altinity/clickhouse-operator
# operator.image.tag -- image tag (chart's appVersion value will be used if not set)
tag: ""
# operator.image.pullPolicy -- image pull policy
pullPolicy: IfNotPresent
containerSecurityContext: {}
# operator.resources -- custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# operator.priorityClassName -- priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details
# @default -- ""
priorityClassName: ""
# operator.env -- additional environment variables for the clickhouse-operator container in deployment
# possible format value `[{"name": "SAMPLE", "value": "text"}]`
env: []
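# a block-style sketch of the same format (SAMPLE is a placeholder, not a recognized variable):
# env:
#   - name: SAMPLE
#     value: "text"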
metrics:
enabled: true
image:
# metrics.image.repository -- image repository
repository: altinity/metrics-exporter
# metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
tag: ""
# metrics.image.pullPolicy -- image pull policy
pullPolicy: IfNotPresent
containerSecurityContext: {}
# metrics.resources -- custom resource configuration
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# metrics.env -- additional environment variables for the deployment of metrics-exporter containers
# possible format value `[{"name": "SAMPLE", "value": "text"}]`
env: []
# imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
# possible value format `[{"name":"your-secret-name"}]`,
# check `kubectl explain pod.spec.imagePullSecrets` for details
imagePullSecrets: []
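# for example (hypothetical secret name):
# imagePullSecrets:
#   - name: my-registry-secret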
# podLabels -- labels to add to the clickhouse-operator pod
podLabels: {}
# podAnnotations -- annotations to add to the clickhouse-operator pod, check `kubectl explain pod.spec.annotations` for details
# @default -- check the `values.yaml` file
podAnnotations:
prometheus.io/port: '8888'
prometheus.io/scrape: 'true'
clickhouse-operator-metrics/port: '9999'
clickhouse-operator-metrics/scrape: 'true'
# nameOverride -- override name of the chart
nameOverride: ""
# fullnameOverride -- full name of the chart.
fullnameOverride: ""
serviceAccount:
# serviceAccount.create -- specifies whether a service account should be created
create: true
# serviceAccount.annotations -- annotations to add to the service account
annotations: {}
# serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
name:
rbac:
# rbac.create -- specifies whether rbac resources should be created
create: true
# rbac.namespaceScoped -- specifies whether to create roles and rolebindings at the cluster level or namespace level
namespaceScoped: false
secret:
# secret.create -- create a secret with operator credentials
create: true
# secret.username -- operator credentials username
username: clickhouse_operator
# secret.password -- operator credentials password
password: clickhouse_operator_password
# nodeSelector -- node for scheduler pod assignment, check `kubectl explain pod.spec.nodeSelector` for details
nodeSelector: {}
# tolerations -- tolerations for scheduler pod assignment, check `kubectl explain pod.spec.tolerations` for details
tolerations: []
# affinity -- affinity for scheduler pod assignment, check `kubectl explain pod.spec.affinity` for details
affinity: {}
# podSecurityContext - operator deployment SecurityContext, check `kubectl explain pod.spec.securityContext` for details
podSecurityContext: {}
# topologySpreadConstraints - topologySpreadConstraints affinity for scheduler pod assignment, check `kubectl explain pod.spec.topologySpreadConstraints` for details
topologySpreadConstraints: []
serviceMonitor:
# serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator)
# Two endpoints will be created in the ServiceMonitor: ch-metrics on port 8888 and op-metrics on port 9999. You can specify interval, scrapeTimeout, relabelings, and metricRelabelings for each endpoint below
enabled: false
# serviceMonitor.additionalLabels -- additional labels for service monitor
additionalLabels: {}
clickhouseMetrics:
# serviceMonitor.interval for ch-metrics endpoint -- Prometheus scrape interval
interval: 30s
# serviceMonitor.scrapeTimeout for ch-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout, unless it is less than the target's scrape interval, in which case the latter is used.
scrapeTimeout: ""
# serviceMonitor.relabelings for ch-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
relabelings: []
# serviceMonitor.metricRelabelings for ch-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
metricRelabelings: []
operatorMetrics:
# serviceMonitor.interval for op-metrics endpoint -- Prometheus scrape interval
interval: 30s
# serviceMonitor.scrapeTimeout for op-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout, unless it is less than the target's scrape interval, in which case the latter is used.
scrapeTimeout: ""
# serviceMonitor.relabelings for op-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
relabelings: []
# serviceMonitor.metricRelabelings for op-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
metricRelabelings: []
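# a sketch of one metricRelabelings entry (the metric-name regex is a hypothetical example):
# metricRelabelings:
#   - sourceLabels: [__name__]
#     regex: "go_gc_.*"
#     action: drop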
# configs -- clickhouse operator configs
# @default -- check the `values.yaml` file for the config content (auto-generated from latest operator release)
configs:
confdFiles: null
configdFiles:
01-clickhouse-01-listen.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
</yandex>
01-clickhouse-02-logger.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<logger>
<!-- Possible levels: https://github.com/pocoproject/poco/blob/devel/Foundation/include/Poco/Logger.h#L439 -->
<level>debug</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
<console>1</console>
</logger>
</yandex>
01-clickhouse-03-query_log.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<query_log replace="1">
<database>system</database>
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<query_thread_log remove="1"/>
</yandex>
01-clickhouse-04-part_log.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<part_log replace="1">
<database>system</database>
<table>part_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
</yandex>
01-clickhouse-05-trace_log.xml: |-
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<trace_log replace="1">
<database>system</database>
<table>trace_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</trace_log>
</yandex>
files:
config.yaml:
# IMPORTANT
# This file is auto-generated
# Do not edit this file - all changes would be lost
# Edit appropriate template in the following folder:
# deploy/builder/templates-config
# IMPORTANT
#
# Template parameters available:
# WATCH_NAMESPACES=
# CH_USERNAME_PLAIN=
# CH_PASSWORD_PLAIN=
# CH_CREDENTIALS_SECRET_NAMESPACE=
# CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
# VERBOSITY=1
################################################
##
## Watch section
##
################################################
watch:
# List of namespaces where clickhouse-operator watches for events.
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
namespaces: []
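# for example, to watch only the namespace used by the overlay later in this commit:
# namespaces: ["reports-clickhouse"]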
clickhouse:
configuration:
################################################
##
## Configuration files section
##
################################################
file:
# Each 'path' can be either absolute or relative.
# In case path is absolute - it is used as is
# In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
path:
# Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
common: chi/config.d
# Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
host: chi/conf.d
# Path to the folder where ClickHouse configuration files with users' settings are located.
# Files are common for all instances within a CHI.
user: chi/users.d
################################################
##
## Configuration users section
##
################################################
user:
# Default settings for user accounts, created by the operator.
# IMPORTANT. These are not access credentials or settings for 'default' user account,
# it is a template for filling out missing fields for all user accounts to be created by the operator,
# with the following EXCEPTIONS:
# 1. 'default' user account DOES NOT use provided password, but uses all the rest of the fields.
# Password for 'default' user account has to be provided explicitly, if it is to be used.
# 2. CHOP user account DOES NOT use:
# - profile setting. It uses predefined profile called 'clickhouse_operator'
# - quota setting. It uses empty quota name.
# - networks IP setting. Operator specifies 'networks/ip' user setting to match operators' pod IP only.
# - password setting. Password for CHOP account is used from 'clickhouse.access.*' section
default:
# Default values for ClickHouse user account(s) created by the operator
# 1. user/profile - string
# 2. user/quota - string
# 3. user/networks/ip - multiple strings
# 4. user/password - string
# These values can be overwritten on per-user basis.
profile: "default"
quota: "default"
networksIP:
- "::1"
- "127.0.0.1"
password: "default"
################################################
##
## Configuration network section
##
################################################
network:
# Default host_regexp to limit network connectivity from outside
hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
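# illustrative expansion for the CHI named "clickhouse" in namespace "reports-clickhouse" defined later in this commit
# ({chi} and {namespace} substituted, same escaping as above):
# "(chi-clickhouse-[^.]+\\d+-\\d+|clickhouse\\-clickhouse)\\.reports-clickhouse\\.svc\\.cluster\\.local$"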
################################################
##
## Configuration restart policy section
## Configuration restart policy describes what configuration changes require ClickHouse restart
##
################################################
configurationRestartPolicy:
rules:
# IMPORTANT!
# Special version of "*" - default version - has to satisfy all ClickHouse versions.
# Default version will also be used in case ClickHouse version is unknown.
# ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
# see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
# to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
- settings/*: "yes"
# single values
- settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
# structured XML
- settings/logger/*: "no"
- settings/macros/*: "no"
- settings/remote_servers/*: "no"
- settings/user_directories/*: "no"
# these settings should not lead to pod restarts
- settings/display_secrets_in_show_and_select: "no"
- zookeeper/*: "no"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
- files/config.d/*no_restart*: "no"
# exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
rules:
- settings/logger: "yes"
#################################################
##
## Access to ClickHouse instances
##
################################################
access:
# Possible values for 'scheme' are:
# 1. http - force http to be used to connect to ClickHouse instances
# 2. https - force https to be used to connect to ClickHouse instances
# 3. auto - either http or https is selected based on open ports
scheme: "auto"
# ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
# These credentials are used for:
# 1. Metrics requests
# 2. Schema maintenance
# User with these credentials can be specified in additional ClickHouse .xml config files,
# located in 'clickhouse.configuration.file.path.user' folder
username: ""
password: ""
rootCA: ""
# Location of the k8s Secret with username and password to be used by the operator to connect to ClickHouse instances.
# Can be used instead of explicitly specified username and password available in sections:
# - clickhouse.access.username
# - clickhouse.access.password
# Secret should have two keys:
# 1. username
# 2. password
secret:
# Empty `namespace` means that the k8s Secret will be looked up in the same namespace where the operator's pod is running.
namespace: ""
# Empty `name` means no k8s Secret will be looked up
name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
# Port used to connect to ClickHouse instances
port: 8123
# Timeouts used to limit connection and queries from the operator to ClickHouse instances
# Specified in seconds.
timeouts:
# Timeout to set up a connection from the operator to ClickHouse instances. In seconds.
connect: 1
# Timeout to perform a SQL query from the operator to ClickHouse instances. In seconds.
query: 4
################################################
##
## Addons specifies additional configuration sections
## Should it be called something like "templates"?
##
################################################
addons:
rules:
- version: "*"
spec:
configuration:
users:
profiles:
quotas:
settings:
files:
- version: ">= 23.3"
spec:
configuration:
###
### users.d is global while description depends on CH version which may vary on per-host basis
### In case of global-ness this may be better to implement via auto-templates
###
### As a solution, this may be applied on the whole cluster based on any of its hosts
###
### What to do when a host has just been created? The CH version is not known until CH has started, yet user config is required before CH starts.
### We do not have any info about the cluster on initial creation
###
users:
"{clickhouseOperatorUser}/access_management": 1
"{clickhouseOperatorUser}/named_collection_control": 1
"{clickhouseOperatorUser}/show_named_collections": 1
"{clickhouseOperatorUser}/show_named_collections_secrets": 1
profiles:
quotas:
settings:
files:
- version: ">= 23.5"
spec:
configuration:
users:
profiles:
clickhouse_operator/format_display_secrets_in_show_and_select: 1
quotas:
settings:
##
## this may be added on per-host basis into host's conf.d folder
##
display_secrets_in_show_and_select: 1
files:
#################################################
##
## Metrics collection
##
################################################
metrics:
# Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
# Specified in seconds.
timeouts:
# Timeout used to limit metrics collection request. In seconds.
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
keeper:
configuration:
################################################
##
## Configuration files section
##
################################################
file:
# Each 'path' can be either absolute or relative.
# In case path is absolute - it is used as is
# In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
path:
# Path to the folder where Keeper configuration files common for all instances within a CHK are located.
common: chk/keeper_config.d
# Path to the folder where Keeper configuration files unique for each instance (host) within a CHK are located.
host: chk/conf.d
# Path to the folder where Keeper configuration files with users' settings are located.
# Files are common for all instances within a CHK.
user: chk/users.d
################################################
##
## Template(s) management section
##
################################################
template:
chi:
# CHI template updates handling policy
# Possible policy values:
# - ReadOnStart. Accept CHIT updates on the operator's start only.
# - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on next regular reconcile of the CHI
policy: ApplyOnNextReconcile
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
# Templates are added to the list of all templates and used when CHI is reconciled.
# Templates are applied in sorted alpha-numeric order.
path: chi/templates.d
chk:
# CHK template updates handling policy
# Possible policy values:
# - ReadOnStart. Accept template updates on the operator's start only.
# - ApplyOnNextReconcile. Accept template updates at all times. Apply new templates on next regular reconcile of the CHK
policy: ApplyOnNextReconcile
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
# Templates are added to the list of all templates and used when CHI is reconciled.
# Templates are applied in sorted alpha-numeric order.
path: chk/templates.d
################################################
##
## Reconcile section
##
################################################
reconcile:
# Reconcile runtime settings
runtime:
# Max number of concurrent CHI reconciles in progress
reconcileCHIsThreadsNumber: 10
# The operator reconciles shards concurrently in each CHI with the following limitations:
# 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
# can not be greater than 'reconcileShardsThreadsNumber'.
# 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
# can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
# 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
# Thus the number of shards being reconciled (and thus having hosts down) in each CHI is limited by both count and percentage.
# Max number of concurrent shard reconciles within one cluster in progress
reconcileShardsThreadsNumber: 5
# Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
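# worked example: with 8 shards in a cluster, at most min(reconcileShardsThreadsNumber, 50% of 8) = min(5, 4) = 4
# shards are reconciled concurrently (and the first shard is still reconciled alone)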
# Reconcile StatefulSet scenario
statefulSet:
# Create StatefulSet scenario
create:
# What to do in case the created StatefulSet is not 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
# do not try to fix or delete or update it, just abort reconcile cycle.
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
# 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: ignore
# Update StatefulSet scenario
update:
# How many seconds to wait for created/updated StatefulSet to be 'Ready'
timeout: 300
# How many seconds to wait between checks/polls for created/updated StatefulSet status
pollInterval: 5
# What to do in case the updated StatefulSet is not 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
# do not try to fix or delete or update it, just abort reconcile cycle.
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
# Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
# Follow 'abort' path afterwards.
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
wait:
# Whether the operator during reconcile procedure should wait for a ClickHouse host:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
# respectively, before moving forward with the host reconcile
exclude: true
queries: true
include: false
# The operator during reconcile procedure should wait for replicas to catch up on
# replication delay, a.k.a. replication lag, for the following replicas:
replicas:
# All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
# New replicas only are requested to wait for replication to catch-up
new: yes
# Replication catch-up is considered to be completed as soon as replication delay
# a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
# is within this specified delay (in seconds)
delay: 10
probes:
# Whether the operator during host launch procedure should wait for the startup probe to succeed.
# If the probe is unspecified, the wait is assumed to have completed successfully.
# Default option value is to not wait.
startup: no
# Whether the operator during host launch procedure should wait for the readiness probe to succeed.
# If the probe is unspecified, the wait is assumed to have completed successfully.
# Default option value is to wait.
readiness: yes
# The operator during reconcile procedure should drop the following entities:
drop:
replicas:
# Whether the operator during reconcile procedure should drop replicas when replica is deleted
onDelete: yes
# Whether the operator during reconcile procedure should drop replicas when replica volume is lost
onLostVolume: yes
# Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
active: no
################################################
##
## Annotations management section
##
################################################
annotation:
# Applied when:
# 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
# 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
# Include annotations from the following list:
# Applied only when not empty. Empty list means "include all, no selection"
include: []
# Exclude annotations from the following list:
exclude: []
################################################
##
## Labels management section
##
################################################
label:
# Applied when:
# 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
# 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
# Include labels from the following list:
# Applied only when not empty. Empty list means "include all, no selection"
include: []
# Exclude labels from the following list:
# Applied only when not empty. Empty list means "nothing to exclude, no selection"
exclude: []
# Whether to append *Scope* labels to StatefulSet and Pod.
# Full list of available *scope* labels check in 'labeler.go'
# LabelShardScopeIndex
# LabelReplicaScopeIndex
# LabelCHIScopeIndex
# LabelCHIScopeCycleSize
# LabelCHIScopeCycleIndex
# LabelCHIScopeCycleOffset
# LabelClusterScopeIndex
# LabelClusterScopeCycleSize
# LabelClusterScopeCycleIndex
# LabelClusterScopeCycleOffset
appendScope: "no"
################################################
##
## Metrics management section
##
################################################
metrics:
labels:
exclude: []
################################################
##
## Status management section
##
################################################
status:
fields:
action: false
actions: false
error: true
errors: true
################################################
##
## StatefulSet management section
##
################################################
statefulSet:
revisionHistoryLimit: 0
################################################
##
## Pod management section
##
################################################
pod:
# Grace period for Pod termination.
# How many seconds to wait between sending
# SIGTERM and SIGKILL during Pod termination process.
# Increase this number in case of slow shutdown.
terminationGracePeriod: 30
################################################
##
## Log parameters section
##
################################################
logger:
logtostderr: "true"
alsologtostderr: "false"
v: "1"
stderrthreshold: ""
vmodule: ""
log_backtrace_at: ""
templatesdFiles:
001-templates.json.example: |
{
"apiVersion": "clickhouse.altinity.com/v1",
"kind": "ClickHouseInstallationTemplate",
"metadata": {
"name": "01-default-volumeclaimtemplate"
},
"spec": {
"templates": {
"volumeClaimTemplates": [
{
"name": "chi-default-volume-claim-template",
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "2Gi"
}
}
}
}
],
"podTemplates": [
{
"name": "chi-default-oneperhost-pod-template",
"distribution": "OnePerHost",
"spec": {
"containers" : [
{
"name": "clickhouse",
"image": "clickhouse/clickhouse-server:23.8",
"ports": [
{
"name": "http",
"containerPort": 8123
},
{
"name": "client",
"containerPort": 9000
},
{
"name": "interserver",
"containerPort": 9009
}
]
}
]
}
}
]
}
}
}
default-pod-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-oneperhost-pod-template"
spec:
templates:
podTemplates:
- name: default-oneperhost-pod-template
distribution: "OnePerHost"
default-storage-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-storage-template-2Gi"
spec:
templates:
volumeClaimTemplates:
- name: default-storage-template-2Gi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
readme: |-
Templates in this folder are packaged with an operator and available via 'useTemplate'
usersdFiles:
01-clickhouse-operator-profile.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<!--
#
# Template parameters available:
#
-->
<yandex>
<!-- clickhouse-operator user is generated by the operator based on config.yaml in runtime -->
<profiles>
<clickhouse_operator>
<log_queries>0</log_queries>
<skip_unavailable_shards>1</skip_unavailable_shards>
<http_connection_timeout>10</http_connection_timeout>
<max_concurrent_queries_for_all_users>0</max_concurrent_queries_for_all_users>
<os_thread_priority>0</os_thread_priority>
</clickhouse_operator>
</profiles>
</yandex>
02-clickhouse-default-profile.xml: |-
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<profiles>
<default>
<os_thread_priority>2</os_thread_priority>
<log_queries>1</log_queries>
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
<parallel_view_processing>1</parallel_view_processing>
<do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
<load_balancing>nearest_hostname</load_balancing>
<prefer_localhost_replica>0</prefer_localhost_replica>
<!-- materialize_ttl_recalculate_only>1</materialize_ttl_recalculate_only> 21.10 and above -->
</default>
</profiles>
</yandex>
keeperConfdFiles: null
keeperConfigdFiles:
01-keeper-01-default-config.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<clickhouse>
<keeper_server>
<coordination_settings>
<min_session_timeout_ms>10000</min_session_timeout_ms>
<operation_timeout_ms>10000</operation_timeout_ms>
<raft_logs_level>information</raft_logs_level>
<session_timeout_ms>100000</session_timeout_ms>
</coordination_settings>
<hostname_checks_enabled>true</hostname_checks_enabled>
<log_storage_path>/var/lib/clickhouse-keeper/coordination/logs</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse-keeper/coordination/snapshots</snapshot_storage_path>
<storage_path>/var/lib/clickhouse-keeper</storage_path>
<tcp_port>2181</tcp_port>
</keeper_server>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
<logger>
<console>1</console>
<level>information</level>
</logger>
<max_connections>4096</max_connections>
</clickhouse>
01-keeper-02-readiness.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<clickhouse>
<keeper_server>
<http_control>
<port>9182</port>
<readiness>
<endpoint>/ready</endpoint>
</readiness>
</http_control>
</keeper_server>
</clickhouse>
01-keeper-03-enable-reconfig.xml: |-
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<clickhouse>
<keeper_server>
<enable_reconfiguration>false</enable_reconfiguration>
</keeper_server>
</clickhouse>
keeperTemplatesdFiles:
readme: |-
Templates in this folder are packaged with an operator and available via 'useTemplate'
keeperUsersdFiles: null
# additionalResources -- list of additional resources to create (processed via `tpl` function),
# useful for create ClickHouse clusters together with clickhouse-operator.
# check `kubectl explain chi` for details
additionalResources: []
# - |
# apiVersion: v1
# kind: ConfigMap
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
# - |
# apiVersion: v1
# kind: Secret
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
# stringData:
# mykey: my-value
# - |
# apiVersion: clickhouse.altinity.com/v1
# kind: ClickHouseInstallation
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
# spec:
# configuration:
# clusters:
# - name: default
# layout:
# shardsCount: 1
dashboards:
# dashboards.enabled -- provision grafana dashboards as configMaps (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778 )
enabled: false
# dashboards.additionalLabels -- labels to add to the ConfigMaps with dashboards
additionalLabels:
# dashboards.additionalLabels.grafana_dashboard -- picked up by the grafana sidecar when the official grafana helm chart sets sidecar.dashboards.enabled=true
grafana_dashboard: "1"
# dashboards.annotations -- annotations to add to the ConfigMaps with dashboards
annotations:
# dashboards.annotations.grafana_folder -- folder where dashboards will be placed; requires setting sidecar.dashboards.folderAnnotation: grafana_folder in the official grafana helm chart
grafana_folder: clickhouse-operator
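If the prometheus-operator is installed, metrics scraping can be enabled without editing the file above; a minimal values-override sketch, assuming a Prometheus instance that selects ServiceMonitors by a `release` label (the label value is an assumption):

serviceMonitor:
  enabled: true
  additionalLabels:
    release: prometheus   # assumption: the label your Prometheus ServiceMonitor selector matches on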


@ -0,0 +1,58 @@
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
name: clickhouse
spec:
configuration:
users:
# printf 'test_password' | sha256sum
#test_user/password_sha256_hex: 10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01
admin/password: Transcity123
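# a safer sketch, mirroring the commented example above: keep only a SHA-256 digest instead of the plaintext value
# admin/password_sha256_hex: <output of: printf 'Transcity123' | sha256sum>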
# to allow access from outside Kubernetes
admin/networks/ip:
- 0.0.0.0/0
zookeeper:
nodes:
- host: chk-keeper-chk-0-0
port: 2181
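# the host apparently follows the operator's CHK naming scheme chk-<CHK name>-<cluster>-<shard>-<replica>,
# i.e. name "keeper" + cluster "chk" from keeper.yaml below => chk-keeper-chk-0-0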
clusters:
- name: "report"
layout:
shardsCount: 1
replicasCount: 1
defaults:
templates:
podTemplate: default
templates:
podTemplates:
- name: default
spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:24.8
volumeMounts:
- name: data-storage-vc-template
mountPath: /var/lib/clickhouse
- name: log-storage-vc-template
mountPath: /var/log/clickhouse-server
volumeClaimTemplates:
- name: data-storage-vc-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 30Gi
- name: log-storage-vc-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
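Scaling the "report" cluster later only requires changing the layout; the operator reconciles the affected StatefulSets per the reconcile settings in the operator config above. A sketch adding one replica (replicated tables need the Keeper connection already configured above):

    clusters:
      - name: "report"
        layout:
          shardsCount: 1
          replicasCount: 2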


@ -0,0 +1,30 @@
apiVersion: clickhouse-keeper.altinity.com/v1
kind: ClickHouseKeeperInstallation
metadata:
name: keeper
spec:
defaults:
templates:
podTemplate: default
volumeClaimTemplate: default
templates:
podTemplates:
- name: default
spec:
containers:
- name: clickhouse-keeper
image: "clickhouse/clickhouse-keeper:24.3.5.46"
volumeClaimTemplates:
- name: default
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
configuration:
clusters:
- name: chk
layout:
replicasCount: 1
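A single Keeper replica keeps the footprint small but is a single point of failure. Keeper coordination is Raft-based, so a highly available layout needs an odd replica count; a sketch:

    configuration:
      clusters:
        - name: chk
          layout:
            replicasCount: 3   # odd count for a Raft quorum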


@ -0,0 +1,6 @@
namespace: reports-clickhouse
resources:
- ../base
- namespace.yaml
- keeper.yaml
- clickhouse.yaml


@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: reports-clickhouse
name: reports-clickhouse

File diff suppressed because it is too large


@ -0,0 +1,2 @@
resources:
- clickhouse-operator.yaml


@ -0,0 +1,2 @@
resources:
- ../app